# Exported from Gitiles (blob e5d3adf9ed0e05a7c85dd7a8a0de75fe8c05ec48).
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Global service settings for this Fluent Bit instance.
[SERVICE]
# Flush
# =====
# Interval, in seconds, between flushes of buffered records to the
# configured outputs.
flush 1
# Daemon
# ======
# Instruct Fluent Bit to run in foreground (Off) or background (On) mode.
daemon Off
# Log_Level
# =========
# Set the verbosity level of the service, values can be:
#
# - error
# - warning
# - info
# - debug
# - trace
#
# By default 'info' is set; that level also includes 'error' and 'warning'.
log_level info
# Parsers File
# ============
# Specify an optional 'Parsers' configuration file. The 'docker' and
# 'container_filename_to_id' parsers referenced later in this file must be
# defined there.
parsers_file parsers.conf
# Plugins File
# ============
# specify an optional 'Plugins' configuration file to load external plugins.
#
# plugins_file plugins.conf
# HTTP Server
# ===========
# Enable/Disable the built-in HTTP Server for metrics
http_server Off
# http_listen 0.0.0.0
# http_port 2020
# Storage
# =======
# Fluent Bit can use memory and filesystem buffering based mechanisms
#
# - https://docs.fluentbit.io/manual/administration/buffering-and-storage
#
# storage metrics
# ---------------
# Publish storage pipeline metrics in '/api/v1/storage'. The metrics are
# exported only if the 'http_server' option is enabled — it is Off above,
# so this setting has no visible effect until http_server is turned On.
#
storage.metrics on
# storage.path
# ------------
# absolute file system path to store filesystem data buffers (chunks).
#
# storage.path /tmp/storage
# storage.sync
# ------------
# configure the synchronization mode used to store the data into the
# filesystem. It can take the values normal or full.
#
# storage.sync normal
# storage.checksum
# ----------------
# enable the data integrity check when writing and reading data from the
# filesystem. The storage layer uses the CRC32 algorithm.
#
# storage.checksum off
# storage.backlog.mem_limit
# -------------------------
# if storage.path is set, Fluent Bit will look for data chunks that were
# not delivered and are still in the storage layer; these are called
# backlog data. This option configures a hint of the maximum amount of
# memory to use when processing these records.
#
# storage.backlog.mem_limit 5M
# Collects docker.service daemon logs from the systemd journal.
[INPUT]
Name systemd
Tag cos_docker
# Only ingest journal entries emitted by the docker.service unit.
Systemd_Filter _SYSTEMD_UNIT=docker.service
# Cursor database: lets restarts resume from the last-read journal position.
DB /var/log/google-fluentbit/docker.log.db
# False: on first run (no DB yet) read the journal from its beginning
# rather than only new entries.
Read_From_Tail False
# Collects COS system services logs.
# NOTE: multiple Systemd_Filter lines on the same key are OR'ed together,
# so an entry from any of the listed units (or the listed syslog
# identifier) is collected.
[INPUT]
Name systemd
Tag cos_system
Systemd_Filter _SYSTEMD_UNIT=docker-events-collector.service
Systemd_Filter _SYSTEMD_UNIT=konlet-startup.service
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Systemd_Filter _SYSTEMD_UNIT=crash-reporter.service
Systemd_Filter _SYSTEMD_UNIT=crash-sender.service
Systemd_Filter _SYSTEMD_UNIT=crash-boot-collect.service
Systemd_Filter _SYSTEMD_UNIT=kdump-load-kernel.service
Systemd_Filter _SYSTEMD_UNIT=kdump-save-dump.service
Systemd_Filter _SYSTEMD_UNIT=cis-level1.service
Systemd_Filter _SYSTEMD_UNIT=cis-level2.service
Systemd_Filter _SYSTEMD_UNIT=cis-compliance-scanner.service
# crash-sender.sh logs under its own syslog identifier, not a unit name.
Systemd_Filter SYSLOG_IDENTIFIER=crash-sender.sh
# Cursor database: lets restarts resume from the last-read journal position.
DB /var/log/google-fluentbit/system.log.db
# False: on first run (no DB yet) read the journal from its beginning.
Read_From_Tail False
# Collects COS audit logs (journal entries tagged with the 'audit'
# syslog identifier).
[INPUT]
Name systemd
Tag cos_audit
Systemd_Filter SYSLOG_IDENTIFIER=audit
# Cursor database: lets restarts resume from the last-read journal position.
DB /var/log/google-fluentbit/audit.log.db
# False: on first run (no DB yet) read the journal from its beginning.
Read_From_Tail False
# Collects all journal logs with priority >= warning.
# syslog priorities: 0=emerg, 1=alert, 2=crit, 3=err, 4=warning.
# Change priority levels to make it more/less verbose.
[INPUT]
Name systemd
Tag cos_journal_warning
Systemd_Filter PRIORITY=0
Systemd_Filter PRIORITY=1
Systemd_Filter PRIORITY=2
Systemd_Filter PRIORITY=3
Systemd_Filter PRIORITY=4
# Cursor database: lets restarts resume from the last-read journal position.
DB /var/log/google-fluentbit/journal.db
# False: on first run (no DB yet) read the journal from its beginning.
Read_From_Tail False
# Docker container logs (when not running Kubernetes).
# This will collect logs from all containers using json file logging driver.
# To query logs for specific container, use below filter on GCP logging:
# jsonPayload.container_id=CONTAINER_ID
[INPUT]
Name tail
Tag cos_containers
Path /var/lib/docker/containers/*/*.log
# JSON-file log format; 'docker' parser is defined in parsers.conf.
Parser docker
# Offset database so restarts resume where the last run stopped.
DB /var/log/google-fluentbit/containers.log.db
# Record each log's source path under 'file_name'; the parser filter below
# derives the container id from it.
Path_Key file_name
# Read existing file content from the beginning on first run.
Read_from_Head True
# Cap the memory used for buffering this input's records.
Mem_Buf_Limit 5MB
# Skip lines longer than the buffer instead of stalling the file.
Skip_Long_Lines On
# 1) Add container_id field in container logs.
# Runs the 'container_filename_to_id' parser (defined in parsers.conf) over
# the 'file_name' key added by the tail input's Path_Key, extracting the
# container id from the log file path.
[FILTER]
Name parser
Match cos_containers
Key_Name file_name
Parser container_filename_to_id
# Keep all other fields of the record alongside the parser's output.
Reserve_Data On
# 2) The following two filters save attrs[tag] as container_name field in
# the record.
# This one lifts every key nested under 'attrs' to the top level, renamed
# with an 'attrs_' prefix (e.g. attrs.tag -> attrs_tag).
[FILTER]
Name nest
Match cos_containers
Operation lift
Nested_under attrs
Add_prefix attrs_
# If attrs[tag] was present (lifted to attrs_tag above), expose it as the
# namespaced container_name field.
[FILTER]
Name modify
Match cos_containers
Condition Key_exists attrs_tag
Rename attrs_tag cos.googleapis.com/container_name
# If attrs[tag] doesn't exist, set container_name field as 'UNKNOWN' so the
# field is always present on container records.
[FILTER]
Name modify
Match cos_containers
Condition Key_does_not_exist attrs_tag
Add cos.googleapis.com/container_name UNKNOWN
# Final cleanup of container records: namespace COS-specific fields and
# normalize the payload.
[FILTER]
Name modify
Match cos_containers
# Namespace the container_id field produced by filter 1).
Rename container_id cos.googleapis.com/container_id
# 3) Rename field 'stream' to avoid collisions from container logs where
# users may be also using 'stream' as a key
Rename stream cos.googleapis.com/stream
# 4) Rename field 'log' to a more generic field 'message'. This way Logs
# Explorer UI will display the log message as summary of the log entry.
Rename log message
# Remove other fields under attrs.
Remove_wildcard attrs_
# Ship every record from all inputs to Google Cloud Logging (Stackdriver),
# attributed to this VM via the 'gce_instance' monitored-resource type.
[OUTPUT]
Name stackdriver
Match *
Resource gce_instance