Unable to monitor Elasticsearch server logs in the Kibana dashboard



I am unable to monitor the Elasticsearch server logs in the Kibana dashboard. I have 2 RHEL VMs for testing. I am using this approach because production has a different architecture: VM1 - Elasticsearch, Kibana, Rsyslog; VM2 - FluentD.

I want to push the Elasticsearch logs from VM1 using Rsyslog, send them to VM2 where Fluentd is installed, and have Fluentd send them back to Elasticsearch on VM1. The configuration is below. I tried installing Fluentd directly on the Elasticsearch VM and was able to see the Elasticsearch logs in Kibana, but my requirement is to use rsyslog and forward to Fluentd, because Fluentd is not installed on the Elasticsearch VM.
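For reference, the multiline regexes in the configs below have to match the default Elasticsearch server log layout, which looks roughly like this (a made-up sample line):

```
[2021-06-01T12:00:00,000][INFO ][o.e.node] [node-1] started
```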

td-agent.conf

```
<system>
log_level info
worker 2
</system>
<source>
@type tcp
port 5142
bind 0.0.0.0
<parse>
@type multiline
format_firstline /^\[(?<date>.*?)\]/
format1 /\[(?<date>.*?)\]\[(?<logLevel>.*?)\]\[(?<service>.*?)\] \[(?<node_name>.*?)\] (?<LogMessage>.*)/
</parse>
tag es_logs
</source>
<source>
@type syslog
port 5145
<transport tcp>
</transport>
bind 0.0.0.0
tag syslog
</source>
<filter es_logs**>
@type parser
format json
time_key time_msec
key_name message
reserve_data true # tells Fluentd to keep the encompassing JSON - off by default
remove_key_name_field true # removes the key of the parsed JSON: message - off by default
</filter>
<match es**>
@type elasticsearch
host vm1ip
port 9200
index_name es_logs_write
include_timestamp true
type_name fluentd
# connection configs
reconnect_on_error true
reload_on_failure true
slow_flush_log_threshold 90
# buffer configs
<buffer>
@type file
path /data/opt/fluentd/buffer/elaticsearch_logs
chunk_limit_size 2MB
total_limit_size 1GB
flush_thread_count 8
flush_mode interval
retry_type exponential_backoff
retry_timeout 10s
retry_max_interval 30
overflow_action drop_oldest_chunk
flush_interval 5s
</buffer>
</match>
```
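Before wiring in rsyslog, it is worth checking this file in isolation. A minimal sketch, assuming a default td-agent install (`<vm2-ip>` is a placeholder for the Fluentd VM):

```
# validate the config syntax without starting the service
td-agent --dry-run -c /etc/td-agent/td-agent.conf

# push one fake Elasticsearch log line at the tcp source on 5142
echo '[2021-06-01T12:00:00,000][INFO ][o.e.node] [node-1] started' | nc <vm2-ip> 5142

# watch td-agent's own log for parse or connection errors
tail -f /var/log/td-agent/td-agent.log
```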

rsyslog.conf

```
# Sample rsyslog configuration file
#
$ModLoad imfile
$ModLoad immark
$ModLoad imtcp
$ModLoad imudp
#$ModLoad imsolaris
$ModLoad imuxsock

module(load="omelasticsearch")
template(name="es_logs" type="list" option.json="on") {
constant(value="{")
constant(value="\"@timestamp\":\"")     property(name="timereported" dateFormat="rfc3339")
constant(value="\",\"host\":\"")        property(name="hostname")
constant(value="\",\"severity-num\":")  property(name="syslogseverity")
constant(value=",\"facility-num\":")    property(name="syslogfacility")
constant(value=",\"severity\":\"")      property(name="syslogseverity-text")
constant(value="\",\"facility\":\"")    property(name="syslogfacility-text")
constant(value="\",\"syslogtag\":\"")   property(name="syslogtag")
constant(value="\",\"message\":\"")     property(name="msg")
constant(value="\"}")
}

$UDPServerRun 514
#### GLOBAL DIRECTIVES ####
# Use default timestamp format
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
# Where to place auxiliary files
$WorkDirectory /var/lib/rsyslog
#### RULES ####
# Log all kernel messages to the console.
# Logging much else clutters up the screen.
#kern.*                                                 /dev/console
# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.none;mail.none;authpriv.none;cron.none;local6.none    /var/log/messages
# Log auth.info separate
auth.info                                               /var/log/authlog
# The authpriv file has restricted access.
authpriv.*                                              /var/log/secure
# Log all the mail messages in one place.
mail.*                                                  -/var/log/maillog
# Log cron stuff
cron.*                                                  /var/log/cron
# Everybody gets emergency messages
*.emerg                                                 :omusrmsg:*
# Save news errors of level crit and higher in a special file.
uucp,news.crit                                          /var/log/spooler
# Save boot messages also to boot.log
local7.*                                                /var/log/boot.log
# ### begin forwarding rule ###
# The statement between the begin ... end define a SINGLE forwarding
# rule. They belong together, do NOT split them. If you create multiple
# forwarding rules, duplicate the whole block!
# Remote Logging (we use TCP for reliable delivery)
#
# An on-disk queue is created for this action. If the remote host is
# down, messages are spooled to disk and sent when it is up again.
$ActionQueueFileName fwdRule1 # unique name prefix for spool files
$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
$ActionQueueType LinkedList # run asynchronously
$ActionResumeRetryCount -1 # infinite retries if host is down
$MaxMessageSize 64k
# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
# Forward output to Fluentd
#local8.*                                              /data/elastic_logs/elasticdemo.log
*.* @@fluentdVMip:5142;es_logs
```
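With the `es_logs` template above, every forwarded message is rendered as a single JSON line, roughly like this (illustrative values; `local0.info` maps to facility 16 / severity 6):

```
{"@timestamp":"2021-06-01T12:00:00+00:00","host":"vm1","severity-num":6,"facility-num":16,"severity":"info","facility":"local0","syslogtag":"eslogs:","message":"[2021-06-01T12:00:00,000][INFO ][o.e.node] [node-1] started"}
```

The whole rsyslog configuration can also be checked for syntax errors before restarting the service:

```
rsyslogd -N1
```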

Using the configuration below, I created a new file /etc/rsyslog.d/11-elastic.conf:

```
$ModLoad imfile
$InputFilePollInterval 1
$InputFileName /var/log/elasticsearch/elasticdemo.log
$InputFileTag eslogs:
$InputFileStateFile eslogs
$InputFileFacility local0
$InputRunFileMonitor
:syslogtag, isequal, "eslogs:" {
:msg, contains, "ERROR" {
local0.* /var/log/eslog_error.log
local0.* @fluentdVMip:5141
}
stop
}
```
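To confirm this path end to end, one option (a sketch, assuming the log file path above) is to append a fake ERROR line on VM1 and watch for the datagrams on the Fluentd VM:

```
# on VM1: imfile should pick this up and rsyslog should forward it
echo '[2021-06-01T12:00:00,000][ERROR][o.e.node] [node-1] test error' >> /var/log/elasticsearch/elasticdemo.log

# on the Fluentd VM: verify UDP traffic is actually arriving on 5141
tcpdump -i any -A udp port 5141
```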
FluentD (VM2)

td-agent.conf

```
<system>
worker 2
</system>

<source>
@type udp
port 5141
tag eslogs
<parse>
@type multiline
format_firstline /^\[(?<date>.*?)\]/
format1 /\[(?<date>.*?)\]\[(?<logLevel>.*?)\]\[(?<service>.*?)\] \[(?<node_name>.*?)\](?<LogMessage>.*)/
</parse>
</source>
<match system.**>
@type stdout
</match>
<match eslogs.**>
@type elasticsearch
host ipoftheelasticserver
port 9200
index_name es_logs_write
include_timestamp true
type_name fluentd
# connection configs
reconnect_on_error true
reload_on_failure true
slow_flush_log_threshold 90
# buffer configs
<buffer>
@type file
path /data/opt/fluentd/buffer/elaticsearch_logs
chunk_limit_size 2MB
total_limit_size 1GB
flush_thread_count 8
flush_mode interval
retry_type exponential_backoff
retry_timeout 10s
retry_max_interval 30
overflow_action drop_oldest_chunk
flush_interval 5s
</buffer>
</match>
```
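Once Fluentd is delivering, documents should appear in the `es_logs_write` index on VM1. Standard Elasticsearch APIs can confirm that before looking at Kibana (adjust the host to your setup):

```
# does the index exist, and is its doc count growing?
curl 'http://vm1ip:9200/_cat/indices/es_logs_write?v'

# inspect one stored document
curl 'http://vm1ip:9200/es_logs_write/_search?size=1&pretty'
```

If the index is there but the dashboard stays empty, the Kibana index pattern and its time filter are the next things to check.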

Latest update