Unable to send logs via Filebeat to the log store in Kubernetes



Configuration

nginx.yaml

---
apiVersion: v1
kind: Namespace
metadata:
  name: beats
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: beats
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.7.9
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nginx-data
              mountPath: /var/log/nginx
      volumes:
        - name: nginx-data
          persistentVolumeClaim:
            claimName: nginx-data-pvc
---
apiVersion: v1
kind: Service
metadata:
  namespace: beats
  name: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
    - port: 80
  selector:
    app: nginx
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-data-pv
  namespace: beats
spec:
  storageClassName: manual
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /mnt/data/nginx-data-pv
    type: DirectoryOrCreate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-data-pvc
  namespace: beats
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  volumeName: nginx-data-pv

filebeat.yaml

---
apiVersion: v1
kind: Namespace
metadata:
  name: beats
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: beats
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.modules:
      - module: nginx
    filebeat.autodiscover:
      providers:
        - type: kubernetes
          hints.enabled: true
          templates:
            - condition.contains:
                kubernetes.namespace: beats
              config:
                - module: nginx
                  access:
                    enabled: true
                    var.paths: ["/mnt/data/nginx-data-pv/access.log*"]
                    subPath: access.log
                    tags: ["access"]
                  error:
                    enabled: true
                    var.paths: ["/mnt/data/nginx-data-pv/error.log*"]
                    subPath: error.log
                    tags: ["error"]
    output.logstash:
      hosts: ["logstash:5044"]
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: beats
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:7.10.0
          args: [
            "-c", "/etc/filebeat.yml",
            "-e",
          ]
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              subPath: filebeat.yml
              readOnly: true
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            defaultMode: 0600
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        - name: data
          hostPath:
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
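
Note that the DaemonSet above mounts only the config file, the registry data directory, /var/lib/docker/containers and /var/log, so the paths referenced in var.paths under /mnt/data/nginx-data-pv do not exist inside the Filebeat container. Below is a minimal sketch of the extra hostPath mount that would expose that directory to Filebeat, assuming the nginx pod (and therefore its hostPath PV) is scheduled on the same node; the volume name is illustrative.

# Sketch: extra volume for the filebeat DaemonSet (not a complete manifest).
# Assumes /mnt/data/nginx-data-pv on the node holds the nginx access/error logs.
# Under the filebeat container's volumeMounts:
            - name: nginx-data
              mountPath: /mnt/data/nginx-data-pv   # matches the paths in var.paths
              readOnly: true
# Under the pod spec's volumes:
        - name: nginx-data
          hostPath:
            path: /mnt/data/nginx-data-pv
            type: Directory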

logstash.yaml

---
apiVersion: v1
kind: Namespace
metadata:
  name: beats
---
apiVersion: v1
kind: Service
metadata:
  namespace: beats
  labels:
    app: logstash
  name: logstash
spec:
  ports:
    - name: "5044"
      port: 5044
      targetPort: 5044
  selector:
    app: logstash
status:
  loadBalancer: {}
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: beats
  name: logstash-configmap
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
    xpack.monitoring.enabled: false
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    filter {
      grok {
        match => { "message" => "%{COMBINEDAPACHELOG}" }
      }
      date {
        match => [ "time", "dd/MMM/YYYY:HH:mm:ss Z" ]
      }
      geoip {
        source => "remote_ip"
        target => "geoip"
      }
      useragent {
        source => "agent"
        target => "user_agent"
      }
    }
    output {
      stdout { codec => rubydebug }
    }
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: logstash-nginx-to-X
  namespace: beats
spec:
  serviceName: "logstash"
  selector:
    matchLabels:
      app: logstash
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: logstash
    spec:
      initContainers:
        - name: init-logstash
          image: docker.elastic.co/logstash/logstash:7.10.0
          securityContext:
            privileged: true
          command: ['sh', '-c', 'bin/logstash-plugin install logstash-output-XXX']
      containers:
        - name: logstash
          image: docker.elastic.co/logstash/logstash:7.10.0
          resources:
            limits:
              memory: 2Gi
          ports:
            - containerPort: 5044
          volumeMounts:
            - name: config-volume
              mountPath: /usr/share/logstash/config
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf
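
One thing to note about the StatefulSet above: the init container installs the output plugin into its own ephemeral filesystem, and nothing carries that install over into the main logstash container, which starts from a fresh copy of the image. Below is a minimal sketch of one common workaround, running the install in the main container's startup command instead (logstash-output-XXX is kept as the placeholder used above; baking the plugin into a custom image would be the more robust option).

# Sketch: replace the initContainers approach with an install at container startup.
      containers:
        - name: logstash
          image: docker.elastic.co/logstash/logstash:7.10.0
          command: ["sh", "-c"]
          args:
            # Install the plugin, then hand over to the normal Logstash process.
            - bin/logstash-plugin install logstash-output-XXX && exec bin/logstash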

Logs

filebeat pod

kubectl logs -f filebeat-t2585 -n beats
2020-12-07T10:46:01.635Z    INFO    [monitoring]    log/log.go:145  Non-zero metrics in the last 30s    {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":1700,"time":{"ms":200}},"total":{"ticks":2210,"time":{"ms":244},"value":2210},"user":{"ticks":510,"time":{"ms":44}}},"handles":{"limit":{"hard":1048576,"soft":1048576},"open":10},"info":{"ephemeral_id":"6aa78d6d-8b17-43fc-9e2a-c456eeee4f61","uptime":{"ms":240241}},"memstats":{"gc_next":18802656,"memory_alloc":10736760,"memory_total":48437904},"runtime":{"goroutines":43}},"filebeat":{"harvester":{"open_files":0,"running":0}},"libbeat":{"config":{"module":{"running":0}},"pipeline":{"clients":3,"events":{"active":0}}},"registrar":{"states":{"current":0}},"system":{"load":{"1":1.28,"15":1.26,"5":1.39,"norm":{"1":0.32,"15":0.315,"5":0.3475}}}}}}
E1207 10:46:15.956993       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:beats:default" cannot list resource "pods" in API group "" at the cluster scope
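
The "pods is forbidden" error above indicates that the kubernetes autodiscover provider is running under the namespace's default service account, which has no permission to list pods. Below is a minimal sketch of the RBAC objects usually paired with a Filebeat DaemonSet (names are illustrative; the DaemonSet pod spec would also need serviceAccountName: filebeat):

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: beats
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
rules:
  - apiGroups: [""]
    resources: ["namespaces", "pods", "nodes"]
    verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: beats
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io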

logstash pod

kubectl logs -f logstash-nginx-to-X-0 -n beats
[INFO ] 2020-12-07 12:26:39.559 [main] runner - Starting Logstash {"logstash.version"=>"7.10.0", "jruby.version"=>"jruby 9.2.13.0 (2.5.7) 2020-08-03 9a89c94bcc OpenJDK 64-Bit Server VM 11.0.8+10 on 11.0.8+10 +jit [linux-x86_64]"}
[INFO ] 2020-12-07 12:26:39.641 [main] writabledirectory - Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
[INFO ] 2020-12-07 12:26:39.645 [main] writabledirectory - Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
[WARN ] 2020-12-07 12:26:40.278 [LogStash::Runner] multilocal - Ignoring the 'pipelines.yml' file because modules or command line options are specified
[INFO ] 2020-12-07 12:26:40.324 [LogStash::Runner] agent - No persistent UUID file found. Generating new UUID {:uuid=>"3ed4d3f3-8de9-4747-ab5d-67c9f8175a5c", :path=>"/usr/share/logstash/data/uuid"}
[INFO ] 2020-12-07 12:26:41.764 [Converge PipelineAction::Create<main>] Reflections - Reflections took 80 ms to scan 1 urls, producing 23 keys and 47 values
[INFO ] 2020-12-07 12:26:43.568 [[main]-pipeline-manager] geoip - Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb"}
[INFO ] 2020-12-07 12:26:44.032 [[main]-pipeline-manager] javapipeline - Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>1, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>125, "pipeline.sources"=>["/usr/share/logstash/pipeline/logstash.conf"], :thread=>"#<Thread:0x7d2f64b2@/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:125 run>"}
[INFO ] 2020-12-07 12:26:46.277 [[main]-pipeline-manager] javapipeline - Pipeline Java execution initialization time {"seconds"=>2.24}
[INFO ] 2020-12-07 12:26:46.279 [[main]-pipeline-manager] beats - Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[INFO ] 2020-12-07 12:26:46.339 [[main]-pipeline-manager] javapipeline - Pipeline started {"pipeline.id"=>"main"}
[INFO ] 2020-12-07 12:26:46.385 [Agent thread] agent - Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[INFO ] 2020-12-07 12:26:46.575 [Api Webserver] agent - Successfully started Logstash API endpoint {:port=>9600}
[INFO ] 2020-12-07 12:26:46.760 [[main]<beats] Server - Starting server on port: 5044
[WARN ] 2020-12-07 12:49:13.107 [nioEventLoopGroup-2-2] DefaultChannelPipeline - An exceptionCaught() event was fired, and it reached at the tail of the pipeline. It usually means the last handler in the pipeline did not handle the exception.
io.netty.handler.codec.DecoderException: org.logstash.beats.InvalidFrameProtocolException: Invalid version of beats protocol: 69
at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:471) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.handler.codec.ByteToMessageDecoder.channelInputClosed(ByteToMessageDecoder.java:404) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.handler.codec.ByteToMessageDecoder.channelInputClosed(ByteToMessageDecoder.java:371) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.handler.codec.ByteToMessageDecoder.channelInactive(ByteToMessageDecoder.java:354) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelInactive(AbstractChannelHandlerContext.java:262) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.channel.AbstractChannelHandlerContext.access$300(AbstractChannelHandlerContext.java:61) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.channel.AbstractChannelHandlerContext$4.run(AbstractChannelHandlerContext.java:253) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.util.concurrent.DefaultEventExecutor.run(DefaultEventExecutor.java:66) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) [netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) [netty-all-4.1.49.Final.jar:4.1.49.Final]
at java.lang.Thread.run(Thread.java:834) [?:?]
Caused by: org.logstash.beats.InvalidFrameProtocolException: Invalid version of beats protocol: 69
at org.logstash.beats.Protocol.version(Protocol.java:22) ~[logstash-input-beats-6.0.11.jar:?]
at org.logstash.beats.BeatsParser.decode(BeatsParser.java:62) ~[logstash-input-beats-6.0.11.jar:?]
at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:501) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:440) ~[netty-all-4.1.49.Final.jar:4.1.49.Final]
... 11 more

Questions

  • Why, after the nginx web UI is accessed, do the nginx access logs not appear in Logstash's stdout output?
  • From inside the Filebeat pod, attempts to reach logstash:5044 fail. What is the reason it cannot communicate?
  • Likewise, the Filebeat pod cannot access the /mnt/data/ path, so how can it read the shared nginx log data?
  • In Logstash, I want to install an output plugin via initContainers, but after logging into the container the plugin is not there. What is the recommended way to install it for the running container?
  • Change hosts: ["logstash:5044"] to hosts: ["logstash.beats.svc.cluster.local:5044"]
  • Create a service account
  • Remove the following (a consolidated sketch of these changes follows this list):

        hostNetwork: true
        dnsPolicy: ClusterFirstWithHostNet
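
For reference, a consolidated sketch of how the changes listed above would look in the Filebeat manifests; the FQDN and the service-account name are assumptions based on the objects defined earlier:

# In the filebeat-config ConfigMap, point the output at the Service's cluster DNS name:
    output.logstash:
      hosts: ["logstash.beats.svc.cluster.local:5044"]

# In the filebeat DaemonSet pod spec, use a dedicated service account and drop host
# networking so the pod resolves cluster DNS names normally:
    spec:
      serviceAccountName: filebeat
      # hostNetwork: true                    (removed)
      # dnsPolicy: ClusterFirstWithHostNet   (removed)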

Latest update