1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
| ====================================================================================== * 规划 通过在要收集日志的服务器上安装filebeat进行日志收集,之后将收集到的数据写入到redis服务器,再通过logstash服务器取出数据并写入到elasticsearch集群中。也可以通过filebeat收集日志后转发给logstash服务器,再由logstash服务器写入到redis中,再由另一台logstash服务器从redis服务器中取出数据并写入到elasticsearch集群中。
* 环境 准备三台主机,做els集群,配置:双核处理器,2G内存。地址:192.168.1.20,192.168.1.21,192.168.1.22 准备一台主机,安装nginx,地址:192.168.1.30 准备一台主机,安装kibana,地址:192.168.1.23 准备一台主机,安装redis,地址:192.168.1.24 准备一台主机,安装haproxy,地址:192.168.1.31
* 软件版本 1. 操作系统:CentOS7.4.1708 2. jdk: jdk-8u201 官方rpm或gz包 3. elasticsearch:6.5.4 官方当前最新rpm 4. logstash:6.5.4 官方当前最新rpm 5. kibana:6.5.4 官方当前最新rpm 6. filebeat:6.5.4 官方当前最新rpm 7. nginx:1.12.2 8. redis:5.0.3 官方最新源码包 ====================================================================================== ------------- redis ------------- [root@redis ~]# yum install -y gcc [root@redis ~]# tar xf redis-5.0.3.tar.gz -C /usr/local/src/ [root@redis redis-5.0.3]# cd /usr/local/src/redis-5.0.3/ [root@redis redis-5.0.3]# make [root@redis redis-5.0.3]# ln -sv /usr/local/src/redis-5.0.3/ /usr/local/redis ‘/usr/local/redis’ -> ‘/usr/local/src/redis-5.0.3/’ [root@redis redis-5.0.3]# cp src/redis-server src/redis-cli /usr/bin [root@redis redis-5.0.3]# cd /usr/local/redis/ [root@redis redis]# vim redis.conf bind 0.0.0.0 daemonize yes # 让redis以守护进程方式运行 save "" #save 900 1 #save 300 10 #save 60 10000 # 因为不需要持久存储,所以将save ""打开,将下面三行关闭。 requirepass centos logfile "/var/log/redis.log" # 输出日志路径,默认会输出到/dev/null中 [root@redis redis]# redis-server /usr/local/redis/redis.conf # 启动redis,启动时指定配置文件 [root@redis redis]# ss -tln [root@redis redis]# redis-cli 127.0.0.1:6379> KEYS * (error) NOAUTH Authentication required. 127.0.0.1:6379> AUTH centos OK 127.0.0.1:6379> KEYS * (empty list or set)
------------- haproxy ------------- [root@haproxy ~]# yum install jdk-8u201-linux-x64.rpm [root@haproxy ~]# yum install logstash-6.5.4.rpm [root@haproxy ~]# yum install -y haproxy [root@haproxy ~]# vim /etc/haproxy/haproxy.cfg global log 127.0.0.1 local6 # 这行很重要,local6是haproxy日志使用的syslog设施(facility),在rsyslog配置文件中也要使用同一个设施 chroot /var/lib/haproxy pidfile /var/run/haproxy.pid maxconn 4000 user haproxy group haproxy daemon stats socket /var/lib/haproxy/stats defaults mode http log global option httplog option dontlognull option http-server-close option forwardfor except 127.0.0.0/8 option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 3000 frontend myweb bind *:80 default_backend websrvs backend websrvs balance roundrobin server srv1 192.168.1.30:80 check # 这里只简单地将haproxy反代到nginx服务器上,实现访问haproxy就会反代到nginx服务器上 [root@haproxy ~]# vim /etc/rsyslog.d/haproxy.conf $ModLoad imudp $UDPServerRun 514 $ModLoad imtcp $InputTCPServerRun 514 local6.* @@192.168.1.31:5140 # 接收日志的logstash服务器IP:PORT,local6对应haproxy使用的syslog设施。这里将日志传给了本机的logstash [root@haproxy ~]# vim /etc/logstash/conf.d/syslog.conf input { syslog { type => "system-rsyslog" port => "5140" # 从logstash的5140端口输入,实际也是定义logstash监听的端口 } }
output { stdout { codec => rubydebug } redis { data_type => "list" # redis的数据类型要使用list key => "system-rsyslog" # 写入redis的键名 host => "192.168.1.24" # redis地址 port => "6379" # redis端口 db => "0" # redis库 password => "centos" # redis密码 } } [root@haproxy ~]# systemctl restart rsyslog # rsyslog监听在514端口 [root@haproxy ~]# systemctl start haproxy [root@haproxy ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/syslog.conf 在本机设置hosts文件,解析haproxy地址到www.test.com,访问www.test.com,这时应该显示一些信息,如: { "timestamp" => "Jan 25 12:53:39", "@timestamp" => 2019-01-25T04:53:39.000Z, "message" => "192.168.1.9:33634 [25/Jan/2019:12:53:39.002] myweb websrvs/srv1 0/0/1/1/2 304 175 - - ---- 2/2/0/0/0 0/0 \"GET / HTTP/1.1\"\n", "logsource" => "localhost", "type" => "system-rsyslog", "host" => "192.168.1.31", "priority" => 182, "facility_label" => "local6", "severity" => 6, "program" => "haproxy", "facility" => 22, "severity_label" => "Informational", "pid" => "3817", "@version" => "1" } [root@haproxy ~]# ss -tln # 可以看到监听了5140端口 [root@haproxy ~]# systemctl start logstash # 在后台启动
------------- redis ------------- [root@redis redis]# redis-cli -a centos 127.0.0.1:6379> SELECT 0 OK 127.0.0.1:6379> KEYS * 1) "system-rsyslog" 127.0.0.1:6379> LLEN system-rsyslog (integer) 3 127.0.0.1:6379> LPOP system-rsyslog "{\"facility\":22,\"host\":\"192.168.1.31\",\"logsource\":\"localhost\",\"program\":\"haproxy\",\"@version\":\"1\",\"facility_label\":\"local6\",\"message\":\"192.168.1.9:33540 [25/Jan/2019:12:37:37.836] myweb websrvs/srv1 0/0/0/1/1 304 175 - - ---- 2/2/0/1/0 0/0 \\\"GET / HTTP/1.1\\\"\\n\",\"priority\":182,\"pid\":\"3817\",\"timestamp\":\"Jan 25 12:37:37\",\"severity\":6,\"severity_label\":\"Informational\",\"type\":\"system-rsyslog\",\"@timestamp\":\"2019-01-25T04:37:37.000Z\"}" # 可以看到相关数据了
------------- haproxy ------------- [root@haproxy ~]# vim /etc/logstash/conf.d/syslog.conf input { syslog { type => "system-rsyslog" port => "5140" } }
output { stdout { codec => rubydebug } redis { data_type => "list" key => "haproxy-log-31" # 改变输出到redis中的键名 host => "192.168.1.24" port => "6379" db => "0" password => "centos" } } [root@haproxy ~]# systemctl restart logstash
------------- redis ------------- 127.0.0.1:6379> KEYS * 1) "haproxy-log-31" 2) "system-rsyslog" # 可以看到从logstash传入新的键了
====================================================================================== 下面设置收集TCP/UDP日志 ====================================================================================== ------------- haproxy ------------- [root@haproxy ~]# vim /etc/logstash/conf.d/tcp.conf input { tcp { port => "5500" type => "tcp-syslog" mode => "server" } }
output { stdout { codec => rubydebug } redis { data_type => "list" key => "tcp-syslog" host => "192.168.1.24" port => "6379" db => "0" password => "centos" } } [root@haproxy ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf -t WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console [WARN ] 2019-01-25 13:16:45.820 [LogStash::Runner] multilocal - Ignoring the 'pipelines.yml' file because modules or command line options are specified Configuration OK [INFO ] 2019-01-25 13:16:47.307 [LogStash::Runner] runner - Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash # 测试配置文件没有问题 [root@haproxy ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf # logstash启动时还会监听本地的9600端口 [root@haproxy ~]# yum install -y nc [root@haproxy ~]# echo "nc test"|nc 192.168.1.31 5500 这时logstash会在终端输出内容: { "host" => "haproxy", "message" => "nc test", "port" => 39566, "type" => "tcp-syslog", "@timestamp" => 2019-01-25T05:29:16.034Z, "@version" => "1" } [root@haproxy ~]# nc 192.168.1.31 5500 < /root/anaconda-ks.cfg [root@haproxy ~]# echo "伪设备" > /dev/tcp/192.168.1.31/5500 # /dev后面的部分是没有的,要手动输入,在logstash中也会显示
------------- redis ------------- 127.0.0.1:6379> KEYS * 1) "tcp-syslog" 2) "haproxy-log-31" 3) "system-rsyslog" 127.0.0.1:6379> LLEN tcp-syslog (integer) 52 # redis中也可以看到数据了
------------- logstash ------------- # 使用nginx主机中的logstash收集redis中的数据 [root@nginx1 ~]# vim /etc/logstash/conf.d/tcp.conf input { redis { data_type => "list" host => "192.168.1.24" port => "6379" key => "haproxy-log-31" db => "0" password => "centos" } redis { data_type => "list" host => "192.168.1.24" port => "6379" key => "tcp-syslog" db => "0" password => "centos" } }
output { if [type] == "tcp-syslog" { elasticsearch { hosts => ["192.168.1.20:9200"] index => "tcp-rsyslog-%{+YYYY.MM.dd}" } } if [type] == "system-rsyslog" { # 这里判断的是haproxy服务器的logstash设置中input定义的type elasticsearch { hosts => ["192.168.1.20:9200"] index => "haproxy-log-31-%{+YYYY.MM.dd}" } } } # 从redis中取出数据,之后输出到elasticsearch集群中。 [root@nginx1 ~]# systemctl start logstash [root@nginx1 ~]# tail -f /var/log/logstash/logstash-plain.log # 可以通过这个日志文件查看logstash运行是否正常,如是否可以连接到redis服务器 访问els服务器:192.168.1.20:9200,这时可以看到haproxy-log-31-*这个索引了
|