Installing Elasticsearch, Kibana and Logstash for NetFlow analysis and Syslog message collection.


First, install Java: sudo yum install java-1.8.0-openjdk
Then install Elasticsearch, Kibana and Logstash following the current guides:
https://www.elastic.co/guide/en/elasticsearch/reference/current/rpm.html
https://www.elastic.co/guide/en/kibana/6.2/rpm.html
https://www.elastic.co/guide/en/logstash/current/installing-logstash.html
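
As a sketch (assuming the 6.x package line, which matches the Kibana 6.2 guide above), the Elastic yum repository can be registered and the packages installed like this:

sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# /etc/yum.repos.d/elastic.repo
[elastic-6.x]
name=Elastic repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

sudo yum install elasticsearch kibana logstash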

Start ELK:

systemctl start elasticsearch
systemctl start kibana
systemctl start logstash
Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
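
To have the services come back after a reboot, they can also be enabled; and instead of stopping firewalld entirely, you could open only the ports used later in this article (a sketch):

systemctl enable elasticsearch kibana logstash

# alternative to disabling firewalld
firewall-cmd --permanent --add-port=5601/tcp      # Kibana
firewall-cmd --permanent --add-port=5044-5045/tcp # Beats -> Logstash
firewall-cmd --permanent --add-port=9966/udp      # NetFlow
firewall-cmd --reload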

If Kibana opens fine at this point (the web interface listens on port 5601), you can put nginx in front of Kibana to proxy traffic from port 80 to 5601.
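
A quick way to confirm that Kibana is actually listening (a sketch; the address is the one used in the nginx config below, assuming Kibana is bound to it):

ss -tlnp | grep 5601
curl -I http://172.17.214.33:5601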

nginx.conf (the lines added on top of the default configuration are the set $kibana directive and the location / block):

server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    root /usr/share/nginx/html;

    # Load configuration files for the default server block.
    include /etc/nginx/default.d/*.conf;

    set $kibana 172.17.214.33;

    location / {
        proxy_pass http://$kibana:5601;
        proxy_set_header Host $host;
        proxy_set_header Referer "";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        proxy_connect_timeout 150;
        proxy_send_timeout 100;
        proxy_read_timeout 100;
        proxy_buffers 16 64k;
        proxy_busy_buffers_size 64k;
        client_max_body_size 256k;
        client_body_buffer_size 128k;
    }

    error_page 404 /404.html;
    location = /40x.html {
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }
}
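
If nginx is not installed yet, the sequence might look roughly like this (a sketch; on CentOS the nginx package comes from EPEL, and the IP is the one from the config above):

sudo yum install epel-release
sudo yum install nginx
sudo nginx -t                                   # validate the config
# if SELinux is enforcing, allow nginx to talk to the Kibana port:
sudo setsebool -P httpd_can_network_connect on
sudo systemctl enable nginx
sudo systemctl start nginx
curl -I http://172.17.214.33/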
  

 

To start Logstash, run:

/usr/share/logstash/bin/logstash --modules netflow -M netflow.var.input.udp.port=9966 - start the NetFlow collector
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/01-input.conf --path.data /tmp - start syslog collection
ps aux | grep logstash, then kill the process - stop a running pipeline
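
For example, stopping the pipelines by hand could look like this (a sketch using pkill -f to match the command lines above):

ps aux | grep logstash | grep -v grep          # find the pipeline PIDs
pkill -f '01-input.conf'                       # stop the syslog pipeline
pkill -f 'netflow.var.input.udp.port=9966'     # stop the netflow pipeline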

Add entries to cron so that logs older than 7 days are not kept:
00 18 * * * curl -X DELETE "localhost:9200/netflow-$(date -d '-7 days' +\%Y.\%m.\%d)"
00 18 * * * curl -X DELETE "localhost:9200/switches-$(date -d '-7 days' +\%Y.\%m.\%d)"
00 18 * * * curl -X DELETE "localhost:9200/asa-$(date -d '-7 days' +\%Y.\%m.\%d)"
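
To check which indices exist (and that old ones actually get removed), the _cat API can be used:

curl 'localhost:9200/_cat/indices?v'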

On the Syslog server from which the data is forwarded to ELK, Filebeat must be running (in my case, inside a container); it ships entries from the log files, e.g. /var/log/cisco and /var/log/cisco-asa, to our ELK server on the port specified in its settings.
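
A minimal Filebeat configuration for the ASA logs might look like the sketch below (Filebeat 6.x syntax; the path glob and the <ELK-server-IP> placeholder are assumptions, and the switch logs would be shipped the same way to port 5045, e.g. by a second Filebeat instance or container, since one Filebeat has only one output):

filebeat.prospectors:              # filebeat.inputs on newer Filebeat versions
- type: log
  paths:
    - /var/log/cisco-asa*          # adjust to the actual file name or glob

output.logstash:
  hosts: ["<ELK-server-IP>:5044"]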

The syslog collection settings are the Logstash configuration files, which by default should live in /etc/logstash/conf.d:

input {

  beats {
    port => 5044
    ssl => false
    type => "asa"
  }

  beats { 
    port => 5045 
    ssl => false
    type => "switches" 
  }

}

filter {
 
 if [type] == "switches" { 
   grok {
      match => { "message" => "%{SYSLOGTIMESTAMP} %{USERNAME:hostname} %{NUMBER}: %{NUMBER}: %{CISCOTIMESTAMP} %{WORD}: %{GREEDYDATA:message}" }
     overwrite => [ "message" ]
   }
 }

 if [type] == "asa" {
   grok {
     match => { "message" => "%{SYSLOGTIMESTAMP} %{USERNAME:hostname} : %{GREEDYDATA:message}" }
     overwrite => [ "message" ]
   }


   grok {
   patterns_dir => ["/etc/logstash/asa_patterns"]
   match => [
 "message", "%{CISCOFW106001}",
 "message", "%{CISCOFW106006_106007_106010}",
 "message", "%{CISCOFW106014}",
 "message", "%{CISCOFW106015}",
 "message", "%{CISCOFW106021}",
 "message", "%{CISCOFW106023}",
 "message", "%{CISCOFW106100}",
 "message", "%{CISCOFW110002}",
 "message", "%{CISCOFW302010}",
 "message", "%{CISCOFW302013_302014_302015_302016}",
 "message", "%{CISCOFW302020_302021}",
 "message", "%{CISCOFW305011}",
 "message", "%{CISCOFW313001_313004_313008}",
 "message", "%{CISCOFW313005}",
 "message", "%{CISCOFW402117}",
 "message", "%{CISCOFW402119}",
 "message", "%{CISCOFW419001}",
 "message", "%{CISCOFW419002}",
 "message", "%{CISCOFW500004}",
 "message", "%{CISCOFW602303_602304}",
 "message", "%{CISCOFW710001_710002_710003_710005_710006}",
 "message", "%{CISCOFW713172}",
 "message", "%{CISCOFW733100}",
 "message", "%{WORD:action} %{WORD:protocol} %{CISCO_REASON:reason} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}; %{GREEDYDATA:dnssec_validation}",
 "message", "%{CISCO_ACTION:action} %{WORD:protocol} %{CISCO_REASON:reason}.*(%{IP:src_ip}).*%{IP:dst_ip} on interface %{GREEDYDATA:interface}",
 "message", "Connection limit exceeded %{INT:inuse_connections}/%{INT:connection_limit} for input packet from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} on interface %{GREEDYDATA:interface}",
 "message", "TCP Intercept %{DATA:threat_detection} to %{IP:ext_nat_ip}/%{INT:ext_nat_port}.*(%{IP:int_nat_ip}/%{INT:int_nat_port}).*Average rate of %{INT:syn_avg_rate} SYNs/sec exceeded the threshold of %{INT:syn_threshold}.#%{INT}",
 "message", "Embryonic connection limit exceeded %{INT:econns}/%{INT:limit} for %{WORD:direction} packet from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} on interface %{GREEDYDATA:interface}"
 ]
   overwrite => [ "message" ]
 }

 if [src_ip] {
   cidr {
     add_tag => [ "private" ]
     address => [ "%{src_ip}" ]
     network => [ "192.168.0.0/16", "172.16.0.0/12", "10.0.0.0/8" ]
   }
 }
 
 if "private" not in [tags] {
   geoip {
     source => "src_ip"
     target => "geoip"
     add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
     add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
 } #geoip
 } #if private...
 }#if 

} #filter


output {

 if [type] == "asa" {
   elasticsearch { 
     hosts => ["localhost:9200"]
     index => "asa-%{+YYYY.MM.dd}"
   }
 }

 if [type] == "switches" {
   elasticsearch { 
     hosts => ["localhost:9200"] 
     index => "switches-%{+YYYY.MM.dd}"
   }
 }
}

Logstash receives syslog messages on ports 5044 and 5045 (in our case shipped from /var/log/cisco-asa and /var/log/cisco respectively), parses them with grok and sends them to Elasticsearch.
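
Before starting a pipeline, the configuration can be syntax-checked first, for example:

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/01-input.conf --config.test_and_exit --path.data /tmp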

Below is an example of two unit files that start log collection automatically after a server reboot. Put them into /etc/systemd/system as elk-netflow.service and elk-syslog.service and run systemctl daemon-reload; after that the collectors can be started and stopped like regular services. First, elk-netflow.service:

[Unit]
Description=ELK-NetFlow-Collector-Starter
After=logstash.service
Requires=elasticsearch.service
Requires=kibana.service
[Service]
Type=simple
User=root
Group=wheel
OOMScoreAdjust=-100
Environment=RACK_ENV=production
ExecStart=/usr/share/logstash/bin/logstash --modules netflow -M netflow.var.input.udp.port=9966
# "systemctl restart" already does stop+start; ExecRestart= is not a valid systemd directive,
# and shell constructs such as $(...), pipes or & are not interpreted in Exec* lines
ExecStop=/bin/kill -TERM $MAINPID
Restart=always
[Install]
WantedBy=multi-user.target

And elk-syslog.service:

[Unit]
Description=ELK-Syslog-Collector-Starter
After=logstash.service
Requires=elasticsearch.service
Requires=kibana.service
[Service]
Type=simple
User=root
Group=wheel
OOMScoreAdjust=-100
Environment=RACK_ENV=production
ExecStart=/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/01-input.conf --path.data /tmp
# as above: systemd handles stop/restart of the main Logstash process itself
ExecStop=/bin/kill -TERM $MAINPID
Restart=always
[Install]
WantedBy=multi-user.target
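
After that, the collectors can be enabled and managed like any other service, for example:

systemctl daemon-reload
systemctl enable elk-netflow elk-syslog
systemctl start elk-netflow elk-syslog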

The patterns should go into /etc/logstash/asa_patterns:

 #== Cisco ASA ==
 CISCO_TAGGED_SYSLOG ^<%{POSINT:syslog_pri}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:sysloghost})? ?: %%{CISCOTAG:ciscotag}:
 CISCOTIMESTAMP %{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME}
 CISCOTAG [A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+)
 # Common Particles
 CISCO_ACTION Built|Teardown|Deny|Denied|denied|requested|permitted|denied by ACL|discarded|est-allowed|Dropping|created|deleted
 CISCO_REASON Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)*
 CISCO_DIRECTION Inbound|inbound|Outbound|outbound
 CISCO_INTERVAL first hit|%{INT}-second interval
 CISCO_XLATE_TYPE static|dynamic
 # ASA-1-104001
 CISCOFW104001 \((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:switch_reason}
 # ASA-1-104002
 CISCOFW104002 \((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:switch_reason}
 # ASA-1-104003
 CISCOFW104003 \((?:Primary|Secondary)\) Switching to FAILED\.
 # ASA-1-104004
 CISCOFW104004 \((?:Primary|Secondary)\) Switching to OK\.
 # ASA-1-105003
 CISCOFW105003 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} waiting
 # ASA-1-105004
 CISCOFW105004 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{GREEDYDATA:interface_name} normal
 # ASA-1-105005
 CISCOFW105005 \((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{GREEDYDATA:interface_name}
 # ASA-1-105008
 CISCOFW105008 \((?:Primary|Secondary)\) Testing [Ii]nterface %{GREEDYDATA:interface_name}
 # ASA-1-105009
 CISCOFW105009 \((?:Primary|Secondary)\) Testing on [Ii]nterface %{GREEDYDATA:interface_name} (?:Passed|Failed)
 # ASA-2-106001
 CISCOFW106001 %{CISCO_DIRECTION:direction} %{WORD:protocol} connection %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{GREEDYDATA:tcp_flags} on interface %{GREEDYDATA:interface}
 # ASA-2-106006, ASA-2-106007, ASA-2-106010
 CISCOFW106006_106007_106010 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} (?:from|src) %{IP:src_ip}/%{INT:src_port}(\(%{DATA:src_fwuser}\))? (?:to|dst) %{IP:dst_ip}/%{INT:dst_port}(\(%{DATA:dst_fwuser}\))? (?:on interface %{DATA:interface}|due to %{CISCO_REASON:reason})
 # ASA-3-106014
 CISCOFW106014 %{CISCO_ACTION:action} %{CISCO_DIRECTION:direction} %{WORD:protocol} src %{DATA:src_interface}:%{IP:src_ip}(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{IP:dst_ip}(\(%{DATA:dst_fwuser}\))? \(type %{INT:icmp_type}, code %{INT:icmp_code}\)
 # ASA-6-106015
 CISCOFW106015 %{CISCO_ACTION:action} %{WORD:protocol} \(%{DATA:policy_id}\) from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port} flags %{DATA:tcp_flags} on interface %{GREEDYDATA:interface}
 # ASA-1-106021
 CISCOFW106021 %{CISCO_ACTION:action} %{WORD:protocol} reverse path check from %{IP:src_ip} to %{IP:dst_ip} on interface %{GREEDYDATA:interface}
 # ASA-4-106023
 CISCOFW106023 %{CISCO_ACTION:action}( protocol)? %{WORD:protocol} src %{DATA:src_interface}:%{DATA:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? dst %{DATA:dst_interface}:%{DATA:dst_ip}(/%{INT:dst_port})?(\(%{DATA:dst_fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? \[%{DATA:hashcode1}, %{DATA:hashcode2}\]
 # ASA-4-106100, ASA-4-106102, ASA-4-106103
 CISCOFW106100_2_3 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} for user '%{DATA:src_fwuser}' %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\) -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\) hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\]
 # ASA-5-106100
 CISCOFW106100 access-list %{NOTSPACE:policy_id} %{CISCO_ACTION:action} %{WORD:protocol} %{DATA:src_interface}/%{IP:src_ip}\(%{INT:src_port}\)(\(%{DATA:src_fwuser}\))? -> %{DATA:dst_interface}/%{IP:dst_ip}\(%{INT:dst_port}\)(\(%{DATA:src_fwuser}\))? hit-cnt %{INT:hit_count} %{CISCO_INTERVAL:interval} \[%{DATA:hashcode1}, %{DATA:hashcode2}\]
 # ASA-5-304001
 CISCOFW304001 %{IP:src_ip}(\(%{DATA:src_fwuser}\))? Accessed URL %{IP:dst_ip}:%{GREEDYDATA:dst_url}
 # ASA-6-110002
 CISCOFW110002 %{CISCO_REASON:reason} for %{WORD:protocol} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port}
 # ASA-6-302010
 CISCOFW302010 %{INT:connection_count} in use, %{INT:connection_count_max} most used
 # ASA-6-302013, ASA-6-302014, ASA-6-302015, ASA-6-302016
 CISCOFW302013_302014_302015_302016 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection %{INT:connection_id} for %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port}( \(%{IP:src_mapped_ip}/%{INT:src_mapped_port}\))?(\(%{DATA:src_fwuser}\))? to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}( \(%{IP:dst_mapped_ip}/%{INT:dst_mapped_port}\))?(\(%{DATA:dst_fwuser}\))?( duration %{TIME:duration} bytes %{INT:bytes})?(?: %{CISCO_REASON:reason})?( \(%{DATA:user}\))?
 # ASA-6-302020, ASA-6-302021
 CISCOFW302020_302021 %{CISCO_ACTION:action}(?: %{CISCO_DIRECTION:direction})? %{WORD:protocol} connection for faddr %{IP:dst_ip}/%{INT:icmp_seq_num}(?:\(%{DATA:fwuser}\))? gaddr %{IP:src_xlated_ip}/%{INT:icmp_code_xlated} laddr %{IP:src_ip}/%{INT:icmp_code}( \(%{DATA:user}\))?
 # ASA-6-305011
 CISCOFW305011 %{CISCO_ACTION:action} %{CISCO_XLATE_TYPE:xlate_type} %{WORD:protocol} translation from %{DATA:src_interface}:%{IP:src_ip}(/%{INT:src_port})?(\(%{DATA:src_fwuser}\))? to %{DATA:src_xlated_interface}:%{IP:src_xlated_ip}/%{DATA:src_xlated_port}
 # ASA-3-313001, ASA-3-313004, ASA-3-313008
 CISCOFW313001_313004_313008 %{CISCO_ACTION:action} %{WORD:protocol} type=%{INT:icmp_type}, code=%{INT:icmp_code} from %{IP:src_ip} on interface %{DATA:interface}( to %{IP:dst_ip})?
 # ASA-4-313005
 CISCOFW313005 %{CISCO_REASON:reason} for %{WORD:protocol} error message: %{WORD:err_protocol} src %{DATA:err_src_interface}:%{IP:err_src_ip}(\(%{DATA:err_src_fwuser}\))? dst %{DATA:err_dst_interface}:%{IP:err_dst_ip}(\(%{DATA:err_dst_fwuser}\))? \(type %{INT:err_icmp_type}, code %{INT:err_icmp_code}\) on %{DATA:interface} interface\. Original IP payload: %{WORD:protocol} src %{IP:orig_src_ip}/%{INT:orig_src_port}(\(%{DATA:orig_src_fwuser}\))? dst %{IP:orig_dst_ip}/%{INT:orig_dst_port}(\(%{DATA:orig_dst_fwuser}\))?
 # ASA-5-321001
 CISCOFW321001 Resource '%{WORD:resource_name}' limit of %{POSINT:resource_limit} reached for system
 # ASA-4-402117
 CISCOFW402117 %{WORD:protocol}: Received a non-IPSec packet \(protocol= %{WORD:orig_protocol}\) from %{IP:src_ip} to %{IP:dst_ip}
 # ASA-4-402119
 CISCOFW402119 %{WORD:protocol}: Received an %{WORD:orig_protocol} packet \(SPI= %{DATA:spi}, sequence number= %{DATA:seq_num}\) from %{IP:src_ip} \(user= %{DATA:user}\) to %{IP:dst_ip} that failed anti-replay checking
 # ASA-4-419001
 CISCOFW419001 %{CISCO_ACTION:action} %{WORD:protocol} packet from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}, reason: %{GREEDYDATA:reason}
 # ASA-4-419002
 CISCOFW419002 %{CISCO_REASON:reason} from %{DATA:src_interface}:%{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port} with different initial sequence number
 # ASA-4-500004
 CISCOFW500004 %{CISCO_REASON:reason} for protocol=%{WORD:protocol}, from %{IP:src_ip}/%{INT:src_port} to %{IP:dst_ip}/%{INT:dst_port}
 # ASA-6-602303, ASA-6-602304
 CISCOFW602303_602304 %{WORD:protocol}: An %{CISCO_DIRECTION:direction} %{GREEDYDATA:tunnel_type} SA \(SPI= %{DATA:spi}\) between %{IP:src_ip} and %{IP:dst_ip} \(user= %{DATA:user}\) has been %{CISCO_ACTION:action}
 # ASA-7-710001, ASA-7-710002, ASA-7-710003, ASA-7-710005, ASA-7-710006
 CISCOFW710001_710002_710003_710005_710006 %{WORD:protocol} (?:request|access) %{CISCO_ACTION:action} from %{IP:src_ip}/%{INT:src_port} to %{DATA:dst_interface}:%{IP:dst_ip}/%{INT:dst_port}
 # ASA-6-713172
 CISCOFW713172 Group = %{GREEDYDATA:group}, IP = %{IP:src_ip}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:is_remote_natted}\s*behind a NAT device\s+This\s+end\s*%{DATA:is_local_natted}\s*behind a NAT device
 # ASA-4-733100
 CISCOFW733100 \[\s*%{DATA:drop_type}\s*\] drop %{DATA:drop_rate_id} exceeded. Current burst rate is %{INT:drop_rate_current_burst} per second, max configured rate is %{INT:drop_rate_max_burst}; Current average rate is %{INT:drop_rate_current_avg} per second, max configured rate is %{INT:drop_rate_max_avg}; Cumulative total count is %{INT:drop_total_count}
 #== End Cisco ASA ==