From 3aad404aeeed49addc4949f381d7700d6c3efe92 Mon Sep 17 00:00:00 2001
From: Helmut <Helmut.Hutzler@th-nuernberg.de>
Date: Fri, 18 Sep 2020 10:14:51 +0200
Subject: [PATCH] Updating README.md - Minor Changes

---
 README.md                   |   3 +-
 docker-compose-es-stack.yml |  45 ++++++
 docker-compose.yml          |  22 +++
 myfluentd/Dockerfile        |  18 +++
 myfluentd/conf/fluent.conf  | 312 ++++++++++++++++++++++++++++++++++++
 mynginx/Dockerfile          |   4 +
 mynginx/nginx.conf          |  47 ++++++
 triggerLogEntry.bat         |   1 +
 8 files changed, 450 insertions(+), 2 deletions(-)
 create mode 100644 docker-compose-es-stack.yml
 create mode 100644 docker-compose.yml
 create mode 100644 myfluentd/Dockerfile
 create mode 100644 myfluentd/conf/fluent.conf
 create mode 100644 mynginx/Dockerfile
 create mode 100644 mynginx/nginx.conf
 create mode 100644 triggerLogEntry.bat

diff --git a/README.md b/README.md
index 518219c..c55d43a 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ NGinx service is defined. The NGINX stack also demonstrates the sharing
 * [Docker: Share Compose File Configurations](https://docs.docker.com/compose/extends/)
 
 ## Installation
-Prerequisite: logging into the TH Nürnberg network or to open a VPN connection.
+Prerequisite: log into the TH Nürnberg network or open a VPN connection.
 
 ### Local Build
 The project can be built with Docker itself; only docker-compose is required. All necessary build tools are provided, so no local Java, NodeJS, or Angular installation needs to be present.
@@ -22,7 +22,6 @@ The following docker-compose build builds the NGinx and the Fluentd cont
 
 ## Starting the Demo
 #### Starting the Elasticsearch Stack
-To start, run the following command:
 
     docker stack deploy --compose-file docker-compose-es-stack.yml ElastikSearchStack
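+
+Note: both compose files attach to the external overlay network `logger-network`, which must exist before the stack is deployed. If it is missing, it can be created first, for example:
+
+    docker network create --driver overlay --attachable logger-network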
 
diff --git a/docker-compose-es-stack.yml b/docker-compose-es-stack.yml
new file mode 100644
index 0000000..258b7e9
--- /dev/null
+++ b/docker-compose-es-stack.yml
@@ -0,0 +1,45 @@
+version: "3.7"
+
+services:
+  elasticsearch:
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
+    container_name: elasticsearch
+    environment:
+      - xpack.security.enabled=false
+      - discovery.type=single-node
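+      # single-node discovery: Elasticsearch runs as a standalone node, no cluster formation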
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    cap_add:
+      - IPC_LOCK
+    volumes:
+      - elasticsearch-data:/usr/share/elasticsearch/data
+    # Uncomment to expose the Elasticsearch ports on the host:
+    #ports:
+    #  - 9200:9200
+    #  - 9300:9300
+    networks:
+      - logger-network
+  kibana:
+    container_name: kibana
+    image: docker.elastic.co/kibana/kibana:7.8.0
+    environment:
+     - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
+    ports:
+      - 5601:5601
+    networks:
+      - logger-network
+    depends_on:
+      - elasticsearch
+
+networks:
+  logger-network:
+    # created externally (see README); a driver cannot be configured for an external network
+    external: true
+
+volumes:
+  elasticsearch-data:
+    driver: local
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..644afcb
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,22 @@
+version: "3.7"
+
+services:
+  logging:
+    build: myfluentd
+    restart: unless-stopped
+    image: myfluentd
+    volumes:
+      - ./myfluentd/conf:/fluentd/etc
+      - ./log:/log
+    ports:
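+      # Fluentd forward input: 24224/tcp for log events, 24224/udp for heartbeats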
+      - "24224:24224"
+      - "24224:24224/udp"
+    networks:
+      - logger-network
+
+networks:
+  logger-network:
+    # created externally (see README); a driver cannot be configured for an external network
+    external: true
diff --git a/myfluentd/Dockerfile b/myfluentd/Dockerfile
new file mode 100644
index 0000000..7de52e7
--- /dev/null
+++ b/myfluentd/Dockerfile
@@ -0,0 +1,18 @@
+FROM fluent/fluentd:v1.10
+
+# Use root account to use fluent-gem
+USER root
+
+# The RUN below installs additional fluentd plugins (fluent.conf needs
+# elasticsearch and rewrite-tag-filter); customize the list as you wish
+RUN id && echo " --->" Starting fluentd plugin installation \
+  && gem install fluent-plugin-grep \
+  && gem install fluent-plugin-elasticsearch \
+  && gem install fluent-plugin-rewrite-tag-filter \
+  && gem sources --clear-all
+
+COPY conf/fluent.conf /fluentd/etc/
+# COPY entrypoint.sh /bin/
+
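+# Drop back to the unprivileged fluent user for runtime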
+USER fluent
+# RUN id && echo " --> Starting fluentd" 
diff --git a/myfluentd/conf/fluent.conf b/myfluentd/conf/fluent.conf
new file mode 100644
index 0000000..0343d5e
--- /dev/null
+++ b/myfluentd/conf/fluent.conf
@@ -0,0 +1,312 @@
+<system>
+  log_level debug
+</system>
+
+# All containers should be started with the tag option in their docker-compose
+# logging section, e.g.:
+#   logging:
+#     driver: "fluentd"
+#     options:
+#       tag: "nginx.logs"
+#       fluentd-address: "localhost:24224"
+#       fluentd-async-connect: "true"
+
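+# The same options can be passed to plain docker run (see triggerLogEntry.bat):
+#   docker run --log-driver=fluentd --log-opt tag="nginx.logs" --log-opt fluentd-address="127.0.0.1:24224" ...
+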
+<source>
+  @type forward
+  port 24224
+  bind 0.0.0.0
+  #tag nginx_test
+</source>
+
+#
+# For debugging: enable the nginx_test tag in the source section above
+#
+#<match nginx_test>
+#  @type copy
+#    <store>
+#      @type stdout
+#    </store>
+#</match>
+
+<match database.logs>
+  @type copy
+  <store>
+    @type file
+    path /log/${tag}
+    # On configuring buffers see: https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+    <buffer tag,time>
+      @type file
+      path /log/buffer/database
+      # Timeframe for collecting chunks before flushing.
+      # Demo setup: write new log files every minute; for production use 3600 (hourly) or higher.
+      timekey 60
+      # The output plugin writes chunks timekey_wait seconds after the timekey expires.
+      timekey_wait 10
+      # With gzip set, Fluentd compresses data records before writing them to buffer chunks.
+      compress gzip
+    </buffer>
+  </store>
+  <store>
+    @type stdout
+  </store>
+  <store>
+    @type elasticsearch
+    host elasticsearch
+    port 9200
+    logstash_format true
+    suppress_type_name true
+    logstash_prefix database
+    logstash_dateformat %Y%m%d
+    include_tag_key true
+    <buffer>
+      flush_interval 1
+    </buffer>
+  </store>
+</match>
+
+<filter es.logs>
+  @type parser
+  key_name log
+  reserve_data true
+  remove_key_name_field true
+  # hash_value_field parsed
+  <parse>
+    @type json
+  </parse>
+</filter>
+
+<match es.logs>
+  @type copy
+  <store>
+    @type file
+    path /log/${tag}
+    # On configuring buffers see: https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+    <buffer tag,time>
+      @type file
+      path /log/buffer/es_stack
+      # Timeframe for collecting chunks before flushing.
+      # Setup: flush chunks to log files every minute.
+      timekey 60
+      # The output plugin writes chunks timekey_wait seconds after the timekey expires.
+      timekey_wait 10
+      # With gzip set, Fluentd compresses data records before writing them to buffer chunks.
+      compress gzip
+    </buffer>
+  </store>
+  <store>
+    @type stdout
+  </store>
+  <store>
+    @type elasticsearch
+    host elasticsearch
+    port 9200
+    logstash_format true
+    suppress_type_name true
+    logstash_prefix es_stack
+    logstash_dateformat %Y%m%d
+    include_tag_key true
+    <buffer>
+      flush_interval 1
+    </buffer>
+  </store>
+</match>
+
+<filter payara.logs>
+  @type parser
+  key_name log
+  reserve_data true
+  remove_key_name_field true
+  # hash_value_field parsed
+  <parse>
+    @type json
+  </parse>
+</filter>
+
+<match payara.logs>
+  @type copy
+  <store>
+    @type file
+    path /log/${tag}
+    # On configuring buffers see: https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+    <buffer tag,time>
+      @type file
+      path /log/buffer/payara
+      # Timeframe for collecting chunks before flushing.
+      # Demo setup: write new log files every minute; for production use 3600 (hourly) or higher.
+      timekey 60
+      # The output plugin writes chunks timekey_wait seconds after the timekey expires.
+      timekey_wait 10
+      # With gzip set, Fluentd compresses data records before writing them to buffer chunks.
+      compress gzip
+    </buffer>
+  </store>
+  <store>
+    @type stdout
+  </store>
+  <store>
+    @type elasticsearch
+    host elasticsearch
+    port 9200
+    logstash_format true
+    suppress_type_name true
+    logstash_prefix fluentd_payara
+    logstash_dateformat %Y%m%d
+    include_tag_key true
+    <buffer>
+      flush_interval 1
+    </buffer>
+  </store>
+</match>
+
+
+#
+# Route Nginx log entries using rewrite_tag_filter:
+#   Log record starts with a date string "2020/08/17 12:23:35"  -> Nginx error log   -> tag: error.nginx.logs
+#   Log record contains the string "ngx_time_local"             -> Nginx access log  -> tag: access.nginx.logs
+#
+<match nginx.logs>
+  @type rewrite_tag_filter
+  <rule>
+    #
+    # Nginx error logs cannot be configured to emit JSON.
+    # Typical Nginx error log record:  "log":"2020/08/17 12:23:35 [error] 21#21: *14 open() \"/usr/share/nginx/html/xxx\" failed (2: No such file or directory)" ..
+    # In short: when a log record starts with a date, it is tagged as an Nginx error log.
+    #
+    key log
+    pattern /^\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}/
+    tag error.nginx.logs
+  </rule>
+  <rule>
+    #
+    # Assuming Nginx sends JSON-formatted access logs:
+    # - the Nginx log format is configured in nginx.conf:
+    #     log_format  json_combined escape=json
+    #                     '"ngx_time_local":"$time_local",
+    #                     ..
+    # Typical Nginx access log record:  "log":"\"ngx_time_local\":\"28/Aug/2020:08:59:36 +0000\", \"remote_user\":\"\", \"remote_addr\":\"172.18.0.1\", \"request\":\"GET /xxx HTTP/1.1\", ..
+    # Log records containing the JSON-escaped ngx_time_local string are tagged as Nginx access logs.
+    #
+    key log
+    #pattern /^\"ngx_time_local\":/
+    pattern /"ngx_time_local"/
+    tag access.nginx.logs
+  </rule>
+</match>
+
+<filter error.nginx.logs>
+  @type parser
+  # As Nginx error logs are not JSON-formatted, a regexp parser is used
+  key_name log
+  <parse>
+    @type regexp
+    expression /^(?<time>\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) \[(?<log_level>\w+)\] (?<pid>\d+).(?<tid>\d+): (?<message>.*)$/
+    time_format %Y/%m/%d %H:%M:%S
+  </parse>
+</filter>
+
+<match error.nginx.logs>
+  @type copy
+  <store>
+    @type file
+    path /log/${tag}
+    # On configuring buffers see: https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+    <buffer tag,time>
+      @type file
+      path /log/buffer/error_nginx
+      # Timeframe for collecting chunks before flushing.
+      # Demo setup: write new log files every minute; for production use 3600 (hourly) or higher.
+      timekey 60
+      # The output plugin writes chunks timekey_wait seconds after the timekey expires.
+      timekey_wait 10
+      # With gzip set, Fluentd compresses data records before writing them to buffer chunks.
+      compress gzip
+    </buffer>
+  </store>
+  <store>
+    @type stdout
+  </store>
+  <store>
+    @type elasticsearch
+    host elasticsearch
+    port 9200
+    logstash_format true
+    suppress_type_name true
+    logstash_prefix fluentd_nginx_error
+    logstash_dateformat %Y%m%d
+    include_tag_key true
+    <buffer>
+      flush_interval 1
+    </buffer>
+  </store>
+</match>
+
+#
+# This filter is no longer needed, since Nginx is configured to send access
+# logs in JSON format. If JSON access logging is disabled in nginx.conf,
+# re-enable this filter.
+#
+#<filter access.nginx.logs>
+#  @type parser
+#    key_name log
+#    format nginx
+#</filter>
+#
+
+#
+# reserve_data          : keeps the original key-value pair in the parsed result.
+# remove_key_name_field : removes the key_name field when parsing succeeds.
+# The latter drops the "log" entry from the input data, i.e. it flattens the nested JSON:
+#    input data:  {"key":"value","log":"{\"user\":1,\"num\":2}"}
+#    output data: {"key":"value","user":1,"num":2}
+# This keeps parsing in our Elasticsearch instance simple, and we don't need to
+# pre-create an ES index with "type":"nested" for the "log" field.
+#
+<filter access.nginx.logs>
+  @type parser
+  key_name log
+  reserve_data true
+  remove_key_name_field true
+  # hash_value_field parsed
+  <parse>
+    @type json
+  </parse>
+</filter>
+
+<match access.nginx.logs>
+  @type copy
+  <store>
+    @type file
+    path /log/${tag}
+    append true
+    # On configuring buffers see: https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+    <buffer tag,time>
+      @type file
+      path /log/buffer/access_nginx
+      # Timeframe for collecting chunks before flushing.
+      # Setup: write new log files every 10 minutes.
+      timekey 600
+      # The output plugin writes chunks timekey_wait seconds after the timekey expires.
+      timekey_wait 10
+      # With gzip set, Fluentd compresses data records before writing them to buffer chunks.
+      compress gzip
+    </buffer>
+  </store>
+  <store>
+    @type stdout
+  </store>
+  <store>
+    @type elasticsearch
+    host elasticsearch
+    port 9200
+    logstash_format true
+    suppress_type_name true
+    logstash_prefix fluentd_nginx_log
+    logstash_dateformat %Y%m%d
+    include_tag_key true
+    <buffer>
+      flush_interval 1
+    </buffer>
+  </store>
+</match>
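+
+# Optional catch-all (an assumption, not part of the original setup): enable it
+# to surface records that match none of the tags above.
+#<match **>
+#  @type stdout
+#</match>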
+
diff --git a/mynginx/Dockerfile b/mynginx/Dockerfile
new file mode 100644
index 0000000..995b348
--- /dev/null
+++ b/mynginx/Dockerfile
@@ -0,0 +1,4 @@
+FROM nginx
+RUN chmod +w /etc/nginx/nginx.conf
+COPY nginx.conf /etc/nginx/nginx.conf
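+# Debug aid: print the effective config while building the image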
+RUN cat /etc/nginx/nginx.conf
diff --git a/mynginx/nginx.conf b/mynginx/nginx.conf
new file mode 100644
index 0000000..79a4c56
--- /dev/null
+++ b/mynginx/nginx.conf
@@ -0,0 +1,47 @@
+user  nginx;
+worker_processes  auto;
+
+pid        /var/run/nginx.pid;
+
+events {
+    worker_connections  1024;
+}
+
+http {
+ 
+    include       /etc/nginx/mime.types;
+    default_type  application/octet-stream;
+
+#   resolver 127.0.0.11 ipv6=off;
+#   resolver_timeout 10s;
+
+#    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
+#                      '$status $body_bytes_sent "$http_referer" '
+#                      '"$http_user_agent" "$http_x_forwarded_for"';
+    log_format  json_combined escape=json '{'
+                      '"ngx_time_local":"$time_local", '
+                      '"ngx_remote_user":"$remote_user", '
+                      '"ngx_remote_addr":"$remote_addr", '
+                      '"ngx_request":"$request", '
+                      '"ngx_status": "$status", '
+                      '"ngx_body_bytes_sent":"$body_bytes_sent", '
+                      '"ngx_request_time":"$request_time", '
+                      '"ngx_http_referrer":"$http_referer", '
+                      '"ngx_http_user_agent":"$http_user_agent" '
+                      '}';
+
+#   access_log  syslog:server=logging:22224,tag=nginx_access json_combined;
+    access_log  /var/log/nginx/access.log json_combined;
+
+#   error_log  syslog:server=logging:21224,tag=nginx_error,severity=info;
+    error_log  /var/log/nginx/error.log info;
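+
+    # A json_combined record looks roughly like this (cf. the sample shown in fluent.conf):
+    #   "ngx_time_local":"28/Aug/2020:08:59:36 +0000", "ngx_remote_user":"", "ngx_remote_addr":"172.18.0.1", "ngx_request":"GET /xxx HTTP/1.1", ..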
+
+    sendfile        on;
+    #tcp_nopush     on;
+
+    keepalive_timeout  65;
+
+    #gzip  on;
+
+    include /etc/nginx/conf.d/*.conf;
+}
\ No newline at end of file
diff --git a/triggerLogEntry.bat b/triggerLogEntry.bat
new file mode 100644
index 0000000..e838230
--- /dev/null
+++ b/triggerLogEntry.bat
@@ -0,0 +1 @@
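+REM Sends a sample JSON access-log record to the local Fluentd forward input via the Docker fluentd log driver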
+docker run --log-driver=fluentd --log-opt tag="nginx.logs" --log-opt fluentd-address="127.0.0.1:24224" ubuntu echo "{\"ngx_time_local\":\"28/Aug/2020:09:42:10 +0000\", \"ngx_remote_user\":\"Helmut\", \"ngx_remote_addr\":\"172.18.0.1\", \"ngx_request\":\"GET /Page_NOT_found HTTP/1.1\", \"ngx_status\":\"404\" }"
-- 
GitLab