initial commit

OLD/install_elasticsearch-7.00.sh    334 lines    Normal file
@@ -0,0 +1,334 @@

# ==========
# Install ElasticSearch
# ==========

# ---
# NOTE:
#    we will install a package of ElasticSearch which contains only features
#    that are available under the Apache 2.0 license
# ---

# Install 'apt-transport-https' package
#
apt-get install apt-transport-https

# Import the Elasticsearch PGP Key
#
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -

# Save the repository definition to '/etc/apt/sources.list.d/elastic-7.x.list'
#
echo "deb https://artifacts.elastic.co/packages/oss-7.x/apt stable main" \
   | sudo tee -a /etc/apt/sources.list.d/elastic-7.x.list

# Install the Elasticsearch Debian package with:
apt-get update
apt-get install elasticsearch-oss


# ==========
# Configure ElasticSearch
# ==========

ELASTIC_SEARCH_PUBLISH_IP="0.0.0.0"
ELASTIC_SEARCH_PORT=9200

NODE_NAME="verdi-es"
NODE_NAME="oolm-shop-es"


# ---
# Set System properties
# ---

# Set sysctl value 'vm.max_map_count' to '524288'
#
# Add to /etc/sysctl.conf:
#
#    vm.max_map_count = 524288
#
# Note:
#    if installing ElasticSearch into an LX container, do this on
#    the host system
#
cat << EOF >> /etc/sysctl.conf

# Needed by ElasticSearch installation on virtual guest
# systems (LX containers)
#
# The error message there was:
#    max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
#
vm.max_map_count = 524288
EOF


# ---
# Adjust file '/etc/elasticsearch/elasticsearch.yml'
# ---

# Set network.host to '$ELASTIC_SEARCH_PUBLISH_IP'
#
if ! grep -q -E "^\s*network.host:\s+${ELASTIC_SEARCH_PUBLISH_IP}" /etc/elasticsearch/elasticsearch.yml ; then
   if grep -q -E "^\s*#network.host:" /etc/elasticsearch/elasticsearch.yml ; then
      perl -i.ORIG -n -p -e "s/^(#network.host:.*)/\1\nnetwork.host: ${ELASTIC_SEARCH_PUBLISH_IP}/" \
         /etc/elasticsearch/elasticsearch.yml
   else
      cat << EOF >> /etc/elasticsearch/elasticsearch.yml

# Additional User Setting (network.host:)
#
network.host: $ELASTIC_SEARCH_PUBLISH_IP
EOF
   fi
fi

# Set http.port to '$ELASTIC_SEARCH_PORT'
#
if ! grep -q -E "^\s*http.port:\s+${ELASTIC_SEARCH_PORT}" /etc/elasticsearch/elasticsearch.yml ; then
   if grep -q -E "^\s*#http.port:" /etc/elasticsearch/elasticsearch.yml ; then
      perl -i.ORIG -n -p -e "s/^(#http.port:.*)/\1\nhttp.port: ${ELASTIC_SEARCH_PORT}/" \
         /etc/elasticsearch/elasticsearch.yml
   else
      cat << EOF >> /etc/elasticsearch/elasticsearch.yml

# Additional User Setting (http.port:)
#
http.port: $ELASTIC_SEARCH_PORT
EOF
   fi
fi

# Set values
#    node.name: to '$NODE_NAME'
#    cluster.initial_master_nodes: to '[${NODE_NAME}]'
#
if ! grep -q -E "^\s*cluster.initial_master_nodes:\s+${NODE_NAME}" \
   /etc/elasticsearch/elasticsearch.yml ; then
   if grep -q -E "^\s*#cluster.initial_master_nodes:" /etc/elasticsearch/elasticsearch.yml ; then
      perl -i.ORIG -n -p -e "s/^(#cluster.initial_master_nodes:.*)/\1\nnode.name: ${NODE_NAME}\ncluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
         /etc/elasticsearch/elasticsearch.yml
   else
      cat << EOF >> /etc/elasticsearch/elasticsearch.yml

# Additional User Settings:
#
#    node.name:
#    cluster.initial_master_nodes:
#
node.name: ${NODE_NAME}
cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
   fi
fi


# Activate sysctl settings at file '/etc/sysctl.conf'
#
sysctl -p
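
# Optional check (not part of the original steps): confirm that the new value
# is active; this only assumes the standard sysctl utility and should print
# 'vm.max_map_count = 524288':
#
sysctl vm.max_map_count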

# ---
# Note:
#    If running as a systemd-managed process in an LX container, the following
#    systemd parameters will be set:
#       LimitNOFILE=65535
#       LimitNPROC=4096
#       LimitAS=infinity
#       LimitFSIZE=infinity
#
# Take care that your container satisfies these values.
# ---
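
# The curl test below only works once the Elasticsearch service is running.
# This script does not start it explicitly; assuming the systemd unit shipped
# by the Debian package is named 'elasticsearch', it can be enabled and
# started like this:
#
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch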

# ==========
# Test Elasticsearch Setup
# ==========

# The Elasticsearch service is ready to use. You can test it with the curl
# command-line utility. Run a simple GET request to verify the setup; you will
# see the Elasticsearch cluster details with the version on your screen.
#
# example output:
#
#    verdi-es:~ # curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200
#    {
#      "name" : "verdi-es",
#      "cluster_name" : "elasticsearch",
#      "cluster_uuid" : "J54WIwEqQe203nUbtgOOEA",
#      "version" : {
#        "number" : "7.2.0",
#        "build_flavor" : "oss",
#        "build_type" : "deb",
#        "build_hash" : "508c38a",
#        "build_date" : "2019-06-20T15:54:18.811730Z",
#        "build_snapshot" : false,
#        "lucene_version" : "8.0.0",
#        "minimum_wire_compatibility_version" : "6.8.0",
#        "minimum_index_compatibility_version" : "6.0.0-beta1"
#      },
#      "tagline" : "You Know, for Search"
#    }
#    verdi-es:~ #
#
curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200


# ==========
# Install Kibana (kibana-oss)
# ==========

# Same as Elasticsearch, we will install the latest version of Kibana using the
# apt package manager from the official Elastic repository:
#
apt-get install kibana-oss

# Specifies the address to which the Kibana server will bind. IP addresses and host
# names are both valid values. The default is 'localhost', which usually means
# remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#
# Set server.host to 'localhost'
#
if ! grep -q -E "^\s*server.host:\s+localhost" /etc/kibana/kibana.yml ; then
   if grep -q -E "^\s*#server.host:" /etc/kibana/kibana.yml ; then
      perl -i.ORIG -n -p -e "s/^(#server.host:.*)/\1\nserver.host: localhost/" \
         /etc/kibana/kibana.yml
   else
      cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.host:)
#
server.host: localhost
EOF
   fi
fi

# Start the Kibana service and set it to start automatically on boot:
#
systemctl restart kibana
systemctl enable kibana


# ==========
# Configure nginx webserver
# ==========

WEBSITE=verdi-elk.warenform.de

cat <<EOF > /etc/nginx/sites-available/${WEBSITE}.conf
server {
   listen 80;
   listen [::]:80 ;
   server_name ${WEBSITE};
   return 301 https://\$host\$request_uri;
}

server {
   listen 443 ssl http2;
   listen [::]:443 ssl http2;
   server_name ${WEBSITE};

   root /var/www/html;
   index index.html index.htm index.nginx-debian.html;

   # Include location directive for Let's Encrypt ACME Challenge
   #
   # Needed for (automated) certificate renewal
   #
   include snippets/letsencrypt-acme-challenge.conf;

   # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
   #
   # To generate a dhparam.pem file, run in a terminal
   #    openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048
   #
   ssl_dhparam /etc/nginx/ssl/dhparam.pem;

   # Enable session resumption to improve https performance
   ssl_session_cache shared:SSL:50m;
   ssl_session_timeout 10m;
   ssl_session_tickets off;
   ssl_ecdh_curve secp384r1;

   ssl_certificate /var/lib/dehydrated/certs/${WEBSITE}/fullchain.pem;
   ssl_certificate_key /var/lib/dehydrated/certs/${WEBSITE}/privkey.pem;

   ssl_protocols TLSv1.2 TLSv1.3;

   # ECDHE better than DHE (faster); ECDHE & DHE GCM better than CBC (attacks on AES)
   # Everything better than SHA1 (deprecated)
   #
   #ssl_ciphers 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA'
   ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;
   ssl_prefer_server_ciphers on;

   #resolver 192.168.42.129 8.8.8.8 valid=300s;
   #resolver_timeout 5s;

   add_header Strict-Transport-Security "max-age=31536000" always;
   add_header X-Frame-Options DENY;
   add_header X-Content-Type-Options nosniff;
   add_header X-XSS-Protection "1; mode=block";

   access_log  /var/log/nginx/${WEBSITE}_access.log;
   error_log  /var/log/nginx/${WEBSITE}_error.log;

   auth_basic "Authentication Required";
   auth_basic_user_file /etc/nginx/htpasswd.kibana;

   location / {
      proxy_pass http://localhost:5601;
      proxy_http_version 1.1;
      proxy_set_header Upgrade \$http_upgrade;
      proxy_set_header Connection 'upgrade';
      proxy_set_header Host \$host;
      proxy_cache_bypass \$http_upgrade;
   }
}
EOF


# - Enable site ${WEBSITE}
# -
ln -s ../sites-available/${WEBSITE}.conf /etc/nginx/sites-enabled/
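
# Optional: check the generated configuration before reloading; 'nginx -t'
# only parses the configuration and reports errors, it changes nothing:
#
nginx -t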

# - Create a basic authentication file with the openssl command:
# -
# - user:     admin
# - password: $E%R&T/Z(U
# -
echo "admin:$(openssl passwd -apr1 '$E%R&T/Z(U')" | sudo tee -a /etc/nginx/htpasswd.kibana


# - Restart nginx webserver
# -
systemctl restart nginx


# ==========
# Install Logstash (logstash-oss)
# ==========

# The final step is to install Logstash using the apt package manager from
# the official Elastic repository.
#
apt-get install logstash-oss

# Start the Logstash service and set it to start automatically on boot:
#
systemctl restart logstash
systemctl enable logstash

# ---
# Note:
#    The Logstash configuration depends on your personal preferences and the
#    plugins you will use. You can find more information about how to configure
#    Logstash here:
#
#       https://www.elastic.co/guide/en/logstash/current/configuration.html
# ---
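
# A minimal pipeline sketch (illustration only, not part of the original
# setup): the file name '/etc/logstash/conf.d/10-beats-to-es.conf' and the
# Beats port 5044 are just examples; adjust input and output to your setup.
#
#    cat << EOF > /etc/logstash/conf.d/10-beats-to-es.conf
#    input {
#      beats {
#        port => 5044
#      }
#    }
#    output {
#      elasticsearch {
#        hosts => ["http://localhost:9200"]
#      }
#    }
#    EOF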

README.JVM-heap-size    31 lines    Normal file
@@ -0,0 +1,31 @@

# ==========================
# set JVM heap size manually
# ==========================

# ---
# Set JVM heap size in file /etc/elasticsearch/jvm.options
# ---

...
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## The heap size is automatically configured by Elasticsearch
## based on the available memory in your system and the roles
## each node is configured to fulfill. If specifying heap is
## required, it should be done through a file in jvm.options.d,
## and the min and max should be set to the same value. For
## example, to set the heap to 4 GB, create a new file in the
## jvm.options.d directory containing these lines:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/7.17/heap-size.html
## for more information
##
################################################################
-Xms24g
-Xmx24g
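
# ---
# Example (sketch): instead of editing jvm.options directly, the note quoted
# above recommends a drop-in file under jvm.options.d. Assuming a 24 GB heap is
# wanted, that could look like this (the file name 'heap.options' is arbitrary):
#
#    cat << EOF > /etc/elasticsearch/jvm.options.d/heap.options
#    -Xms24g
#    -Xmx24g
#    EOF
#
#    systemctl restart elasticsearch
# ---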

install_elasticsearch-6.sh    327 lines    Normal file
@@ -0,0 +1,327 @@

# ==========
# Install Java
# ==========

# Note:
#    Since Elasticsearch version 7, Java is included in the Elasticsearch package,
#    but version 6 DOES NOT contain Java.

# Elasticsearch requires at least Java 8 in order to run. It supports both OpenJDK
# and Oracle Java.
#
# We install 'default-jdk', which (on Debian 10) installs OpenJDK version 11.
#
apt-get install default-jdk
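
# Optional check that a suitable JDK is now on the PATH (Elasticsearch 6.x
# needs Java 8 or newer):
#
java -version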


# ==========
# Install ElasticSearch
# ==========

# ---
# NOTE:
#    we will install a package of ElasticSearch which contains only features
#    that are available under the Apache 2.0 license (elasticsearch-oss)
# ---

# Install 'apt-transport-https' package
#
apt-get install apt-transport-https

# Import the Elasticsearch PGP Key
#
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -

# Save the repository definitions to '/etc/apt/sources.list.d/elastic-6.x.list'
# and '/etc/apt/sources.list.d/elastic-7.x.list'
#
echo "deb https://artifacts.elastic.co/packages/oss-6.x/apt stable main" \
   | sudo tee -a /etc/apt/sources.list.d/elastic-6.x.list

echo "deb https://artifacts.elastic.co/packages/oss-7.x/apt stable main" \
   | sudo tee -a /etc/apt/sources.list.d/elastic-7.x.list

# Install the Elasticsearch Debian package with:
apt-get update
apt-get install elasticsearch-oss
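
# Since both the 6.x and the 7.x OSS repositories were added above, it may be
# worth confirming which candidate version apt actually selected (read-only
# check with standard apt tooling):
#
apt-cache policy elasticsearch-oss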


# ==========
# Configure ElasticSearch
# ==========

ELASTIC_SEARCH_PUBLISH_IP="0.0.0.0"
ELASTIC_SEARCH_PORT=9200


# ---
# Set System properties
# ---

# Set sysctl value 'vm.max_map_count' to '524288'
#
# Add to /etc/sysctl.conf:
#
#    vm.max_map_count = 524288
#
# Note:
#    if installing ElasticSearch into an LX container, do this on
#    the host system
#
cat << EOF >> /etc/sysctl.conf

# Needed by ElasticSearch installation on virtual guest
# systems (LX containers)
#
# The error message there was:
#    max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
#
vm.max_map_count = 524288
EOF


# ---
# Adjust file '/etc/elasticsearch/elasticsearch.yml'
# ---

# Set network.host to '$ELASTIC_SEARCH_PUBLISH_IP'
#
if ! grep -q -E "^\s*network.host:\s+${ELASTIC_SEARCH_PUBLISH_IP}" /etc/elasticsearch/elasticsearch.yml ; then
   if grep -q -E "^\s*#network.host:" /etc/elasticsearch/elasticsearch.yml ; then
      perl -i.ORIG -n -p -e "s/^(#network.host:.*)/\1\nnetwork.host: ${ELASTIC_SEARCH_PUBLISH_IP}/" \
         /etc/elasticsearch/elasticsearch.yml
   else
      cat << EOF >> /etc/elasticsearch/elasticsearch.yml

# Additional User Setting (network.host:)
#
network.host: $ELASTIC_SEARCH_PUBLISH_IP
EOF
   fi
fi

# Set http.port to '$ELASTIC_SEARCH_PORT'
#
if ! grep -q -E "^\s*http.port:\s+${ELASTIC_SEARCH_PORT}" /etc/elasticsearch/elasticsearch.yml ; then
   if grep -q -E "^\s*#http.port:" /etc/elasticsearch/elasticsearch.yml ; then
      perl -i.ORIG -n -p -e "s/^(#http.port:.*)/\1\nhttp.port: ${ELASTIC_SEARCH_PORT}/" \
         /etc/elasticsearch/elasticsearch.yml
   else
      cat << EOF >> /etc/elasticsearch/elasticsearch.yml

# Additional User Setting (http.port:)
#
http.port: $ELASTIC_SEARCH_PORT
EOF
   fi
fi


# Activate sysctl settings at file '/etc/sysctl.conf'
#
sysctl -p


# ---
# Note:
#    If running as a systemd-managed process in an LX container, the following
#    systemd parameters will be set:
#       LimitNOFILE=65535
#       LimitNPROC=4096
#       LimitAS=infinity
#       LimitFSIZE=infinity
#
# Take care that your container satisfies these values.
# ---


# ==========
# Test Elasticsearch Setup
# ==========

# The Elasticsearch service is ready to use. You can test it with the curl
# command-line utility. Run a simple GET request to verify the setup; you will
# see the Elasticsearch cluster details with the version on your screen.
#
# example output:
#
#    verdi-es:~ # curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200
#    {
#      "name" : "verdi-es",
#      "cluster_name" : "elasticsearch",
#      "cluster_uuid" : "J54WIwEqQe203nUbtgOOEA",
#      "version" : {
#        "number" : "7.2.0",
#        "build_flavor" : "oss",
#        "build_type" : "deb",
#        "build_hash" : "508c38a",
#        "build_date" : "2019-06-20T15:54:18.811730Z",
#        "build_snapshot" : false,
#        "lucene_version" : "8.0.0",
#        "minimum_wire_compatibility_version" : "6.8.0",
#        "minimum_index_compatibility_version" : "6.0.0-beta1"
#      },
#      "tagline" : "You Know, for Search"
#    }
#    verdi-es:~ #
#
curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200


# ==========
# Install Kibana (kibana-oss)
# ==========

# Same as Elasticsearch, we will install the latest version of Kibana using the
# apt package manager from the official Elastic repository:
#
apt-get install kibana-oss

# Specifies the address to which the Kibana server will bind. IP addresses and host
# names are both valid values. The default is 'localhost', which usually means
# remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#
# Set server.host to 'localhost'
#
if ! grep -q -E "^\s*server.host:\s+localhost" /etc/kibana/kibana.yml ; then
   if grep -q -E "^\s*#server.host:" /etc/kibana/kibana.yml ; then
      perl -i.ORIG -n -p -e "s/^(#server.host:.*)/\1\nserver.host: localhost/" \
         /etc/kibana/kibana.yml
   else
      cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.host:)
#
server.host: localhost
EOF
   fi
fi

# Start the Kibana service and set it to start automatically on boot:
#
systemctl restart kibana
systemctl enable kibana


# ==========
# Configure nginx webserver
# ==========

WEBSITE=verdi-elk.warenform.de

cat <<EOF > /etc/nginx/sites-available/${WEBSITE}.conf
server {
   listen 80;
   listen [::]:80 ;
   server_name ${WEBSITE};
   return 301 https://\$host\$request_uri;
}

server {
   listen 443 ssl http2;
   listen [::]:443 ssl http2;
   server_name ${WEBSITE};

   root /var/www/html;
   index index.html index.htm index.nginx-debian.html;

   # Include location directive for Let's Encrypt ACME Challenge
   #
   # Needed for (automated) certificate renewal
   #
   include snippets/letsencrypt-acme-challenge.conf;

   # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
   #
   # To generate a dhparam.pem file, run in a terminal
   #    openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048
   #
   ssl_dhparam /etc/nginx/ssl/dhparam.pem;

   # Enable session resumption to improve https performance
   ssl_session_cache shared:SSL:50m;
   ssl_session_timeout 10m;
   ssl_session_tickets off;
   ssl_ecdh_curve secp384r1;

   ssl_certificate /var/lib/dehydrated/certs/${WEBSITE}/fullchain.pem;
   ssl_certificate_key /var/lib/dehydrated/certs/${WEBSITE}/privkey.pem;

   ssl_protocols TLSv1.2 TLSv1.3;

   # ECDHE better than DHE (faster); ECDHE & DHE GCM better than CBC (attacks on AES)
   # Everything better than SHA1 (deprecated)
   #
   #ssl_ciphers 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA'
   ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;
   ssl_prefer_server_ciphers on;

   #resolver 192.168.42.129 8.8.8.8 valid=300s;
   #resolver_timeout 5s;

   add_header Strict-Transport-Security "max-age=31536000" always;
   add_header X-Frame-Options DENY;
   add_header X-Content-Type-Options nosniff;
   add_header X-XSS-Protection "1; mode=block";

   access_log  /var/log/nginx/${WEBSITE}_access.log;
   error_log  /var/log/nginx/${WEBSITE}_error.log;

   auth_basic "Authentication Required";
   auth_basic_user_file /etc/nginx/htpasswd.kibana;

   location / {
      proxy_pass http://localhost:5601;
      proxy_http_version 1.1;
      proxy_set_header Upgrade \$http_upgrade;
      proxy_set_header Connection 'upgrade';
      proxy_set_header Host \$host;
      proxy_cache_bypass \$http_upgrade;
   }
}
EOF


# - Enable site ${WEBSITE}
# -
ln -s ../sites-available/${WEBSITE}.conf /etc/nginx/sites-enabled/


# - Create a basic authentication file with the openssl command:
# -
# - user:     admin
# - password: $E%R&T/Z(U
# -
echo "admin:$(openssl passwd -apr1 '$E%R&T/Z(U')" | sudo tee -a /etc/nginx/htpasswd.kibana


# - Restart nginx webserver
# -
systemctl restart nginx


# ==========
# Install Logstash (logstash-oss)
# ==========

# The final step is to install Logstash using the apt package manager from
# the official Elastic repository.
#
apt-get install logstash-oss

# Start the Logstash service and set it to start automatically on boot:
#
systemctl restart logstash
systemctl enable logstash

# ---
# Note:
#    The Logstash configuration depends on your personal preferences and the
#    plugins you will use. You can find more information about how to configure
#    Logstash here:
#
#       https://www.elastic.co/guide/en/logstash/current/configuration.html
# ---

install_elasticsearch-7.debian-12.sh    2 lines    Normal file
@@ -0,0 +1,2 @@
# see:
#    https://wiki.crowncloud.net/How_to_Install_Elasticsearch_on_Debian_12?How_to_Install_Elasticsearch_on_Debian_12

install_elasticsearch-7.sh    667 lines    Normal file
@@ -0,0 +1,667 @@

# ==========
# Install ElasticSearch
# ==========

# ---
# NOTE:
#    unlike the older scripts, this one installs the default Elasticsearch
#    distribution (package 'elasticsearch' from the 7.x repository), not the
#    Apache-2.0-only 'elasticsearch-oss' package
# ---

# Install 'apt-transport-https' package
#
apt-get install apt-transport-https

# Import the Elasticsearch PGP Key
#
# We sign all of our packages with the Elasticsearch Signing Key (PGP key D88E42B4,
# available from https://pgp.mit.edu) with fingerprint:
#
#     4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4
#
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg

# You may need to install the apt-transport-https package on Debian before proceeding:
#
apt-get install apt-transport-https


# Save the repository definition to '/etc/apt/sources.list.d/elastic-7.x.list'
#
echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-7.x.list
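
# Optional: the imported keyring can be inspected and its fingerprint compared
# against the one quoted above (assumes a GnuPG version new enough to accept a
# binary keyring file with --show-keys):
#
gpg --show-keys /usr/share/keyrings/elasticsearch-keyring.gpg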


# Install the Elasticsearch Debian package with:
apt-get update
apt-get install elasticsearch


# ==========
# Configure ElasticSearch
# ==========

# By default Elasticsearch is only accessible on localhost.
#
# Leave empty to accept the default
#
ELASTIC_SEARCH_PUBLISH_IP="0.0.0.0"
ELASTIC_SEARCH_PUBLISH_IP=""
ELASTIC_SEARCH_PUBLISH_IP="127.0.0.1"

# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
# Leave empty to accept the default
#
ELASTIC_SEARCH_PORT=9200

NODE_NAME="verdi-es"
NODE_NAME="oolm-shop-es"

DISCOVERY_TYPE='single-node'

AUTOMATIC_CREATION_SYSTEM_INDICES=false

ELASTIC_SEARCH_CONFIG_FILE="/etc/elasticsearch/elasticsearch.yml"
ELASTIC_SEARCH_CONFIG_FILE="/root/elasticsearch.yml"

# ---
# Set System properties
# ---

# Set sysctl value 'vm.max_map_count' to '524288'
#
# Add to /etc/sysctl.d/60-elasticsearch.conf:
#
#    vm.max_map_count = 524288
#
# Note:
#    if installing ElasticSearch into an LX container, do this on
#    the host system
#
cat << EOF >> /etc/sysctl.d/60-elasticsearch.conf

# Needed by ElasticSearch installation on virtual guest
# systems (LX containers)
#
# The error message there was:
#    max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
#
vm.max_map_count = 524288
EOF


# Activate sysctl settings from file '/etc/sysctl.d/60-elasticsearch.conf'
#
sysctl -p /etc/sysctl.d/60-elasticsearch.conf


# ---
# Adjust file '/etc/elasticsearch/elasticsearch.yml' (${ELASTIC_SEARCH_CONFIG_FILE})
# ---

if [[ ! -f "${ELASTIC_SEARCH_CONFIG_FILE}.ORIG" ]] ; then
   cp -a "${ELASTIC_SEARCH_CONFIG_FILE}" "${ELASTIC_SEARCH_CONFIG_FILE}.ORIG"
fi

# Set network.host to '$ELASTIC_SEARCH_PUBLISH_IP'
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#
# Replace only the first occurrence of a match
#
if [[ -n "$ELASTIC_SEARCH_PUBLISH_IP" ]]; then
   if ! grep -q -E "^\s*network.host:\s+${ELASTIC_SEARCH_PUBLISH_IP}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#network.host:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#network.host:.*\)/ s//\1\nnetwork.host: ${ELASTIC_SEARCH_PUBLISH_IP}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# network.host
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#
network.host: $ELASTIC_SEARCH_PUBLISH_IP
EOF
      fi
   fi
fi

# Set http.port to '$ELASTIC_SEARCH_PORT'
#
# Replace only the first occurrence of a match
#
if [[ -n "$ELASTIC_SEARCH_PORT" ]]; then
   if ! grep -q -E "^\s*http.port:\s+${ELASTIC_SEARCH_PORT}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#http.port:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#http.port:.*\)/ s//\1\nhttp.port: ${ELASTIC_SEARCH_PORT}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# http.port
#
# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
http.port: $ELASTIC_SEARCH_PORT
EOF
      fi
   fi
fi

# Set values
#    node.name: to '$NODE_NAME'
#
#    # if discovery.type IS NOT set to 'single-node'
#    cluster.initial_master_nodes: to '[${NODE_NAME}]'
#
if [[ -n "$NODE_NAME" ]]; then
   if ! grep -q -E "^\s*node.name:\s+${NODE_NAME}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#node.name:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#node.name.*\)/ s//\1\nnode.name: ${NODE_NAME}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# node.name
#
#  Use a descriptive name for the node:
#
node.name: ${NODE_NAME}
EOF
         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}
cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         else
            cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# setting [cluster.initial_master_nodes] is not allowed when [discovery.type] is set to [single-node]
#cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         fi
      fi
   fi

   if ! grep -q -E "^\s*cluster.initial_master_nodes:\s+${NODE_NAME}" \
      ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#cluster.initial_master_nodes:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then

            sed -i "0,/^\(#cluster.initial_master_nodes.*\)/ s//\1\ncluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
               ${ELASTIC_SEARCH_CONFIG_FILE}
         else
            sed -i "0,/^\(#cluster.initial_master_nodes.*\)/ s//\1\n#cluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
               ${ELASTIC_SEARCH_CONFIG_FILE}
         fi

      elif grep -q -E "^\s*cluster.initial_master_nodes:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            sed -i "0,/^\(cluster.initial_master_nodes.*\)/ s//#\1\ncluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
               ${ELASTIC_SEARCH_CONFIG_FILE}
         else
            sed -i "0,/^\(cluster.initial_master_nodes.*\)/ s//#\1/" ${ELASTIC_SEARCH_CONFIG_FILE}
         fi
      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# cluster.initial_master_nodes:
#
EOF
         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}
cluster.initial_master_nodes: ["${NODE_NAME}"]

EOF
         else
            cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# setting [cluster.initial_master_nodes] is not allowed when [discovery.type] is set to [single-node]
#cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         fi
      fi
   fi
fi


# Set value discovery.type
#
if [[ -n "$DISCOVERY_TYPE" ]]; then
   if ! grep -q -E "^\s*discovery.type:\s+${DISCOVERY_TYPE}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#discovery.type:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
         sed -i "0,/^\(#discovery.type.*\)/ s//\1\ndiscovery.type: ${DISCOVERY_TYPE}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      elif grep -q -E "^\s*discovery.type:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
         sed -i "0,/^\(discovery.type.*\)/ s//#\1\ndiscovery.type: ${DISCOVERY_TYPE}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}
      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# discovery.type
#
# (Static) Specifies whether Elasticsearch should form a multiple-node cluster.
#
# Defaults to multi-node, which means that Elasticsearch discovers other nodes
# when forming a cluster and allows other nodes to join the cluster later.
#
# If set to single-node, Elasticsearch forms a single-node cluster and suppresses
# the timeout set by cluster.publish.timeout. For more information about when
# you might use this setting, see Single-node discovery:
#
#    https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#initial_master_nodes
#
discovery.type: ${DISCOVERY_TYPE}

EOF
      fi
   fi
fi
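
# Optional: show the settings that the adjustments above should have produced
# in the configuration file (read-only check):
#
grep -E "^(network.host|http.port|node.name|cluster.initial_master_nodes|discovery.type):" \
   ${ELASTIC_SEARCH_CONFIG_FILE}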


# The default behavior of disabling security on basic licenses is deprecated.
# In a later version of Elasticsearch, the value of [xpack.security.enabled]
# will default to "true", regardless of the license level.
# See
#    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/security-minimal-setup.html
# to enable security, or explicitly disable security by setting
# [xpack.security.enabled] to false in elasticsearch.yml
#
cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# See
#    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/security-minimal-setup.html
# to enable security.
#
xpack.security.enabled: false
EOF


# Disable geoip usage
#
# This will hopefully avoid the error
#
#    exception during geoip databases update..
#
# see also: https://discuss.elastic.co/t/how-to-disable-geoip-usage-in-7-14-0/281076/8
#
cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# Disable geoip usage
#
#    exception during geoip databases update..
#
# see also: https://discuss.elastic.co/t/how-to-disable-geoip-usage-in-7-14-0/281076/8
#
ingest.geoip.downloader.enabled: false
EOF


# Enable automatic creation of system indices
#
# Some commercial features automatically create indices within Elasticsearch. By default,
# Elasticsearch is configured to allow automatic index creation, and no additional steps
# are required. However, if you have disabled automatic index creation in Elasticsearch,
# you must configure action.auto_create_index in elasticsearch.yml to allow the commercial
# features to create the following indices:
#
if $AUTOMATIC_CREATION_SYSTEM_INDICES ; then
   cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# Additional User Settings:
#
#    action.auto_create_index
#
# Enable automatic creation of system indices
#
# Some commercial features automatically create indices within Elasticsearch. By default,
# Elasticsearch is configured to allow automatic index creation, and no additional steps
# are required. However, if you have disabled automatic index creation in Elasticsearch,
# you must configure action.auto_create_index in elasticsearch.yml to allow the commercial
# features to create the following indices:
#
action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*
EOF
fi


# ---
# Note:
#    If running as a systemd-managed process in an LX container, the following
#    systemd parameters will be set:
#       LimitNOFILE=65535
#       LimitNPROC=4096
#       LimitAS=infinity
#       LimitFSIZE=infinity
#
# Take care that your container satisfies these values.
# ---


# ==========
# Running Elasticsearch with systemd
# ==========

# To configure Elasticsearch to start automatically when the system boots up, run
# the following commands:
#
systemctl daemon-reload
systemctl enable elasticsearch.service


# Elasticsearch can be started and stopped as follows:
#
systemctl start elasticsearch.service
systemctl stop elasticsearch.service


# If you have password-protected your Elasticsearch keystore, you will need to
# provide systemd with the keystore password using a local file and systemd
# environment variables. This local file should be protected while it exists and
# may be safely deleted once Elasticsearch is up and running.
#
KEYSTORE_PASSPHRASE_FILE=</path/to/my_pwd_file.tmp>

systemctl stop elasticsearch.service
echo "keystore_password" > $KEYSTORE_PASSPHRASE_FILE
chmod 600 $KEYSTORE_PASSPHRASE_FILE
systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=$KEYSTORE_PASSPHRASE_FILE
systemctl start elasticsearch.service
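
# Optional: check whether the service came up cleanly (read-only checks; the
# unit name matches the one used above):
#
systemctl status elasticsearch.service --no-pager
journalctl -u elasticsearch.service -n 50 --no-pager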


# ==========
# Test Elasticsearch Setup
# ==========

# The Elasticsearch service is ready to use. You can test it with the curl
# command-line utility. Run a simple GET request to verify the setup; you will
# see the Elasticsearch cluster details with the version on your screen.
#
# example output:
#
#    verdi-es:~ # curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200
#    {
#      "name" : "verdi-es",
#      "cluster_name" : "elasticsearch",
#      "cluster_uuid" : "J54WIwEqQe203nUbtgOOEA",
#      "version" : {
#        "number" : "7.2.0",
#        "build_flavor" : "oss",
#        "build_type" : "deb",
#        "build_hash" : "508c38a",
#        "build_date" : "2019-06-20T15:54:18.811730Z",
#        "build_snapshot" : false,
#        "lucene_version" : "8.0.0",
#        "minimum_wire_compatibility_version" : "6.8.0",
#        "minimum_index_compatibility_version" : "6.0.0-beta1"
#      },
#      "tagline" : "You Know, for Search"
#    }
#    verdi-es:~ #
#
curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200
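
# Optional: the cluster health API gives a quick green/yellow/red summary of
# the single-node cluster configured above:
#
curl -X GET "http://${ELASTIC_SEARCH_PUBLISH_IP}:${ELASTIC_SEARCH_PORT}/_cluster/health?pretty"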


# ==========
# Install Kibana
# ==========

KIBANA_SERVER_HOST="localhost"
KIBANA_SERVER_PORT="5601"

# Same as Elasticsearch, we will install the latest version of Kibana using the
# apt package manager from the official Elastic repository:
#
apt-get install kibana


# ---
# Adjust file '/etc/kibana/kibana.yml'
# ---

# see also: https://www.elastic.co/guide/en/kibana/8.3/settings.html

if [[ ! -f "/etc/kibana/kibana.yml.ORIG" ]] ; then
   cp -a "/etc/kibana/kibana.yml" "/etc/kibana/kibana.yml.ORIG"
fi


# Specifies the address to which the Kibana server will bind. IP addresses and host
# names are both valid values. The default is 'localhost', which usually means
# remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#
# Set server.host to '${KIBANA_SERVER_HOST}'
#
if ! grep -q -E "^\s*server.host:\s+${KIBANA_SERVER_HOST}" /etc/kibana/kibana.yml ; then
   if grep -q -E "^\s*#server.host:" /etc/kibana/kibana.yml ; then

      perl -i -n -p -e "s/^(#server.host:.*)/\1\nserver.host: ${KIBANA_SERVER_HOST}/" \
         /etc/kibana/kibana.yml

   else
      cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.host:)
#
server.host: ${KIBANA_SERVER_HOST}
EOF
   fi
fi

# Set server.port to '${KIBANA_SERVER_PORT}'
#
if ! grep -q -E "^\s*server.port:\s+${KIBANA_SERVER_PORT}" /etc/kibana/kibana.yml ; then
   if grep -q -E "^\s*#server.port:" /etc/kibana/kibana.yml ; then

      perl -i -n -p -e "s/^(#server.port:.*)/\1\nserver.port: ${KIBANA_SERVER_PORT}/" \
         /etc/kibana/kibana.yml

   else
      cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.port:)
#
server.port: ${KIBANA_SERVER_PORT}
EOF
   fi
fi


# ---
# Run Kibana with systemd
# ---

# Configure Kibana to start automatically when the system starts:
#
systemctl daemon-reload
systemctl enable kibana.service


# The Kibana service can be started and stopped as follows:
#
systemctl start kibana.service
systemctl stop kibana.service
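
# Optional: once Kibana is running, its status endpoint can be queried locally
# (assumes the server.host and server.port values configured above):
#
curl -s http://${KIBANA_SERVER_HOST}:${KIBANA_SERVER_PORT}/api/status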



# ==========
# Configure nginx webserver
# ==========

WEBSITE=verdi-elk.warenform.de

cat <<EOF > /etc/nginx/sites-available/${WEBSITE}.conf
server {
   listen 80;
   listen [::]:80 ;
   server_name ${WEBSITE};
   return 301 https://\$host\$request_uri;
}

server {
   listen 443 ssl http2;
   listen [::]:443 ssl http2;
   server_name ${WEBSITE};

   root /var/www/html;
   index index.html index.htm index.nginx-debian.html;

   # Include location directive for Let's Encrypt ACME Challenge
   #
   # Needed for (automated) updating certificate
   #
   include snippets/letsencrypt-acme-challenge.conf;

   # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
   #
   # To generate a dhparam.pem file, run in a terminal
   #    openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048
   #
   ssl_dhparam /etc/nginx/ssl/dhparam.pem;

   # Enable session resumption to improve https performance
   ssl_session_cache shared:SSL:50m;
   ssl_session_timeout 10m;
   ssl_session_tickets off;
   ssl_ecdh_curve secp384r1;

   ssl_certificate /var/lib/dehydrated/certs/${WEBSITE}/fullchain.pem;
   ssl_certificate_key /var/lib/dehydrated/certs/${WEBSITE}/privkey.pem;

   ssl_protocols TLSv1.2 TLSv1.3;

   # ECDHE better than DHE (faster)  ECDHE & DHE GCM better than CBC (attacks on AES)
   # Everything better than SHA1 (deprecated)
   #
   #ssl_ciphers 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA'
   ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;
   ssl_prefer_server_ciphers on;

   #resolver 192.168.42.129 8.8.8.8 valid=300s;
   #resolver_timeout 5s;

   add_header Strict-Transport-Security "max-age=31536000" always;
   add_header X-Frame-Options DENY;
   add_header X-Content-Type-Options nosniff;
   add_header X-XSS-Protection "1; mode=block";

   access_log  /var/log/nginx/${WEBSITE}_access.log;
   error_log  /var/log/nginx/${WEBSITE}_error.log;

   auth_basic "Authentication Required";
   auth_basic_user_file /etc/nginx/htpasswd.kibana;

   location / {
      proxy_pass http://localhost:5601;
      proxy_http_version 1.1;
      proxy_set_header Upgrade \$http_upgrade;
      proxy_set_header Connection 'upgrade';
      proxy_set_header Host \$host;
      proxy_cache_bypass \$http_upgrade;
   }
}
EOF


# - Enable site ${WEBSITE}
# -
ln -s ../sites-available/${WEBSITE}.conf /etc/nginx/sites-enabled/


# - Create a basic authentication file with the openssl command:
# -
# - user:     admin
# - password: $E%R&T/Z(U
# -
echo "admin:$(openssl passwd -apr1 '$E%R&T/Z(U')" | sudo tee -a /etc/nginx/htpasswd.kibana


# - Restart the Nginx web service
# -
systemctl restart nginx


# ==========
# Install Logstash (logstash)
# ==========

# The final step is to install Logstash using the apt package manager from
# the official Elastic repository.
#
apt-get install logstash

# Start the Logstash service and set it to start automatically on boot:
#
systemctl restart logstash
systemctl enable logstash

# ---
# Note:
#    The Logstash configuration depends on your personal preferences and the
#    plugins you will use. You can find more information about how to configure
#    Logstash here:
#
#       https://www.elastic.co/guide/en/logstash/current/configuration.html
# ---
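
# As a starting point, a minimal pipeline can be dropped into /etc/logstash/conf.d/.
# The sketch below is only an illustration (the filename '10-beats.conf', the Beats
# port 5044 and the index name are assumptions, not part of this setup); adjust it
# to the inputs and filters you actually use.
#
cat << EOF > /etc/logstash/conf.d/10-beats.conf
input {
  # Listen for events shipped by Beats agents (port is an assumption)
  beats {
    port => 5044
  }
}

output {
  # Forward events to the local Elasticsearch instance installed above
  elasticsearch {
    hosts => ["http://localhost:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
EOF
#
# Restart Logstash afterwards so the new pipeline is picked up:
#
#    systemctl restart logstash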



631  install_elasticsearch-8.sh  Normal file

@@ -0,0 +1,631 @@

# ==========
# Install ElasticSearch
# ==========

# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/deb.html

# ---
# NOTE:
#    unlike the 7.x OSS setup above, this installs the default distribution of
#    Elasticsearch 8.x from the official Elastic repository (security features
#    are included and enabled by default)
# ---

# Install 'apt-transport-https' package
#
apt-get install apt-transport-https

# Import the Elasticsearch PGP Key
#
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg

# Save the repository definition to '/etc/apt/sources.list.d/elastic-8.x.list'
#
echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/8.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-8.x.list

# Install the Elasticsearch Debian package with:
#
# !! NOTE !!
#
#    When installing Elasticsearch, security features are enabled and configured by default.
#    When you install Elasticsearch, the following security configuration occurs automatically:
#
#       - Authentication and authorization are enabled, and a password is generated for the
#         elastic built-in superuser.
#
#       - Certificates and keys for TLS are generated for the transport and HTTP layer, and
#         TLS is enabled and configured with these keys and certificates.
#
#    The password and the certificates and keys are output to your terminal.
#    =====================================================================
#
apt-get update
if [[ -f "/root/apt-get_install_elasticsearch.out" ]] ; then
   cp -a "/root/apt-get_install_elasticsearch.out" "/root/apt-get_install_elasticsearch.out.$(date +%Y-%m-%d-%H%M)"
fi
apt-get install elasticsearch | tee /root/apt-get_install_elasticsearch.out
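
# The generated password for the 'elastic' superuser is printed during installation
# and is therefore contained in the captured output above. A quick way to pull it
# out again later (the exact wording of the installer output may differ between
# versions, so treat this grep pattern as an assumption):
#
grep -i "password" /root/apt-get_install_elasticsearch.out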



# ==========
# Configure ElasticSearch
# ==========

# By default Elasticsearch is only accessible on localhost.
#
# Leave empty for accepting the default
#
ELASTIC_SEARCH_PUBLISH_IP="0.0.0.0"
ELASTIC_SEARCH_PUBLISH_IP=""

# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
# Leave empty for accepting the default
#
ELASTIC_SEARCH_PORT=9200

NODE_NAME="verdi-es"
NODE_NAME="oolm-shop-es"

DISCOVERY_TYPE='single-node'

ELASTIC_SEARCH_CONFIG_FILE="/etc/elasticsearch/elasticsearch.yml"
ELASTIC_SEARCH_CONFIG_FILE="/root/elasticsearch.yml"


# ---
# Set System properties
# ---

# Set sysctl value 'vm.max_map_count' to '524288'
#
# Add to /etc/sysctl.d/60-elasticsearch.conf:
#
#    vm.max_map_count = 524288
#
# Note:
#    if installing ElasticSearch into an LX container, do this on
#    the host system
#
cat << EOF >> /etc/sysctl.d/60-elasticsearch.conf

# Needed by ElasticSearch Installation on virtual guest
# systems (LX-Containers)
#
# The error message there was:
#    max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
#
vm.max_map_count = 524288
EOF


# Activate the sysctl settings from file '/etc/sysctl.d/60-elasticsearch.conf'
#
sysctl -p /etc/sysctl.d/60-elasticsearch.conf
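
# To confirm the new value is active (a quick sanity check, not part of the
# original notes), query the kernel parameter directly:
#
sysctl vm.max_map_count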


# ---
# Adjust file '${ELASTIC_SEARCH_CONFIG_FILE}'
# ---

if [[ ! -f "${ELASTIC_SEARCH_CONFIG_FILE}.ORIG" ]] ; then
   cp -a "${ELASTIC_SEARCH_CONFIG_FILE}" "${ELASTIC_SEARCH_CONFIG_FILE}.ORIG"
fi

# Set network.host to '$ELASTIC_SEARCH_PUBLISH_IP'
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#
# Replace only the first occurrence of a match
#
if [[ -n "$ELASTIC_SEARCH_PUBLISH_IP" ]]; then
   if ! grep -q -E "^\s*network.host:\s+${ELASTIC_SEARCH_PUBLISH_IP}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#network.host:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#network.host:.*\)/ s//\1\nnetwork.host: ${ELASTIC_SEARCH_PUBLISH_IP}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# network.host
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#
network.host: ${ELASTIC_SEARCH_PUBLISH_IP}
EOF
      fi
   fi
fi

# Set http.port to '$ELASTIC_SEARCH_PORT'
#
# Replace only the first occurrence of a match
#
if [[ -n "$ELASTIC_SEARCH_PORT" ]]; then
   if ! grep -q -E "^\s*http.port:\s+${ELASTIC_SEARCH_PORT}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#http.port:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#http.port:.*\)/ s//\1\nhttp.port: ${ELASTIC_SEARCH_PORT}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# http.port
#
# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
http.port: $ELASTIC_SEARCH_PORT
EOF
      fi
   fi
fi

# Set values
#    node.name: to '$NODE_NAME'
#    cluster.initial_master_nodes: to '["${NODE_NAME}"]'
#
if [[ -n "$NODE_NAME" ]]; then
   if ! grep -q -E "^\s*node.name:\s+${NODE_NAME}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#node.name:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#node.name.*\)/ s//\1\nnode.name: ${NODE_NAME}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# node.name
#
# Use a descriptive name for the node:
#
node.name: ${NODE_NAME}
cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
      fi
   fi


   if ! grep -q -E "^\s*cluster.initial_master_nodes:\s+\[\"${NODE_NAME}\"\]" \
      ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#cluster.initial_master_nodes:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then

         sed -i "0,/^\(#cluster.initial_master_nodes.*\)/ s//\1\ncluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# cluster.initial_master_nodes:
#
cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
      fi
   fi
fi

if [[ -n "$DISCOVERY_TYPE" ]]; then
   if ! grep -q -E "^\s*discovery.type:\s+${DISCOVERY_TYPE}" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
      if grep -q -E "^\s*#discovery.type:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
         sed -i "0,/^\(#discovery.type.*\)/ s//\1\ndiscovery.type: ${DISCOVERY_TYPE}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}

      elif grep -q -E "^\s*discovery.type:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
         sed -i "0,/^\(discovery.type.*\)/ s//#\1\ndiscovery.type: ${DISCOVERY_TYPE}/" \
            ${ELASTIC_SEARCH_CONFIG_FILE}
      else
         cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# discovery.type
#
# (Static) Specifies whether Elasticsearch should form a multiple-node cluster.
#
# Defaults to multi-node, which means that Elasticsearch discovers other nodes
# when forming a cluster and allows other nodes to join the cluster later.
#
# If set to single-node, Elasticsearch forms a single-node cluster and suppresses
# the timeout set by cluster.publish.timeout. For more information about when
# you might use this setting, see Single-node discovery:
#
#    https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#initial_master_nodes
#
discovery.type: ${DISCOVERY_TYPE}

EOF
      fi
   fi
fi


# ---
# Note:
#    If running as a systemd-managed process in an LX container, the following
#    systemd parameters will be set:
#       LimitNOFILE=65535
#       LimitNPROC=4096
#       LimitAS=infinity
#       LimitFSIZE=infinity
#
#    Take care that your container satisfies these values.
# ---
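
# A quick way to check which limits the running unit actually got (purely a
# verification step added here, not part of the original notes):
#
systemctl show elasticsearch.service --property=LimitNOFILE,LimitNPROC,LimitAS,LimitFSIZE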



# ==========
# Reconfigure a node to join an existing cluster
# ==========

# When you install Elasticsearch, the installation process configures
# a single-node cluster by default. If you want a node to join an existing
# cluster instead, generate an enrollment token on an existing node before
# you start the new node for the first time.
#
# 1. On any node in your existing cluster, generate a node enrollment token:
#
/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node


# 2. Copy the enrollment token, which is output to your terminal.
#
# 3. On your new Elasticsearch node, pass the enrollment token as a parameter
#    to the elasticsearch-reconfigure-node tool:
#
/usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token <enrollment-token>


# 4. Start your new node using systemd.



# ==========
# Enable automatic creation of system indices
# ==========

# Some commercial features automatically create indices within Elasticsearch.
# By default, Elasticsearch is configured to allow automatic index creation,
# and no additional steps are required. However, if you have disabled automatic
# index creation in Elasticsearch, you must configure action.auto_create_index
# in elasticsearch.yml to allow the commercial features to create the following
# indices:
#
action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*
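
# The line above is the YAML setting itself; inside a shell script it has to be
# written into the configuration file rather than executed. A sketch in the style
# of the appends used above (assuming the setting is not already present):
#
if ! grep -q -E "^\s*action.auto_create_index:" ${ELASTIC_SEARCH_CONFIG_FILE} ; then
   cat << EOF >> ${ELASTIC_SEARCH_CONFIG_FILE}

# ----------
# Additional User Setting:
# ----------

# action.auto_create_index
#
action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*
EOF
fi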



# ==========
# Running Elasticsearch with systemd
# ==========

# To configure Elasticsearch to start automatically when the system boots up, run
# the following commands:
#
systemctl daemon-reload
systemctl enable elasticsearch.service


# Elasticsearch can be started and stopped as follows:
#
systemctl start elasticsearch.service
systemctl stop elasticsearch.service


# !! NOTE !!
#
# If you have password-protected your Elasticsearch keystore, you will need to provide
# systemd with the keystore password using a local file and systemd environment
# variables. This local file should be protected while it exists and may be safely
# deleted once Elasticsearch is up and running.
#
KEYSTORE_PASSWORD="<keystore_password>"
KEYSTORE_PASSWORD_FILE="</path/to/my_pwd_file.tmp>"

echo "${KEYSTORE_PASSWORD}" > "${KEYSTORE_PASSWORD_FILE}"
chmod 600 "${KEYSTORE_PASSWORD_FILE}"
systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=${KEYSTORE_PASSWORD_FILE}
systemctl start elasticsearch.service
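
# Once Elasticsearch is up and running, the temporary password file is no longer
# needed and can be cleaned up again (a follow-up sketch, not part of the original
# notes):
#
systemctl unset-environment ES_KEYSTORE_PASSPHRASE_FILE
rm -f "${KEYSTORE_PASSWORD_FILE}"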




# ==========
# Test Elasticsearch Setup
# ==========

# The Elasticsearch service is ready to use. You can test it using the curl command
# line utility. Run a simple GET request with curl to verify the setup. You will see
# the Elasticsearch cluster details and the version on your screen.
#
# example output (here from an earlier 7.x installation; an 8.x node reports its
# own version accordingly):
#
#    verdi-es:~ # curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200
#    {
#      "name" : "verdi-es",
#      "cluster_name" : "elasticsearch",
#      "cluster_uuid" : "J54WIwEqQe203nUbtgOOEA",
#      "version" : {
#        "number" : "7.2.0",
#        "build_flavor" : "oss",
#        "build_type" : "deb",
#        "build_hash" : "508c38a",
#        "build_date" : "2019-06-20T15:54:18.811730Z",
#        "build_snapshot" : false,
#        "lucene_version" : "8.0.0",
#        "minimum_wire_compatibility_version" : "6.8.0",
#        "minimum_index_compatibility_version" : "6.0.0-beta1"
#      },
#      "tagline" : "You Know, for Search"
#    }
#    verdi-es:~ #
#
curl --cacert /etc/elasticsearch/certs/http_ca.crt -u elastic https://localhost:${ELASTIC_SEARCH_PORT}
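
# The curl call above prompts for the password of the 'elastic' user that was
# generated during installation. If that password got lost, it can be reset with
# the bundled tool (sketch; run on the Elasticsearch node itself):
#
/usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic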




# ==========
# Install Kibana (kibana)
# ==========

KIBANA_SERVER_HOST="localhost"
KIBANA_SERVER_PORT="5601"

# see: https://www.elastic.co/guide/en/kibana/current/deb.html

# As with Elasticsearch, we will install the latest version of Kibana using the
# apt package manager from the official Elastic repository:
#
apt-get install kibana



# ---
# Adjust file '/etc/kibana/kibana.yml'
# ---

# see also: https://www.elastic.co/guide/en/kibana/8.3/settings.html

if [[ ! -f "/etc/kibana/kibana.yml.ORIG" ]] ; then
   cp -a "/etc/kibana/kibana.yml" "/etc/kibana/kibana.yml.ORIG"
fi



# Specifies the address to which the Kibana server will bind. IP addresses and host
# names are both valid values. The default is 'localhost', which usually means
# remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#
# Set server.host to 'localhost'
#
if ! grep -q -E "^\s*server.host:\s+${KIBANA_SERVER_HOST}" /etc/kibana/kibana.yml ; then
	if grep -q -E "^\s*#server.host:" /etc/kibana/kibana.yml ; then

		perl -i -n -p -e "s/^(#server.host:.*)/\1\nserver.host: ${KIBANA_SERVER_HOST}/" \
		   /etc/kibana/kibana.yml

	else
	   cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.host:)
#
server.host: ${KIBANA_SERVER_HOST}
EOF
	fi
fi

# Set server.port to '5601'
#
if ! grep -q -E "^\s*server.port:\s+${KIBANA_SERVER_PORT}" /etc/kibana/kibana.yml ; then
	if grep -q -E "^\s*#server.port:" /etc/kibana/kibana.yml ; then

		perl -i -n -p -e "s/^(#server.port:.*)/\1\nserver.port: ${KIBANA_SERVER_PORT}/" \
		   /etc/kibana/kibana.yml

	else
	   cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.port:)
#
server.port: ${KIBANA_SERVER_PORT}
EOF
	fi
fi


# ---
# Start Elasticsearch and generate an enrollment token for Kibana
# ---

# When you start Elasticsearch for the first time, the following security configuration
# occurs automatically:
#
#    - Authentication and authorization are enabled, and a password is generated for
#      the elastic built-in superuser.
#
#    - Certificates and keys for TLS are generated for the transport and HTTP layer,
#      and TLS is enabled and configured with these keys and certificates.
#
# The password and the certificates and keys are output to your terminal.
#
# You can then generate an enrollment token for Kibana with the
# 'elasticsearch-create-enrollment-token' tool:
#
/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana
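
# On the Kibana host, the token can then be used to enroll Kibana with the secured
# Elasticsearch node (sketch; '<enrollment-token>' is a placeholder for the token
# generated above):
#
/usr/share/kibana/bin/kibana-setup --enrollment-token <enrollment-token>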


# ---
# Run Kibana with systemd
# ---

# Configure Kibana to start automatically when the system starts:
#
systemctl daemon-reload
systemctl enable kibana.service


# The Kibana service can be started and stopped as follows:
#
systemctl start kibana.service
systemctl stop kibana.service



# ==========
# Configure Nginx Reverse Proxy for Kibana
# ==========

# see: https://phoenixnap.com/kb/kibana-nginx-proxy

WEBSITE=oolm-shop-elk.oopen.de

cat <<EOF > /etc/nginx/sites-available/${WEBSITE}.conf
server {
   listen 80;
   listen [::]:80 ;
   server_name ${WEBSITE};
   return 301 https://\$host\$request_uri;
}

server {
   listen 443 ssl http2;
   listen [::]:443 ssl http2;
   server_name ${WEBSITE};

   root /var/www/html;
   index index.html index.htm index.nginx-debian.html;

   # Include location directive for Let's Encrypt ACME Challenge
   #
   # Needed for (automated) updating certificate
   #
   include snippets/letsencrypt-acme-challenge.conf;

   # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
   #
   # To generate a dhparam.pem file, run in a terminal
   #    openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048
   #
   ssl_dhparam /etc/nginx/ssl/dhparam.pem;

   # Enable session resumption to improve https performance
   ssl_session_cache shared:SSL:50m;
   ssl_session_timeout 10m;
   ssl_session_tickets off;
   ssl_ecdh_curve secp384r1;

   ssl_certificate /var/lib/dehydrated/certs/${WEBSITE}/fullchain.pem;
   ssl_certificate_key /var/lib/dehydrated/certs/${WEBSITE}/privkey.pem;

   ssl_protocols TLSv1.2 TLSv1.3;

   # ECDHE better than DHE (faster)  ECDHE & DHE GCM better than CBC (attacks on AES)
   # Everything better than SHA1 (deprecated)
   #
   #ssl_ciphers 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA'
   ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;
   ssl_prefer_server_ciphers on;

   #resolver 192.168.42.129 8.8.8.8 valid=300s;
   #resolver_timeout 5s;

   add_header Strict-Transport-Security "max-age=31536000" always;
   add_header X-Frame-Options DENY;
   add_header X-Content-Type-Options nosniff;
   add_header X-XSS-Protection "1; mode=block";

   access_log  /var/log/nginx/${WEBSITE}_access.log;
   error_log  /var/log/nginx/${WEBSITE}_error.log;

   auth_basic "Authentication Required";
   auth_basic_user_file /etc/nginx/htpasswd.kibana;

   location / {
      proxy_pass http://localhost:5601;
      proxy_http_version 1.1;
      proxy_set_header Upgrade \$http_upgrade;
      proxy_set_header Connection 'upgrade';
      proxy_set_header Host \$host;
      proxy_cache_bypass \$http_upgrade;
   }
}
EOF


# - Enable site ${WEBSITE}
# -
ln -s ../sites-available/${WEBSITE}.conf /etc/nginx/sites-enabled/


# - Create a basic authentication file with the openssl command:
# -
# - user:     admin
# - password: $E%R&T/Z(U
# -
echo "admin:$(openssl passwd -apr1 '$E%R&T/Z(U')" | sudo tee -a /etc/nginx/htpasswd.kibana


# - Restart the Nginx web service
# -
systemctl restart nginx


# ==========
# Install Logstash (logstash)
# ==========

# The final step is to install Logstash using the apt package manager from
# the official Elastic repository.
#
apt-get install logstash

# Start the Logstash service and set it to start automatically on boot:
#
systemctl restart logstash
systemctl enable logstash

# ---
# Note:
#    The Logstash configuration depends on your personal preferences and the
#    plugins you will use. You can find more information about how to configure
#    Logstash here:
#
#       https://www.elastic.co/guide/en/logstash/current/configuration.html
# ---
			
		||||
		Reference in New Issue
	
	Block a user