#!/usr/bin/env bash
#
# Provision an ELK stack on Debian: Elasticsearch 7.x, Kibana, Logstash,
# plus an nginx reverse proxy (basic-auth protected) in front of Kibana.
# Must run as root. Requires GNU sed (uses the '0,/re/' address form).

# ==========
# Install ElasticSearch
# ==========
# ---
# NOTE:
# we will install a package of ElasticSearch which contains only features
# that are available under the Apache 2.0 license
# ---

# Install 'apt-transport-https' package (needed on Debian before adding
# the https apt repository below)
#
apt-get install apt-transport-https

# Import the Elasticsearch PGP Key
#
# Elastic signs all packages with the Elasticsearch Signing Key
# (PGP key D88E42B4, available from https://pgp.mit.edu) with fingerprint:
#
#   4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4
#
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg

# Save the repository definition to '/etc/apt/sources.list.d/elastic-7.x.list'
#
echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-7.x.list

# Install the Elasticsearch Debian package:
#
apt-get update
apt-get install elasticsearch

# ==========
# Configure ElasticSearch
# ==========

# By default Elasticsearch is only accessible on localhost.
#
# Leave empty for accepting the default
# ELASTIC_SEARCH_PUBLISH_IP="0.0.0.0"
ELASTIC_SEARCH_PUBLISH_IP="127.0.0.1"

# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
# Leave empty for accepting the default
ELASTIC_SEARCH_PORT=9200

# Use a descriptive name for the node
# NODE_NAME="verdi-es"
NODE_NAME="oolm-shop-es"

DISCOVERY_TYPE='single-node'

AUTOMATIC_CREATION_SYSTEM_INDICES=false

# NOTE(review): the second assignment overrides the real package config path
# with /root/elasticsearch.yml — looks like a test/debug leftover; confirm
# which file is really meant before running in production.
ELASTIC_SEARCH_CONFIG_FILE="/etc/elasticsearch/elasticsearch.yml"
ELASTIC_SEARCH_CONFIG_FILE="/root/elasticsearch.yml"

# ---
# Set System properties
# ---

# Set sysctl value 'vm.max_map_count' to '524288'
#
# Note:
# if installing ElasticSearch into a LX-Container, do this at
# the host system
#
cat << EOF >> /etc/sysctl.d/60-elasticsearch.conf
# Needed by ElasticSearch Installation on virtual guest
# systems (LX-Containers)
#
# The error message there was:
# max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
#
vm.max_map_count = 524288
EOF

# Activate the sysctl settings.
#
# The value was written to /etc/sysctl.d/, which a bare 'sysctl -p' does NOT
# read (it only reads /etc/sysctl.conf) — load all configuration directories:
#
sysctl --system

# ---
# Adjust file ${ELASTIC_SEARCH_CONFIG_FILE}
# ---

# Keep a pristine copy of the distribution config for reference/rollback.
if [[ ! -f "${ELASTIC_SEARCH_CONFIG_FILE}.ORIG" ]] ; then
   cp -a "${ELASTIC_SEARCH_CONFIG_FILE}" "${ELASTIC_SEARCH_CONFIG_FILE}.ORIG"
fi

# Set network.host to '$ELASTIC_SEARCH_PUBLISH_IP'
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network.
#
# Replace only the first occurrence of the match (GNU sed '0,/re/' range).
#
if [[ -n "$ELASTIC_SEARCH_PUBLISH_IP" ]]; then
   if ! grep -q -E "^\s*network\.host:\s+${ELASTIC_SEARCH_PUBLISH_IP}" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
      if grep -q -E "^\s*#network\.host:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         # Insert the live setting directly below the commented template line.
         sed -i "0,/^\(#network.host:.*\)/ s//\1\nnetwork.host: ${ELASTIC_SEARCH_PUBLISH_IP}/" \
             "${ELASTIC_SEARCH_CONFIG_FILE}"
      else
         cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# ----------
# Additional User Setting:
# ----------

# network.host
#
# address here to expose this node on the network:
#
network.host: $ELASTIC_SEARCH_PUBLISH_IP
EOF
      fi
   fi
fi

# Set http.port to '$ELASTIC_SEARCH_PORT'
#
# Replace only the first occurrence of the match.
#
if [[ -n "$ELASTIC_SEARCH_PORT" ]]; then
   if ! grep -q -E "^\s*http\.port:\s+${ELASTIC_SEARCH_PORT}" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
      if grep -q -E "^\s*#http\.port:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         sed -i "0,/^\(#http.port:.*\)/ s//\1\nhttp.port: ${ELASTIC_SEARCH_PORT}/" \
             "${ELASTIC_SEARCH_CONFIG_FILE}"
      else
         cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# ----------
# Additional User Setting:
# ----------

# http.port
#
# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
http.port: $ELASTIC_SEARCH_PORT
EOF
      fi
   fi
fi

# Set values
#   node.name: to '$NODE_NAME'
#
#   if discovery.type IS NOT set to 'single-node'
#   cluster.initial_master_nodes: to '["${NODE_NAME}"]'
#
if [[ -n "$NODE_NAME" ]]; then

   if ! grep -q -E "^\s*node\.name:\s+${NODE_NAME}" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
      if grep -q -E "^\s*#node\.name:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         sed -i "0,/^\(#node.name.*\)/ s//\1\nnode.name: ${NODE_NAME}/" \
             "${ELASTIC_SEARCH_CONFIG_FILE}"
      else
         cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# ----------
# Additional User Setting:
# ----------

# node.name
#
# Use a descriptive name for the node:
#
node.name: ${NODE_NAME}
EOF
         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"
cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         else
            cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"
# setting [cluster.initial_master_nodes] is not allowed when [discovery.type] is set to [single-node]
#cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         fi
      fi
   fi

   # Idempotency check for cluster.initial_master_nodes.
   #
   # Bug fix: the previous pattern '\s+${NODE_NAME}' could never match the
   # line actually inserted ('cluster.initial_master_nodes: ["${NODE_NAME}"]')
   # because of the bracket/quote, so every re-run appended a duplicate.
   #
   if ! grep -q -E "^\s*cluster\.initial_master_nodes:.*${NODE_NAME}" \
        "${ELASTIC_SEARCH_CONFIG_FILE}" ; then

      if grep -q -E "^\s*#cluster\.initial_master_nodes:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            sed -i "0,/^\(#cluster.initial_master_nodes.*\)/ s//\1\ncluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
                "${ELASTIC_SEARCH_CONFIG_FILE}"
         else
            # single-node: keep the setting present, but commented out
            sed -i "0,/^\(#cluster.initial_master_nodes.*\)/ s//\1\n#cluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
                "${ELASTIC_SEARCH_CONFIG_FILE}"
         fi
      elif grep -q -E "^\s*cluster\.initial_master_nodes:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            # comment out the old active value and insert ours below it
            sed -i "0,/^\(cluster.initial_master_nodes.*\)/ s//#\1\ncluster.initial_master_nodes: [\"${NODE_NAME}\"]/" \
                "${ELASTIC_SEARCH_CONFIG_FILE}"
         else
            # single-node: an active cluster.initial_master_nodes is not
            # allowed — just disable it
            sed -i "0,/^\(cluster.initial_master_nodes.*\)/ s//#\1/" "${ELASTIC_SEARCH_CONFIG_FILE}"
         fi
      else
         cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# ----------
# Additional User Setting:
# ----------

# cluster.initial_master_nodes:
#
EOF
         if [[ -n "${DISCOVERY_TYPE}" ]] && [[ "single-node" != "${DISCOVERY_TYPE}" ]] ; then
            cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"
cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         else
            cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"
# setting [cluster.initial_master_nodes] is not allowed when [discovery.type] is set to [single-node]
#cluster.initial_master_nodes: ["${NODE_NAME}"]
EOF
         fi
      fi
   fi
fi

# Set value discovery.type
#
if [[ -n "$DISCOVERY_TYPE" ]]; then
   if ! grep -q -E "^\s*discovery\.type:\s+${DISCOVERY_TYPE}" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
      if grep -q -E "^\s*#discovery\.type:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         sed -i "0,/^\(#discovery.type.*\)/ s//\1\ndiscovery.type: ${DISCOVERY_TYPE}/" \
             "${ELASTIC_SEARCH_CONFIG_FILE}"
      elif grep -q -E "^\s*discovery\.type:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
         # comment out the old value and insert ours below it
         sed -i "0,/^\(discovery.type.*\)/ s//#\1\ndiscovery.type: ${DISCOVERY_TYPE}/" \
             "${ELASTIC_SEARCH_CONFIG_FILE}"
      else
         cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# ----------
# Additional User Setting:
# ----------

# discovery.type
#
# (Static) Specifies whether Elasticsearch should form a multiple-node cluster.
#
# Defaults to multi-node, which means that Elasticsearch discovers other nodes
# when forming a cluster and allows other nodes to join the cluster later.
#
# If set to single-node, Elasticsearch forms a single-node cluster and suppresses
# the timeout set by cluster.publish.timeout. For more information about when
# you might use this setting, see Single-node discovery:
#
# https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#initial_master_nodes
#
#
# (Statisch) Gibt an, ob Elasticsearch einen Mehrknoten-Cluster bilden soll.
#
# Der Standardwert ist Multi-Node, was bedeutet, dass Elasticsearch andere Knoten
# bei der Bildung eines Clusters entdeckt und anderen Knoten erlaubt, dem Cluster
# später beizutreten.
#
# Wenn auf single-node gesetzt, bildet Elasticsearch einen Single-Node-Cluster und
# unterdrückt das Timeout, das durch cluster.publish.timeout gesetzt wird. Für
# weitere Informationen darüber, wann diese Einstellung verwendet werden kann,
# siehe Single-node discovery:
#
# https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#initial_master_nodes
#
discovery.type: ${DISCOVERY_TYPE}
EOF
      fi
   fi
fi

# The default behavior of disabling security on basic licenses is deprecated.
# In a later version of Elasticsearch, the value of [xpack.security.enabled]
# will default to "true", regardless of the license level.
#
# See
# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/security-minimal-setup.html
# to enable security, or explicitly disable security by setting
# [xpack.security.enabled] to false in elasticsearch.yml
#
# (grep guard makes the append idempotent, like the sections above)
#
if ! grep -q -E "^\s*xpack\.security\.enabled:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
   cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# See
# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/security-minimal-setup.html
# to enable security.
#
xpack.security.enabled: false
EOF
fi

# Disable geoip usage
#
# Maybe or better hopefully avoid error
#
#   exception during geoip databases update..
#
# see also: https://discuss.elastic.co/t/how-to-disable-geoip-usage-in-7-14-0/281076/8
#
if ! grep -q -E "^\s*ingest\.geoip\.downloader\.enabled:" "${ELASTIC_SEARCH_CONFIG_FILE}" ; then
   cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# Disable geoip usage
#
# exception during geoip databases update..
#
# see also: https://discuss.elastic.co/t/how-to-disable-geoip-usage-in-7-14-0/281076/8
#
ingest.geoip.downloader.enabled: false
EOF
fi

# Enable automatic creation of system indices
#
# Some commercial features automatically create indices within Elasticsearch. By default,
# Elasticsearch is configured to allow automatic index creation, and no additional steps
# are required. However, if you have disabled automatic index creation in Elasticsearch,
# you must configure action.auto_create_index in elasticsearch.yml to allow the commercial
# features to create the following indices:
#
# (string comparison instead of executing the variable's value as a command)
#
if [[ "${AUTOMATIC_CREATION_SYSTEM_INDICES}" == "true" ]] ; then
   cat << EOF >> "${ELASTIC_SEARCH_CONFIG_FILE}"

# Additional User Settings:
#
# action.auto_create_index
#
# Enable automatic creation of system indices
#
# Some commercial features automatically create indices within Elasticsearch. By default,
# Elasticsearch is configured to allow automatic index creation, and no additional steps
# are required. However, if you have disabled automatic index creation in Elasticsearch,
# you must configure action.auto_create_index in elasticsearch.yml to allow the commercial
# features to create the following indices:
#
action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*
EOF
fi

# ---
# Note:
# If running as systemd managed process in a LX-Container, the following
# systemd parameters will be set:
#    LimitNOFILE=65535
#    LimitNPROC=4096
#    LimitAS=infinity
#    LimitFSIZE=infinity
#
# Take care, your container satisfies this values.
# ---

# ==========
# Running Elasticsearch with systemd
# ==========

# To configure Elasticsearch to start automatically when the system boots up,
# run the following commands:
#
systemctl daemon-reload
systemctl enable elasticsearch.service

# Elasticsearch can be started and stopped as follows:
#
#    systemctl start elasticsearch.service
#    systemctl stop elasticsearch.service
#
systemctl start elasticsearch.service

# If you have password-protected your Elasticsearch keystore, you will need to
# provide systemd with the keystore password using a local file and systemd
# environment variables. This local file should be protected while it exists and
# may be safely deleted once Elasticsearch is up and running.
#
# Leave KEYSTORE_PASSPHRASE_FILE empty to skip this step; running the commands
# with an empty value would fail with an 'ambiguous redirect'.
#
# NOTE(review): replace "keystore_password" with the real passphrase — do not
# commit secrets into this script.
#
KEYSTORE_PASSPHRASE_FILE=""
if [[ -n "${KEYSTORE_PASSPHRASE_FILE}" ]] ; then
   systemctl stop elasticsearch.service
   echo "keystore_password" > "${KEYSTORE_PASSPHRASE_FILE}"
   chmod 600 "${KEYSTORE_PASSPHRASE_FILE}"
   systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE="${KEYSTORE_PASSPHRASE_FILE}"
   systemctl start elasticsearch.service
fi

# ==========
# Test Elasticsearch Setup
# ==========

# The Elasticsearch service is ready to use. You can test it using the curl
# command line utility. Run the simple GET command using curl to verify the
# setup. You will see the Elasticsearch cluster details with the version on
# your screen.
#
# example output:
#
# verdi-es:~ # curl -X GET http://${ELASTIC_SEARCH_PUBLISH_IP}:9200
# {
#   "name" : "verdi-es",
#   "cluster_name" : "elasticsearch",
#   "cluster_uuid" : "J54WIwEqQe203nUbtgOOEA",
#   "version" : {
#     "number" : "7.2.0",
#     "build_flavor" : "oss",
#     "build_type" : "deb",
#     "build_hash" : "508c38a",
#     "build_date" : "2019-06-20T15:54:18.811730Z",
#     "build_snapshot" : false,
#     "lucene_version" : "8.0.0",
#     "minimum_wire_compatibility_version" : "6.8.0",
#     "minimum_index_compatibility_version" : "6.0.0-beta1"
#   },
#   "tagline" : "You Know, for Search"
# }
# verdi-es:~ #
#
curl -X GET "http://${ELASTIC_SEARCH_PUBLISH_IP}:9200"

# ==========
# Install Kibana
# ==========

KIBANA_SERVER_HOST="localhost"
KIBANA_SERVER_PORT="5601"

# Same as Elasticsearch, we will install the latest version of Kibana using the
# apt package manager from the official Elastic repository:
#
apt-get install kibana

# ---
# Adjust file '/etc/kibana/kibana.yml'
# ---
# see also: https://www.elastic.co/guide/en/kibana/8.3/settings.html

if [[ ! -f "/etc/kibana/kibana.yml.ORIG" ]] ; then
   cp -a "/etc/kibana/kibana.yml" "/etc/kibana/kibana.yml.ORIG"
fi

# Specifies the address to which the Kibana server will bind. IP addresses and
# host names are both valid values. The default is 'localhost', which usually
# means remote machines will not be able to connect. To allow connections from
# remote users, set this parameter to a non-loopback address.
#
# Set server.host to '${KIBANA_SERVER_HOST}'
#
# (sed is used here instead of perl for consistency with the Elasticsearch
#  sections above)
#
if ! grep -q -E "^\s*server\.host:\s+${KIBANA_SERVER_HOST}" /etc/kibana/kibana.yml ; then
   if grep -q -E "^\s*#server\.host:" /etc/kibana/kibana.yml ; then
      sed -i "0,/^\(#server.host:.*\)/ s//\1\nserver.host: ${KIBANA_SERVER_HOST}/" \
          /etc/kibana/kibana.yml
   else
      cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.host:)
#
server.host: ${KIBANA_SERVER_HOST}
EOF
   fi
fi

# Set server.port to '${KIBANA_SERVER_PORT}'
#
if ! grep -q -E "^\s*server\.port:\s+${KIBANA_SERVER_PORT}" /etc/kibana/kibana.yml ; then
   if grep -q -E "^\s*#server\.port:" /etc/kibana/kibana.yml ; then
      sed -i "0,/^\(#server.port:.*\)/ s//\1\nserver.port: ${KIBANA_SERVER_PORT}/" \
          /etc/kibana/kibana.yml
   else
      # bug fix: previously wrote ${KIBANA_SERVER_HOST} (the hostname) as the
      # port value
      cat << EOF >> /etc/kibana/kibana.yml

# Additional User Setting (server.port:)
#
server.port: ${KIBANA_SERVER_PORT}
EOF
   fi
fi

# ---
# Run Kibana with systemd
# ---

# Configure Kibana to start automatically when the system starts:
#
systemctl daemon-reload
systemctl enable kibana.service

# Kibana service can be started and stopped as follows:
#
#    systemctl start kibana.service
#    systemctl stop kibana.service
#
# (bug fix: 'systemctl istop' was a typo and not a valid command)
#
systemctl start kibana.service

# ==========
# Configure nginx webserver
# ==========

WEBSITE=verdi-elk.warenform.de

# Write the vhost configuration.
#
# Bug fix: this was 'cat < file', which tried to READ the (nonexistent) config
# and then executed the configuration text as shell commands. It must be a
# here-doc writing the file. The delimiter is unquoted on purpose: ${WEBSITE}
# is expanded, while nginx runtime variables are escaped as \$host etc.
#
cat << EOF > /etc/nginx/sites-available/${WEBSITE}.conf
server {
    listen 80;
    listen [::]:80 ;
    server_name ${WEBSITE};
    return 301 https://\$host\$request_uri;
}
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name ${WEBSITE};

    root /var/www/html;
    index index.html index.htm index.nginx-debian.html;

    # Include location directive for Let's Encrypt ACME Challenge
    #
    # Needed for (automated) updating certificate
    #
    include snippets/letsencrypt-acme-challenge.conf;

    # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
    #
    # To generate a dhparam.pem file, run in a terminal
    #    openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048
    #
    ssl_dhparam /etc/nginx/ssl/dhparam.pem;

    # Enable session resumption to improve https performance
    ssl_session_cache shared:SSL:50m;
    ssl_session_timeout 10m;
    ssl_session_tickets off;

    ssl_ecdh_curve secp384r1;

    ssl_certificate /var/lib/dehydrated/certs/${WEBSITE}/fullchain.pem;
    ssl_certificate_key /var/lib/dehydrated/certs/${WEBSITE}/privkey.pem;

    ssl_protocols TLSv1.2 TLSv1.3;

    # ECDHE better than DHE (faster) ECDHE & DHE GCM better than CBC (attacks on AES)
    # Everything better than SHA1 (deprecated)
    #
    #ssl_ciphers 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA'
    ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384;
    ssl_prefer_server_ciphers on;

    #resolver 192.168.42.129 8.8.8.8 valid=300s;
    #resolver_timeout 5s;

    add_header Strict-Transport-Security "max-age=31536000 always";
    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection "1; mode=block";

    access_log /var/log/nginx/${WEBSITE}_access.log;
    error_log /var/log/nginx/${WEBSITE}_error.log;

    auth_basic "Authentication Required";
    auth_basic_user_file /etc/nginx/htpasswd.kibana;

    location / {
        proxy_pass http://localhost:5601;
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host \$host;
        proxy_cache_bypass \$http_upgrade;
    }
}
EOF

# - Enable site ${WEBSITE}
# -
ln -s ../sites-available/${WEBSITE}.conf /etc/nginx/sites-enabled/

# - Create a basic authentication file with the openssl command:
# -
# -    user:     admin
# -    password: $E%R&T/Z(U
# -
# - bug fix: the file was written to htpasswd.kiba, but the nginx vhost above
# - reads auth_basic_user_file /etc/nginx/htpasswd.kibana
# -
# - NOTE(review): hard-coded credentials in a provisioning script — rotate
# - this password and prefer reading it from a protected file.
# -
echo "admin:$(openssl passwd -apr1 '$E%R&T/Z(U')" | sudo tee -a /etc/nginx/htpasswd.kibana

# - Restart Nginx Webservice
# -
systemctl restart nginx

# ==========
# Install Logstash (logstash)
# ==========

# The final step is to install Logstash using the apt package manager from
# the official Elastic repository.
#
apt-get install logstash

# Start the Logstash service and set it to start automatically on boot:
#
systemctl restart logstash
systemctl enable logstash

# ---
# Note:
# The Logstash configuration depends on your personal preferences and the
# plugins you will use. You can find more information about how to configure
# Logstash here:
#
# https://www.elastic.co/guide/en/logstash/current/configuration.html
# ---