diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0d4b595
--- /dev/null
+++ b/README.md
@@ -0,0 +1 @@
+# fl-deploy
diff --git a/docker-compose/.env.example b/docker-compose/.env.example
deleted file mode 100644
index 4379f5c..0000000
--- a/docker-compose/.env.example
+++ /dev/null
@@ -1,12 +0,0 @@
-DOCKER_SOCK=/var/run/docker.sock # default value for docker socket (change if e.g. in rootless)
-DOCKER_LIB=/var/lib/docker # default value for docker filesystem location (change if e.g. in rootless)
-
-ELK_VERSION=7.17.8 # from 8.0 onwards SSL is on by default
-KIBANA_PORT=5601
-ELASTIC_PASSWORD=elasticpassword
-KIBANA_PASSWORD=kibanapassword
-
-SECRET_KEY_BASE=agoodexampleofasecretkey # SECRET_KEY_BASE must be at least 20 characters long
-POSTGRES_USER=postgresuser
-POSTGRES_PASSWORD=postgrespassword
-
diff --git a/docker-compose/filebeat/filebeat.compose.yml b/docker-compose/filebeat/filebeat.compose.yml
index 32ac7ed..5c86e30 100644
--- a/docker-compose/filebeat/filebeat.compose.yml
+++ b/docker-compose/filebeat/filebeat.compose.yml
@@ -37,4 +37,4 @@ processors:
   - drop_fields:
       fields: ["message_old"]
-
\ No newline at end of file
+
diff --git a/kind/core-secret-kibana-password.yml b/kind/core-secret-kibana-password.yml
new file mode 100644
index 0000000..4ca78ab
--- /dev/null
+++ b/kind/core-secret-kibana-password.yml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: fl-core-secret-kibana-password
+  namespace: fl
+type: Opaque
+data:
+  # the value is the base64 encoding of "kibanapassword"
+  # this value __MUST__ be overwritten in production
+  secret_key_password: a2liYW5hcGFzc3dvcmQ=
+
diff --git a/kind/core.yml b/kind/core.yml
index 15154a6..52fc165 100644
--- a/kind/core.yml
+++ b/kind/core.yml
@@ -49,6 +49,11 @@ spec:
               secretKeyRef:
                 name: fl-core-secret-postgres-password
                 key: secret_key_password
+          - name: KIBANA_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                name: fl-core-secret-kibana-password
+                key: secret_key_password
           - name: NODE_IP
             valueFrom:
               fieldRef:
diff --git a/kind/elasticsearch.yml b/kind/elasticsearch.yml
new file mode 100644
index 0000000..a61683a
--- /dev/null
+++ b/kind/elasticsearch.yml
@@ -0,0 +1,36 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: fl-elasticsearch
+  namespace: fl
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: fl-elasticsearch
+  template:
+    metadata:
+      labels:
+        app: fl-elasticsearch
+    spec:
+      serviceAccountName: fl-svc-account
+      restartPolicy: "Always"
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: fl
+                    operator: In
+                    values:
+                      - "core"
+      containers:
+        - name: elasticsearch
+          image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
+          ports:
+            - containerPort: 9200
+          env:
+            - name: discovery.type
+              value: single-node
+            - name: xpack.security.enabled
+              value: "true"
diff --git a/kind/filebeat-config.yml b/kind/filebeat-config.yml
new file mode 100644
index 0000000..42df692
--- /dev/null
+++ b/kind/filebeat-config.yml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: fl-filebeat-config
+  namespace: fl
+data:
+  filebeat.yml: |
+    filebeat.inputs:
+      - type: container
+        paths:
+          - /var/log/containers/*.log
+        processors:
+          - add_kubernetes_metadata:
+              in_cluster: true
+
+    output.elasticsearch:
+      hosts: ['10.244.2.17:9200']
+
diff --git a/kind/filebeat.yml b/kind/filebeat.yml
new file mode 100644
index 0000000..252daf6
--- /dev/null
+++ b/kind/filebeat.yml
@@ -0,0 +1,47 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: fl-filebeat
+  namespace: fl
+spec:
+  selector:
+    matchLabels:
+      app: fl-filebeat
+  template:
+    metadata:
+      labels:
+        app: fl-filebeat
+    spec:
+      serviceAccountName: fl-svc-account
+      restartPolicy: "Always"
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: fl
+                    operator: In
+                    values:
+                      - "core"
+      containers:
+        - name: filebeat
+          image: docker.elastic.co/beats/filebeat:8.8.0
+          args: ["--strict.perms=false"]
+          securityContext:
+            runAsUser: 0
+          volumeMounts:
+            - name: config
+              mountPath: /usr/share/filebeat/filebeat.yml
+              subPath: filebeat.yml
+              readOnly: true
+            - name: varlogcontainers
+              mountPath: /var/log/containers
+              readOnly: true
+      volumes:
+        - name: config
+          configMap:
+            name: fl-filebeat-config
+        - name: varlogcontainers
+          hostPath:
+            path: /var/log/containers
+
diff --git a/kind/kibana.yml b/kind/kibana.yml
new file mode 100644
index 0000000..f67e6a4
--- /dev/null
+++ b/kind/kibana.yml
@@ -0,0 +1,66 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: fl-kibana
+  namespace: fl
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: fl-kibana
+  template:
+    metadata:
+      labels:
+        app: fl-kibana
+    spec:
+      serviceAccountName: fl-svc-account
+      restartPolicy: "Always"
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: fl
+                    operator: In
+                    values:
+                      - "core"
+      containers:
+        - name: kibana
+          image: docker.elastic.co/kibana/kibana:8.8.0
+          ports:
+            - containerPort: 5601
+          env:
+            - name: ELASTICSEARCH_HOSTS
+              value: "http://10-244-1-18.default.pod.cluster.local:9200" # set elastic ip
+            - name: XPACK_SECURITY_ENABLED
+              value: "true"
+            - name: ELASTICSEARCH_USERNAME
+              value: "kibana_system"
+            - name: ELASTICSEARCH_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: fl-core-secret-kibana-password
+                  key: secret_key_password
+            - name: XPACK_REPORTING_ROLES_ENABLED
+              value: "false"
+            - name: XPACK_FLEET_REGISTRYURL
+              value: "https://epr.elastic.co"
+            - name: XPACK_FLEET_AGENTS_ENABLED
+              value: "true"
+            - name: xpack.encryptedSavedObjects.encryptionKey
+              value: "anexampleofanencryptionkeythatis32chars" # must be at least 32 characters; overwrite in production
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: fl-kibana
+  namespace: fl
+  labels:
+    app: fl-kibana
+spec:
+  ports:
+    - name: http
+      port: 5601
+      targetPort: 5601
+  selector:
+    app: fl-kibana
+
+
diff --git a/kind/postgres.yml b/kind/postgres.yml
index 3a7efb5..875e62d 100644
--- a/kind/postgres.yml
+++ b/kind/postgres.yml
@@ -1,3 +1,37 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: fl-postgres-pv
+  namespace: fl
+  labels:
+    app: fl-postgres
+spec:
+  capacity:
+    storage: 2Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: standard
+  hostPath:
+    path: /data/postgres
+
+---
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: fl-postgres-pvc
+  namespace: fl
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: standard
+
+---
+
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -45,6 +79,13 @@ spec:
                 key: secret_key_user
           - name: POSTGRES_HOST_AUTH_METHOD
             value: trust
+          volumeMounts:
+            - name: fl-postgres-data
+              mountPath: /var/lib/postgresql/data
+      volumes:
+        - name: fl-postgres-data
+          persistentVolumeClaim:
+            claimName: fl-postgres-pvc
 
 ---
 
@@ -111,3 +152,4 @@ spec:
     port: 5432
     targetPort: 5432
     protocol: TCP
+
diff --git a/kind/prometheus.yml b/kind/prometheus.yml
index 52b126a..6b8f207 100644
--- a/kind/prometheus.yml
+++ b/kind/prometheus.yml
@@ -1,3 +1,35 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: fl-prometheus-pv
+  namespace: fl
+spec:
+  capacity:
+    storage: 2Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: standard
+  hostPath:
+    path: /data/prometheus
+
+---
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: fl-prometheus-pvc
+  namespace: fl
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: standard
+
+---
+
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -21,27 +53,32 @@ spec:
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
-          - matchExpressions:
-            - key: fl
-              operator: In
-              values:
-              - "core"
+            - matchExpressions:
+                - key: fl
+                  operator: In
+                  values:
+                    - "core"
+      initContainers:
+        - name: create-data-dir
+          image: busybox
+          command: ["mkdir", "-p", "/data/prometheus"]
+          volumeMounts:
+            - name: fl-prometheus-data
+              mountPath: /data/prometheus
       volumes:
-      - name: fl-prometheus-config-volume
-        configMap:
-          name: fl-prometheus-configmap
+        - name: fl-prometheus-data
+          persistentVolumeClaim:
+            claimName: fl-prometheus-pvc
       containers:
-      - name: prometheus
-        image: "prom/prometheus:latest"
-        imagePullPolicy: IfNotPresent
-        ports:
-        - containerPort: 9090
-        volumeMounts:
-        - name: fl-prometheus-config-volume
-          mountPath: /etc/prometheus/
-
+        - name: prometheus
+          image: prom/prometheus:latest
+          imagePullPolicy: IfNotPresent
+          ports:
+            - containerPort: 9090
+          volumeMounts:
+            - name: fl-prometheus-data
+              mountPath: /prometheus/data
 ---
-
 apiVersion: v1
 kind: Service
 metadata:
@@ -57,4 +94,5 @@ spec:
     - name: http
       port: 9090
       targetPort: 9090
-      protocol: TCP
\ No newline at end of file
+      protocol: TCP
+
diff --git a/kind/start_kind.sh b/kind/start_kind.sh
index 55761f9..8766c33 100755
--- a/kind/start_kind.sh
+++ b/kind/start_kind.sh
@@ -21,3 +21,8 @@ kubectl apply -f postgres.yml
 kubectl apply -f core-secret-key-base.yml
 kubectl apply -f core.yml
 kubectl apply -f worker.yml
+kubectl apply -f core-secret-kibana-password.yml
+kubectl apply -f elasticsearch.yml
+kubectl apply -f filebeat-config.yml
+kubectl apply -f filebeat.yml
+kubectl apply -f kibana.yml
diff --git a/nomad-ansible/README.md b/nomad-ansible/README.md
new file mode 100644
index 0000000..92da418
--- /dev/null
+++ b/nomad-ansible/README.md
@@ -0,0 +1,3 @@
+## nomad-ansible
+
+Ansible playbook that provisions Docker and Nomad on the cluster nodes
diff --git a/nomad-ansible/ansible.cfg b/nomad-ansible/ansible.cfg
new file mode 100644
index 0000000..44655a3
--- /dev/null
+++ b/nomad-ansible/ansible.cfg
@@ -0,0 +1,6 @@
+[defaults]
+inventory = hosts
+strategy = free
+
+[ssh_connection]
+pipelining = true
diff --git a/nomad-ansible/files/nomad/nomad.service.j2 b/nomad-ansible/files/nomad/nomad.service.j2
new file mode 100644
index 0000000..e773046
--- /dev/null
+++ b/nomad-ansible/files/nomad/nomad.service.j2
@@ -0,0 +1,48 @@
+[Unit]
+Description=Nomad
+Documentation=https://www.nomadproject.io/docs/
+Wants=network-online.target
+After=network-online.target
+
+# When using Nomad with Consul it is not necessary to start Consul first. These
+# lines start Consul before Nomad as an optimization to avoid Nomad logging
+# that Consul is unavailable at startup.
+#Wants=consul.service
+#After=consul.service
+
+[Service]
+
+# Nomad servers should be run as the nomad user. Nomad clients
+# should be run as root.
+User=root
+Group=root
+
+ExecReload=/bin/kill -HUP $MAINPID
+ExecStart=/usr/bin/nomad agent -config /etc/nomad/
+KillMode=process
+KillSignal=SIGINT
+LimitNOFILE=65536
+LimitNPROC=infinity
+Restart=on-failure
+RestartSec=2
+
+## Configure unit start rate limiting. Units which are started more than
+## *burst* times within an *interval* time span are not permitted to start any
+## more. Use `StartLimitIntervalSec` or `StartLimitInterval` (depending on
+## systemd version) to configure the checking interval and `StartLimitBurst`
+## to configure how many starts per interval are allowed. The values in the
+## commented lines are defaults.
+
+# StartLimitBurst = 5
+
+## StartLimitIntervalSec is used for systemd versions >= 230
+# StartLimitIntervalSec = 10s
+
+## StartLimitInterval is used for systemd versions < 230
+# StartLimitInterval = 10s
+
+TasksMax=infinity
+OOMScoreAdjust=-1000
+
+[Install]
+WantedBy=multi-user.target
diff --git a/nomad-ansible/group_vars/client.yml b/nomad-ansible/group_vars/client.yml
new file mode 100644
index 0000000..018c11a
--- /dev/null
+++ b/nomad-ansible/group_vars/client.yml
@@ -0,0 +1,5 @@
+---
+
+nomad_agent_type: client
+
+nomad_retry_join: ["130.136.3.153"]
diff --git a/nomad-ansible/group_vars/server.yml b/nomad-ansible/group_vars/server.yml
new file mode 100644
index 0000000..6e2782b
--- /dev/null
+++ b/nomad-ansible/group_vars/server.yml
@@ -0,0 +1,7 @@
+---
+
+nomad_agent_type: server
+
+nomad_bootstrap_expect: 1
+
+nomad_retry_join: ["130.136.3.153"]
diff --git a/nomad-ansible/host_vars/client1.yml b/nomad-ansible/host_vars/client1.yml
new file mode 100644
index 0000000..0aadc70
--- /dev/null
+++ b/nomad-ansible/host_vars/client1.yml
@@ -0,0 +1,3 @@
+---
+
+nomad_name_host: client1
diff --git a/nomad-ansible/host_vars/client2.yml b/nomad-ansible/host_vars/client2.yml
new file mode 100644
index 0000000..08ddf51
--- /dev/null
+++ b/nomad-ansible/host_vars/client2.yml
@@ -0,0 +1,3 @@
+---
+
+nomad_name_host: client2
diff --git a/nomad-ansible/host_vars/server1.yml b/nomad-ansible/host_vars/server1.yml
new file mode 100644
index 0000000..155e53d
--- /dev/null
+++ b/nomad-ansible/host_vars/server1.yml
@@ -0,0 +1,3 @@
+---
+
+nomad_name_host: server1
diff --git a/nomad-ansible/hosts b/nomad-ansible/hosts
new file mode 100644
index 0000000..da221cd
--- /dev/null
+++ b/nomad-ansible/hosts
@@ -0,0 +1,18 @@
+all:
+  children:
+    client:
+      hosts:
+        client1:
+          ansible_host: 130.136.3.154
+          ansible_port: 22
+          ansible_user: debian
+        client2:
+          ansible_host: 130.136.3.15
+          ansible_port: 22
+          ansible_user: erik
+    server:
+      hosts:
+        server1:
+          ansible_host: 130.136.3.153
+          ansible_port: 22
+          ansible_user: debian
diff --git a/nomad-ansible/playbook.yml b/nomad-ansible/playbook.yml
new file mode 100644
index 0000000..e8c8487
--- /dev/null
+++ b/nomad-ansible/playbook.yml
@@ -0,0 +1,8 @@
+- name: Provision Nomad node
+  hosts: all
+  tasks:
+    - name: Setup docker
+      ansible.builtin.import_tasks: tasks/docker.yml
+    - name: Setup nomad
+      ansible.builtin.import_tasks: tasks/nomad.yml
+
diff --git a/nomad-ansible/tasks/docker.yml b/nomad-ansible/tasks/docker.yml
new file mode 100644
index 0000000..1e6eb87
--- /dev/null
+++ b/nomad-ansible/tasks/docker.yml
@@ -0,0 +1,87 @@
+---
+
+- name: Remove Docker apt sources list file
+  become: yes
+  ansible.builtin.file:
+    path: /etc/apt/sources.list.d/docker.list
+    state: absent
+
+- name: Update apt cache
+  become: yes
+  ansible.builtin.apt:
+    update_cache: yes
+
+- name: Install ca-certificates curl gnupg
+  become: yes
+  ansible.builtin.apt:
+    name:
+      - ca-certificates
+      - curl
+      - gnupg
+    state: latest
+
+- name: Create directory /etc/apt/keyrings
+  become: yes
+  file:
+    path: /etc/apt/keyrings
+    state: directory
+    mode: '0755'
+
+
+# - name: Import Docker GPG key
+#   shell:
+#     cmd: "curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --no-default-keyring --keyring gnupg-ring:/etc/apt/trusted.gpg.d/docker-archive-keyring.gpg --import"
+#     creates: "/etc/apt/trusted.gpg.d/docker-archive-keyring.gpg"
+#   register: gpg_key_import_output
+#   become: true
+#
+#
+# - name: Fix GPG key permissions
+#   file:
+#     path: "/etc/apt/trusted.gpg.d/docker-archive-keyring.gpg"
+#     owner: root
+#     group: root
+#     mode: "u=rw,g=r,o=r"
+#   become: true
+#   when: gpg_key_import_output.changed
+#
+#
+# - name: Add docker APT repository
+#   copy:
+#     content: >-
+#       deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian {{ ansible_lsb.codename }} main
+#     dest: /etc/apt/sources.list.d/docker.list
+#     owner: root
+#     group: root
+#     mode: "u=rw,g=r,o=r"
+#   become: true
+#   when: gpg_key_import_output.changed
+#
+
+- name: Add Docker GPG key
+  become: yes
+  apt_key: url=https://download.docker.com/linux/debian/gpg
+
+- name: Add Docker APT repository
+  become: yes
+  apt_repository:
+    repo: deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable
+
+- name: Update apt cache
+  become: yes
+  ansible.builtin.apt:
+    update_cache: yes
+
+- name: Install Docker packages
+  apt:
+    name:
+      - docker-ce
+      - docker-ce-cli
+      - containerd.io
+    state: present
+  become: true
+
+- name: Install Docker Compose plugin
+  become: yes
+  apt:
+    name: docker-compose-plugin
diff --git a/nomad-ansible/tasks/nomad.yml b/nomad-ansible/tasks/nomad.yml
new file mode 100644
index 0000000..3340f6f
--- /dev/null
+++ b/nomad-ansible/tasks/nomad.yml
@@ -0,0 +1,112 @@
+- name: Update all packages to their latest version
+  become: yes
+  ansible.builtin.apt:
+    name: "*"
+    state: latest
+
+- name: Update apt cache
+  become: yes
+  ansible.builtin.apt:
+    update_cache: yes
+
+- name: Create the nomad user
+  become: yes
+  ansible.builtin.user:
+    state: present
+    system: true
+    name: nomad
+    home: /opt/nomad
+    shell: /usr/sbin/nologin
+
+- name: Create Nomad group
+  group: name=nomad system=yes state=present
+
+- name: Ensure required utilities are present
+  apt:
+    name:
+      - curl
+      - gnupg2
+      - wget
+      - gpg
+      - coreutils
+    state: latest
+    update_cache: yes
+  become: true
+
+- name: Import Hashicorp GPG key
+  shell:
+    cmd: "curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --no-default-keyring --keyring gnupg-ring:/etc/apt/trusted.gpg.d/hashicorp-archive-keyring.gpg --import"
+    creates: "/etc/apt/trusted.gpg.d/hashicorp-archive-keyring.gpg"
+  register: gpg_key_import_output
+  become: true
+
+- name: Fix GPG key permissions
+  file:
+    path: "/etc/apt/trusted.gpg.d/hashicorp-archive-keyring.gpg"
+    owner: root
+    group: root
+    mode: "u=rw,g=r,o=r"
+  become: true
+  when: gpg_key_import_output.changed
+
+- name: Add Hashicorp APT repository
+  copy:
+    content: >-
+      deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com {{ ansible_lsb.codename }} main
+    dest: /etc/apt/sources.list.d/hashicorp.list
+    owner: root
+    group: root
+    mode: "u=rw,g=r,o=r"
+  become: true
+  when: gpg_key_import_output.changed
+
+- name: Install latest version of Nomad
+  become: true
+  apt: name=nomad state=latest update_cache=yes
+
+- name: Include default variables from vars/
+  ansible.builtin.include_vars:
+    dir: vars/
+    depth: 1
+
+- name: Create needed directories for nomad
+  become: yes
+  file:
+    path: "/etc/nomad/"
+    state: directory
+    owner: nomad
+    group: nomad
+    mode: 0750
+
+- name: Copy the Nomad configuration file
+  become: yes
+  ansible.builtin.template:
+    src: templates/nomad.hcl.j2
+    dest: /etc/nomad/nomad.hcl
+
+- name: Copy the systemd service
+  become: yes
+  ansible.builtin.template:
+    src: files/nomad/nomad.service.j2
+    dest: /etc/systemd/system/nomad.service
+
+- name: Reload the systemd daemon
+  become: yes
+  ansible.builtin.service:
+    daemon_reload: true
+
+- name: Start and enable the nomad service
+  become: yes
+  ansible.builtin.service:
+    name: nomad
+    state: started
+    enabled: true
+
+- name: Restart and enable the nomad service
+  become: yes
+  ansible.builtin.service:
+    name: nomad
+    state: restarted
+    enabled: true
diff --git a/nomad-ansible/tasks/vars/main.yml b/nomad-ansible/tasks/vars/main.yml
new file mode 100644
index 0000000..35fbc42
--- /dev/null
+++ b/nomad-ansible/tasks/vars/main.yml
@@ -0,0 +1,18 @@
+---
+# Override these in playbook with 'vars:' block
+
+# Debug level
+nomad_log_level: DEBUG
+
+# The data center in which the agent is running
+nomad_data_center: dc1
+
+# The Nomad HTTP UI port
+nomad_port_http: 4646
+
+# Port clients use to communicate with the Nomad servers
+nomad_port_rpc: 4647
+
+# Gossip port used to detect node failures
+nomad_port_serf: 4648
+
diff --git a/nomad-ansible/templates/nomad.hcl.j2 b/nomad-ansible/templates/nomad.hcl.j2
new file mode 100644
index 0000000..6ff4c00
--- /dev/null
+++ b/nomad-ansible/templates/nomad.hcl.j2
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}.
+#
+# See https://www.nomadproject.io/docs/configuration
+
+log_level = "{{ nomad_log_level }}"
+
+datacenter = "{{ nomad_data_center }}"
+
+name = "{{ nomad_name_host }}"
+
+data_dir = "/opt/nomad"
+
+advertise {
+  http = "{{ ansible_default_ipv4.address }}:{{ nomad_port_http }}"
+  rpc  = "{{ ansible_default_ipv4.address }}:{{ nomad_port_rpc }}"
+  serf = "{{ ansible_default_ipv4.address }}:{{ nomad_port_serf }}"
+}
+
+{% if nomad_agent_type == 'server' %}
+server {
+  enabled = true
+
+  bootstrap_expect = {{ nomad_bootstrap_expect }}
+
+  server_join {
+    retry_join = {{ nomad_retry_join | to_json }}
+  }
+
+}
+{% else %}
+client {
+  enabled = true
+
+  server_join {
+    retry_join = {{ nomad_retry_join | to_json }}
+  }
+}
+{% endif %}
+
+
+plugin "raw_exec" {
+  config {
+    enabled = true
+  }
+}
+
diff --git a/nomad/consul.hcl b/nomad/consul.hcl
new file mode 100644
index 0000000..a493af8
--- /dev/null
+++ b/nomad/consul.hcl
@@ -0,0 +1,26 @@
+job "consul" {
+  datacenters = ["dc1"]
+  type = "service"
+  group "consul" {
+    count = 1
+
+    network {
+      mode = "host"
+    }
+
+    task "consul" {
+      driver = "docker"
+
+
+      config {
+        image = "consul:latest"
+      }
+
+      resources {
+        cpu    = 500
+        memory = 256
+      }
+
+    }
+  }
+}
diff --git a/nomad/core.hcl b/nomad/core.hcl
new file mode 100644
index 0000000..9c7ce5f
--- /dev/null
+++ b/nomad/core.hcl
@@ -0,0 +1,46 @@
+job "core-service" {
+  datacenters = ["dc1"]
+  type = "service"
+
+  group "core" {
+    count = 1
+
+    network {
+      mode = "host"
+      port "funless" {
+        static = 4000
+      }
+    }
+
+    task "core" {
+      driver = "docker"
+
+      config {
+        image = "ghcr.io/funlessdev/core:latest"
+        ports = ["funless"]
+      }
+
+      env {
+        PGHOST     = "postgres-postgres-postgres"
+        PGUSER     = "postgresuser"
+        PGPASSWORD = "postgrespassword"
+        PGDATABASE = "funless"
+        PGPORT     = "5432"
+        SECRET_KEY_BASE = "agoodexampleofasecretkey"
+      }
+
+      resources {
+        cpu    = 1000
+        memory = 2000
+      }
+
+      service {
+        tags     = ["funless"]
+        port     = "funless"
+        provider = "consul"
+      }
+
+
+    }
+  }
+}
diff --git a/nomad/elasticsearch.hcl b/nomad/elasticsearch.hcl
new file mode 100644
index 0000000..6792d02
--- /dev/null
+++ b/nomad/elasticsearch.hcl
@@ -0,0 +1,45 @@
+job "elasticsearch" {
+  datacenters = ["dc1"]
+
+  group "elasticsearch" {
+    count = 1
+
+    network {
+      mode = "bridge"
+      port "elastic" {}
+    }
+
+    task "elastic" {
+      driver = "docker"
+
+      env = {
+        "ES_JAVA_OPTS"                          = "-Xms512m -Xmx512m"
+        "ELASTIC_USER"                          = "elastic"
+        "ELASTIC_PASSWORD"                      = "elasticpassword"
+        "discovery.type"                        = "single-node"
+        "bootstrap.memory_lock"                 = "true"
+        "xpack.license.self_generated.type"     = "basic"
+        "xpack.security.enabled"                = "true"
+        "xpack.security.authc.api_key.enabled"  = "true"
+      }
+
+      config {
+        network_mode = "host"
+        image        = "docker.elastic.co/elasticsearch/elasticsearch:8.8.0"
+        ports        = ["elastic"]
+      }
+
+      resources {
+        cpu    = 5000
+        memory = 1000
+      }
+
+      service {
+        tags     = ["elasticsearch"]
+        port     = "elastic"
+        provider = "consul"
+      }
+
+    }
+  }
+}
diff --git a/nomad/filebeat.hcl b/nomad/filebeat.hcl
new file mode 100644
index 0000000..50ca6c8
--- /dev/null
+++ b/nomad/filebeat.hcl
@@ -0,0 +1,36 @@
+job "filebeat" {
+  datacenters = ["dc1"]
+  type = "service"
+
+  group "filebeat" {
+    count = 1
+
+    task "filebeat" {
+      driver = "docker"
+
+      config {
+        image = "docker.elastic.co/beats/filebeat:8.8.0" # Replace with the desired Filebeat version
+      }
+
+      template {
+        data = <
+DOCKER-COMPOSE RESULT
+> cat runFunction.vegeta | ./vegeta attack -rate=1 -duration=100s | tee result.bin | ./vegeta report
+Requests      [total, rate, throughput]         100, 1.01, 1.01
+Duration      [total, attack, wait]             1m39s, 1m39s, 11.181ms
+Latencies     [min, mean, 50, 90, 95, 99, max]  5.753ms, 8.562ms, 6.85ms, 13.516ms, 15.14ms, 16.385ms, 16.455ms
+Bytes In      [total, mean]                     3400, 34.00
+Bytes Out     [total, mean]                     3200, 32.00
+Success       [ratio]                           100.00%
+Status Codes  [code:count]                      200:100
+Error Set:
+
+KUBERNETES RESULT
+> cat runFunction.vegeta | ./vegeta attack -rate=1 -duration=100s | tee result.bin | ./vegeta report
+Requests      [total, rate, throughput]         100, 1.01, 1.01
+Duration      [total, attack, wait]             1m39s, 1m39s, 6.349ms
+Latencies     [min, mean, 50, 90, 95, 99, max]  5.997ms, 7.792ms, 6.774ms, 10.419ms, 15.066ms, 21.08ms, 22.634ms
+Bytes In      [total, mean]                     3400, 34.00
+Bytes Out     [total, mean]                     3200, 32.00
+Success       [ratio]                           100.00%
+Status Codes  [code:count]                      200:100
+Error Set:
+
+
+NOMAD RESULT
+> cat runFunction.vegeta | ./vegeta attack -rate=1 -duration=100s | tee result.bin | ./vegeta report
+Requests      [total, rate, throughput]         100, 1.01, 1.01
+Duration      [total, attack, wait]             1m39s, 1m39s, 10.349ms
+Latencies     [min, mean, 50, 90, 95, 99, max]  5.789ms, 8.792ms, 6.639ms, 12.919ms, 15.216ms, 16.511ms, 16.964ms
+Bytes In      [total, mean]                     3400, 34.00
+Bytes Out     [total, mean]                     3200, 32.00
+Success       [ratio]                           100.00%
+Status Codes  [code:count]                      200:100
+Error Set:
+
+
+
+
diff --git a/requests/runFunction.body.json b/requests/runFunction.body.json
new file mode 100644
index 0000000..f4d8eed
--- /dev/null
+++ b/requests/runFunction.body.json
@@ -0,0 +1,3 @@
+{
+  "args": {"name": "erik"}
+}
diff --git a/requests/runFunction.vegeta b/requests/runFunction.vegeta
new file mode 100644
index 0000000..6b77ee0
--- /dev/null
+++ b/requests/runFunction.vegeta
@@ -0,0 +1,7 @@
+POST http://localhost:4000/v1/fn/hello/hello
+accept: application/json
+Content-Type: application/json
+Authorization: Bearer SFMyNTY.g2gDdAAAAAF3BHVzZXJtAAAABWd1ZXN0bgYAJrz53YoBYgABUYA.jvJQVlp2Ox6vK_jatgMYDuZNI9fGXPE1yGZecQ1SA9c
+@runFunction.body.json
+
+
diff --git a/requests/vegeta b/requests/vegeta
new file mode 100755
index 0000000..796ceef
Binary files /dev/null and b/requests/vegeta differ