From 2e863666c9c296c28e1c6d3d51010eedaac24e4f Mon Sep 17 00:00:00 2001
From: Jan Schumacher <155645800+jschumacher-wire@users.noreply.github.com>
Date: Tue, 10 Sep 2024 14:31:11 +0200
Subject: [PATCH] Wiab automation (#728)

* adding initial autodeploy script, offline-vm-setup.sh fix

* extending autodeploy.sh functionality, adding coturn, updating docs and
nftables hetzner firewall

* fixing markdown link
---
 ansible/files/hetzner_server_nftables.conf.j2 |  20 +-
 bin/autodeploy.sh                             | 419 ++++++++++++++++++
 bin/offline-vm-setup.sh                       |   8 +-
 offline/coturn.md                             |  94 ++--
 offline/docs_ubuntu_22.04.md                  |   4 +
 .../single_hetzner_machine_installation.md    |  18 +-
 6 files changed, 500 insertions(+), 63 deletions(-)
 create mode 100755 bin/autodeploy.sh

diff --git a/ansible/files/hetzner_server_nftables.conf.j2 b/ansible/files/hetzner_server_nftables.conf.j2
index 66b386f10..3d133ba90 100644
--- a/ansible/files/hetzner_server_nftables.conf.j2
+++ b/ansible/files/hetzner_server_nftables.conf.j2
@@ -3,6 +3,8 @@ flush ruleset
 
 define KUBENODEIP = 192.168.122.21
+define SFTIP = 192.168.122.21
+define COTURNIP = 192.168.122.23
 define INF_WAN = {{ ansible_default_ipv4.interface }}
 
 table inet filter {
@@ -25,9 +27,10 @@ table inet filter {
     }
     chain FORWARD {
         type filter hook forward priority 0;
-        #iifname virbr0 oifname $INF_WAN counter accept comment "allow internet for internal VMs, enable this rule only for letsencrypt cert issue"
+        iifname virbr0 oifname $INF_WAN counter accept comment "allow internet for internal VMs, needed for things like letsencrypt cert issuance"
         iifname virbr0 oifname virbr0 counter accept comment "allow traffic between VMs"
         iifname $INF_WAN oifname virbr0 ct status dnat counter accept comment "allow DNAT forward from external interface to virbr0"
+        iifname docker0 oifname virbr0 counter accept
         jump block_definitions
     }
     chain OUTPUT {
@@ -38,11 +41,16 @@ table inet filter {
 table ip nat {
     chain PREROUTING {
         type nat hook prerouting priority -100;
-        iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772
-        iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773
-        udp dport 3478 dnat ip to 192.168.122.31:3478
-        tcp dport 3478 dnat ip to 192.168.122.31:3478
-        udp dport 32768-60999 dnat ip to 192.168.122.31:32768-60999
+
+        iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772 comment "HTTP ingress"
+        iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773 comment "HTTPS ingress"
+
+        iifname { $INF_WAN, virbr0 } tcp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control TCP"
+        iifname { $INF_WAN, virbr0 } udp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control UDP"
+
+        iifname { $INF_WAN, virbr0 } udp dport 32768-46883 fib daddr type local dnat to $COTURNIP comment "COTURN UDP range"
+        iifname { $INF_WAN, virbr0 } udp dport 46884-61000 fib daddr type local dnat to $SFTIP comment "SFT UDP range"
+
         fib daddr type local counter jump DOCKER
     }
     chain POSTROUTING {
diff --git a/bin/autodeploy.sh b/bin/autodeploy.sh
new file mode 100755
index 000000000..31c077b19
--- /dev/null
+++ b/bin/autodeploy.sh
@@ -0,0 +1,419 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC2087
+set -Eeuo pipefail
+
+msg() {
+  echo >&2 -e "${1-}"
+}
+
+trap cleanup SIGINT SIGTERM ERR EXIT
+
+usage() {
+  cat <<EOF
+EOF
+  exit
+}
+
+for SUBDOMAIN in $SUBDOMAINS; do
+  if host "$SUBDOMAIN.$TARGET_SYSTEM" >/dev/null 2>&1 ; then
+    msg "INFO: DNS A record exists: $SUBDOMAIN.$TARGET_SYSTEM"
+  else
+    die "ERROR: DNS A record for $SUBDOMAIN.$TARGET_SYSTEM does not exist. Exiting. Please check your DNS records."
+  fi
+done
+
+if ssh -q -o ConnectTimeout=5 -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" id | grep -q "$SSH_USER"; then
+  msg ""
+  msg "INFO: Successfully logged into $TARGET_SYSTEM as $SSH_USER"
+else
+  die "ERROR: Can't log into $TARGET_SYSTEM via SSH, please check SSH connectivity."
+fi
+
+if curl --head --silent --fail https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-"$ARTIFACT_HASH".tgz >/dev/null 2>&1 ; then
+  msg "INFO: Artifact exists https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-$ARTIFACT_HASH.tgz"
+else
+  die "ERROR: No artifact found at https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-$ARTIFACT_HASH.tgz"
+fi
+
+system_cleanup_meta() {
+  msg ""
+  msg "INFO: Cleaning up all VMs, docker resources and wire-server-deploy files on $TARGET_SYSTEM."
+  msg ""
+  sleep 5
+  ssh -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" "bash -s" <<EOT
+  msg() {
+    echo >&2 -e "${1-}"
+  }
+  cd $SCRIPT_DIR &>/dev/null || exit 1
+
+  bash bin/offline-vm-setup.sh
+  msg ""
+  while sudo virsh list --all | grep -Fq running; do
+    sleep 20
+    msg "INFO: VM deployment still in progress ..."
+  done
+  sleep 20
+  msg ""
+  msg "INFO: VM deployment done. Starting all VMs:"
+  msg ""
+  for VM in $(sudo virsh list --all --name); do sudo virsh start "$VM"; done
+  sleep 60
+
+  msg ""
+  msg "INFO: Setting up offline environment (this will take a while)."
+  msg ""
+  # Rather than sourcing wire-server-deploy/bin/offline-env.sh, we invoke
+  # the relevant commands below, declaring "d" as a function instead of an alias.
+  ZAUTH_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}')
+  export ZAUTH_CONTAINER
+  WSD_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}')
+  d() {
+    sudo docker run --network=host -v "${SSH_AUTH_SOCK:-nonexistent}":/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v "$HOME"/.ssh:/root/.ssh -v "$PWD":/wire-server-deploy "$WSD_CONTAINER" "$@"
+  }
+  export -f d
+
+  bash bin/offline-secrets.sh
+
+  HOST_IP=$(dig @resolver4.opendns.com myip.opendns.com +short)
+
+  cat >ansible/inventory/offline/hosts.ini<<EOF
+EOF
+
+  sed -i "s/# - \"turn:<serverip>:80\"/- \"turn:$HOST_IP:3478\"/g" values/wire-server/values.yaml
+  sed -i "s/# - \"turn:<serverip>:80?transport=tcp\"/- \"turn:$HOST_IP:3478?transport=tcp\"/g" values/wire-server/values.yaml
+
+  d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
+
+  sed -i "s/example.com/$TARGET_SYSTEM/" values/webapp/prod-values.example.yaml
+  d helm install webapp ./charts/webapp --values ./values/webapp/prod-values.example.yaml
+
+  sed -i "s/example.com/$TARGET_SYSTEM/" values/team-settings/prod-values.example.yaml
+  d helm install team-settings ./charts/team-settings --values ./values/team-settings/prod-values.example.yaml --values ./values/team-settings/prod-secrets.example.yaml
+
+  sed -i "s/example.com/$TARGET_SYSTEM/" values/account-pages/prod-values.example.yaml
+  d helm install account-pages ./charts/account-pages --values ./values/account-pages/prod-values.example.yaml
+
+  cp values/ingress-nginx-controller/prod-values.example.yaml ./values/ingress-nginx-controller/values.yaml
+  d helm install ingress-nginx-controller ./charts/ingress-nginx-controller --values ./values/ingress-nginx-controller/values.yaml
+
+  KUBENODEIP=$(d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=IP:.status.hostIP --no-headers)
+  sudo sed -i "s/define KUBENODEIP.*/define KUBENODEIP = $KUBENODEIP/" /etc/nftables.conf
+  sudo systemctl restart nftables
+
+  INGRESSNODE=$(d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NODE:.spec.nodeName --no-headers)
+  d kubectl cordon "$INGRESSNODE"
+
+  wget https://charts.jetstack.io/charts/cert-manager-v1.13.2.tgz
+  tar -C ./charts -xzf cert-manager-v1.13.2.tgz
+
+  cp ./values/nginx-ingress-services/prod-values.example.yaml ./values/nginx-ingress-services/values.yaml
+  cp ./values/nginx-ingress-services/prod-secrets.example.yaml ./values/nginx-ingress-services/secrets.yaml
+  sed -i 's/useCertManager: false/useCertManager: true/g' values/nginx-ingress-services/values.yaml
+  sed -i 's/certmasterEmail:/certmasterEmail: backend+wiabautodeploy@wire.com/g' values/nginx-ingress-services/values.yaml
+  sed -i "s/example.com/$TARGET_SYSTEM/" values/nginx-ingress-services/values.yaml
+
+  d kubectl create namespace cert-manager-ns
+  d helm upgrade --install -n cert-manager-ns --set 'installCRDs=true' cert-manager charts/cert-manager
+
+  d kubectl uncordon "$INGRESSNODE"
+
+  d helm upgrade --install nginx-ingress-services charts/nginx-ingress-services -f values/nginx-ingress-services/values.yaml
+
+  d kubectl get certificate
+
+  cp values/sftd/prod-values.example.yaml values/sftd/values.yaml
+  sed -i "s/webapp.example.com/webapp.$TARGET_SYSTEM/" values/sftd/values.yaml
+  sed -i "s/sftd.example.com/sft.$TARGET_SYSTEM/" values/sftd/values.yaml
+  sed -i 's/name: letsencrypt-prod/name: letsencrypt-http01/' values/sftd/values.yaml
+  sed -i "s/replicaCount: 3/replicaCount: 1/" values/sftd/values.yaml
+  d kubectl label node kubenode1 wire.com/role=sftd
+  d helm upgrade --install sftd ./charts/sftd --set 'nodeSelector.wire\.com/role=sftd' --set 'node_annotations="{'wire\.com/external-ip': '"$HOST_IP"'}"' --values values/sftd/values.yaml
+
+  ZREST_SECRET=$(grep -A1 turn values/wire-server/secrets.yaml | grep secret | tr -d '"' | awk '{print $NF}')
+
+  cat >values/coturn/values.yaml<<EOF
+EOF
+
+  cat >values/coturn/secrets.yaml<<EOF
+EOF
+
+  d helm install coturn ./charts/coturn --timeout=15m0s --values values/coturn/values.yaml --values values/coturn/secrets.yaml
+EOT
+}
+
+DO_SYSTEM_CLEANUP=false
+EXISTING_INSTALL=$(ssh -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" "ls -d wire-server-deploy* 2>/dev/null" || true)
+EXISTING_VMS=$(ssh -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" "virsh list --all --name")
+EXISTING_CONTAINERS=$(ssh -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" "docker ps -q --all")
+
+if [[ "$EXISTING_INSTALL" ]]; then
+  msg ""
+  msg "WARNING: existing wire-server-deploy installation found: $EXISTING_INSTALL"
+  DO_SYSTEM_CLEANUP=true
+fi
+if [[ "$EXISTING_VMS" ]]; then
+  msg ""
+  msg "WARNING: existing libvirt VMs found: $EXISTING_VMS"
+  DO_SYSTEM_CLEANUP=true
+fi
+if [[ "$EXISTING_CONTAINERS" ]]; then
+  msg ""
+  msg "WARNING: existing Docker containers found."
+  DO_SYSTEM_CLEANUP=true
+fi
+
+if [ "$DO_SYSTEM_CLEANUP" = false ]; then
+  msg ""
+  msg "INFO: Target system clean, no previous wire-server-deploy installation found."
+fi
+if [ "$DO_SYSTEM_CLEANUP" = true ] && [ "$FORCE_REDEPLOY" = 0 ]; then
+  msg ""
+  IFS= read -r -p "Do you want to wipe all wire-server-deploy components from $TARGET_SYSTEM? (y/n) " PROMPT_CLEANUP
+  if [[ $PROMPT_CLEANUP == "n" || $PROMPT_CLEANUP == "N" ]]; then
+    msg ""
+    die "Aborting, not cleaning up $TARGET_SYSTEM"
+  fi
+  system_cleanup_meta
+fi
+if [ "$DO_SYSTEM_CLEANUP" = true ] && [ "$FORCE_REDEPLOY" = 1 ]; then
+  system_cleanup_meta
+fi
+
+msg "INFO: Commencing Wire-in-a-box deployment on $TARGET_SYSTEM."
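+
+# At this point all pre-flight checks have passed: DNS records resolve, SSH
+# access works, the release artifact is downloadable, and any leftover state
+# from a previous run has been wiped (after a prompt, or unconditionally when
+# force-redeploy is set). The remaining steps provision the Hetzner host and
+# run the actual deployment on it remotely.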
+preprovision_hetzner
+ssh -p "$SSH_PORT" "$DEMO_USER"@webapp."$TARGET_SYSTEM" "bash -s" <<EOT
+EOT
diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh
--- a/bin/offline-vm-setup.sh
+++ b/bin/offline-vm-setup.sh
@@ -1,4 +1,8 @@
+msg() {
+  echo >&2 -e "${1-}"
+}
+
 if [[ $EUID -eq 0 ]]; then
   msg "Please don't run me as root" 1>&2
   exit 1
@@ -59,10 +63,6 @@ cleanup() {
   rm -r "$DEPLOY_DIR"/nocloud/* 2>/dev/null
 }
 
-msg() {
-  echo >&2 -e "${1-}"
-}
-
 die() {
   local msg=$1
   local code=${2-1} # default exit status 1
diff --git a/offline/coturn.md b/offline/coturn.md
index 653c15be2..353d6c583 100644
--- a/offline/coturn.md
+++ b/offline/coturn.md
@@ -182,17 +182,13 @@ demo@install-docs:~/wire-server-deploy$ ip addr
 
 In this case, the external IP address is `5.9.84.121`.
 
-```{note}
-
-Note this step is also documented in the [ Wire install docs](docs_ubuntu_22.04.md)
-
-```
+Please note: This step is also documented in the [Wire install docs](docs_ubuntu_22.04)
 
 We must make sure that Coturn pods and SFT pods do not run on the same kubernetes nodes.
 
 This means we must label the kubernetes nodes to run on nodes that we did not select to run Coturn in the previous step.
 
-In this example, we've decided to run Coturn on the first kubernetes node, `kubenode1`, which has an IP address of `192.168.122.21`.
+In this example, we've decided to run Coturn on the third kubernetes node, `kubenode3`, which has an IP address of `192.168.122.23`.
 
 First we make sure the SFT chart is configured to only run on kubernetes nodes with the right label (`sftd`).
 
@@ -205,7 +201,7 @@ nodeSelector:
 
 ```
 
-Then we label the `kubenode1` machine with the `wire.com/role: coturn` label:
+Then we label the `kubenode1` machine with the `wire.com/role: sftd` label:
 
 ```bash
 
@@ -217,7 +213,7 @@ We must also annotate the node with the exrenal IP address we will be listening
 
 ```bash
 
-kubectl annotate node kubenode3 wire.com/external-ip='your.public.ip.address'
+d kubectl annotate node kubenode3 wire.com/external-ip='your.public.ip.address'
 ```
 
@@ -269,23 +265,21 @@ We will do the following modifications:
 
 First, we create some definitions in the beginning of the file for readability:
 
 ```
-define COTURNIP = 192.168.122.21
-define SFTIP = 192.168.122.23
-
-define ANSNODEIP = 192.168.122.31
-define ASSETHOSTIP= 192.168.122.10
-
+define SFTIP = 192.168.122.21
+define COTURNIP = 192.168.122.23
+define KUBENODEIP = 192.168.122.21
 define INF_WAN = enp41s0
 ```
 
 Where:
 
-* `COTURNIP` is the IP address of the machine where Coturn will run (in our example, the first kubernetes node, `kubenode1`).
-* `SFTIP` is the IP address of the machine where SFT will run (in our example, the third kubernetes node, `kubenode3`).
-* `ANSNODEIP` is the IP address the first machine where ansible will install non-kubernetes services (in our example, the first ansible node, `ansnode1`).
-* `ASSETHOSTIP` is the IP address of the machine where the assethost will run (see earlier steps in the installation process.)
+* `SFTIP` is the IP address of the machine where SFT will run (in our example, the first kubernetes node, `kubenode1`).
+* `COTURNIP` is the IP address of the machine where Coturn will run (in our example, the third kubernetes node, `kubenode3`).
+* `KUBENODEIP` is the IP address of the machine running the nginx HTTP/HTTPS ingress.
 * `INF_WAN` is the name of the WAN interface exposed to the outside world (the Internet).
 
+Please note that while in this example SFTIP and KUBENODEIP point to the same host (kubenode1), this may change depending on where Kubernetes schedules the nginx ingress.
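+
+To check which node is currently hosting the nginx ingress (and therefore which IP `KUBENODEIP` should point to), one option is to query the host IP of the ingress-nginx pod, assuming the standard `app.kubernetes.io/name=ingress-nginx` label:
+
+```bash
+d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=IP:.status.hostIP --no-headers
+```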
+
 Then, we edit the `table ip nat` / `chain PREROUTING` section of the file:
 
 ```nft
@@ -295,17 +289,14 @@ table ip nat {
     type nat hook prerouting priority -100;
 
-    iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $SFTIP:31772
-    iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $SFTIP:31773
+    iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772 comment "HTTP ingress"
+    iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773 comment "HTTPS ingress"
 
-    udp dport 80 dnat ip to $ANSNODEIP:80
-    udp dport 1194 dnat ip to $ASSETHOSTIP:1194
+    iifname { $INF_WAN, virbr0 } tcp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control TCP"
+    iifname { $INF_WAN, virbr0 } udp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control UDP"
 
-    iifname $INF_WAN ip daddr 5.9.84.121 udp dport 32768-46883 dnat to $COTURNIP
-    iifname $INF_WAN ip daddr 5.9.84.121 udp dport 46884-61000 dnat to $SFTIP
-
-    iifname $INF_WAN udp dport 3478 dnat to $COTURNIP:3478
-    iifname $INF_WAN tcp dport 3478 dnat to $COTURNIP:3478
+    iifname { $INF_WAN, virbr0 } udp dport 32768-46883 fib daddr type local dnat to $COTURNIP comment "COTURN UDP range"
+    iifname { $INF_WAN, virbr0 } udp dport 46884-61000 fib daddr type local dnat to $SFTIP comment "SFT UDP range"
 
     fib daddr type local counter jump DOCKER
 }
@@ -314,25 +305,25 @@ table ip nat {
 
 Some explanations:
 
-This is used for the SFT control:
+This is used for the HTTP(S) ingress:
 
 ```nft
-    iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $SFTIP:31772
-    iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $SFTIP:31773
+    iifname { $INF_WAN, virbr0 } tcp dport 80 fib daddr type local dnat to $KUBENODEIP:31772 comment "HTTP ingress"
+    iifname { $INF_WAN, virbr0 } tcp dport 443 fib daddr type local dnat to $KUBENODEIP:31773 comment "HTTPS ingress"
 ```
 
 This is the part that distributes the UDP packets (media/calling traffic) in two different port ranges for SFT and Coturn:
 
 ```nft
-    iifname $INF_WAN ip daddr 5.9.84.121 udp dport 32768-46883 dnat to $COTURNIP
-    iifname $INF_WAN ip daddr 5.9.84.121 udp dport 46884-61000 dnat to $SFTIP
+    iifname { $INF_WAN, virbr0 } udp dport 32768-46883 fib daddr type local dnat to $COTURNIP comment "COTURN UDP range"
+    iifname { $INF_WAN, virbr0 } udp dport 46884-61000 fib daddr type local dnat to $SFTIP comment "SFT UDP range"
 ```
 
 This is the part that redirects the control traffic to the Coturn port:
 
 ```nft
-    iifname $INF_WAN udp dport 3478 dnat to $COTURNIP:3478
-    iifname $INF_WAN tcp dport 3478 dnat to $COTURNIP:3478
+    iifname { $INF_WAN, virbr0 } tcp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control TCP"
+    iifname { $INF_WAN, virbr0 } udp dport 3478 fib daddr type local dnat to $COTURNIP comment "COTURN control UDP"
 ```
 
@@ -412,19 +403,9 @@ Instead, we configure it to use the external IP addres we found above, and the C
 
 As we have changed our Wire-Server configuration, we must re-deploy the Wire-Server chart to apply the new configuration:
 
-If wire-server is already installed, first uninstall it:
-
 ```bash
 
-d helm uninstall wire-server
-
-```
-
-Then install wire-server with:
-
-```bash
-
-d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
+d helm upgrade --install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
 
 ```
 
@@ -482,10 +463,6 @@ NAME                        READY   STATUS    RESTARTS   AGE
 coturn-0                    1/1     Running   0          1d
 ```
 
-
-
-
-
 ## Appendix: Debugging procedure.
 
 If coturn has already been installed once (for example if something went wrong and you are re-trying), before running a new deploy of Coturn first do:
@@ -506,6 +483,24 @@ And then re-run the `helm install` command.
 
 d helm install coturn ./charts/coturn --timeout=15m0s --values values/coturn/values.yaml --values values/coturn/secret.yaml
 ```
 
+For further debugging, enable `verboseLogging` in `charts/coturn/values.yaml` and redeploy coturn:
+
+```yaml
+config:
+  verboseLogging: true
+```
+
+```bash
+d helm uninstall coturn
+d helm install coturn ./charts/coturn --timeout=15m0s --values values/coturn/values.yaml --values values/coturn/secret.yaml
+```
+
+Debug logs should now be visible in the coturn pod's stdout:
+
+```bash
+d kubectl logs coturn-0
+```
+
 ## Appendix: Note on migration.
 
 The current guide is written with the assumption that you are setting up Coturn for the first time, on a fresh Wire-Server installation.
@@ -521,4 +516,3 @@ These are the additional steps to ensure a smooth transition:
 3. Re-deploy the Wire-Server chart to apply the new configuration.
 4. Wait at least 24 hours for all clients to retrieve the new configuration.
 5. Once you are sure all clients have migrated to Coturn, you can disable Restund as described in this guide.
-
diff --git a/offline/docs_ubuntu_22.04.md b/offline/docs_ubuntu_22.04.md
index 6becf6dbb..eba8459e9 100644
--- a/offline/docs_ubuntu_22.04.md
+++ b/offline/docs_ubuntu_22.04.md
@@ -4,6 +4,10 @@ We have a pipeline in `wire-server-deploy` producing container images, static
 binaries, ansible playbooks, debian package sources and everything required to
 install Wire.
 
+## Demo / Testing installation
+
+To install a self-hosted instance of Wire deployed on a single server ("Wire in a box") for testing purposes, we recommend the [autodeploy.sh](../bin/autodeploy.sh) script. See also the [Automated full install](single_hetzner_machine_installation.md#automated-full-install) section in the Single Hetzner Machine installation readme.
+
 ## Installing docker
 
 Note: If you are using a Hetzner machine, docker should already be installed (you can check with `docker version`) and you can skip this section.
diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md
index 61e188e13..55e912b7c 100644
--- a/offline/single_hetzner_machine_installation.md
+++ b/offline/single_hetzner_machine_installation.md
@@ -1,8 +1,8 @@
 # Scope
 
-This document gives exact instructions for performing an offline installation of Wire on a single VM from Hetzner. it uses the KVM based virtual machine system to create all of the required virtual machines.
+This document gives exact instructions for performing an offline demo installation of Wire on a single dedicated Hetzner server. It uses the KVM-based virtual machine system to create all of the required virtual machines.
 
-Bootstrapping a single dedicated Hetzner server for virtual machine deployment as well as wire-server-deploy artifact download has largely been automated with ansible and bash.
+Bootstrapping a single dedicated Hetzner server for virtual machine deployment, downloading the wire-server-deploy artifact, and installing wire-server on Kubernetes have been fully automated.
 
 ## Use the hetzner robot console to create a new server.
@@ -18,6 +18,19 @@ If not using Hetzner, for reference, the specs of the ax101 server are:
 
 The main public IPv4 address of the Hetzner server to connect to with SSH / ansible can be found in the "Server" tab in the Hetzner Robot console, next to the Server Name. As soon as the initial Hetzner server deployment is finished, we'll use Ansible to further provision the system.
 
+## Automated full install
+
+If you wish to set up "Wire in a box" for demo or testing purposes, use the script [autodeploy.sh](../bin/autodeploy.sh). It supports several config flags, which can be reviewed by invoking the script with its help flag:
+
+```bash
+autodeploy.sh -h
+```
+
+Running the script against a valid dedicated (Hetzner) server will install a fully functioning "Wire in a box" demo environment, based on the instructions provided in [docs_ubuntu_22.04.md](docs_ubuntu_22.04.md) and [coturn.md](coturn.md).
+
+This process takes approximately 90 minutes. If this script suits your needs and the installation succeeds, there is no need to follow the step-by-step instructions below.
+
+
 ## Adjust ansible playbook vars as needed
 
 Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single-deploy.yml and adjust vars as needed. Example:
@@ -105,4 +118,3 @@ But this does not work for resolving hostnames between VMs at this point. We'll
 
 ### From this point: Switch to [the Ubuntu 22.04 Wire install docs](docs_ubuntu_22.04.md)
-
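+
+Once the install completes, a quick sanity check from the admin host is to confirm that workloads and certificates are healthy. This uses the `d` container wrapper set up during the install; it is a sketch based on the checks the script itself performs, so exact output may vary per setup:
+
+```bash
+d kubectl get pods --all-namespaces   # all pods should be Running or Completed
+d kubectl get certificate             # the letsencrypt certificate should report READY=True
+```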