Fix EFM witness deployment support
jt-edb authored and vibhorkumar123 committed Oct 21, 2021
1 parent 08c8520 commit 54534e8
Showing 5 changed files with 200 additions and 9 deletions.
180 changes: 180 additions & 0 deletions plugins/lookup/efm_nodes.py
@@ -0,0 +1,180 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
name: efm_nodes
author: Julien Tachoires
short_description: Lookup for EFM nodes
description:
- "Retrieves the EFM nodes list, based on node's private IP"
options:
_terms:
description: The private IP of one member of the EFM cluster.
required: False
default:
description: The private IP of the current node is used.
"""

EXAMPLES = """
- name: Show all members of the EFM cluster that the current node is part of
debug: msg="{{ lookup('efm_nodes') }}"
- name: Show all members of the EFM cluster that the {{ primary_private_ip }} is part of
debug: msg="{{ lookup('efm_nodes', primary_private_ip) }}"
"""

RETURN = """
_value:
description:
- List of Postgres nodes
type: list
elements: dict
"""

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase


class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):

efm_clusters = {}
efm_standbys = {}
efm_witnesses = {}
efm_primary_map = {}

myvars = getattr(self._templar, '_available_variables', {})

        # If no terms are given, use the current node's private IP
if len(terms) == 0:
node_private_ip = myvars['hostvars'][variables['inventory_hostname']]['private_ip'] # noqa
else:
node_private_ip = terms[0]

        # If no primary is found in the inventory, return an empty list
if 'primary' not in variables['groups']:
return []

        # Initialize efm_clusters and efm_primary_map for each primary node
        # found in the inventory.
for host in variables['groups']['primary']:
hostvars = myvars['hostvars'][host]
private_ip = hostvars['private_ip']

efm_clusters[private_ip] = []
efm_clusters[private_ip].append(
dict(
node_type='primary',
ansible_host=hostvars['ansible_host'],
hostname=hostvars.get('hostname',
hostvars.get('ansible_hostname')),
private_ip=hostvars['private_ip'],
upstream_node_private_ip=None,
replication_type=None,
inventory_hostname=hostvars['inventory_hostname']
)
)
efm_primary_map[private_ip] = private_ip

# Populate efm_standbys dict if we have standby nodes in the inventory
if 'standby' in variables['groups']:
for host in variables['groups']['standby']:
hostvars = myvars['hostvars'][host]
efm_standbys[host] = dict(
node_type='standby',
ansible_host=hostvars['ansible_host'],
hostname=hostvars.get('hostname',
hostvars.get('ansible_hostname')),
private_ip=hostvars['private_ip'],
upstream_node_private_ip=hostvars['upstream_node_private_ip'],
replication_type=hostvars.get('replication_type',
'asynchronous'),
inventory_hostname=hostvars['inventory_hostname']
)

efm_standbys_len = len(efm_standbys.keys())

# Populate efm_witnesses dict if we have witness nodes in the inventory
if 'witness' in variables['groups']:
for host in variables['groups']['witness']:
hostvars = myvars['hostvars'][host]
efm_witnesses[host] = dict(
node_type='witness',
ansible_host=hostvars['ansible_host'],
hostname=hostvars.get('hostname',
hostvars.get('ansible_hostname')),
private_ip=hostvars['private_ip'],
upstream_node_private_ip=hostvars['upstream_node_private_ip'],
replication_type=None,
inventory_hostname=hostvars['inventory_hostname']
)

efm_witnesses_len = len(efm_witnesses.keys())

# Append the standby nodes into the right efm_clusters item, based on
# standby's upstream node.
while efm_standbys_len != 0:

for k in list(efm_standbys.keys()):
sby = efm_standbys[k]

if sby['upstream_node_private_ip'] in efm_primary_map:
upstream_private_ip = sby['upstream_node_private_ip']
primary_private_ip = efm_primary_map[upstream_private_ip]
efm_primary_map[sby['private_ip']] = primary_private_ip
efm_clusters[primary_private_ip].append(sby)
del(efm_standbys[k])

            # Case when no standby has been resolved in this loop iteration:
            # the remaining standbys reference an unknown upstream node.
if efm_standbys_len == len(efm_standbys.keys()):
raise AnsibleError(
"Inventory error with the following standbys nodes %s. "
"Upstream node is not configured or not found"
% [s for s in efm_standbys.keys()]
)

efm_standbys_len = len(efm_standbys.keys())

# Append witness nodes into the right efm_clusters item, based on
# witness's upstream node.
while efm_witnesses_len != 0:

for k in list(efm_witnesses.keys()):
wit = efm_witnesses[k]

if wit['upstream_node_private_ip'] in efm_primary_map:
upstream_private_ip = wit['upstream_node_private_ip']
primary_private_ip = efm_primary_map[upstream_private_ip]
efm_primary_map[wit['private_ip']] = primary_private_ip
efm_clusters[primary_private_ip].append(wit)
del(efm_witnesses[k])

            # Case when no witness has been resolved in this loop iteration:
            # the remaining witnesses reference an unknown upstream node.
if efm_witnesses_len == len(efm_witnesses.keys()):
raise AnsibleError(
"Inventory error with the following witness nodes %s. "
"Upstream node is not configured or not found"
% [s for s in efm_witnesses.keys()]
)

efm_witnesses_len = len(efm_witnesses.keys())


if node_private_ip in efm_primary_map:
            # Current node is part of one of the EFM clusters found
return efm_clusters[efm_primary_map[node_private_ip]]
else:
primary_private_ips = list(efm_clusters.keys())
            # If the current node is not part of any EFM cluster found but
            # only one EFM cluster has been found, then return that cluster:
            # there is no ambiguity.
if len(primary_private_ips) == 1:
return efm_clusters[primary_private_ips[0]]
else:
raise AnsibleError(
"Unable to find the EFM cluster topology because multiple "
"EFM clusters were found and this current node does not "
"appear to be part of any of them"
)
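
For orientation, here is a minimal sketch of the inventory layout this lookup walks. The group names (primary, standby, witness) and the hostvars it reads (private_ip, upstream_node_private_ip, replication_type) come from the code above; the host names and addresses are illustrative assumptions, not part of this commit.

# Hypothetical YAML inventory; every value below is an example.
all:
  children:
    primary:
      hosts:
        pgnode1:
          ansible_host: 203.0.113.1
          private_ip: 10.0.0.1
    standby:
      hosts:
        pgnode2:
          ansible_host: 203.0.113.2
          private_ip: 10.0.0.2
          upstream_node_private_ip: 10.0.0.1
          replication_type: asynchronous
    witness:
      hosts:
        pgnode3:
          ansible_host: 203.0.113.3
          private_ip: 10.0.0.3
          upstream_node_private_ip: 10.0.0.1

With this layout, lookup('edb_devops.edb_postgres.efm_nodes') returns all three nodes for any member, because each standby and witness resolves, directly or through its upstream chain, to the primary at 10.0.0.1.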
7 changes: 6 additions & 1 deletion plugins/lookup/supported_roles.py
@@ -74,7 +74,12 @@
],
'hammerdbserver': [
'setup_hammerdbserver'
]
],
'witness': [
'setup_repo',
'install_dbserver',
'setup_efm'
],
}
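
As a rough illustration of what the new witness entry allows, a play targeting the witness group could look like the sketch below; the hosts pattern, privilege escalation, and bare role names are assumptions drawn only from the role list above, not from this commit.

- hosts: witness
  name: Deploy EFM witness nodes
  become: yes
  roles:
    - setup_repo         # roles taken from the witness entry in SUPPORTED_ROLES
    - install_dbserver
    - setup_efm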


2 changes: 2 additions & 0 deletions roles/setup_efm/tasks/efm_hba.yml
@@ -5,3 +5,5 @@
tasks_from: manage_hba_conf
vars:
pg_hba_ip_addresses: "{{ pg_allow_ip_addresses }}"
when:
- "'witness' not in group_names"
4 changes: 2 additions & 2 deletions roles/setup_efm/tasks/prepare_hba_value_list.yml
@@ -17,7 +17,7 @@
}
]
when: not pg_ssl
loop: "{{ pg_cluster_nodes }}"
loop: "{{ efm_cluster_nodes }}"
loop_control:
loop_var: node
run_once: true
@@ -39,7 +39,7 @@
}
]
when: pg_ssl
loop: "{{ pg_cluster_nodes }}"
loop: "{{ efm_cluster_nodes }}"
loop_control:
loop_var: node
run_once: true
16 changes: 10 additions & 6 deletions roles/setup_efm/tasks/setup_efm.yml
@@ -27,12 +27,15 @@
stat:
path: "{{ pg_data }}/pg_wal"
register: waldir
when:
- "'witness' not in group_names"

- name: Ensure parent WAL dir has correct permissions
file:
path: "{{ waldir.stat.lnk_source|dirname }}"
owner: "{{ pg_owner }}"
when:
- "'witness' not in group_names"
- waldir is defined
- waldir.stat.islnk

@@ -63,9 +66,9 @@
- ansible_facts.services['firewalld.service'].status == 'enabled'
become: yes

- name: Gather the cluste_nodes information and efm_install_version
- name: Gather the EFM cluster information and efm_install_version
set_fact:
pg_cluster_nodes: "{{ lookup('edb_devops.edb_postgres.pg_sr_cluster_nodes', wantlist=True) }}"
efm_cluster_nodes: "{{ lookup('edb_devops.edb_postgres.efm_nodes', wantlist=True) }}"
    efm_install_version: "{{ efm_version | replace('.','') }}"
run_once: true

@@ -81,7 +84,7 @@
- name: Prepare efm_nodes_list based on use_hostname
set_fact:
efm_nodes_list: "{{ efm_nodes_list + node.inventory_hostname + ':' + efm_port | string + ' ' }}"
loop: "{{ pg_cluster_nodes }}"
loop: "{{ efm_cluster_nodes }}"
loop_control:
loop_var: node
when: use_hostname
@@ -93,7 +96,7 @@
efm_nodes_list: "{{ efm_nodes_list + node.private_ip + ':' + efm_port | string + ' ' }}"
when:
- not use_hostname
loop: "{{ pg_cluster_nodes }}"
loop: "{{ efm_cluster_nodes }}"
loop_control:
loop_var: node
run_once: true
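
To make the result concrete: with the three-node inventory sketched after efm_nodes.py and an efm_port of 7800 (the port value is an assumption), these two tasks would produce one of the following strings, each entry separated and terminated by a space:

# use_hostname is true
efm_nodes_list: "pgnode1:7800 pgnode2:7800 pgnode3:7800 "
# use_hostname is false
efm_nodes_list: "10.0.0.1:7800 10.0.0.2:7800 10.0.0.3:7800 "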
@@ -107,7 +110,7 @@
primary_public_ip: "{{ node.ansible_host }}"
primary_private_ip: "{{ node.private_ip }}"
when: node.node_type == 'primary'
loop: "{{ pg_cluster_nodes }}"
loop: "{{ efm_cluster_nodes }}"
loop_control:
loop_var: node
run_once: true
@@ -144,6 +147,7 @@
become: yes
when:
- efm_pgpool2_integration
- "'witness' not in group_names"

- name: EFM configuration
block:
@@ -152,7 +156,7 @@
no_log: "{{ disable_logging }}"
become: yes

- name: Efm parameters settings
- name: EFM parameters settings
include_tasks: efm_cluster_set_params.yml
no_log: "{{ disable_logging }}"

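After the play completes, one way to confirm that the witness joined the failover cluster is to query EFM directly, for example with a task like the sketch below; the binary path and the cluster name 'efm' are assumptions and should be adjusted to the deployed efm_version and cluster configuration.

- name: Show EFM cluster status (path and cluster name are assumed values)
  command: /usr/edb/efm-4.2/bin/efm cluster-status efm
  register: efm_status
  changed_when: false
  become: yes

- name: Display the status output
  debug:
    var: efm_status.stdout_lines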
