diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index b00c9c51f9f..af74bf44aa0 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -447,6 +447,8 @@ multi-asic-t1-lag: - process_monitoring/test_critical_process_monitoring.py - container_checker/test_container_checker.py - http/test_http_copy.py + - telemetry/test_telemetry_cert_rotation.py + - telemetry/test_telemetry.py dpu: - dash/test_dash_vnet.py @@ -461,10 +463,16 @@ onboarding_t0: - lldp/test_lldp_syncd.py # Flaky, we will triage and fix it later, move to onboarding to unblock pr check - dhcp_relay/test_dhcp_relay_stress.py + - arp/test_arp_update.py + - decap/test_subnet_decap.py + - fdb/test_fdb_mac_learning.py + - ip/test_mgmt_ipv6_only.py onboarding_t1: - lldp/test_lldp_syncd.py + - mpls/test_mpls.py + - vxlan/test_vxlan_route_advertisement.py specific_param: diff --git a/.azure-pipelines/pr_test_skip_scripts.yaml b/.azure-pipelines/pr_test_skip_scripts.yaml index e3aaa8fb502..f233f470736 100644 --- a/.azure-pipelines/pr_test_skip_scripts.yaml +++ b/.azure-pipelines/pr_test_skip_scripts.yaml @@ -216,6 +216,7 @@ tgen: - snappi_tests/bgp/test_bgp_rib_in_convergence.py - snappi_tests/bgp/test_bgp_scalability.py - snappi_tests/ecn/test_dequeue_ecn_with_snappi.py + - snappi_tests/ecn/test_ecn_marking_cisco8000.py - snappi_tests/ecn/test_red_accuracy_with_snappi.py - snappi_tests/multidut/bgp/test_bgp_outbound_downlink_port_flap.py - snappi_tests/multidut/bgp/test_bgp_outbound_downlink_process_crash.py @@ -239,6 +240,7 @@ tgen: - snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py - snappi_tests/multidut/pfcwd/test_multidut_pfcwd_burst_storm_with_snappi.py - snappi_tests/multidut/pfcwd/test_multidut_pfcwd_m2o_with_snappi.py + - snappi_tests/multidut/pfcwd/test_multidut_pfcwd_runtime_traffic_with_snappi.py - snappi_tests/lacp/test_add_remove_link_from_dut.py - 
snappi_tests/lacp/test_add_remove_link_physically.py - snappi_tests/lacp/test_lacp_timers_effect.py @@ -255,6 +257,7 @@ tgen: - snappi_tests/pfcwd/test_pfcwd_m2o_with_snappi.py - snappi_tests/pfcwd/test_pfcwd_runtime_traffic_with_snappi.py - snappi_tests/qos/test_ipip_packet_reorder_with_snappi.py + - snappi_tests/test_multidut_snappi.py snappi: # Snappi test only support on physical snappi testbed diff --git a/.azure-pipelines/pytest-collect-only.yml b/.azure-pipelines/pytest-collect-only.yml index 6d1d2bca9e0..afbfef7cd7a 100644 --- a/.azure-pipelines/pytest-collect-only.yml +++ b/.azure-pipelines/pytest-collect-only.yml @@ -34,6 +34,11 @@ steps: - script: | set -x + if [ -n "${{ parameters.MGMT_BRANCH }}" ]; then + git branch -D ${{ parameters.MGMT_BRANCH }} || true + git checkout -b ${{ parameters.MGMT_BRANCH }} origin/${{ parameters.MGMT_BRANCH }} + fi + sudo docker exec -t -w /var/src/sonic-mgmt/tests sonic-mgmt-collect \ python3 -m pytest --inventory ../ansible/veos_vtb --host-pattern all \ --testbed_file vtestbed.yaml --testbed vms-kvm-t0 \ diff --git a/ansible/generate_topo.py b/ansible/generate_topo.py new file mode 100755 index 00000000000..b78b15bf724 --- /dev/null +++ b/ansible/generate_topo.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 + +from typing import Any, Dict, List +import ipaddress +import click +import jinja2 + +# Define the roles for the devices in the topology +roles_cfg = { + "t0": { + "asn": 65100, + "downlink": None, + "uplink": {"role": "t1", "asn": 64600}, + "peer": {"role": "pt0", "asn": 65100}, + }, + "t1": { + "asn": 65100, + "downlink": {"role": "t0", "asn": 64000}, + "uplink": {"role": "t2", "asn": 65200}, + "peer": None, + }, +} + + +# Utility functions to calculate IP addresses +def calc_ipv4_pair(subnet_str, port_id): + subnet = ipaddress.IPv4Network(subnet_str) + return (str(subnet.network_address + 2*port_id), str(subnet.network_address + 2*port_id + 1)) + + +def calc_ipv6_pair(subnet_str, port_id): + subnet = 
ipaddress.IPv6Network(subnet_str) + return (str(subnet.network_address + 4*port_id+1), str(subnet.network_address + 4*port_id + 2)) + + +def calc_ipv4(subnet_str, port_id): + subnet = ipaddress.IPv4Network(subnet_str) + return str(subnet.network_address + port_id) + + +def calc_ipv6(subnet_str, port_id): + subnet = ipaddress.IPv6Network(subnet_str) + return str(subnet.network_address + port_id) + + +class VM: + """ Class to represent a VM in the topology """ + def __init__(self, + port_id: int, + vm_id: int, + name_id: int, + dut_asn: int, + role_cfg: Dict[str, Any], + ip_offset: int = None): + + self.role = role_cfg["role"] + + # IDs of the VM + self.port_id = port_id + self.vm_offset = vm_id + self.ip_offset = vm_id if ip_offset is None else ip_offset + self.name = f"ARISTA{name_id:02d}{self.role.upper()}" + + # VLAN configuration + self.vlans = [port_id] + + # BGP configuration + self.asn = role_cfg["asn"] + self.peer_asn = dut_asn + + # IP addresses + self.dut_intf_ipv4, self.pc_intf_ipv4 = calc_ipv4_pair("10.0.0.0", self.ip_offset) + self.dut_intf_ipv6, self.pc_intf_ipv6 = calc_ipv6_pair("FC00::", self.ip_offset) + self.loopback_ipv4 = calc_ipv4("100.1.0.0", self.ip_offset+1) + self.loopback_ipv6 = calc_ipv6("2064:100::", self.ip_offset+1) + + # Backplane IPs will go with the VM ID + self.bp_ipv4 = calc_ipv4("10.10.246.1", self.vm_offset+1) + self.bp_ipv6 = calc_ipv6("fc0a::1", (self.vm_offset+1)) + + +class HostInterface: + """ Class to represent a host interface in the topology """ + def __init__(self, port_id: int): + self.port_id = port_id + + +def generate_topo(role: str, port_count: int, uplink_ports: List[int], peer_ports: List[int]): + dut_role_cfg = roles_cfg[role] + + vm_list = [] + hostif_list = [] + per_role_vm_count = {} + for port_id in range(0, port_count): + vm = None + hostif = None + + # Get the VM configuration based on the port ID + vm_role_cfg = None + if port_id in uplink_ports: + if dut_role_cfg["uplink"] is None: + raise 
ValueError("Uplink port specified for a role that doesn't have an uplink") + + vm_role_cfg = dut_role_cfg["uplink"] + + elif port_id in peer_ports: + if dut_role_cfg["peer"] is None: + raise ValueError("Peer port specified for a role that doesn't have a peer") + + vm_role_cfg = dut_role_cfg["peer"] + + else: + # If downlink is not specified, we consider it is host interface + if dut_role_cfg["downlink"] is not None: + vm_role_cfg = dut_role_cfg["downlink"] + vm_role_cfg["asn"] += 1 + + # Create the VM or host interface based on the configuration + if vm_role_cfg is not None: + if vm_role_cfg["role"] not in per_role_vm_count: + per_role_vm_count[vm_role_cfg["role"]] = 0 + per_role_vm_count[vm_role_cfg["role"]] += 1 + + vm = VM(port_id, len(vm_list), per_role_vm_count[vm_role_cfg["role"]], dut_role_cfg["asn"], vm_role_cfg) + vm_list.append(vm) + + else: + hostif = HostInterface(port_id) + hostif_list.append(hostif) + + return vm_list, hostif_list + + +def generate_topo_file_content(role: str, + template_file: str, + vm_list: List[VM], + hostif_list: List[HostInterface]): + + with open(template_file) as f: + template = jinja2.Template(f.read()) + + output = template.render(role=role, + dut=roles_cfg[role], + vm_list=vm_list, + hostif_list=hostif_list) + + return output + + +def output_topo_file(role: str, + keyword: str, + downlink_port_count: int, + uplink_port_count: int, + peer_port_count: int, + file_content: str): + downlink_keyword = f"d{downlink_port_count}" if downlink_port_count > 0 else "" + uplink_keyword = f"u{uplink_port_count}" if uplink_port_count > 0 else "" + peer_keyword = f"s{peer_port_count}" if peer_port_count > 0 else "" + + file_path = f"vars/topo_{role}-{keyword}-{downlink_keyword}{uplink_keyword}{peer_keyword}.yml" + + with open(file_path, "w") as f: + f.write(file_content) + + print(f"Generated topology file: {file_path}") + + +@click.command() +@click.option("--role", "-r", required=True, type=click.Choice(['t1']), help="Role of the device") 
+@click.option("--keyword", "-k", required=True, type=str, help="Keyword for the topology file") +@click.option("--template", "-t", required=True, type=str, help="Path to the Jinja template file") +@click.option("--port-count", "-c", required=True, type=int, help="Number of ports on the device") +@click.option("--uplinks", "-u", required=False, type=str, default="", help="Comma-separated list of uplink ports") +@click.option("--peers", "-p", required=False, type=str, default="", help="Comma-separated list of peer ports") +def main(role: str, keyword: str, template: str, port_count: int, uplinks: str, peers: str): + """ + Generate a topology file for a device: + + \b + Examples (in the ansible directory): + - ./generate_topo.py -r t1 -k isolated -t t1-isolated -c 128 + - ./generate_topo.py -r t1 -k isolated -t t1-isolated -c 232 -u 48,49,58,59,164,165,174,175 + """ + uplink_ports = [int(port) for port in uplinks.split(",")] if uplinks != "" else [] + peer_ports = [int(port) for port in peers.split(",")] if peers != "" else [] + + vm_list, hostif_list = generate_topo(role, port_count, uplink_ports, peer_ports) + file_content = generate_topo_file_content(role, f"templates/topo_{template}.j2", vm_list, hostif_list) + output_topo_file(role, keyword, port_count - len(uplink_ports) - len(peer_ports), len(uplink_ports), + len(peer_ports), file_content) + + +if __name__ == "__main__": + main() diff --git a/ansible/library/exabgp.py b/ansible/library/exabgp.py index 3a8b41de911..f46b262211d 100644 --- a/ansible/library/exabgp.py +++ b/ansible/library/exabgp.py @@ -78,6 +78,16 @@ def run_command(): return "OK\\n" if __name__ == '__main__': + # with werkzeug 3.x the default size of max_form_memory_size + # is 500K. Routes reach a bit beyond that and the client + # receives HTTP 413. + # Configure the max size to 4 MB to be safe. 
+ if not six.PY2: + from werkzeug import Request + max_content_length = 4 * 1024 * 1024 + Request.max_content_length = max_content_length + Request.max_form_memory_size = max_content_length + Request.max_form_parts = max_content_length app.run(host='0.0.0.0', port=sys.argv[1]) ''' diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py index a3df677d351..61898d5c22e 100644 --- a/ansible/library/testbed_vm_info.py +++ b/ansible/library/testbed_vm_info.py @@ -149,8 +149,7 @@ def main(): else: err_msg = "Cannot find the vm {} in VM inventory file {}, please make sure you have enough VMs" \ "for the topology you are using." - err_msg.format(vm_name, vm_facts.vm_file) - module.fail_json(msg=err_msg) + module.fail_json(msg=err_msg.format(vm_name, vm_facts.vm_file)) module.exit_json( ansible_facts={'neighbor_eosvm_mgmt': vm_mgmt_ip, 'topoall': vm_facts.topoall}) except (IOError, OSError): diff --git a/ansible/roles/eos/templates/smartswitch-t1-spine.j2 b/ansible/roles/eos/templates/smartswitch-t1-spine.j2 new file mode 120000 index 00000000000..1029da8a546 --- /dev/null +++ b/ansible/roles/eos/templates/smartswitch-t1-spine.j2 @@ -0,0 +1 @@ +t1-28-lag-spine.j2 \ No newline at end of file diff --git a/ansible/roles/eos/templates/smartswitch-t1-tor.j2 b/ansible/roles/eos/templates/smartswitch-t1-tor.j2 new file mode 120000 index 00000000000..7e09b9a1dce --- /dev/null +++ b/ansible/roles/eos/templates/smartswitch-t1-tor.j2 @@ -0,0 +1 @@ +t1-28-lag-tor.j2 \ No newline at end of file diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index b30a4de578e..1dfafae8765 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -285,6 +285,10 @@ r, ".* INFO .*[duty_cycle_map]: illegal pwm value .*" r, ".* INFO .*command 
'/usr/sbin/smartctl' failed: [116] Stale file handle.*" r, ".* INFO healthd.*Key 'TEMPERATURE_INFO|ASIC' field 'high_threshold' unavailable in database 'STATE_DB'.*" r, ".* INFO healthd.*Key 'TEMPERATURE_INFO|ASIC' field 'temperature' unavailable in database 'STATE_DB'.*" +r, ".* ERR kernel:.*cisco-fpga-p2pm-m-slot p2pm-m-slot\.\d+: cisco_fpga_select_new_acpi_companion: searching for child status\d+ 0x[0-9a-f]+; fpga_id 0x[0-9a-f]+.*" +r, ".* ERR kernel:.*cisco-fpga-pci \d+:\d+:\d+\.\d+: cisco_fpga_select_new_acpi_companion: searching for child status\d+ 0x[0-9a-f]+; fpga_id 0x[0-9a-f]+.*" +r, ".* WARNING kernel:.*pcieport.*device.*error.*status/mask=.*" + # Ignore rsyslog librelp error if rsyslogd on host or container is down or going down r, ".* ERR .*#rsyslogd: librelp error 10008 forwarding to server .* - suspending.*" diff --git a/ansible/templates/topo_t1-isolated.j2 b/ansible/templates/topo_t1-isolated.j2 new file mode 100644 index 00000000000..0c58680063d --- /dev/null +++ b/ansible/templates/topo_t1-isolated.j2 @@ -0,0 +1,47 @@ +topology: + VMs: +{%- for vm in vm_list %} + {{ vm.name }}: + vlans: + - {{ vm.vlans[0] }} + vm_offset: {{ vm.vm_offset }} +{%- endfor %} + +configuration_properties: + common: + dut_asn: {{ dut.asn }} + dut_type: LeafRouter + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine: + swrole: spine + tor: + swrole: tor + +configuration: +{%- for vm in vm_list %} + {{vm.name}}: + properties: + - common + bgp: + asn: {{vm.asn}} + peers: + {{vm.peer_asn}}: + - {{vm.dut_intf_ipv4}} + - {{vm.dut_intf_ipv6}} + interfaces: + Loopback0: + ipv4: {{vm.loopback_ipv4}}/32 + ipv6: {{vm.loopback_ipv6}}/128 + Ethernet1: + ipv4: {{vm.pc_intf_ipv4}}/31 + ipv6: {{vm.pc_intf_ipv6}}/126 + bp_interfaces: + ipv4: {{vm.bp_ipv4}}/24 + ipv6: {{vm.bp_ipv6}}/64 +{%- endfor %} diff --git a/ansible/testbed.csv b/ansible/testbed.csv index 
3892122bbbe..a91a8284a30 100644 --- a/ansible/testbed.csv +++ b/ansible/testbed.csv @@ -1,16 +1,16 @@ -# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment -ptf1-m,ptf1,ptf32,docker-ptf,ptf_ptf1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Test ptf Mellanox -ptf2-b,ptf2,ptf64,docker-ptf,ptf_ptf2,10.255.0.189/24,,server_1,,lab-s6100-01,lab,False,Test ptf Broadcom -vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf_vms1-1,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms -vms-sn2700-t1-lag,vms1-2,t1-lag,docker-ptf,ptf_vms1-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms -vms-sn2700-t0,vms1-3,t0,docker-ptf,ptf_vms1-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms -vms-s6000-t0,vms2-1,t0,docker-ptf,ptf_vms2-1,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,lab,True,Tests Dell S6000 vms -vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf_vms3-1,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,lab,True,Tests Arista A7260 vms -vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf_vms4-1,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms -vms-s6100-t1,vms4-2,t1-64,docker-ptf,ptf_vms4-2,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms -vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf_vms5-1,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,lab,True,ests Dell S6100 vms -vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf_vms1-duts,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],lab,True,Example Multi DUTs testbed -vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,lab,True,superman -ixanvl-vs-conf,anvl,ptf32,docker-ptf-anvl,ptf_anvl,10.250.0.100/24,,server_1,,vlab-01,lab,True,Test ptf ANVL SONIC VM -vms-snappi-sonic,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,sonic-s6100-dut1,snappi-sonic,True,Batman 
-vms-snappi-sonic-multidut,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,[sonic-s6100-dut1;sonic-s6100-dut2],snappi-sonic,True,Batman +# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,is_smartswitch,comment +ptf1-m,ptf1,ptf32,docker-ptf,ptf_ptf1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,,Test ptf Mellanox +ptf2-b,ptf2,ptf64,docker-ptf,ptf_ptf2,10.255.0.189/24,,server_1,,lab-s6100-01,lab,False,,Test ptf Broadcom +vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf_vms1-1,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,,Tests Mellanox SN2700 vms +vms-sn2700-t1-lag,vms1-2,t1-lag,docker-ptf,ptf_vms1-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,,Tests Mellanox SN2700 vms +vms-sn2700-t0,vms1-3,t0,docker-ptf,ptf_vms1-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,,Tests Mellanox SN2700 vms +vms-s6000-t0,vms2-1,t0,docker-ptf,ptf_vms2-1,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,lab,True,,Tests Dell S6000 vms +vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf_vms3-1,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,lab,True,,Tests Arista A7260 vms +vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf_vms4-1,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,lab,True,,Tests Dell S6100 vms +vms-s6100-t1,vms4-2,t1-64,docker-ptf,ptf_vms4-2,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,lab,True,,Tests Dell S6100 vms +vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf_vms5-1,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,lab,True,,Tests Dell S6100 vms +vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf_vms1-duts,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],lab,True,,Example Multi DUTs testbed +vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,lab,True,,superman +ixanvl-vs-conf,anvl,ptf32,docker-ptf-anvl,ptf_anvl,10.250.0.100/24,,server_1,,vlab-01,lab,True,,Test ptf ANVL SONIC VM 
+vms-snappi-sonic,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,sonic-s6100-dut1,snappi-sonic,True,,Batman +vms-snappi-sonic-multidut,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,[sonic-s6100-dut1;sonic-s6100-dut2],snappi-sonic,True,,Batman diff --git a/ansible/vars/topo_smartswitch-t1.yml b/ansible/vars/topo_smartswitch-t1.yml new file mode 100644 index 00000000000..c17de99876d --- /dev/null +++ b/ansible/vars/topo_smartswitch-t1.yml @@ -0,0 +1,669 @@ +topology: + VMs: + ARISTA01T2: + vlans: + - 0 + - 1 + vm_offset: 0 + ARISTA03T2: + vlans: + - 2 + - 3 + vm_offset: 1 + ARISTA05T2: + vlans: + - 4 + - 5 + vm_offset: 2 + ARISTA07T2: + vlans: + - 6 + - 7 + vm_offset: 3 + ARISTA01T0: + vlans: + - 8 + vm_offset: 4 + ARISTA02T0: + vlans: + - 9 + vm_offset: 5 + ARISTA03T0: + vlans: + - 10 + vm_offset: 6 + ARISTA04T0: + vlans: + - 11 + vm_offset: 7 + ARISTA05T0: + vlans: + - 12 + vm_offset: 8 + ARISTA06T0: + vlans: + - 13 + vm_offset: 9 + ARISTA07T0: + vlans: + - 14 + vm_offset: 10 + ARISTA08T0: + vlans: + - 15 + vm_offset: 11 + ARISTA09T0: + vlans: + - 16 + vm_offset: 12 + ARISTA10T0: + vlans: + - 17 + vm_offset: 13 + ARISTA11T0: + vlans: + - 18 + vm_offset: 14 + ARISTA12T0: + vlans: + - 19 + vm_offset: 15 + ARISTA13T0: + vlans: + - 20 + vm_offset: 16 + ARISTA14T0: + vlans: + - 21 + vm_offset: 17 + ARISTA15T0: + vlans: + - 22 + vm_offset: 18 + ARISTA16T0: + vlans: + - 23 + vm_offset: 19 + ARISTA17T0: + vlans: + - 24 + vm_offset: 20 + ARISTA18T0: + vlans: + - 25 + vm_offset: 21 + ARISTA19T0: + vlans: + - 26 + vm_offset: 22 + ARISTA20T0: + vlans: + - 27 + vm_offset: 23 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: LeafRouter + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 20 + tor_subnet_number: 2 + max_tor_subnet_number: 20 + tor_subnet_size: 128 + spine: + swrole: spine + tor: + swrole: tor + +configuration: + ARISTA01T2: + properties: + - common + - spine + bgp: + 
asn: 65200 + peers: + 65100: + - 10.0.0.0 + - FC00::1 + interfaces: + Loopback0: + ipv4: 100.1.0.1/32 + ipv6: 2064:100::1/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.1/31 + ipv6: fc00::2/126 + bp_interface: + ipv4: 10.10.246.1/24 + ipv6: fc0a::2/64 + + ARISTA03T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.4 + - FC00::9 + interfaces: + Loopback0: + ipv4: 100.1.0.3/32 + ipv6: 2064:100::3/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.5/31 + ipv6: fc00::a/126 + bp_interface: + ipv4: 10.10.246.3/24 + ipv6: fc0a::6/64 + + ARISTA05T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.8 + - FC00::11 + interfaces: + Loopback0: + ipv4: 100.1.0.5/32 + ipv6: 2064:100::5/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.9/31 + ipv6: fc00::12/126 + bp_interface: + ipv4: 10.10.246.5/24 + ipv6: fc0a::a/64 + + ARISTA07T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.12 + - FC00::19 + interfaces: + Loopback0: + ipv4: 100.1.0.7/32 + ipv6: 2064:100::7/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.13/31 + ipv6: fc00::1a/126 + bp_interface: + ipv4: 10.10.246.7/24 + ipv6: fc0a::e/64 + + ARISTA01T0: + properties: + - common + - tor + tornum: 1 + bgp: + asn: 64001 + peers: + 65100: + - 10.0.0.32 + - FC00::41 + interfaces: + Loopback0: + ipv4: 100.1.0.17/32 + ipv6: 2064:100::11/128 + Ethernet1: + ipv4: 10.0.0.33/31 + ipv6: fc00::42/126 + bp_interface: + ipv4: 10.10.246.17/24 + ipv6: fc0a::22/64 + vips: + ipv4: + prefixes: + - 200.0.1.0/26 + asn: 64700 + + ARISTA02T0: + properties: + - common + - tor + tornum: 2 + bgp: + asn: 64002 + peers: + 65100: + - 10.0.0.34 + - FC00::45 + interfaces: + Loopback0: + ipv4: 100.1.0.18/32 + ipv6: 2064:100::12/128 + Ethernet1: + ipv4: 10.0.0.35/31 + ipv6: fc00::46/126 + bp_interface: + ipv4: 
10.10.246.18/24 + ipv6: fc0a::25/64 + + ARISTA03T0: + properties: + - common + - tor + tornum: 3 + bgp: + asn: 64003 + peers: + 65100: + - 10.0.0.36 + - FC00::49 + interfaces: + Loopback0: + ipv4: 100.1.0.19/32 + ipv6: 2064:100::13/128 + Ethernet1: + ipv4: 10.0.0.37/31 + ipv6: fc00::4a/126 + bp_interface: + ipv4: 10.10.246.19/24 + ipv6: fc0a::26/64 + vips: + ipv4: + prefixes: + - 200.0.1.0/26 + asn: 64700 + + ARISTA04T0: + properties: + - common + - tor + tornum: 4 + bgp: + asn: 64004 + peers: + 65100: + - 10.0.0.38 + - FC00::4D + interfaces: + Loopback0: + ipv4: 100.1.0.20/32 + ipv6: 2064:100::14/128 + Ethernet1: + ipv4: 10.0.0.39/31 + ipv6: fc00::4e/126 + bp_interface: + ipv4: 10.10.246.20/24 + ipv6: fc0a::29/64 + + ARISTA05T0: + properties: + - common + - tor + tornum: 5 + bgp: + asn: 64005 + peers: + 65100: + - 10.0.0.40 + - FC00::51 + interfaces: + Loopback0: + ipv4: 100.1.0.21/32 + ipv6: 2064:100::15/128 + Ethernet1: + ipv4: 10.0.0.41/31 + ipv6: fc00::52/126 + bp_interface: + ipv4: 10.10.246.21/24 + ipv6: fc0a::2a/64 + + ARISTA06T0: + properties: + - common + - tor + tornum: 6 + bgp: + asn: 64006 + peers: + 65100: + - 10.0.0.42 + - FC00::55 + interfaces: + Loopback0: + ipv4: 100.1.0.22/32 + ipv6: 2064:100::16/128 + Ethernet1: + ipv4: 10.0.0.43/31 + ipv6: fc00::56/126 + bp_interface: + ipv4: 10.10.246.22/24 + ipv6: fc0a::2d/64 + + ARISTA07T0: + properties: + - common + - tor + tornum: 7 + bgp: + asn: 64007 + peers: + 65100: + - 10.0.0.44 + - FC00::59 + interfaces: + Loopback0: + ipv4: 100.1.0.23/32 + ipv6: 2064:100::17/128 + Ethernet1: + ipv4: 10.0.0.45/31 + ipv6: fc00::5a/126 + bp_interface: + ipv4: 10.10.246.23/24 + ipv6: fc0a::2e/64 + + ARISTA08T0: + properties: + - common + - tor + tornum: 8 + bgp: + asn: 64008 + peers: + 65100: + - 10.0.0.46 + - FC00::5D + interfaces: + Loopback0: + ipv4: 100.1.0.24/32 + ipv6: 2064:100::18/128 + Ethernet1: + ipv4: 10.0.0.47/31 + ipv6: fc00::5e/126 + bp_interface: + ipv4: 10.10.246.24/24 + ipv6: fc0a::31/64 + + ARISTA09T0: 
+ properties: + - common + - tor + tornum: 9 + bgp: + asn: 64009 + peers: + 65100: + - 10.0.0.48 + - FC00::61 + interfaces: + Loopback0: + ipv4: 100.1.0.25/32 + ipv6: 2064:100::19/128 + Ethernet1: + ipv4: 10.0.0.49/31 + ipv6: fc00::62/126 + bp_interface: + ipv4: 10.10.246.25/24 + ipv6: fc0a::32/64 + + ARISTA10T0: + properties: + - common + - tor + tornum: 10 + bgp: + asn: 64010 + peers: + 65100: + - 10.0.0.50 + - FC00::65 + interfaces: + Loopback0: + ipv4: 100.1.0.26/32 + ipv6: 2064:100::1a/128 + Ethernet1: + ipv4: 10.0.0.51/31 + ipv6: fc00::66/126 + bp_interface: + ipv4: 10.10.246.26/24 + ipv6: fc0a::35/64 + + ARISTA11T0: + properties: + - common + - tor + tornum: 11 + bgp: + asn: 64011 + peers: + 65100: + - 10.0.0.52 + - FC00::69 + interfaces: + Loopback0: + ipv4: 100.1.0.27/32 + ipv6: 2064:100::1b/128 + Ethernet1: + ipv4: 10.0.0.53/31 + ipv6: fc00::6a/126 + bp_interface: + ipv4: 10.10.246.27/24 + ipv6: fc0a::36/64 + + ARISTA12T0: + properties: + - common + - tor + tornum: 12 + bgp: + asn: 64012 + peers: + 65100: + - 10.0.0.54 + - FC00::6D + interfaces: + Loopback0: + ipv4: 100.1.0.28/32 + ipv6: 2064:100::1c/128 + Ethernet1: + ipv4: 10.0.0.55/31 + ipv6: fc00::6e/126 + bp_interface: + ipv4: 10.10.246.28/24 + ipv6: fc0a::39/64 + + ARISTA13T0: + properties: + - common + - tor + tornum: 13 + bgp: + asn: 64013 + peers: + 65100: + - 10.0.0.56 + - FC00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::3a/64 + + ARISTA14T0: + properties: + - common + - tor + tornum: 14 + bgp: + asn: 64014 + peers: + 65100: + - 10.0.0.58 + - FC00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100::1e/128 + Ethernet1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::3d/64 + + ARISTA15T0: + properties: + - common + - tor + tornum: 15 + bgp: + asn: 64015 + peers: + 65100: + - 10.0.0.60 + - 
FC00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::3e/64 + + ARISTA16T0: + properties: + - common + - tor + tornum: 16 + bgp: + asn: 64016 + peers: + 65100: + - 10.0.0.62 + - FC00::7D + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interface: + ipv4: 10.10.246.32/24 + ipv6: fc0a::41/64 + + ARISTA17T0: + properties: + - common + - tor + tornum: 17 + bgp: + asn: 64017 + peers: + 65100: + - 10.0.0.64 + - FC00::81 + interfaces: + Loopback0: + ipv4: 100.1.0.33/32 + ipv6: 2064:100::21/128 + Ethernet1: + ipv4: 10.0.0.65/31 + ipv6: fc00::82/126 + bp_interface: + ipv4: 10.10.246.33/24 + ipv6: fc0a::42/64 + + ARISTA18T0: + properties: + - common + - tor + tornum: 18 + bgp: + asn: 64018 + peers: + 65100: + - 10.0.0.66 + - FC00::85 + interfaces: + Loopback0: + ipv4: 100.1.0.34/32 + ipv6: 2064:100::22/128 + Ethernet1: + ipv4: 10.0.0.67/31 + ipv6: fc00::86/126 + bp_interface: + ipv4: 10.10.246.34/24 + ipv6: fc0a::45/64 + + ARISTA19T0: + properties: + - common + - tor + tornum: 19 + bgp: + asn: 64019 + peers: + 65100: + - 10.0.0.68 + - FC00::89 + interfaces: + Loopback0: + ipv4: 100.1.0.35/32 + ipv6: 2064:100::23/128 + Ethernet1: + ipv4: 10.0.0.69/31 + ipv6: fc00::8a/126 + bp_interface: + ipv4: 10.10.246.35/24 + ipv6: fc0a::46/64 + + ARISTA20T0: + properties: + - common + - tor + tornum: 20 + bgp: + asn: 64020 + peers: + 65100: + - 10.0.0.70 + - FC00::8D + interfaces: + Loopback0: + ipv4: 100.1.0.36/32 + ipv6: 2064:100::24/128 + Ethernet1: + ipv4: 10.0.0.71/31 + ipv6: fc00::8e/126 + bp_interface: + ipv4: 10.10.246.36/24 + ipv6: fc0a::49/64 diff --git a/ansible/vars/topo_t1-isolated-d128.yml b/ansible/vars/topo_t1-isolated-d128.yml new file mode 100644 index 00000000000..1873728bd2e --- /dev/null +++ b/ansible/vars/topo_t1-isolated-d128.yml @@ -0,0 +1,2964 
@@ +topology: + VMs: + ARISTA01T0: + vlans: + - 0 + vm_offset: 0 + ARISTA02T0: + vlans: + - 1 + vm_offset: 1 + ARISTA03T0: + vlans: + - 2 + vm_offset: 2 + ARISTA04T0: + vlans: + - 3 + vm_offset: 3 + ARISTA05T0: + vlans: + - 4 + vm_offset: 4 + ARISTA06T0: + vlans: + - 5 + vm_offset: 5 + ARISTA07T0: + vlans: + - 6 + vm_offset: 6 + ARISTA08T0: + vlans: + - 7 + vm_offset: 7 + ARISTA09T0: + vlans: + - 8 + vm_offset: 8 + ARISTA10T0: + vlans: + - 9 + vm_offset: 9 + ARISTA11T0: + vlans: + - 10 + vm_offset: 10 + ARISTA12T0: + vlans: + - 11 + vm_offset: 11 + ARISTA13T0: + vlans: + - 12 + vm_offset: 12 + ARISTA14T0: + vlans: + - 13 + vm_offset: 13 + ARISTA15T0: + vlans: + - 14 + vm_offset: 14 + ARISTA16T0: + vlans: + - 15 + vm_offset: 15 + ARISTA17T0: + vlans: + - 16 + vm_offset: 16 + ARISTA18T0: + vlans: + - 17 + vm_offset: 17 + ARISTA19T0: + vlans: + - 18 + vm_offset: 18 + ARISTA20T0: + vlans: + - 19 + vm_offset: 19 + ARISTA21T0: + vlans: + - 20 + vm_offset: 20 + ARISTA22T0: + vlans: + - 21 + vm_offset: 21 + ARISTA23T0: + vlans: + - 22 + vm_offset: 22 + ARISTA24T0: + vlans: + - 23 + vm_offset: 23 + ARISTA25T0: + vlans: + - 24 + vm_offset: 24 + ARISTA26T0: + vlans: + - 25 + vm_offset: 25 + ARISTA27T0: + vlans: + - 26 + vm_offset: 26 + ARISTA28T0: + vlans: + - 27 + vm_offset: 27 + ARISTA29T0: + vlans: + - 28 + vm_offset: 28 + ARISTA30T0: + vlans: + - 29 + vm_offset: 29 + ARISTA31T0: + vlans: + - 30 + vm_offset: 30 + ARISTA32T0: + vlans: + - 31 + vm_offset: 31 + ARISTA33T0: + vlans: + - 32 + vm_offset: 32 + ARISTA34T0: + vlans: + - 33 + vm_offset: 33 + ARISTA35T0: + vlans: + - 34 + vm_offset: 34 + ARISTA36T0: + vlans: + - 35 + vm_offset: 35 + ARISTA37T0: + vlans: + - 36 + vm_offset: 36 + ARISTA38T0: + vlans: + - 37 + vm_offset: 37 + ARISTA39T0: + vlans: + - 38 + vm_offset: 38 + ARISTA40T0: + vlans: + - 39 + vm_offset: 39 + ARISTA41T0: + vlans: + - 40 + vm_offset: 40 + ARISTA42T0: + vlans: + - 41 + vm_offset: 41 + ARISTA43T0: + vlans: + - 42 + vm_offset: 42 + ARISTA44T0: + 
vlans: + - 43 + vm_offset: 43 + ARISTA45T0: + vlans: + - 44 + vm_offset: 44 + ARISTA46T0: + vlans: + - 45 + vm_offset: 45 + ARISTA47T0: + vlans: + - 46 + vm_offset: 46 + ARISTA48T0: + vlans: + - 47 + vm_offset: 47 + ARISTA49T0: + vlans: + - 48 + vm_offset: 48 + ARISTA50T0: + vlans: + - 49 + vm_offset: 49 + ARISTA51T0: + vlans: + - 50 + vm_offset: 50 + ARISTA52T0: + vlans: + - 51 + vm_offset: 51 + ARISTA53T0: + vlans: + - 52 + vm_offset: 52 + ARISTA54T0: + vlans: + - 53 + vm_offset: 53 + ARISTA55T0: + vlans: + - 54 + vm_offset: 54 + ARISTA56T0: + vlans: + - 55 + vm_offset: 55 + ARISTA57T0: + vlans: + - 56 + vm_offset: 56 + ARISTA58T0: + vlans: + - 57 + vm_offset: 57 + ARISTA59T0: + vlans: + - 58 + vm_offset: 58 + ARISTA60T0: + vlans: + - 59 + vm_offset: 59 + ARISTA61T0: + vlans: + - 60 + vm_offset: 60 + ARISTA62T0: + vlans: + - 61 + vm_offset: 61 + ARISTA63T0: + vlans: + - 62 + vm_offset: 62 + ARISTA64T0: + vlans: + - 63 + vm_offset: 63 + ARISTA65T0: + vlans: + - 64 + vm_offset: 64 + ARISTA66T0: + vlans: + - 65 + vm_offset: 65 + ARISTA67T0: + vlans: + - 66 + vm_offset: 66 + ARISTA68T0: + vlans: + - 67 + vm_offset: 67 + ARISTA69T0: + vlans: + - 68 + vm_offset: 68 + ARISTA70T0: + vlans: + - 69 + vm_offset: 69 + ARISTA71T0: + vlans: + - 70 + vm_offset: 70 + ARISTA72T0: + vlans: + - 71 + vm_offset: 71 + ARISTA73T0: + vlans: + - 72 + vm_offset: 72 + ARISTA74T0: + vlans: + - 73 + vm_offset: 73 + ARISTA75T0: + vlans: + - 74 + vm_offset: 74 + ARISTA76T0: + vlans: + - 75 + vm_offset: 75 + ARISTA77T0: + vlans: + - 76 + vm_offset: 76 + ARISTA78T0: + vlans: + - 77 + vm_offset: 77 + ARISTA79T0: + vlans: + - 78 + vm_offset: 78 + ARISTA80T0: + vlans: + - 79 + vm_offset: 79 + ARISTA81T0: + vlans: + - 80 + vm_offset: 80 + ARISTA82T0: + vlans: + - 81 + vm_offset: 81 + ARISTA83T0: + vlans: + - 82 + vm_offset: 82 + ARISTA84T0: + vlans: + - 83 + vm_offset: 83 + ARISTA85T0: + vlans: + - 84 + vm_offset: 84 + ARISTA86T0: + vlans: + - 85 + vm_offset: 85 + ARISTA87T0: + vlans: + - 86 + 
vm_offset: 86 + ARISTA88T0: + vlans: + - 87 + vm_offset: 87 + ARISTA89T0: + vlans: + - 88 + vm_offset: 88 + ARISTA90T0: + vlans: + - 89 + vm_offset: 89 + ARISTA91T0: + vlans: + - 90 + vm_offset: 90 + ARISTA92T0: + vlans: + - 91 + vm_offset: 91 + ARISTA93T0: + vlans: + - 92 + vm_offset: 92 + ARISTA94T0: + vlans: + - 93 + vm_offset: 93 + ARISTA95T0: + vlans: + - 94 + vm_offset: 94 + ARISTA96T0: + vlans: + - 95 + vm_offset: 95 + ARISTA97T0: + vlans: + - 96 + vm_offset: 96 + ARISTA98T0: + vlans: + - 97 + vm_offset: 97 + ARISTA99T0: + vlans: + - 98 + vm_offset: 98 + ARISTA100T0: + vlans: + - 99 + vm_offset: 99 + ARISTA101T0: + vlans: + - 100 + vm_offset: 100 + ARISTA102T0: + vlans: + - 101 + vm_offset: 101 + ARISTA103T0: + vlans: + - 102 + vm_offset: 102 + ARISTA104T0: + vlans: + - 103 + vm_offset: 103 + ARISTA105T0: + vlans: + - 104 + vm_offset: 104 + ARISTA106T0: + vlans: + - 105 + vm_offset: 105 + ARISTA107T0: + vlans: + - 106 + vm_offset: 106 + ARISTA108T0: + vlans: + - 107 + vm_offset: 107 + ARISTA109T0: + vlans: + - 108 + vm_offset: 108 + ARISTA110T0: + vlans: + - 109 + vm_offset: 109 + ARISTA111T0: + vlans: + - 110 + vm_offset: 110 + ARISTA112T0: + vlans: + - 111 + vm_offset: 111 + ARISTA113T0: + vlans: + - 112 + vm_offset: 112 + ARISTA114T0: + vlans: + - 113 + vm_offset: 113 + ARISTA115T0: + vlans: + - 114 + vm_offset: 114 + ARISTA116T0: + vlans: + - 115 + vm_offset: 115 + ARISTA117T0: + vlans: + - 116 + vm_offset: 116 + ARISTA118T0: + vlans: + - 117 + vm_offset: 117 + ARISTA119T0: + vlans: + - 118 + vm_offset: 118 + ARISTA120T0: + vlans: + - 119 + vm_offset: 119 + ARISTA121T0: + vlans: + - 120 + vm_offset: 120 + ARISTA122T0: + vlans: + - 121 + vm_offset: 121 + ARISTA123T0: + vlans: + - 122 + vm_offset: 122 + ARISTA124T0: + vlans: + - 123 + vm_offset: 123 + ARISTA125T0: + vlans: + - 124 + vm_offset: 124 + ARISTA126T0: + vlans: + - 125 + vm_offset: 125 + ARISTA127T0: + vlans: + - 126 + vm_offset: 126 + ARISTA128T0: + vlans: + - 127 + vm_offset: 127 + 
+configuration_properties: + common: + dut_asn: 65100 + dut_type: LeafRouter + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine: + swrole: spine + tor: + swrole: tor + +configuration: + ARISTA01T0: + properties: + - common + bgp: + asn: 64001 + peers: + 65100: + - 10.0.0.0 + - fc00::1 + interfaces: + Loopback0: + ipv4: 100.1.0.1/32 + ipv6: 2064:100::1/128 + Ethernet1: + ipv4: 10.0.0.1/31 + ipv6: fc00::2/126 + bp_interfaces: + ipv4: 10.10.246.2/24 + ipv6: fc0a::2/64 + ARISTA02T0: + properties: + - common + bgp: + asn: 64002 + peers: + 65100: + - 10.0.0.2 + - fc00::5 + interfaces: + Loopback0: + ipv4: 100.1.0.2/32 + ipv6: 2064:100::2/128 + Ethernet1: + ipv4: 10.0.0.3/31 + ipv6: fc00::6/126 + bp_interfaces: + ipv4: 10.10.246.3/24 + ipv6: fc0a::3/64 + ARISTA03T0: + properties: + - common + bgp: + asn: 64003 + peers: + 65100: + - 10.0.0.4 + - fc00::9 + interfaces: + Loopback0: + ipv4: 100.1.0.3/32 + ipv6: 2064:100::3/128 + Ethernet1: + ipv4: 10.0.0.5/31 + ipv6: fc00::a/126 + bp_interfaces: + ipv4: 10.10.246.4/24 + ipv6: fc0a::4/64 + ARISTA04T0: + properties: + - common + bgp: + asn: 64004 + peers: + 65100: + - 10.0.0.6 + - fc00::d + interfaces: + Loopback0: + ipv4: 100.1.0.4/32 + ipv6: 2064:100::4/128 + Ethernet1: + ipv4: 10.0.0.7/31 + ipv6: fc00::e/126 + bp_interfaces: + ipv4: 10.10.246.5/24 + ipv6: fc0a::5/64 + ARISTA05T0: + properties: + - common + bgp: + asn: 64005 + peers: + 65100: + - 10.0.0.8 + - fc00::11 + interfaces: + Loopback0: + ipv4: 100.1.0.5/32 + ipv6: 2064:100::5/128 + Ethernet1: + ipv4: 10.0.0.9/31 + ipv6: fc00::12/126 + bp_interfaces: + ipv4: 10.10.246.6/24 + ipv6: fc0a::6/64 + ARISTA06T0: + properties: + - common + bgp: + asn: 64006 + peers: + 65100: + - 10.0.0.10 + - fc00::15 + interfaces: + Loopback0: + ipv4: 100.1.0.6/32 + ipv6: 2064:100::6/128 + Ethernet1: + ipv4: 10.0.0.11/31 + ipv6: fc00::16/126 + bp_interfaces: + ipv4: 10.10.246.7/24 
+ ipv6: fc0a::7/64 + ARISTA07T0: + properties: + - common + bgp: + asn: 64007 + peers: + 65100: + - 10.0.0.12 + - fc00::19 + interfaces: + Loopback0: + ipv4: 100.1.0.7/32 + ipv6: 2064:100::7/128 + Ethernet1: + ipv4: 10.0.0.13/31 + ipv6: fc00::1a/126 + bp_interfaces: + ipv4: 10.10.246.8/24 + ipv6: fc0a::8/64 + ARISTA08T0: + properties: + - common + bgp: + asn: 64008 + peers: + 65100: + - 10.0.0.14 + - fc00::1d + interfaces: + Loopback0: + ipv4: 100.1.0.8/32 + ipv6: 2064:100::8/128 + Ethernet1: + ipv4: 10.0.0.15/31 + ipv6: fc00::1e/126 + bp_interfaces: + ipv4: 10.10.246.9/24 + ipv6: fc0a::9/64 + ARISTA09T0: + properties: + - common + bgp: + asn: 64009 + peers: + 65100: + - 10.0.0.16 + - fc00::21 + interfaces: + Loopback0: + ipv4: 100.1.0.9/32 + ipv6: 2064:100::9/128 + Ethernet1: + ipv4: 10.0.0.17/31 + ipv6: fc00::22/126 + bp_interfaces: + ipv4: 10.10.246.10/24 + ipv6: fc0a::a/64 + ARISTA10T0: + properties: + - common + bgp: + asn: 64010 + peers: + 65100: + - 10.0.0.18 + - fc00::25 + interfaces: + Loopback0: + ipv4: 100.1.0.10/32 + ipv6: 2064:100::a/128 + Ethernet1: + ipv4: 10.0.0.19/31 + ipv6: fc00::26/126 + bp_interfaces: + ipv4: 10.10.246.11/24 + ipv6: fc0a::b/64 + ARISTA11T0: + properties: + - common + bgp: + asn: 64011 + peers: + 65100: + - 10.0.0.20 + - fc00::29 + interfaces: + Loopback0: + ipv4: 100.1.0.11/32 + ipv6: 2064:100::b/128 + Ethernet1: + ipv4: 10.0.0.21/31 + ipv6: fc00::2a/126 + bp_interfaces: + ipv4: 10.10.246.12/24 + ipv6: fc0a::c/64 + ARISTA12T0: + properties: + - common + bgp: + asn: 64012 + peers: + 65100: + - 10.0.0.22 + - fc00::2d + interfaces: + Loopback0: + ipv4: 100.1.0.12/32 + ipv6: 2064:100::c/128 + Ethernet1: + ipv4: 10.0.0.23/31 + ipv6: fc00::2e/126 + bp_interfaces: + ipv4: 10.10.246.13/24 + ipv6: fc0a::d/64 + ARISTA13T0: + properties: + - common + bgp: + asn: 64013 + peers: + 65100: + - 10.0.0.24 + - fc00::31 + interfaces: + Loopback0: + ipv4: 100.1.0.13/32 + ipv6: 2064:100::d/128 + Ethernet1: + ipv4: 10.0.0.25/31 + ipv6: fc00::32/126 + 
bp_interfaces: + ipv4: 10.10.246.14/24 + ipv6: fc0a::e/64 + ARISTA14T0: + properties: + - common + bgp: + asn: 64014 + peers: + 65100: + - 10.0.0.26 + - fc00::35 + interfaces: + Loopback0: + ipv4: 100.1.0.14/32 + ipv6: 2064:100::e/128 + Ethernet1: + ipv4: 10.0.0.27/31 + ipv6: fc00::36/126 + bp_interfaces: + ipv4: 10.10.246.15/24 + ipv6: fc0a::f/64 + ARISTA15T0: + properties: + - common + bgp: + asn: 64015 + peers: + 65100: + - 10.0.0.28 + - fc00::39 + interfaces: + Loopback0: + ipv4: 100.1.0.15/32 + ipv6: 2064:100::f/128 + Ethernet1: + ipv4: 10.0.0.29/31 + ipv6: fc00::3a/126 + bp_interfaces: + ipv4: 10.10.246.16/24 + ipv6: fc0a::10/64 + ARISTA16T0: + properties: + - common + bgp: + asn: 64016 + peers: + 65100: + - 10.0.0.30 + - fc00::3d + interfaces: + Loopback0: + ipv4: 100.1.0.16/32 + ipv6: 2064:100::10/128 + Ethernet1: + ipv4: 10.0.0.31/31 + ipv6: fc00::3e/126 + bp_interfaces: + ipv4: 10.10.246.17/24 + ipv6: fc0a::11/64 + ARISTA17T0: + properties: + - common + bgp: + asn: 64017 + peers: + 65100: + - 10.0.0.32 + - fc00::41 + interfaces: + Loopback0: + ipv4: 100.1.0.17/32 + ipv6: 2064:100::11/128 + Ethernet1: + ipv4: 10.0.0.33/31 + ipv6: fc00::42/126 + bp_interfaces: + ipv4: 10.10.246.18/24 + ipv6: fc0a::12/64 + ARISTA18T0: + properties: + - common + bgp: + asn: 64018 + peers: + 65100: + - 10.0.0.34 + - fc00::45 + interfaces: + Loopback0: + ipv4: 100.1.0.18/32 + ipv6: 2064:100::12/128 + Ethernet1: + ipv4: 10.0.0.35/31 + ipv6: fc00::46/126 + bp_interfaces: + ipv4: 10.10.246.19/24 + ipv6: fc0a::13/64 + ARISTA19T0: + properties: + - common + bgp: + asn: 64019 + peers: + 65100: + - 10.0.0.36 + - fc00::49 + interfaces: + Loopback0: + ipv4: 100.1.0.19/32 + ipv6: 2064:100::13/128 + Ethernet1: + ipv4: 10.0.0.37/31 + ipv6: fc00::4a/126 + bp_interfaces: + ipv4: 10.10.246.20/24 + ipv6: fc0a::14/64 + ARISTA20T0: + properties: + - common + bgp: + asn: 64020 + peers: + 65100: + - 10.0.0.38 + - fc00::4d + interfaces: + Loopback0: + ipv4: 100.1.0.20/32 + ipv6: 2064:100::14/128 + 
Ethernet1: + ipv4: 10.0.0.39/31 + ipv6: fc00::4e/126 + bp_interfaces: + ipv4: 10.10.246.21/24 + ipv6: fc0a::15/64 + ARISTA21T0: + properties: + - common + bgp: + asn: 64021 + peers: + 65100: + - 10.0.0.40 + - fc00::51 + interfaces: + Loopback0: + ipv4: 100.1.0.21/32 + ipv6: 2064:100::15/128 + Ethernet1: + ipv4: 10.0.0.41/31 + ipv6: fc00::52/126 + bp_interfaces: + ipv4: 10.10.246.22/24 + ipv6: fc0a::16/64 + ARISTA22T0: + properties: + - common + bgp: + asn: 64022 + peers: + 65100: + - 10.0.0.42 + - fc00::55 + interfaces: + Loopback0: + ipv4: 100.1.0.22/32 + ipv6: 2064:100::16/128 + Ethernet1: + ipv4: 10.0.0.43/31 + ipv6: fc00::56/126 + bp_interfaces: + ipv4: 10.10.246.23/24 + ipv6: fc0a::17/64 + ARISTA23T0: + properties: + - common + bgp: + asn: 64023 + peers: + 65100: + - 10.0.0.44 + - fc00::59 + interfaces: + Loopback0: + ipv4: 100.1.0.23/32 + ipv6: 2064:100::17/128 + Ethernet1: + ipv4: 10.0.0.45/31 + ipv6: fc00::5a/126 + bp_interfaces: + ipv4: 10.10.246.24/24 + ipv6: fc0a::18/64 + ARISTA24T0: + properties: + - common + bgp: + asn: 64024 + peers: + 65100: + - 10.0.0.46 + - fc00::5d + interfaces: + Loopback0: + ipv4: 100.1.0.24/32 + ipv6: 2064:100::18/128 + Ethernet1: + ipv4: 10.0.0.47/31 + ipv6: fc00::5e/126 + bp_interfaces: + ipv4: 10.10.246.25/24 + ipv6: fc0a::19/64 + ARISTA25T0: + properties: + - common + bgp: + asn: 64025 + peers: + 65100: + - 10.0.0.48 + - fc00::61 + interfaces: + Loopback0: + ipv4: 100.1.0.25/32 + ipv6: 2064:100::19/128 + Ethernet1: + ipv4: 10.0.0.49/31 + ipv6: fc00::62/126 + bp_interfaces: + ipv4: 10.10.246.26/24 + ipv6: fc0a::1a/64 + ARISTA26T0: + properties: + - common + bgp: + asn: 64026 + peers: + 65100: + - 10.0.0.50 + - fc00::65 + interfaces: + Loopback0: + ipv4: 100.1.0.26/32 + ipv6: 2064:100::1a/128 + Ethernet1: + ipv4: 10.0.0.51/31 + ipv6: fc00::66/126 + bp_interfaces: + ipv4: 10.10.246.27/24 + ipv6: fc0a::1b/64 + ARISTA27T0: + properties: + - common + bgp: + asn: 64027 + peers: + 65100: + - 10.0.0.52 + - fc00::69 + interfaces: + 
Loopback0: + ipv4: 100.1.0.27/32 + ipv6: 2064:100::1b/128 + Ethernet1: + ipv4: 10.0.0.53/31 + ipv6: fc00::6a/126 + bp_interfaces: + ipv4: 10.10.246.28/24 + ipv6: fc0a::1c/64 + ARISTA28T0: + properties: + - common + bgp: + asn: 64028 + peers: + 65100: + - 10.0.0.54 + - fc00::6d + interfaces: + Loopback0: + ipv4: 100.1.0.28/32 + ipv6: 2064:100::1c/128 + Ethernet1: + ipv4: 10.0.0.55/31 + ipv6: fc00::6e/126 + bp_interfaces: + ipv4: 10.10.246.29/24 + ipv6: fc0a::1d/64 + ARISTA29T0: + properties: + - common + bgp: + asn: 64029 + peers: + 65100: + - 10.0.0.56 + - fc00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interfaces: + ipv4: 10.10.246.30/24 + ipv6: fc0a::1e/64 + ARISTA30T0: + properties: + - common + bgp: + asn: 64030 + peers: + 65100: + - 10.0.0.58 + - fc00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100::1e/128 + Ethernet1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interfaces: + ipv4: 10.10.246.31/24 + ipv6: fc0a::1f/64 + ARISTA31T0: + properties: + - common + bgp: + asn: 64031 + peers: + 65100: + - 10.0.0.60 + - fc00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interfaces: + ipv4: 10.10.246.32/24 + ipv6: fc0a::20/64 + ARISTA32T0: + properties: + - common + bgp: + asn: 64032 + peers: + 65100: + - 10.0.0.62 + - fc00::7d + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interfaces: + ipv4: 10.10.246.33/24 + ipv6: fc0a::21/64 + ARISTA33T0: + properties: + - common + bgp: + asn: 64033 + peers: + 65100: + - 10.0.0.64 + - fc00::81 + interfaces: + Loopback0: + ipv4: 100.1.0.33/32 + ipv6: 2064:100::21/128 + Ethernet1: + ipv4: 10.0.0.65/31 + ipv6: fc00::82/126 + bp_interfaces: + ipv4: 10.10.246.34/24 + ipv6: fc0a::22/64 + ARISTA34T0: + properties: + - common + bgp: + asn: 64034 + 
peers: + 65100: + - 10.0.0.66 + - fc00::85 + interfaces: + Loopback0: + ipv4: 100.1.0.34/32 + ipv6: 2064:100::22/128 + Ethernet1: + ipv4: 10.0.0.67/31 + ipv6: fc00::86/126 + bp_interfaces: + ipv4: 10.10.246.35/24 + ipv6: fc0a::23/64 + ARISTA35T0: + properties: + - common + bgp: + asn: 64035 + peers: + 65100: + - 10.0.0.68 + - fc00::89 + interfaces: + Loopback0: + ipv4: 100.1.0.35/32 + ipv6: 2064:100::23/128 + Ethernet1: + ipv4: 10.0.0.69/31 + ipv6: fc00::8a/126 + bp_interfaces: + ipv4: 10.10.246.36/24 + ipv6: fc0a::24/64 + ARISTA36T0: + properties: + - common + bgp: + asn: 64036 + peers: + 65100: + - 10.0.0.70 + - fc00::8d + interfaces: + Loopback0: + ipv4: 100.1.0.36/32 + ipv6: 2064:100::24/128 + Ethernet1: + ipv4: 10.0.0.71/31 + ipv6: fc00::8e/126 + bp_interfaces: + ipv4: 10.10.246.37/24 + ipv6: fc0a::25/64 + ARISTA37T0: + properties: + - common + bgp: + asn: 64037 + peers: + 65100: + - 10.0.0.72 + - fc00::91 + interfaces: + Loopback0: + ipv4: 100.1.0.37/32 + ipv6: 2064:100::25/128 + Ethernet1: + ipv4: 10.0.0.73/31 + ipv6: fc00::92/126 + bp_interfaces: + ipv4: 10.10.246.38/24 + ipv6: fc0a::26/64 + ARISTA38T0: + properties: + - common + bgp: + asn: 64038 + peers: + 65100: + - 10.0.0.74 + - fc00::95 + interfaces: + Loopback0: + ipv4: 100.1.0.38/32 + ipv6: 2064:100::26/128 + Ethernet1: + ipv4: 10.0.0.75/31 + ipv6: fc00::96/126 + bp_interfaces: + ipv4: 10.10.246.39/24 + ipv6: fc0a::27/64 + ARISTA39T0: + properties: + - common + bgp: + asn: 64039 + peers: + 65100: + - 10.0.0.76 + - fc00::99 + interfaces: + Loopback0: + ipv4: 100.1.0.39/32 + ipv6: 2064:100::27/128 + Ethernet1: + ipv4: 10.0.0.77/31 + ipv6: fc00::9a/126 + bp_interfaces: + ipv4: 10.10.246.40/24 + ipv6: fc0a::28/64 + ARISTA40T0: + properties: + - common + bgp: + asn: 64040 + peers: + 65100: + - 10.0.0.78 + - fc00::9d + interfaces: + Loopback0: + ipv4: 100.1.0.40/32 + ipv6: 2064:100::28/128 + Ethernet1: + ipv4: 10.0.0.79/31 + ipv6: fc00::9e/126 + bp_interfaces: + ipv4: 10.10.246.41/24 + ipv6: fc0a::29/64 + 
ARISTA41T0: + properties: + - common + bgp: + asn: 64041 + peers: + 65100: + - 10.0.0.80 + - fc00::a1 + interfaces: + Loopback0: + ipv4: 100.1.0.41/32 + ipv6: 2064:100::29/128 + Ethernet1: + ipv4: 10.0.0.81/31 + ipv6: fc00::a2/126 + bp_interfaces: + ipv4: 10.10.246.42/24 + ipv6: fc0a::2a/64 + ARISTA42T0: + properties: + - common + bgp: + asn: 64042 + peers: + 65100: + - 10.0.0.82 + - fc00::a5 + interfaces: + Loopback0: + ipv4: 100.1.0.42/32 + ipv6: 2064:100::2a/128 + Ethernet1: + ipv4: 10.0.0.83/31 + ipv6: fc00::a6/126 + bp_interfaces: + ipv4: 10.10.246.43/24 + ipv6: fc0a::2b/64 + ARISTA43T0: + properties: + - common + bgp: + asn: 64043 + peers: + 65100: + - 10.0.0.84 + - fc00::a9 + interfaces: + Loopback0: + ipv4: 100.1.0.43/32 + ipv6: 2064:100::2b/128 + Ethernet1: + ipv4: 10.0.0.85/31 + ipv6: fc00::aa/126 + bp_interfaces: + ipv4: 10.10.246.44/24 + ipv6: fc0a::2c/64 + ARISTA44T0: + properties: + - common + bgp: + asn: 64044 + peers: + 65100: + - 10.0.0.86 + - fc00::ad + interfaces: + Loopback0: + ipv4: 100.1.0.44/32 + ipv6: 2064:100::2c/128 + Ethernet1: + ipv4: 10.0.0.87/31 + ipv6: fc00::ae/126 + bp_interfaces: + ipv4: 10.10.246.45/24 + ipv6: fc0a::2d/64 + ARISTA45T0: + properties: + - common + bgp: + asn: 64045 + peers: + 65100: + - 10.0.0.88 + - fc00::b1 + interfaces: + Loopback0: + ipv4: 100.1.0.45/32 + ipv6: 2064:100::2d/128 + Ethernet1: + ipv4: 10.0.0.89/31 + ipv6: fc00::b2/126 + bp_interfaces: + ipv4: 10.10.246.46/24 + ipv6: fc0a::2e/64 + ARISTA46T0: + properties: + - common + bgp: + asn: 64046 + peers: + 65100: + - 10.0.0.90 + - fc00::b5 + interfaces: + Loopback0: + ipv4: 100.1.0.46/32 + ipv6: 2064:100::2e/128 + Ethernet1: + ipv4: 10.0.0.91/31 + ipv6: fc00::b6/126 + bp_interfaces: + ipv4: 10.10.246.47/24 + ipv6: fc0a::2f/64 + ARISTA47T0: + properties: + - common + bgp: + asn: 64047 + peers: + 65100: + - 10.0.0.92 + - fc00::b9 + interfaces: + Loopback0: + ipv4: 100.1.0.47/32 + ipv6: 2064:100::2f/128 + Ethernet1: + ipv4: 10.0.0.93/31 + ipv6: fc00::ba/126 + 
bp_interfaces: + ipv4: 10.10.246.48/24 + ipv6: fc0a::30/64 + ARISTA48T0: + properties: + - common + bgp: + asn: 64048 + peers: + 65100: + - 10.0.0.94 + - fc00::bd + interfaces: + Loopback0: + ipv4: 100.1.0.48/32 + ipv6: 2064:100::30/128 + Ethernet1: + ipv4: 10.0.0.95/31 + ipv6: fc00::be/126 + bp_interfaces: + ipv4: 10.10.246.49/24 + ipv6: fc0a::31/64 + ARISTA49T0: + properties: + - common + bgp: + asn: 64049 + peers: + 65100: + - 10.0.0.96 + - fc00::c1 + interfaces: + Loopback0: + ipv4: 100.1.0.49/32 + ipv6: 2064:100::31/128 + Ethernet1: + ipv4: 10.0.0.97/31 + ipv6: fc00::c2/126 + bp_interfaces: + ipv4: 10.10.246.50/24 + ipv6: fc0a::32/64 + ARISTA50T0: + properties: + - common + bgp: + asn: 64050 + peers: + 65100: + - 10.0.0.98 + - fc00::c5 + interfaces: + Loopback0: + ipv4: 100.1.0.50/32 + ipv6: 2064:100::32/128 + Ethernet1: + ipv4: 10.0.0.99/31 + ipv6: fc00::c6/126 + bp_interfaces: + ipv4: 10.10.246.51/24 + ipv6: fc0a::33/64 + ARISTA51T0: + properties: + - common + bgp: + asn: 64051 + peers: + 65100: + - 10.0.0.100 + - fc00::c9 + interfaces: + Loopback0: + ipv4: 100.1.0.51/32 + ipv6: 2064:100::33/128 + Ethernet1: + ipv4: 10.0.0.101/31 + ipv6: fc00::ca/126 + bp_interfaces: + ipv4: 10.10.246.52/24 + ipv6: fc0a::34/64 + ARISTA52T0: + properties: + - common + bgp: + asn: 64052 + peers: + 65100: + - 10.0.0.102 + - fc00::cd + interfaces: + Loopback0: + ipv4: 100.1.0.52/32 + ipv6: 2064:100::34/128 + Ethernet1: + ipv4: 10.0.0.103/31 + ipv6: fc00::ce/126 + bp_interfaces: + ipv4: 10.10.246.53/24 + ipv6: fc0a::35/64 + ARISTA53T0: + properties: + - common + bgp: + asn: 64053 + peers: + 65100: + - 10.0.0.104 + - fc00::d1 + interfaces: + Loopback0: + ipv4: 100.1.0.53/32 + ipv6: 2064:100::35/128 + Ethernet1: + ipv4: 10.0.0.105/31 + ipv6: fc00::d2/126 + bp_interfaces: + ipv4: 10.10.246.54/24 + ipv6: fc0a::36/64 + ARISTA54T0: + properties: + - common + bgp: + asn: 64054 + peers: + 65100: + - 10.0.0.106 + - fc00::d5 + interfaces: + Loopback0: + ipv4: 100.1.0.54/32 + ipv6: 
2064:100::36/128 + Ethernet1: + ipv4: 10.0.0.107/31 + ipv6: fc00::d6/126 + bp_interfaces: + ipv4: 10.10.246.55/24 + ipv6: fc0a::37/64 + ARISTA55T0: + properties: + - common + bgp: + asn: 64055 + peers: + 65100: + - 10.0.0.108 + - fc00::d9 + interfaces: + Loopback0: + ipv4: 100.1.0.55/32 + ipv6: 2064:100::37/128 + Ethernet1: + ipv4: 10.0.0.109/31 + ipv6: fc00::da/126 + bp_interfaces: + ipv4: 10.10.246.56/24 + ipv6: fc0a::38/64 + ARISTA56T0: + properties: + - common + bgp: + asn: 64056 + peers: + 65100: + - 10.0.0.110 + - fc00::dd + interfaces: + Loopback0: + ipv4: 100.1.0.56/32 + ipv6: 2064:100::38/128 + Ethernet1: + ipv4: 10.0.0.111/31 + ipv6: fc00::de/126 + bp_interfaces: + ipv4: 10.10.246.57/24 + ipv6: fc0a::39/64 + ARISTA57T0: + properties: + - common + bgp: + asn: 64057 + peers: + 65100: + - 10.0.0.112 + - fc00::e1 + interfaces: + Loopback0: + ipv4: 100.1.0.57/32 + ipv6: 2064:100::39/128 + Ethernet1: + ipv4: 10.0.0.113/31 + ipv6: fc00::e2/126 + bp_interfaces: + ipv4: 10.10.246.58/24 + ipv6: fc0a::3a/64 + ARISTA58T0: + properties: + - common + bgp: + asn: 64058 + peers: + 65100: + - 10.0.0.114 + - fc00::e5 + interfaces: + Loopback0: + ipv4: 100.1.0.58/32 + ipv6: 2064:100::3a/128 + Ethernet1: + ipv4: 10.0.0.115/31 + ipv6: fc00::e6/126 + bp_interfaces: + ipv4: 10.10.246.59/24 + ipv6: fc0a::3b/64 + ARISTA59T0: + properties: + - common + bgp: + asn: 64059 + peers: + 65100: + - 10.0.0.116 + - fc00::e9 + interfaces: + Loopback0: + ipv4: 100.1.0.59/32 + ipv6: 2064:100::3b/128 + Ethernet1: + ipv4: 10.0.0.117/31 + ipv6: fc00::ea/126 + bp_interfaces: + ipv4: 10.10.246.60/24 + ipv6: fc0a::3c/64 + ARISTA60T0: + properties: + - common + bgp: + asn: 64060 + peers: + 65100: + - 10.0.0.118 + - fc00::ed + interfaces: + Loopback0: + ipv4: 100.1.0.60/32 + ipv6: 2064:100::3c/128 + Ethernet1: + ipv4: 10.0.0.119/31 + ipv6: fc00::ee/126 + bp_interfaces: + ipv4: 10.10.246.61/24 + ipv6: fc0a::3d/64 + ARISTA61T0: + properties: + - common + bgp: + asn: 64061 + peers: + 65100: + - 
10.0.0.120 + - fc00::f1 + interfaces: + Loopback0: + ipv4: 100.1.0.61/32 + ipv6: 2064:100::3d/128 + Ethernet1: + ipv4: 10.0.0.121/31 + ipv6: fc00::f2/126 + bp_interfaces: + ipv4: 10.10.246.62/24 + ipv6: fc0a::3e/64 + ARISTA62T0: + properties: + - common + bgp: + asn: 64062 + peers: + 65100: + - 10.0.0.122 + - fc00::f5 + interfaces: + Loopback0: + ipv4: 100.1.0.62/32 + ipv6: 2064:100::3e/128 + Ethernet1: + ipv4: 10.0.0.123/31 + ipv6: fc00::f6/126 + bp_interfaces: + ipv4: 10.10.246.63/24 + ipv6: fc0a::3f/64 + ARISTA63T0: + properties: + - common + bgp: + asn: 64063 + peers: + 65100: + - 10.0.0.124 + - fc00::f9 + interfaces: + Loopback0: + ipv4: 100.1.0.63/32 + ipv6: 2064:100::3f/128 + Ethernet1: + ipv4: 10.0.0.125/31 + ipv6: fc00::fa/126 + bp_interfaces: + ipv4: 10.10.246.64/24 + ipv6: fc0a::40/64 + ARISTA64T0: + properties: + - common + bgp: + asn: 64064 + peers: + 65100: + - 10.0.0.126 + - fc00::fd + interfaces: + Loopback0: + ipv4: 100.1.0.64/32 + ipv6: 2064:100::40/128 + Ethernet1: + ipv4: 10.0.0.127/31 + ipv6: fc00::fe/126 + bp_interfaces: + ipv4: 10.10.246.65/24 + ipv6: fc0a::41/64 + ARISTA65T0: + properties: + - common + bgp: + asn: 64065 + peers: + 65100: + - 10.0.0.128 + - fc00::101 + interfaces: + Loopback0: + ipv4: 100.1.0.65/32 + ipv6: 2064:100::41/128 + Ethernet1: + ipv4: 10.0.0.129/31 + ipv6: fc00::102/126 + bp_interfaces: + ipv4: 10.10.246.66/24 + ipv6: fc0a::42/64 + ARISTA66T0: + properties: + - common + bgp: + asn: 64066 + peers: + 65100: + - 10.0.0.130 + - fc00::105 + interfaces: + Loopback0: + ipv4: 100.1.0.66/32 + ipv6: 2064:100::42/128 + Ethernet1: + ipv4: 10.0.0.131/31 + ipv6: fc00::106/126 + bp_interfaces: + ipv4: 10.10.246.67/24 + ipv6: fc0a::43/64 + ARISTA67T0: + properties: + - common + bgp: + asn: 64067 + peers: + 65100: + - 10.0.0.132 + - fc00::109 + interfaces: + Loopback0: + ipv4: 100.1.0.67/32 + ipv6: 2064:100::43/128 + Ethernet1: + ipv4: 10.0.0.133/31 + ipv6: fc00::10a/126 + bp_interfaces: + ipv4: 10.10.246.68/24 + ipv6: fc0a::44/64 + 
ARISTA68T0: + properties: + - common + bgp: + asn: 64068 + peers: + 65100: + - 10.0.0.134 + - fc00::10d + interfaces: + Loopback0: + ipv4: 100.1.0.68/32 + ipv6: 2064:100::44/128 + Ethernet1: + ipv4: 10.0.0.135/31 + ipv6: fc00::10e/126 + bp_interfaces: + ipv4: 10.10.246.69/24 + ipv6: fc0a::45/64 + ARISTA69T0: + properties: + - common + bgp: + asn: 64069 + peers: + 65100: + - 10.0.0.136 + - fc00::111 + interfaces: + Loopback0: + ipv4: 100.1.0.69/32 + ipv6: 2064:100::45/128 + Ethernet1: + ipv4: 10.0.0.137/31 + ipv6: fc00::112/126 + bp_interfaces: + ipv4: 10.10.246.70/24 + ipv6: fc0a::46/64 + ARISTA70T0: + properties: + - common + bgp: + asn: 64070 + peers: + 65100: + - 10.0.0.138 + - fc00::115 + interfaces: + Loopback0: + ipv4: 100.1.0.70/32 + ipv6: 2064:100::46/128 + Ethernet1: + ipv4: 10.0.0.139/31 + ipv6: fc00::116/126 + bp_interfaces: + ipv4: 10.10.246.71/24 + ipv6: fc0a::47/64 + ARISTA71T0: + properties: + - common + bgp: + asn: 64071 + peers: + 65100: + - 10.0.0.140 + - fc00::119 + interfaces: + Loopback0: + ipv4: 100.1.0.71/32 + ipv6: 2064:100::47/128 + Ethernet1: + ipv4: 10.0.0.141/31 + ipv6: fc00::11a/126 + bp_interfaces: + ipv4: 10.10.246.72/24 + ipv6: fc0a::48/64 + ARISTA72T0: + properties: + - common + bgp: + asn: 64072 + peers: + 65100: + - 10.0.0.142 + - fc00::11d + interfaces: + Loopback0: + ipv4: 100.1.0.72/32 + ipv6: 2064:100::48/128 + Ethernet1: + ipv4: 10.0.0.143/31 + ipv6: fc00::11e/126 + bp_interfaces: + ipv4: 10.10.246.73/24 + ipv6: fc0a::49/64 + ARISTA73T0: + properties: + - common + bgp: + asn: 64073 + peers: + 65100: + - 10.0.0.144 + - fc00::121 + interfaces: + Loopback0: + ipv4: 100.1.0.73/32 + ipv6: 2064:100::49/128 + Ethernet1: + ipv4: 10.0.0.145/31 + ipv6: fc00::122/126 + bp_interfaces: + ipv4: 10.10.246.74/24 + ipv6: fc0a::4a/64 + ARISTA74T0: + properties: + - common + bgp: + asn: 64074 + peers: + 65100: + - 10.0.0.146 + - fc00::125 + interfaces: + Loopback0: + ipv4: 100.1.0.74/32 + ipv6: 2064:100::4a/128 + Ethernet1: + ipv4: 
10.0.0.147/31 + ipv6: fc00::126/126 + bp_interfaces: + ipv4: 10.10.246.75/24 + ipv6: fc0a::4b/64 + ARISTA75T0: + properties: + - common + bgp: + asn: 64075 + peers: + 65100: + - 10.0.0.148 + - fc00::129 + interfaces: + Loopback0: + ipv4: 100.1.0.75/32 + ipv6: 2064:100::4b/128 + Ethernet1: + ipv4: 10.0.0.149/31 + ipv6: fc00::12a/126 + bp_interfaces: + ipv4: 10.10.246.76/24 + ipv6: fc0a::4c/64 + ARISTA76T0: + properties: + - common + bgp: + asn: 64076 + peers: + 65100: + - 10.0.0.150 + - fc00::12d + interfaces: + Loopback0: + ipv4: 100.1.0.76/32 + ipv6: 2064:100::4c/128 + Ethernet1: + ipv4: 10.0.0.151/31 + ipv6: fc00::12e/126 + bp_interfaces: + ipv4: 10.10.246.77/24 + ipv6: fc0a::4d/64 + ARISTA77T0: + properties: + - common + bgp: + asn: 64077 + peers: + 65100: + - 10.0.0.152 + - fc00::131 + interfaces: + Loopback0: + ipv4: 100.1.0.77/32 + ipv6: 2064:100::4d/128 + Ethernet1: + ipv4: 10.0.0.153/31 + ipv6: fc00::132/126 + bp_interfaces: + ipv4: 10.10.246.78/24 + ipv6: fc0a::4e/64 + ARISTA78T0: + properties: + - common + bgp: + asn: 64078 + peers: + 65100: + - 10.0.0.154 + - fc00::135 + interfaces: + Loopback0: + ipv4: 100.1.0.78/32 + ipv6: 2064:100::4e/128 + Ethernet1: + ipv4: 10.0.0.155/31 + ipv6: fc00::136/126 + bp_interfaces: + ipv4: 10.10.246.79/24 + ipv6: fc0a::4f/64 + ARISTA79T0: + properties: + - common + bgp: + asn: 64079 + peers: + 65100: + - 10.0.0.156 + - fc00::139 + interfaces: + Loopback0: + ipv4: 100.1.0.79/32 + ipv6: 2064:100::4f/128 + Ethernet1: + ipv4: 10.0.0.157/31 + ipv6: fc00::13a/126 + bp_interfaces: + ipv4: 10.10.246.80/24 + ipv6: fc0a::50/64 + ARISTA80T0: + properties: + - common + bgp: + asn: 64080 + peers: + 65100: + - 10.0.0.158 + - fc00::13d + interfaces: + Loopback0: + ipv4: 100.1.0.80/32 + ipv6: 2064:100::50/128 + Ethernet1: + ipv4: 10.0.0.159/31 + ipv6: fc00::13e/126 + bp_interfaces: + ipv4: 10.10.246.81/24 + ipv6: fc0a::51/64 + ARISTA81T0: + properties: + - common + bgp: + asn: 64081 + peers: + 65100: + - 10.0.0.160 + - fc00::141 + 
interfaces: + Loopback0: + ipv4: 100.1.0.81/32 + ipv6: 2064:100::51/128 + Ethernet1: + ipv4: 10.0.0.161/31 + ipv6: fc00::142/126 + bp_interfaces: + ipv4: 10.10.246.82/24 + ipv6: fc0a::52/64 + ARISTA82T0: + properties: + - common + bgp: + asn: 64082 + peers: + 65100: + - 10.0.0.162 + - fc00::145 + interfaces: + Loopback0: + ipv4: 100.1.0.82/32 + ipv6: 2064:100::52/128 + Ethernet1: + ipv4: 10.0.0.163/31 + ipv6: fc00::146/126 + bp_interfaces: + ipv4: 10.10.246.83/24 + ipv6: fc0a::53/64 + ARISTA83T0: + properties: + - common + bgp: + asn: 64083 + peers: + 65100: + - 10.0.0.164 + - fc00::149 + interfaces: + Loopback0: + ipv4: 100.1.0.83/32 + ipv6: 2064:100::53/128 + Ethernet1: + ipv4: 10.0.0.165/31 + ipv6: fc00::14a/126 + bp_interfaces: + ipv4: 10.10.246.84/24 + ipv6: fc0a::54/64 + ARISTA84T0: + properties: + - common + bgp: + asn: 64084 + peers: + 65100: + - 10.0.0.166 + - fc00::14d + interfaces: + Loopback0: + ipv4: 100.1.0.84/32 + ipv6: 2064:100::54/128 + Ethernet1: + ipv4: 10.0.0.167/31 + ipv6: fc00::14e/126 + bp_interfaces: + ipv4: 10.10.246.85/24 + ipv6: fc0a::55/64 + ARISTA85T0: + properties: + - common + bgp: + asn: 64085 + peers: + 65100: + - 10.0.0.168 + - fc00::151 + interfaces: + Loopback0: + ipv4: 100.1.0.85/32 + ipv6: 2064:100::55/128 + Ethernet1: + ipv4: 10.0.0.169/31 + ipv6: fc00::152/126 + bp_interfaces: + ipv4: 10.10.246.86/24 + ipv6: fc0a::56/64 + ARISTA86T0: + properties: + - common + bgp: + asn: 64086 + peers: + 65100: + - 10.0.0.170 + - fc00::155 + interfaces: + Loopback0: + ipv4: 100.1.0.86/32 + ipv6: 2064:100::56/128 + Ethernet1: + ipv4: 10.0.0.171/31 + ipv6: fc00::156/126 + bp_interfaces: + ipv4: 10.10.246.87/24 + ipv6: fc0a::57/64 + ARISTA87T0: + properties: + - common + bgp: + asn: 64087 + peers: + 65100: + - 10.0.0.172 + - fc00::159 + interfaces: + Loopback0: + ipv4: 100.1.0.87/32 + ipv6: 2064:100::57/128 + Ethernet1: + ipv4: 10.0.0.173/31 + ipv6: fc00::15a/126 + bp_interfaces: + ipv4: 10.10.246.88/24 + ipv6: fc0a::58/64 + ARISTA88T0: + 
properties: + - common + bgp: + asn: 64088 + peers: + 65100: + - 10.0.0.174 + - fc00::15d + interfaces: + Loopback0: + ipv4: 100.1.0.88/32 + ipv6: 2064:100::58/128 + Ethernet1: + ipv4: 10.0.0.175/31 + ipv6: fc00::15e/126 + bp_interfaces: + ipv4: 10.10.246.89/24 + ipv6: fc0a::59/64 + ARISTA89T0: + properties: + - common + bgp: + asn: 64089 + peers: + 65100: + - 10.0.0.176 + - fc00::161 + interfaces: + Loopback0: + ipv4: 100.1.0.89/32 + ipv6: 2064:100::59/128 + Ethernet1: + ipv4: 10.0.0.177/31 + ipv6: fc00::162/126 + bp_interfaces: + ipv4: 10.10.246.90/24 + ipv6: fc0a::5a/64 + ARISTA90T0: + properties: + - common + bgp: + asn: 64090 + peers: + 65100: + - 10.0.0.178 + - fc00::165 + interfaces: + Loopback0: + ipv4: 100.1.0.90/32 + ipv6: 2064:100::5a/128 + Ethernet1: + ipv4: 10.0.0.179/31 + ipv6: fc00::166/126 + bp_interfaces: + ipv4: 10.10.246.91/24 + ipv6: fc0a::5b/64 + ARISTA91T0: + properties: + - common + bgp: + asn: 64091 + peers: + 65100: + - 10.0.0.180 + - fc00::169 + interfaces: + Loopback0: + ipv4: 100.1.0.91/32 + ipv6: 2064:100::5b/128 + Ethernet1: + ipv4: 10.0.0.181/31 + ipv6: fc00::16a/126 + bp_interfaces: + ipv4: 10.10.246.92/24 + ipv6: fc0a::5c/64 + ARISTA92T0: + properties: + - common + bgp: + asn: 64092 + peers: + 65100: + - 10.0.0.182 + - fc00::16d + interfaces: + Loopback0: + ipv4: 100.1.0.92/32 + ipv6: 2064:100::5c/128 + Ethernet1: + ipv4: 10.0.0.183/31 + ipv6: fc00::16e/126 + bp_interfaces: + ipv4: 10.10.246.93/24 + ipv6: fc0a::5d/64 + ARISTA93T0: + properties: + - common + bgp: + asn: 64093 + peers: + 65100: + - 10.0.0.184 + - fc00::171 + interfaces: + Loopback0: + ipv4: 100.1.0.93/32 + ipv6: 2064:100::5d/128 + Ethernet1: + ipv4: 10.0.0.185/31 + ipv6: fc00::172/126 + bp_interfaces: + ipv4: 10.10.246.94/24 + ipv6: fc0a::5e/64 + ARISTA94T0: + properties: + - common + bgp: + asn: 64094 + peers: + 65100: + - 10.0.0.186 + - fc00::175 + interfaces: + Loopback0: + ipv4: 100.1.0.94/32 + ipv6: 2064:100::5e/128 + Ethernet1: + ipv4: 10.0.0.187/31 + ipv6: 
fc00::176/126 + bp_interfaces: + ipv4: 10.10.246.95/24 + ipv6: fc0a::5f/64 + ARISTA95T0: + properties: + - common + bgp: + asn: 64095 + peers: + 65100: + - 10.0.0.188 + - fc00::179 + interfaces: + Loopback0: + ipv4: 100.1.0.95/32 + ipv6: 2064:100::5f/128 + Ethernet1: + ipv4: 10.0.0.189/31 + ipv6: fc00::17a/126 + bp_interfaces: + ipv4: 10.10.246.96/24 + ipv6: fc0a::60/64 + ARISTA96T0: + properties: + - common + bgp: + asn: 64096 + peers: + 65100: + - 10.0.0.190 + - fc00::17d + interfaces: + Loopback0: + ipv4: 100.1.0.96/32 + ipv6: 2064:100::60/128 + Ethernet1: + ipv4: 10.0.0.191/31 + ipv6: fc00::17e/126 + bp_interfaces: + ipv4: 10.10.246.97/24 + ipv6: fc0a::61/64 + ARISTA97T0: + properties: + - common + bgp: + asn: 64097 + peers: + 65100: + - 10.0.0.192 + - fc00::181 + interfaces: + Loopback0: + ipv4: 100.1.0.97/32 + ipv6: 2064:100::61/128 + Ethernet1: + ipv4: 10.0.0.193/31 + ipv6: fc00::182/126 + bp_interfaces: + ipv4: 10.10.246.98/24 + ipv6: fc0a::62/64 + ARISTA98T0: + properties: + - common + bgp: + asn: 64098 + peers: + 65100: + - 10.0.0.194 + - fc00::185 + interfaces: + Loopback0: + ipv4: 100.1.0.98/32 + ipv6: 2064:100::62/128 + Ethernet1: + ipv4: 10.0.0.195/31 + ipv6: fc00::186/126 + bp_interfaces: + ipv4: 10.10.246.99/24 + ipv6: fc0a::63/64 + ARISTA99T0: + properties: + - common + bgp: + asn: 64099 + peers: + 65100: + - 10.0.0.196 + - fc00::189 + interfaces: + Loopback0: + ipv4: 100.1.0.99/32 + ipv6: 2064:100::63/128 + Ethernet1: + ipv4: 10.0.0.197/31 + ipv6: fc00::18a/126 + bp_interfaces: + ipv4: 10.10.246.100/24 + ipv6: fc0a::64/64 + ARISTA100T0: + properties: + - common + bgp: + asn: 64100 + peers: + 65100: + - 10.0.0.198 + - fc00::18d + interfaces: + Loopback0: + ipv4: 100.1.0.100/32 + ipv6: 2064:100::64/128 + Ethernet1: + ipv4: 10.0.0.199/31 + ipv6: fc00::18e/126 + bp_interfaces: + ipv4: 10.10.246.101/24 + ipv6: fc0a::65/64 + ARISTA101T0: + properties: + - common + bgp: + asn: 64101 + peers: + 65100: + - 10.0.0.200 + - fc00::191 + interfaces: + 
Loopback0: + ipv4: 100.1.0.101/32 + ipv6: 2064:100::65/128 + Ethernet1: + ipv4: 10.0.0.201/31 + ipv6: fc00::192/126 + bp_interfaces: + ipv4: 10.10.246.102/24 + ipv6: fc0a::66/64 + ARISTA102T0: + properties: + - common + bgp: + asn: 64102 + peers: + 65100: + - 10.0.0.202 + - fc00::195 + interfaces: + Loopback0: + ipv4: 100.1.0.102/32 + ipv6: 2064:100::66/128 + Ethernet1: + ipv4: 10.0.0.203/31 + ipv6: fc00::196/126 + bp_interfaces: + ipv4: 10.10.246.103/24 + ipv6: fc0a::67/64 + ARISTA103T0: + properties: + - common + bgp: + asn: 64103 + peers: + 65100: + - 10.0.0.204 + - fc00::199 + interfaces: + Loopback0: + ipv4: 100.1.0.103/32 + ipv6: 2064:100::67/128 + Ethernet1: + ipv4: 10.0.0.205/31 + ipv6: fc00::19a/126 + bp_interfaces: + ipv4: 10.10.246.104/24 + ipv6: fc0a::68/64 + ARISTA104T0: + properties: + - common + bgp: + asn: 64104 + peers: + 65100: + - 10.0.0.206 + - fc00::19d + interfaces: + Loopback0: + ipv4: 100.1.0.104/32 + ipv6: 2064:100::68/128 + Ethernet1: + ipv4: 10.0.0.207/31 + ipv6: fc00::19e/126 + bp_interfaces: + ipv4: 10.10.246.105/24 + ipv6: fc0a::69/64 + ARISTA105T0: + properties: + - common + bgp: + asn: 64105 + peers: + 65100: + - 10.0.0.208 + - fc00::1a1 + interfaces: + Loopback0: + ipv4: 100.1.0.105/32 + ipv6: 2064:100::69/128 + Ethernet1: + ipv4: 10.0.0.209/31 + ipv6: fc00::1a2/126 + bp_interfaces: + ipv4: 10.10.246.106/24 + ipv6: fc0a::6a/64 + ARISTA106T0: + properties: + - common + bgp: + asn: 64106 + peers: + 65100: + - 10.0.0.210 + - fc00::1a5 + interfaces: + Loopback0: + ipv4: 100.1.0.106/32 + ipv6: 2064:100::6a/128 + Ethernet1: + ipv4: 10.0.0.211/31 + ipv6: fc00::1a6/126 + bp_interfaces: + ipv4: 10.10.246.107/24 + ipv6: fc0a::6b/64 + ARISTA107T0: + properties: + - common + bgp: + asn: 64107 + peers: + 65100: + - 10.0.0.212 + - fc00::1a9 + interfaces: + Loopback0: + ipv4: 100.1.0.107/32 + ipv6: 2064:100::6b/128 + Ethernet1: + ipv4: 10.0.0.213/31 + ipv6: fc00::1aa/126 + bp_interfaces: + ipv4: 10.10.246.108/24 + ipv6: fc0a::6c/64 + ARISTA108T0: 
+ properties: + - common + bgp: + asn: 64108 + peers: + 65100: + - 10.0.0.214 + - fc00::1ad + interfaces: + Loopback0: + ipv4: 100.1.0.108/32 + ipv6: 2064:100::6c/128 + Ethernet1: + ipv4: 10.0.0.215/31 + ipv6: fc00::1ae/126 + bp_interfaces: + ipv4: 10.10.246.109/24 + ipv6: fc0a::6d/64 + ARISTA109T0: + properties: + - common + bgp: + asn: 64109 + peers: + 65100: + - 10.0.0.216 + - fc00::1b1 + interfaces: + Loopback0: + ipv4: 100.1.0.109/32 + ipv6: 2064:100::6d/128 + Ethernet1: + ipv4: 10.0.0.217/31 + ipv6: fc00::1b2/126 + bp_interfaces: + ipv4: 10.10.246.110/24 + ipv6: fc0a::6e/64 + ARISTA110T0: + properties: + - common + bgp: + asn: 64110 + peers: + 65100: + - 10.0.0.218 + - fc00::1b5 + interfaces: + Loopback0: + ipv4: 100.1.0.110/32 + ipv6: 2064:100::6e/128 + Ethernet1: + ipv4: 10.0.0.219/31 + ipv6: fc00::1b6/126 + bp_interfaces: + ipv4: 10.10.246.111/24 + ipv6: fc0a::6f/64 + ARISTA111T0: + properties: + - common + bgp: + asn: 64111 + peers: + 65100: + - 10.0.0.220 + - fc00::1b9 + interfaces: + Loopback0: + ipv4: 100.1.0.111/32 + ipv6: 2064:100::6f/128 + Ethernet1: + ipv4: 10.0.0.221/31 + ipv6: fc00::1ba/126 + bp_interfaces: + ipv4: 10.10.246.112/24 + ipv6: fc0a::70/64 + ARISTA112T0: + properties: + - common + bgp: + asn: 64112 + peers: + 65100: + - 10.0.0.222 + - fc00::1bd + interfaces: + Loopback0: + ipv4: 100.1.0.112/32 + ipv6: 2064:100::70/128 + Ethernet1: + ipv4: 10.0.0.223/31 + ipv6: fc00::1be/126 + bp_interfaces: + ipv4: 10.10.246.113/24 + ipv6: fc0a::71/64 + ARISTA113T0: + properties: + - common + bgp: + asn: 64113 + peers: + 65100: + - 10.0.0.224 + - fc00::1c1 + interfaces: + Loopback0: + ipv4: 100.1.0.113/32 + ipv6: 2064:100::71/128 + Ethernet1: + ipv4: 10.0.0.225/31 + ipv6: fc00::1c2/126 + bp_interfaces: + ipv4: 10.10.246.114/24 + ipv6: fc0a::72/64 + ARISTA114T0: + properties: + - common + bgp: + asn: 64114 + peers: + 65100: + - 10.0.0.226 + - fc00::1c5 + interfaces: + Loopback0: + ipv4: 100.1.0.114/32 + ipv6: 2064:100::72/128 + Ethernet1: + ipv4: 
10.0.0.227/31 + ipv6: fc00::1c6/126 + bp_interfaces: + ipv4: 10.10.246.115/24 + ipv6: fc0a::73/64 + ARISTA115T0: + properties: + - common + bgp: + asn: 64115 + peers: + 65100: + - 10.0.0.228 + - fc00::1c9 + interfaces: + Loopback0: + ipv4: 100.1.0.115/32 + ipv6: 2064:100::73/128 + Ethernet1: + ipv4: 10.0.0.229/31 + ipv6: fc00::1ca/126 + bp_interfaces: + ipv4: 10.10.246.116/24 + ipv6: fc0a::74/64 + ARISTA116T0: + properties: + - common + bgp: + asn: 64116 + peers: + 65100: + - 10.0.0.230 + - fc00::1cd + interfaces: + Loopback0: + ipv4: 100.1.0.116/32 + ipv6: 2064:100::74/128 + Ethernet1: + ipv4: 10.0.0.231/31 + ipv6: fc00::1ce/126 + bp_interfaces: + ipv4: 10.10.246.117/24 + ipv6: fc0a::75/64 + ARISTA117T0: + properties: + - common + bgp: + asn: 64117 + peers: + 65100: + - 10.0.0.232 + - fc00::1d1 + interfaces: + Loopback0: + ipv4: 100.1.0.117/32 + ipv6: 2064:100::75/128 + Ethernet1: + ipv4: 10.0.0.233/31 + ipv6: fc00::1d2/126 + bp_interfaces: + ipv4: 10.10.246.118/24 + ipv6: fc0a::76/64 + ARISTA118T0: + properties: + - common + bgp: + asn: 64118 + peers: + 65100: + - 10.0.0.234 + - fc00::1d5 + interfaces: + Loopback0: + ipv4: 100.1.0.118/32 + ipv6: 2064:100::76/128 + Ethernet1: + ipv4: 10.0.0.235/31 + ipv6: fc00::1d6/126 + bp_interfaces: + ipv4: 10.10.246.119/24 + ipv6: fc0a::77/64 + ARISTA119T0: + properties: + - common + bgp: + asn: 64119 + peers: + 65100: + - 10.0.0.236 + - fc00::1d9 + interfaces: + Loopback0: + ipv4: 100.1.0.119/32 + ipv6: 2064:100::77/128 + Ethernet1: + ipv4: 10.0.0.237/31 + ipv6: fc00::1da/126 + bp_interfaces: + ipv4: 10.10.246.120/24 + ipv6: fc0a::78/64 + ARISTA120T0: + properties: + - common + bgp: + asn: 64120 + peers: + 65100: + - 10.0.0.238 + - fc00::1dd + interfaces: + Loopback0: + ipv4: 100.1.0.120/32 + ipv6: 2064:100::78/128 + Ethernet1: + ipv4: 10.0.0.239/31 + ipv6: fc00::1de/126 + bp_interfaces: + ipv4: 10.10.246.121/24 + ipv6: fc0a::79/64 + ARISTA121T0: + properties: + - common + bgp: + asn: 64121 + peers: + 65100: + - 10.0.0.240 + 
- fc00::1e1 + interfaces: + Loopback0: + ipv4: 100.1.0.121/32 + ipv6: 2064:100::79/128 + Ethernet1: + ipv4: 10.0.0.241/31 + ipv6: fc00::1e2/126 + bp_interfaces: + ipv4: 10.10.246.122/24 + ipv6: fc0a::7a/64 + ARISTA122T0: + properties: + - common + bgp: + asn: 64122 + peers: + 65100: + - 10.0.0.242 + - fc00::1e5 + interfaces: + Loopback0: + ipv4: 100.1.0.122/32 + ipv6: 2064:100::7a/128 + Ethernet1: + ipv4: 10.0.0.243/31 + ipv6: fc00::1e6/126 + bp_interfaces: + ipv4: 10.10.246.123/24 + ipv6: fc0a::7b/64 + ARISTA123T0: + properties: + - common + bgp: + asn: 64123 + peers: + 65100: + - 10.0.0.244 + - fc00::1e9 + interfaces: + Loopback0: + ipv4: 100.1.0.123/32 + ipv6: 2064:100::7b/128 + Ethernet1: + ipv4: 10.0.0.245/31 + ipv6: fc00::1ea/126 + bp_interfaces: + ipv4: 10.10.246.124/24 + ipv6: fc0a::7c/64 + ARISTA124T0: + properties: + - common + bgp: + asn: 64124 + peers: + 65100: + - 10.0.0.246 + - fc00::1ed + interfaces: + Loopback0: + ipv4: 100.1.0.124/32 + ipv6: 2064:100::7c/128 + Ethernet1: + ipv4: 10.0.0.247/31 + ipv6: fc00::1ee/126 + bp_interfaces: + ipv4: 10.10.246.125/24 + ipv6: fc0a::7d/64 + ARISTA125T0: + properties: + - common + bgp: + asn: 64125 + peers: + 65100: + - 10.0.0.248 + - fc00::1f1 + interfaces: + Loopback0: + ipv4: 100.1.0.125/32 + ipv6: 2064:100::7d/128 + Ethernet1: + ipv4: 10.0.0.249/31 + ipv6: fc00::1f2/126 + bp_interfaces: + ipv4: 10.10.246.126/24 + ipv6: fc0a::7e/64 + ARISTA126T0: + properties: + - common + bgp: + asn: 64126 + peers: + 65100: + - 10.0.0.250 + - fc00::1f5 + interfaces: + Loopback0: + ipv4: 100.1.0.126/32 + ipv6: 2064:100::7e/128 + Ethernet1: + ipv4: 10.0.0.251/31 + ipv6: fc00::1f6/126 + bp_interfaces: + ipv4: 10.10.246.127/24 + ipv6: fc0a::7f/64 + ARISTA127T0: + properties: + - common + bgp: + asn: 64127 + peers: + 65100: + - 10.0.0.252 + - fc00::1f9 + interfaces: + Loopback0: + ipv4: 100.1.0.127/32 + ipv6: 2064:100::7f/128 + Ethernet1: + ipv4: 10.0.0.253/31 + ipv6: fc00::1fa/126 + bp_interfaces: + ipv4: 10.10.246.128/24 + ipv6: 
fc0a::80/64 + ARISTA128T0: + properties: + - common + bgp: + asn: 64128 + peers: + 65100: + - 10.0.0.254 + - fc00::1fd + interfaces: + Loopback0: + ipv4: 100.1.0.128/32 + ipv6: 2064:100::80/128 + Ethernet1: + ipv4: 10.0.0.255/31 + ipv6: fc00::1fe/126 + bp_interfaces: + ipv4: 10.10.246.129/24 + ipv6: fc0a::81/64 diff --git a/ansible/vars/topo_t1-isolated-d224u8.yaml b/ansible/vars/topo_t1-isolated-d224u8.yml similarity index 91% rename from ansible/vars/topo_t1-isolated-d224u8.yaml rename to ansible/vars/topo_t1-isolated-d224u8.yml index 5f97f7fd713..f2f809366d5 100644 --- a/ansible/vars/topo_t1-isolated-d224u8.yaml +++ b/ansible/vars/topo_t1-isolated-d224u8.yml @@ -192,741 +192,741 @@ topology: vlans: - 47 vm_offset: 47 - ARISTA49T2: + ARISTA01T2: vlans: - 48 vm_offset: 48 - ARISTA50T2: + ARISTA02T2: vlans: - 49 vm_offset: 49 - ARISTA51T0: + ARISTA49T0: vlans: - - 56 + - 50 vm_offset: 50 - ARISTA52T0: + ARISTA50T0: vlans: - - 57 + - 51 vm_offset: 51 - ARISTA53T0: + ARISTA51T0: vlans: - - 58 + - 52 vm_offset: 52 - ARISTA54T0: + ARISTA52T0: vlans: - - 59 + - 53 vm_offset: 53 - ARISTA55T0: + ARISTA53T0: vlans: - - 60 + - 54 vm_offset: 54 - ARISTA56T0: + ARISTA54T0: vlans: - - 61 + - 55 vm_offset: 55 - ARISTA57T0: + ARISTA55T0: vlans: - - 62 + - 56 vm_offset: 56 - ARISTA58T0: + ARISTA56T0: vlans: - - 63 + - 57 vm_offset: 57 - ARISTA59T2: + ARISTA03T2: vlans: - - 64 + - 58 vm_offset: 58 - ARISTA60T2: + ARISTA04T2: vlans: - - 65 + - 59 vm_offset: 59 - ARISTA61T0: + ARISTA57T0: vlans: - - 72 + - 60 vm_offset: 60 - ARISTA62T0: + ARISTA58T0: vlans: - - 73 + - 61 vm_offset: 61 - ARISTA63T0: + ARISTA59T0: vlans: - - 74 + - 62 vm_offset: 62 - ARISTA64T0: + ARISTA60T0: vlans: - - 75 + - 63 vm_offset: 63 - ARISTA65T0: + ARISTA61T0: vlans: - - 76 + - 64 vm_offset: 64 - ARISTA66T0: + ARISTA62T0: vlans: - - 77 + - 65 vm_offset: 65 - ARISTA67T0: + ARISTA63T0: vlans: - - 78 + - 66 vm_offset: 66 - ARISTA68T0: + ARISTA64T0: vlans: - - 79 + - 67 vm_offset: 67 - ARISTA69T0: + ARISTA65T0: 
vlans: - - 80 + - 68 vm_offset: 68 - ARISTA70T0: + ARISTA66T0: vlans: - - 81 + - 69 vm_offset: 69 - ARISTA71T0: + ARISTA67T0: vlans: - - 82 + - 70 vm_offset: 70 - ARISTA72T0: + ARISTA68T0: vlans: - - 83 + - 71 vm_offset: 71 - ARISTA73T0: + ARISTA69T0: vlans: - - 84 + - 72 vm_offset: 72 - ARISTA74T0: + ARISTA70T0: vlans: - - 85 + - 73 vm_offset: 73 - ARISTA75T0: + ARISTA71T0: vlans: - - 86 + - 74 vm_offset: 74 - ARISTA76T0: + ARISTA72T0: vlans: - - 87 + - 75 vm_offset: 75 - ARISTA77T0: + ARISTA73T0: vlans: - - 88 + - 76 vm_offset: 76 - ARISTA78T0: + ARISTA74T0: vlans: - - 89 + - 77 vm_offset: 77 - ARISTA79T0: + ARISTA75T0: vlans: - - 90 + - 78 vm_offset: 78 - ARISTA80T0: + ARISTA76T0: vlans: - - 91 + - 79 vm_offset: 79 - ARISTA81T0: + ARISTA77T0: vlans: - - 92 + - 80 vm_offset: 80 - ARISTA82T0: + ARISTA78T0: vlans: - - 93 + - 81 vm_offset: 81 - ARISTA83T0: + ARISTA79T0: vlans: - - 94 + - 82 vm_offset: 82 - ARISTA84T0: + ARISTA80T0: vlans: - - 95 + - 83 vm_offset: 83 - ARISTA85T0: + ARISTA81T0: vlans: - - 96 + - 84 vm_offset: 84 - ARISTA86T0: + ARISTA82T0: vlans: - - 97 + - 85 vm_offset: 85 - ARISTA87T0: + ARISTA83T0: vlans: - - 98 + - 86 vm_offset: 86 - ARISTA88T0: + ARISTA84T0: vlans: - - 99 + - 87 vm_offset: 87 - ARISTA89T0: + ARISTA85T0: vlans: - - 100 + - 88 vm_offset: 88 - ARISTA90T0: + ARISTA86T0: vlans: - - 101 + - 89 vm_offset: 89 - ARISTA91T0: + ARISTA87T0: vlans: - - 102 + - 90 vm_offset: 90 - ARISTA92T0: + ARISTA88T0: vlans: - - 103 + - 91 vm_offset: 91 - ARISTA93T0: + ARISTA89T0: vlans: - - 104 + - 92 vm_offset: 92 - ARISTA94T0: + ARISTA90T0: vlans: - - 105 + - 93 vm_offset: 93 - ARISTA95T0: + ARISTA91T0: vlans: - - 106 + - 94 vm_offset: 94 - ARISTA96T0: + ARISTA92T0: vlans: - - 107 + - 95 vm_offset: 95 - ARISTA97T0: + ARISTA93T0: vlans: - - 108 + - 96 vm_offset: 96 - ARISTA98T0: + ARISTA94T0: vlans: - - 109 + - 97 vm_offset: 97 - ARISTA99T0: + ARISTA95T0: vlans: - - 110 + - 98 vm_offset: 98 - ARISTA100T0: + ARISTA96T0: vlans: - - 111 + - 99 vm_offset: 
99 - ARISTA101T0: + ARISTA97T0: vlans: - - 112 + - 100 vm_offset: 100 - ARISTA102T0: + ARISTA98T0: vlans: - - 113 + - 101 vm_offset: 101 - ARISTA103T0: + ARISTA99T0: vlans: - - 114 + - 102 vm_offset: 102 - ARISTA104T0: + ARISTA100T0: vlans: - - 115 + - 103 vm_offset: 103 - ARISTA105T0: + ARISTA101T0: vlans: - - 116 + - 104 vm_offset: 104 - ARISTA106T0: + ARISTA102T0: vlans: - - 117 + - 105 vm_offset: 105 - ARISTA107T0: + ARISTA103T0: vlans: - - 118 + - 106 vm_offset: 106 - ARISTA108T0: + ARISTA104T0: vlans: - - 119 + - 107 vm_offset: 107 - ARISTA109T0: + ARISTA105T0: vlans: - - 120 + - 108 vm_offset: 108 - ARISTA110T0: + ARISTA106T0: vlans: - - 121 + - 109 vm_offset: 109 - ARISTA111T0: + ARISTA107T0: vlans: - - 122 + - 110 vm_offset: 110 - ARISTA112T0: + ARISTA108T0: vlans: - - 123 + - 111 vm_offset: 111 - ARISTA113T0: + ARISTA109T0: vlans: - - 124 + - 112 vm_offset: 112 - ARISTA114T0: + ARISTA110T0: vlans: - - 125 + - 113 vm_offset: 113 - ARISTA115T0: + ARISTA111T0: vlans: - - 126 + - 114 vm_offset: 114 - ARISTA116T0: + ARISTA112T0: vlans: - - 127 + - 115 vm_offset: 115 - ARISTA117T0: + ARISTA113T0: vlans: - - 128 + - 116 vm_offset: 116 - ARISTA118T0: + ARISTA114T0: vlans: - - 129 + - 117 vm_offset: 117 - ARISTA119T0: + ARISTA115T0: vlans: - - 130 + - 118 vm_offset: 118 - ARISTA120T0: + ARISTA116T0: vlans: - - 131 + - 119 vm_offset: 119 - ARISTA121T0: + ARISTA117T0: vlans: - - 132 + - 120 vm_offset: 120 - ARISTA122T0: + ARISTA118T0: vlans: - - 133 + - 121 vm_offset: 121 - ARISTA123T0: + ARISTA119T0: vlans: - - 134 + - 122 vm_offset: 122 - ARISTA124T0: + ARISTA120T0: vlans: - - 135 + - 123 vm_offset: 123 - ARISTA125T0: + ARISTA121T0: vlans: - - 136 + - 124 vm_offset: 124 - ARISTA126T0: + ARISTA122T0: vlans: - - 137 + - 125 vm_offset: 125 - ARISTA127T0: + ARISTA123T0: vlans: - - 138 + - 126 vm_offset: 126 - ARISTA128T0: + ARISTA124T0: vlans: - - 139 + - 127 vm_offset: 127 - ARISTA129T0: + ARISTA125T0: vlans: - - 140 + - 128 vm_offset: 128 - ARISTA130T0: + 
ARISTA126T0: vlans: - - 141 + - 129 vm_offset: 129 - ARISTA131T0: + ARISTA127T0: vlans: - - 142 + - 130 vm_offset: 130 - ARISTA132T0: + ARISTA128T0: vlans: - - 143 + - 131 vm_offset: 131 - ARISTA133T0: + ARISTA129T0: vlans: - - 144 + - 132 vm_offset: 132 - ARISTA134T0: + ARISTA130T0: vlans: - - 145 + - 133 vm_offset: 133 - ARISTA135T0: + ARISTA131T0: vlans: - - 146 + - 134 vm_offset: 134 - ARISTA136T0: + ARISTA132T0: vlans: - - 147 + - 135 vm_offset: 135 - ARISTA137T0: + ARISTA133T0: vlans: - - 148 + - 136 vm_offset: 136 - ARISTA138T0: + ARISTA134T0: vlans: - - 149 + - 137 vm_offset: 137 - ARISTA139T0: + ARISTA135T0: vlans: - - 150 + - 138 vm_offset: 138 - ARISTA140T0: + ARISTA136T0: vlans: - - 151 + - 139 vm_offset: 139 - ARISTA141T0: + ARISTA137T0: vlans: - - 152 + - 140 vm_offset: 140 - ARISTA142T0: + ARISTA138T0: vlans: - - 153 + - 141 vm_offset: 141 - ARISTA143T0: + ARISTA139T0: vlans: - - 154 + - 142 vm_offset: 142 - ARISTA144T0: + ARISTA140T0: vlans: - - 155 + - 143 vm_offset: 143 - ARISTA145T0: + ARISTA141T0: vlans: - - 156 + - 144 vm_offset: 144 - ARISTA146T0: + ARISTA142T0: vlans: - - 157 + - 145 vm_offset: 145 - ARISTA147T0: + ARISTA143T0: vlans: - - 158 + - 146 vm_offset: 146 - ARISTA148T0: + ARISTA144T0: vlans: - - 159 + - 147 vm_offset: 147 - ARISTA149T0: + ARISTA145T0: vlans: - - 160 + - 148 vm_offset: 148 - ARISTA150T0: + ARISTA146T0: vlans: - - 161 + - 149 vm_offset: 149 - ARISTA151T0: + ARISTA147T0: vlans: - - 162 + - 150 vm_offset: 150 - ARISTA152T0: + ARISTA148T0: vlans: - - 163 + - 151 vm_offset: 151 - ARISTA153T0: + ARISTA149T0: vlans: - - 164 + - 152 vm_offset: 152 - ARISTA154T0: + ARISTA150T0: vlans: - - 165 + - 153 vm_offset: 153 - ARISTA155T0: + ARISTA151T0: vlans: - - 166 + - 154 vm_offset: 154 - ARISTA156T0: + ARISTA152T0: vlans: - - 167 + - 155 vm_offset: 155 - ARISTA157T0: + ARISTA153T0: vlans: - - 168 + - 156 vm_offset: 156 - ARISTA158T0: + ARISTA154T0: vlans: - - 169 + - 157 vm_offset: 157 - ARISTA159T0: + ARISTA155T0: vlans: - - 170 
+ - 158 vm_offset: 158 - ARISTA160T0: + ARISTA156T0: vlans: - - 171 + - 159 vm_offset: 159 - ARISTA161T0: + ARISTA157T0: vlans: - - 172 + - 160 vm_offset: 160 - ARISTA162T0: + ARISTA158T0: vlans: - - 173 + - 161 vm_offset: 161 - ARISTA163T0: + ARISTA159T0: vlans: - - 174 + - 162 vm_offset: 162 - ARISTA164T0: + ARISTA160T0: vlans: - - 175 + - 163 vm_offset: 163 - ARISTA165T2: + ARISTA05T2: vlans: - - 176 + - 164 vm_offset: 164 - ARISTA166T2: + ARISTA06T2: vlans: - - 177 + - 165 vm_offset: 165 - ARISTA167T0: + ARISTA161T0: vlans: - - 184 + - 166 vm_offset: 166 - ARISTA168T0: + ARISTA162T0: vlans: - - 185 + - 167 vm_offset: 167 - ARISTA169T0: + ARISTA163T0: vlans: - - 186 + - 168 vm_offset: 168 - ARISTA170T0: + ARISTA164T0: vlans: - - 187 + - 169 vm_offset: 169 - ARISTA171T0: + ARISTA165T0: vlans: - - 188 + - 170 vm_offset: 170 - ARISTA172T0: + ARISTA166T0: vlans: - - 189 + - 171 vm_offset: 171 - ARISTA173T0: + ARISTA167T0: vlans: - - 190 + - 172 vm_offset: 172 - ARISTA174T0: + ARISTA168T0: vlans: - - 191 + - 173 vm_offset: 173 - ARISTA175T2: + ARISTA07T2: vlans: - - 192 + - 174 vm_offset: 174 - ARISTA176T2: + ARISTA08T2: vlans: - - 193 + - 175 vm_offset: 175 - ARISTA177T0: + ARISTA169T0: vlans: - - 200 + - 176 vm_offset: 176 - ARISTA178T0: + ARISTA170T0: vlans: - - 201 + - 177 vm_offset: 177 - ARISTA179T0: + ARISTA171T0: vlans: - - 202 + - 178 vm_offset: 178 - ARISTA180T0: + ARISTA172T0: vlans: - - 203 + - 179 vm_offset: 179 - ARISTA181T0: + ARISTA173T0: vlans: - - 204 + - 180 vm_offset: 180 - ARISTA182T0: + ARISTA174T0: vlans: - - 205 + - 181 vm_offset: 181 - ARISTA183T0: + ARISTA175T0: vlans: - - 206 + - 182 vm_offset: 182 - ARISTA184T0: + ARISTA176T0: vlans: - - 207 + - 183 vm_offset: 183 - ARISTA185T0: + ARISTA177T0: vlans: - - 208 + - 184 vm_offset: 184 - ARISTA186T0: + ARISTA178T0: vlans: - - 209 + - 185 vm_offset: 185 - ARISTA187T0: + ARISTA179T0: vlans: - - 210 + - 186 vm_offset: 186 - ARISTA188T0: + ARISTA180T0: vlans: - - 211 + - 187 vm_offset: 187 - 
ARISTA189T0: + ARISTA181T0: vlans: - - 212 + - 188 vm_offset: 188 - ARISTA190T0: + ARISTA182T0: vlans: - - 213 + - 189 vm_offset: 189 - ARISTA191T0: + ARISTA183T0: vlans: - - 214 + - 190 vm_offset: 190 - ARISTA192T0: + ARISTA184T0: vlans: - - 215 + - 191 vm_offset: 191 - ARISTA193T0: + ARISTA185T0: vlans: - - 216 + - 192 vm_offset: 192 - ARISTA194T0: + ARISTA186T0: vlans: - - 217 + - 193 vm_offset: 193 - ARISTA195T0: + ARISTA187T0: vlans: - - 218 + - 194 vm_offset: 194 - ARISTA196T0: + ARISTA188T0: vlans: - - 219 + - 195 vm_offset: 195 - ARISTA197T0: + ARISTA189T0: vlans: - - 220 + - 196 vm_offset: 196 - ARISTA198T0: + ARISTA190T0: vlans: - - 221 + - 197 vm_offset: 197 - ARISTA199T0: + ARISTA191T0: vlans: - - 222 + - 198 vm_offset: 198 - ARISTA200T0: + ARISTA192T0: vlans: - - 223 + - 199 vm_offset: 199 - ARISTA201T0: + ARISTA193T0: vlans: - - 224 + - 200 vm_offset: 200 - ARISTA202T0: + ARISTA194T0: vlans: - - 225 + - 201 vm_offset: 201 - ARISTA203T0: + ARISTA195T0: vlans: - - 226 + - 202 vm_offset: 202 - ARISTA204T0: + ARISTA196T0: vlans: - - 227 + - 203 vm_offset: 203 - ARISTA205T0: + ARISTA197T0: vlans: - - 228 + - 204 vm_offset: 204 - ARISTA206T0: + ARISTA198T0: vlans: - - 229 + - 205 vm_offset: 205 - ARISTA207T0: + ARISTA199T0: vlans: - - 230 + - 206 vm_offset: 206 - ARISTA208T0: + ARISTA200T0: vlans: - - 231 + - 207 vm_offset: 207 - ARISTA209T0: + ARISTA201T0: vlans: - - 232 + - 208 vm_offset: 208 - ARISTA210T0: + ARISTA202T0: vlans: - - 233 + - 209 vm_offset: 209 - ARISTA211T0: + ARISTA203T0: vlans: - - 234 + - 210 vm_offset: 210 - ARISTA212T0: + ARISTA204T0: vlans: - - 235 + - 211 vm_offset: 211 - ARISTA213T0: + ARISTA205T0: vlans: - - 236 + - 212 vm_offset: 212 - ARISTA214T0: + ARISTA206T0: vlans: - - 237 + - 213 vm_offset: 213 - ARISTA215T0: + ARISTA207T0: vlans: - - 238 + - 214 vm_offset: 214 - ARISTA216T0: + ARISTA208T0: vlans: - - 239 + - 215 vm_offset: 215 - ARISTA217T0: + ARISTA209T0: vlans: - - 240 + - 216 vm_offset: 216 - ARISTA218T0: + ARISTA210T0: 
vlans: - - 241 + - 217 vm_offset: 217 - ARISTA219T0: + ARISTA211T0: vlans: - - 242 + - 218 vm_offset: 218 - ARISTA220T0: + ARISTA212T0: vlans: - - 243 + - 219 vm_offset: 219 - ARISTA221T0: + ARISTA213T0: vlans: - - 244 + - 220 vm_offset: 220 - ARISTA222T0: + ARISTA214T0: vlans: - - 245 + - 221 vm_offset: 221 - ARISTA223T0: + ARISTA215T0: vlans: - - 246 + - 222 vm_offset: 222 - ARISTA224T0: + ARISTA216T0: vlans: - - 247 + - 223 vm_offset: 223 - ARISTA225T0: + ARISTA217T0: vlans: - - 248 + - 224 vm_offset: 224 - ARISTA226T0: + ARISTA218T0: vlans: - - 249 + - 225 vm_offset: 225 - ARISTA227T0: + ARISTA219T0: vlans: - - 250 + - 226 vm_offset: 226 - ARISTA228T0: + ARISTA220T0: vlans: - - 251 + - 227 vm_offset: 227 - ARISTA229T0: + ARISTA221T0: vlans: - - 252 + - 228 vm_offset: 228 - ARISTA230T0: + ARISTA222T0: vlans: - - 253 + - 229 vm_offset: 229 - ARISTA231T0: + ARISTA223T0: vlans: - - 254 + - 230 vm_offset: 230 - ARISTA232T0: + ARISTA224T0: vlans: - - 255 + - 231 vm_offset: 231 configuration_properties: @@ -950,7 +950,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64001 peers: 65100: - 10.0.0.0 @@ -988,7 +988,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64003 peers: 65100: - 10.0.0.4 @@ -1007,7 +1007,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64004 peers: 65100: - 10.0.0.6 @@ -1026,7 +1026,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64005 peers: 65100: - 10.0.0.8 @@ -1045,7 +1045,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64006 peers: 65100: - 10.0.0.10 @@ -1064,7 +1064,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64007 peers: 65100: - 10.0.0.12 @@ -1083,7 +1083,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64008 peers: 65100: - 10.0.0.14 @@ -1102,7 +1102,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64009 peers: 65100: - 10.0.0.16 @@ -1121,7 +1121,7 @@ configuration: properties: - common bgp: - asn: 
64002 + asn: 64010 peers: 65100: - 10.0.0.18 @@ -1140,7 +1140,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64011 peers: 65100: - 10.0.0.20 @@ -1159,7 +1159,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64012 peers: 65100: - 10.0.0.22 @@ -1178,7 +1178,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64013 peers: 65100: - 10.0.0.24 @@ -1197,7 +1197,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64014 peers: 65100: - 10.0.0.26 @@ -1216,7 +1216,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64015 peers: 65100: - 10.0.0.28 @@ -1235,7 +1235,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64016 peers: 65100: - 10.0.0.30 @@ -1254,7 +1254,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64017 peers: 65100: - 10.0.0.32 @@ -1273,7 +1273,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64018 peers: 65100: - 10.0.0.34 @@ -1292,7 +1292,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64019 peers: 65100: - 10.0.0.36 @@ -1311,7 +1311,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64020 peers: 65100: - 10.0.0.38 @@ -1330,7 +1330,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64021 peers: 65100: - 10.0.0.40 @@ -1349,7 +1349,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64022 peers: 65100: - 10.0.0.42 @@ -1368,7 +1368,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64023 peers: 65100: - 10.0.0.44 @@ -1387,7 +1387,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64024 peers: 65100: - 10.0.0.46 @@ -1406,7 +1406,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64025 peers: 65100: - 10.0.0.48 @@ -1425,7 +1425,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64026 peers: 65100: - 10.0.0.50 @@ -1444,7 +1444,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64027 peers: 65100: - 10.0.0.52 
@@ -1463,7 +1463,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64028 peers: 65100: - 10.0.0.54 @@ -1482,7 +1482,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64029 peers: 65100: - 10.0.0.56 @@ -1501,7 +1501,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64030 peers: 65100: - 10.0.0.58 @@ -1520,7 +1520,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64031 peers: 65100: - 10.0.0.60 @@ -1539,7 +1539,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64032 peers: 65100: - 10.0.0.62 @@ -1558,7 +1558,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64033 peers: 65100: - 10.0.0.64 @@ -1577,7 +1577,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64034 peers: 65100: - 10.0.0.66 @@ -1596,7 +1596,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64035 peers: 65100: - 10.0.0.68 @@ -1615,7 +1615,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64036 peers: 65100: - 10.0.0.70 @@ -1634,7 +1634,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64037 peers: 65100: - 10.0.0.72 @@ -1653,7 +1653,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64038 peers: 65100: - 10.0.0.74 @@ -1672,7 +1672,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64039 peers: 65100: - 10.0.0.76 @@ -1691,7 +1691,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64040 peers: 65100: - 10.0.0.78 @@ -1710,7 +1710,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64041 peers: 65100: - 10.0.0.80 @@ -1729,7 +1729,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64042 peers: 65100: - 10.0.0.82 @@ -1748,7 +1748,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64043 peers: 65100: - 10.0.0.84 @@ -1767,7 +1767,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64044 peers: 65100: - 10.0.0.86 @@ -1786,7 +1786,7 @@ configuration: 
properties: - common bgp: - asn: 64002 + asn: 64045 peers: 65100: - 10.0.0.88 @@ -1805,7 +1805,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64046 peers: 65100: - 10.0.0.90 @@ -1824,7 +1824,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64047 peers: 65100: - 10.0.0.92 @@ -1843,7 +1843,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64048 peers: 65100: - 10.0.0.94 @@ -1858,7 +1858,7 @@ configuration: bp_interfaces: ipv4: 10.10.246.49/24 ipv6: fc0a::31/64 - ARISTA49T2: + ARISTA01T2: properties: - common bgp: @@ -1877,7 +1877,7 @@ configuration: bp_interfaces: ipv4: 10.10.246.50/24 ipv6: fc0a::32/64 - ARISTA50T2: + ARISTA02T2: properties: - common bgp: @@ -1896,3461 +1896,3461 @@ configuration: bp_interfaces: ipv4: 10.10.246.51/24 ipv6: fc0a::33/64 - ARISTA51T0: + ARISTA49T0: properties: - common bgp: - asn: 64002 + asn: 64049 peers: 65100: - - 10.0.0.112 - - fc00::e1 + - 10.0.0.100 + - fc00::c9 interfaces: Loopback0: - ipv4: 100.1.0.57/32 - ipv6: 2064:100::39/128 + ipv4: 100.1.0.51/32 + ipv6: 2064:100::33/128 Ethernet1: - ipv4: 10.0.0.113/31 - ipv6: fc00::e2/126 + ipv4: 10.0.0.101/31 + ipv6: fc00::ca/126 bp_interfaces: ipv4: 10.10.246.52/24 ipv6: fc0a::34/64 - ARISTA52T0: + ARISTA50T0: properties: - common bgp: - asn: 64002 + asn: 64050 peers: 65100: - - 10.0.0.114 - - fc00::e5 + - 10.0.0.102 + - fc00::cd interfaces: Loopback0: - ipv4: 100.1.0.58/32 - ipv6: 2064:100::3a/128 + ipv4: 100.1.0.52/32 + ipv6: 2064:100::34/128 Ethernet1: - ipv4: 10.0.0.115/31 - ipv6: fc00::e6/126 + ipv4: 10.0.0.103/31 + ipv6: fc00::ce/126 bp_interfaces: ipv4: 10.10.246.53/24 ipv6: fc0a::35/64 - ARISTA53T0: + ARISTA51T0: properties: - common bgp: - asn: 64002 + asn: 64051 peers: 65100: - - 10.0.0.116 - - fc00::e9 + - 10.0.0.104 + - fc00::d1 interfaces: Loopback0: - ipv4: 100.1.0.59/32 - ipv6: 2064:100::3b/128 + ipv4: 100.1.0.53/32 + ipv6: 2064:100::35/128 Ethernet1: - ipv4: 10.0.0.117/31 - ipv6: fc00::ea/126 + ipv4: 10.0.0.105/31 + 
ipv6: fc00::d2/126 bp_interfaces: ipv4: 10.10.246.54/24 ipv6: fc0a::36/64 - ARISTA54T0: + ARISTA52T0: properties: - common bgp: - asn: 64002 + asn: 64052 peers: 65100: - - 10.0.0.118 - - fc00::ed + - 10.0.0.106 + - fc00::d5 interfaces: Loopback0: - ipv4: 100.1.0.60/32 - ipv6: 2064:100::3c/128 + ipv4: 100.1.0.54/32 + ipv6: 2064:100::36/128 Ethernet1: - ipv4: 10.0.0.119/31 - ipv6: fc00::ee/126 + ipv4: 10.0.0.107/31 + ipv6: fc00::d6/126 bp_interfaces: ipv4: 10.10.246.55/24 ipv6: fc0a::37/64 - ARISTA55T0: + ARISTA53T0: properties: - common bgp: - asn: 64002 + asn: 64053 peers: 65100: - - 10.0.0.120 - - fc00::f1 + - 10.0.0.108 + - fc00::d9 interfaces: Loopback0: - ipv4: 100.1.0.61/32 - ipv6: 2064:100::3d/128 + ipv4: 100.1.0.55/32 + ipv6: 2064:100::37/128 Ethernet1: - ipv4: 10.0.0.121/31 - ipv6: fc00::f2/126 + ipv4: 10.0.0.109/31 + ipv6: fc00::da/126 bp_interfaces: ipv4: 10.10.246.56/24 ipv6: fc0a::38/64 - ARISTA56T0: + ARISTA54T0: properties: - common bgp: - asn: 64002 + asn: 64054 peers: 65100: - - 10.0.0.122 - - fc00::f5 + - 10.0.0.110 + - fc00::dd interfaces: Loopback0: - ipv4: 100.1.0.62/32 - ipv6: 2064:100::3e/128 + ipv4: 100.1.0.56/32 + ipv6: 2064:100::38/128 Ethernet1: - ipv4: 10.0.0.123/31 - ipv6: fc00::f6/126 + ipv4: 10.0.0.111/31 + ipv6: fc00::de/126 bp_interfaces: ipv4: 10.10.246.57/24 ipv6: fc0a::39/64 - ARISTA57T0: + ARISTA55T0: properties: - common bgp: - asn: 64002 + asn: 64055 peers: 65100: - - 10.0.0.124 - - fc00::f9 + - 10.0.0.112 + - fc00::e1 interfaces: Loopback0: - ipv4: 100.1.0.63/32 - ipv6: 2064:100::3f/128 + ipv4: 100.1.0.57/32 + ipv6: 2064:100::39/128 Ethernet1: - ipv4: 10.0.0.125/31 - ipv6: fc00::fa/126 + ipv4: 10.0.0.113/31 + ipv6: fc00::e2/126 bp_interfaces: ipv4: 10.10.246.58/24 ipv6: fc0a::3a/64 - ARISTA58T0: + ARISTA56T0: properties: - common bgp: - asn: 64002 + asn: 64056 peers: 65100: - - 10.0.0.126 - - fc00::fd + - 10.0.0.114 + - fc00::e5 interfaces: Loopback0: - ipv4: 100.1.0.64/32 - ipv6: 2064:100::40/128 + ipv4: 100.1.0.58/32 + ipv6: 
2064:100::3a/128 Ethernet1: - ipv4: 10.0.0.127/31 - ipv6: fc00::fe/126 + ipv4: 10.0.0.115/31 + ipv6: fc00::e6/126 bp_interfaces: ipv4: 10.10.246.59/24 ipv6: fc0a::3b/64 - ARISTA59T2: + ARISTA03T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.0.128 - - fc00::101 + - 10.0.0.116 + - fc00::e9 interfaces: Loopback0: - ipv4: 100.1.0.65/32 - ipv6: 2064:100::41/128 + ipv4: 100.1.0.59/32 + ipv6: 2064:100::3b/128 Ethernet1: - ipv4: 10.0.0.129/31 - ipv6: fc00::102/126 + ipv4: 10.0.0.117/31 + ipv6: fc00::ea/126 bp_interfaces: ipv4: 10.10.246.60/24 ipv6: fc0a::3c/64 - ARISTA60T2: + ARISTA04T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.0.130 - - fc00::105 + - 10.0.0.118 + - fc00::ed interfaces: Loopback0: - ipv4: 100.1.0.66/32 - ipv6: 2064:100::42/128 + ipv4: 100.1.0.60/32 + ipv6: 2064:100::3c/128 Ethernet1: - ipv4: 10.0.0.131/31 - ipv6: fc00::106/126 + ipv4: 10.0.0.119/31 + ipv6: fc00::ee/126 bp_interfaces: ipv4: 10.10.246.61/24 ipv6: fc0a::3d/64 - ARISTA61T0: + ARISTA57T0: properties: - common bgp: - asn: 64002 + asn: 64057 peers: - 65100: - - 10.0.0.144 - - fc00::121 + 65100: + - 10.0.0.120 + - fc00::f1 interfaces: Loopback0: - ipv4: 100.1.0.73/32 - ipv6: 2064:100::49/128 + ipv4: 100.1.0.61/32 + ipv6: 2064:100::3d/128 Ethernet1: - ipv4: 10.0.0.145/31 - ipv6: fc00::122/126 + ipv4: 10.0.0.121/31 + ipv6: fc00::f2/126 bp_interfaces: ipv4: 10.10.246.62/24 ipv6: fc0a::3e/64 - ARISTA62T0: + ARISTA58T0: properties: - common bgp: - asn: 64002 + asn: 64058 peers: 65100: - - 10.0.0.146 - - fc00::125 + - 10.0.0.122 + - fc00::f5 interfaces: Loopback0: - ipv4: 100.1.0.74/32 - ipv6: 2064:100::4a/128 + ipv4: 100.1.0.62/32 + ipv6: 2064:100::3e/128 Ethernet1: - ipv4: 10.0.0.147/31 - ipv6: fc00::126/126 + ipv4: 10.0.0.123/31 + ipv6: fc00::f6/126 bp_interfaces: ipv4: 10.10.246.63/24 ipv6: fc0a::3f/64 - ARISTA63T0: + ARISTA59T0: properties: - common bgp: - asn: 64002 + asn: 64059 peers: 65100: - - 10.0.0.148 - - fc00::129 + - 10.0.0.124 + - fc00::f9 interfaces: 
Loopback0: - ipv4: 100.1.0.75/32 - ipv6: 2064:100::4b/128 + ipv4: 100.1.0.63/32 + ipv6: 2064:100::3f/128 Ethernet1: - ipv4: 10.0.0.149/31 - ipv6: fc00::12a/126 + ipv4: 10.0.0.125/31 + ipv6: fc00::fa/126 bp_interfaces: ipv4: 10.10.246.64/24 ipv6: fc0a::40/64 - ARISTA64T0: + ARISTA60T0: properties: - common bgp: - asn: 64002 + asn: 64060 peers: 65100: - - 10.0.0.150 - - fc00::12d + - 10.0.0.126 + - fc00::fd interfaces: Loopback0: - ipv4: 100.1.0.76/32 - ipv6: 2064:100::4c/128 + ipv4: 100.1.0.64/32 + ipv6: 2064:100::40/128 Ethernet1: - ipv4: 10.0.0.151/31 - ipv6: fc00::12e/126 + ipv4: 10.0.0.127/31 + ipv6: fc00::fe/126 bp_interfaces: ipv4: 10.10.246.65/24 ipv6: fc0a::41/64 - ARISTA65T0: + ARISTA61T0: properties: - common bgp: - asn: 64002 + asn: 64061 peers: 65100: - - 10.0.0.152 - - fc00::131 + - 10.0.0.128 + - fc00::101 interfaces: Loopback0: - ipv4: 100.1.0.77/32 - ipv6: 2064:100::4d/128 + ipv4: 100.1.0.65/32 + ipv6: 2064:100::41/128 Ethernet1: - ipv4: 10.0.0.153/31 - ipv6: fc00::132/126 + ipv4: 10.0.0.129/31 + ipv6: fc00::102/126 bp_interfaces: ipv4: 10.10.246.66/24 ipv6: fc0a::42/64 - ARISTA66T0: + ARISTA62T0: properties: - common bgp: - asn: 64002 + asn: 64062 peers: 65100: - - 10.0.0.154 - - fc00::135 + - 10.0.0.130 + - fc00::105 interfaces: Loopback0: - ipv4: 100.1.0.78/32 - ipv6: 2064:100::4e/128 + ipv4: 100.1.0.66/32 + ipv6: 2064:100::42/128 Ethernet1: - ipv4: 10.0.0.155/31 - ipv6: fc00::136/126 + ipv4: 10.0.0.131/31 + ipv6: fc00::106/126 bp_interfaces: ipv4: 10.10.246.67/24 ipv6: fc0a::43/64 - ARISTA67T0: + ARISTA63T0: properties: - common bgp: - asn: 64002 + asn: 64063 peers: 65100: - - 10.0.0.156 - - fc00::139 + - 10.0.0.132 + - fc00::109 interfaces: Loopback0: - ipv4: 100.1.0.79/32 - ipv6: 2064:100::4f/128 + ipv4: 100.1.0.67/32 + ipv6: 2064:100::43/128 Ethernet1: - ipv4: 10.0.0.157/31 - ipv6: fc00::13a/126 + ipv4: 10.0.0.133/31 + ipv6: fc00::10a/126 bp_interfaces: ipv4: 10.10.246.68/24 ipv6: fc0a::44/64 - ARISTA68T0: + ARISTA64T0: properties: - common 
bgp: - asn: 64002 + asn: 64064 peers: 65100: - - 10.0.0.158 - - fc00::13d + - 10.0.0.134 + - fc00::10d interfaces: Loopback0: - ipv4: 100.1.0.80/32 - ipv6: 2064:100::50/128 + ipv4: 100.1.0.68/32 + ipv6: 2064:100::44/128 Ethernet1: - ipv4: 10.0.0.159/31 - ipv6: fc00::13e/126 + ipv4: 10.0.0.135/31 + ipv6: fc00::10e/126 bp_interfaces: ipv4: 10.10.246.69/24 ipv6: fc0a::45/64 - ARISTA69T0: + ARISTA65T0: properties: - common bgp: - asn: 64002 + asn: 64065 peers: 65100: - - 10.0.0.160 - - fc00::141 + - 10.0.0.136 + - fc00::111 interfaces: Loopback0: - ipv4: 100.1.0.81/32 - ipv6: 2064:100::51/128 + ipv4: 100.1.0.69/32 + ipv6: 2064:100::45/128 Ethernet1: - ipv4: 10.0.0.161/31 - ipv6: fc00::142/126 + ipv4: 10.0.0.137/31 + ipv6: fc00::112/126 bp_interfaces: ipv4: 10.10.246.70/24 ipv6: fc0a::46/64 - ARISTA70T0: + ARISTA66T0: properties: - common bgp: - asn: 64002 + asn: 64066 peers: 65100: - - 10.0.0.162 - - fc00::145 + - 10.0.0.138 + - fc00::115 interfaces: Loopback0: - ipv4: 100.1.0.82/32 - ipv6: 2064:100::52/128 + ipv4: 100.1.0.70/32 + ipv6: 2064:100::46/128 Ethernet1: - ipv4: 10.0.0.163/31 - ipv6: fc00::146/126 + ipv4: 10.0.0.139/31 + ipv6: fc00::116/126 bp_interfaces: ipv4: 10.10.246.71/24 ipv6: fc0a::47/64 - ARISTA71T0: + ARISTA67T0: properties: - common bgp: - asn: 64002 + asn: 64067 peers: 65100: - - 10.0.0.164 - - fc00::149 + - 10.0.0.140 + - fc00::119 interfaces: Loopback0: - ipv4: 100.1.0.83/32 - ipv6: 2064:100::53/128 + ipv4: 100.1.0.71/32 + ipv6: 2064:100::47/128 Ethernet1: - ipv4: 10.0.0.165/31 - ipv6: fc00::14a/126 + ipv4: 10.0.0.141/31 + ipv6: fc00::11a/126 bp_interfaces: ipv4: 10.10.246.72/24 ipv6: fc0a::48/64 - ARISTA72T0: + ARISTA68T0: properties: - common bgp: - asn: 64002 + asn: 64068 peers: 65100: - - 10.0.0.166 - - fc00::14d + - 10.0.0.142 + - fc00::11d interfaces: Loopback0: - ipv4: 100.1.0.84/32 - ipv6: 2064:100::54/128 + ipv4: 100.1.0.72/32 + ipv6: 2064:100::48/128 Ethernet1: - ipv4: 10.0.0.167/31 - ipv6: fc00::14e/126 + ipv4: 10.0.0.143/31 + ipv6: 
fc00::11e/126 bp_interfaces: ipv4: 10.10.246.73/24 ipv6: fc0a::49/64 - ARISTA73T0: + ARISTA69T0: properties: - common bgp: - asn: 64002 + asn: 64069 peers: 65100: - - 10.0.0.168 - - fc00::151 + - 10.0.0.144 + - fc00::121 interfaces: Loopback0: - ipv4: 100.1.0.85/32 - ipv6: 2064:100::55/128 + ipv4: 100.1.0.73/32 + ipv6: 2064:100::49/128 Ethernet1: - ipv4: 10.0.0.169/31 - ipv6: fc00::152/126 + ipv4: 10.0.0.145/31 + ipv6: fc00::122/126 bp_interfaces: ipv4: 10.10.246.74/24 ipv6: fc0a::4a/64 - ARISTA74T0: + ARISTA70T0: properties: - common bgp: - asn: 64002 + asn: 64070 peers: 65100: - - 10.0.0.170 - - fc00::155 + - 10.0.0.146 + - fc00::125 interfaces: Loopback0: - ipv4: 100.1.0.86/32 - ipv6: 2064:100::56/128 + ipv4: 100.1.0.74/32 + ipv6: 2064:100::4a/128 Ethernet1: - ipv4: 10.0.0.171/31 - ipv6: fc00::156/126 + ipv4: 10.0.0.147/31 + ipv6: fc00::126/126 bp_interfaces: ipv4: 10.10.246.75/24 ipv6: fc0a::4b/64 - ARISTA75T0: + ARISTA71T0: properties: - common bgp: - asn: 64002 + asn: 64071 peers: 65100: - - 10.0.0.172 - - fc00::159 + - 10.0.0.148 + - fc00::129 interfaces: Loopback0: - ipv4: 100.1.0.87/32 - ipv6: 2064:100::57/128 + ipv4: 100.1.0.75/32 + ipv6: 2064:100::4b/128 Ethernet1: - ipv4: 10.0.0.173/31 - ipv6: fc00::15a/126 + ipv4: 10.0.0.149/31 + ipv6: fc00::12a/126 bp_interfaces: ipv4: 10.10.246.76/24 ipv6: fc0a::4c/64 - ARISTA76T0: + ARISTA72T0: properties: - common bgp: - asn: 64002 + asn: 64072 peers: 65100: - - 10.0.0.174 - - fc00::15d + - 10.0.0.150 + - fc00::12d interfaces: Loopback0: - ipv4: 100.1.0.88/32 - ipv6: 2064:100::58/128 + ipv4: 100.1.0.76/32 + ipv6: 2064:100::4c/128 Ethernet1: - ipv4: 10.0.0.175/31 - ipv6: fc00::15e/126 + ipv4: 10.0.0.151/31 + ipv6: fc00::12e/126 bp_interfaces: ipv4: 10.10.246.77/24 ipv6: fc0a::4d/64 - ARISTA77T0: + ARISTA73T0: properties: - common bgp: - asn: 64002 + asn: 64073 peers: 65100: - - 10.0.0.176 - - fc00::161 + - 10.0.0.152 + - fc00::131 interfaces: Loopback0: - ipv4: 100.1.0.89/32 - ipv6: 2064:100::59/128 + ipv4: 
100.1.0.77/32 + ipv6: 2064:100::4d/128 Ethernet1: - ipv4: 10.0.0.177/31 - ipv6: fc00::162/126 + ipv4: 10.0.0.153/31 + ipv6: fc00::132/126 bp_interfaces: ipv4: 10.10.246.78/24 ipv6: fc0a::4e/64 - ARISTA78T0: + ARISTA74T0: properties: - common bgp: - asn: 64002 + asn: 64074 peers: 65100: - - 10.0.0.178 - - fc00::165 + - 10.0.0.154 + - fc00::135 interfaces: Loopback0: - ipv4: 100.1.0.90/32 - ipv6: 2064:100::5a/128 + ipv4: 100.1.0.78/32 + ipv6: 2064:100::4e/128 Ethernet1: - ipv4: 10.0.0.179/31 - ipv6: fc00::166/126 + ipv4: 10.0.0.155/31 + ipv6: fc00::136/126 bp_interfaces: ipv4: 10.10.246.79/24 ipv6: fc0a::4f/64 - ARISTA79T0: + ARISTA75T0: properties: - common bgp: - asn: 64002 + asn: 64075 peers: 65100: - - 10.0.0.180 - - fc00::169 + - 10.0.0.156 + - fc00::139 interfaces: Loopback0: - ipv4: 100.1.0.91/32 - ipv6: 2064:100::5b/128 + ipv4: 100.1.0.79/32 + ipv6: 2064:100::4f/128 Ethernet1: - ipv4: 10.0.0.181/31 - ipv6: fc00::16a/126 + ipv4: 10.0.0.157/31 + ipv6: fc00::13a/126 bp_interfaces: ipv4: 10.10.246.80/24 ipv6: fc0a::50/64 - ARISTA80T0: + ARISTA76T0: properties: - common bgp: - asn: 64002 + asn: 64076 peers: 65100: - - 10.0.0.182 - - fc00::16d + - 10.0.0.158 + - fc00::13d interfaces: Loopback0: - ipv4: 100.1.0.92/32 - ipv6: 2064:100::5c/128 + ipv4: 100.1.0.80/32 + ipv6: 2064:100::50/128 Ethernet1: - ipv4: 10.0.0.183/31 - ipv6: fc00::16e/126 + ipv4: 10.0.0.159/31 + ipv6: fc00::13e/126 bp_interfaces: ipv4: 10.10.246.81/24 ipv6: fc0a::51/64 - ARISTA81T0: + ARISTA77T0: properties: - common bgp: - asn: 64002 + asn: 64077 peers: 65100: - - 10.0.0.184 - - fc00::171 + - 10.0.0.160 + - fc00::141 interfaces: Loopback0: - ipv4: 100.1.0.93/32 - ipv6: 2064:100::5d/128 + ipv4: 100.1.0.81/32 + ipv6: 2064:100::51/128 Ethernet1: - ipv4: 10.0.0.185/31 - ipv6: fc00::172/126 + ipv4: 10.0.0.161/31 + ipv6: fc00::142/126 bp_interfaces: ipv4: 10.10.246.82/24 ipv6: fc0a::52/64 - ARISTA82T0: + ARISTA78T0: properties: - common bgp: - asn: 64002 + asn: 64078 peers: 65100: - - 10.0.0.186 - - 
fc00::175 + - 10.0.0.162 + - fc00::145 interfaces: Loopback0: - ipv4: 100.1.0.94/32 - ipv6: 2064:100::5e/128 + ipv4: 100.1.0.82/32 + ipv6: 2064:100::52/128 Ethernet1: - ipv4: 10.0.0.187/31 - ipv6: fc00::176/126 + ipv4: 10.0.0.163/31 + ipv6: fc00::146/126 bp_interfaces: ipv4: 10.10.246.83/24 ipv6: fc0a::53/64 - ARISTA83T0: + ARISTA79T0: properties: - common bgp: - asn: 64002 + asn: 64079 peers: 65100: - - 10.0.0.188 - - fc00::179 + - 10.0.0.164 + - fc00::149 interfaces: Loopback0: - ipv4: 100.1.0.95/32 - ipv6: 2064:100::5f/128 + ipv4: 100.1.0.83/32 + ipv6: 2064:100::53/128 Ethernet1: - ipv4: 10.0.0.189/31 - ipv6: fc00::17a/126 + ipv4: 10.0.0.165/31 + ipv6: fc00::14a/126 bp_interfaces: ipv4: 10.10.246.84/24 ipv6: fc0a::54/64 - ARISTA84T0: + ARISTA80T0: properties: - common bgp: - asn: 64002 + asn: 64080 peers: 65100: - - 10.0.0.190 - - fc00::17d + - 10.0.0.166 + - fc00::14d interfaces: Loopback0: - ipv4: 100.1.0.96/32 - ipv6: 2064:100::60/128 + ipv4: 100.1.0.84/32 + ipv6: 2064:100::54/128 Ethernet1: - ipv4: 10.0.0.191/31 - ipv6: fc00::17e/126 + ipv4: 10.0.0.167/31 + ipv6: fc00::14e/126 bp_interfaces: ipv4: 10.10.246.85/24 ipv6: fc0a::55/64 - ARISTA85T0: + ARISTA81T0: properties: - common bgp: - asn: 64002 + asn: 64081 peers: 65100: - - 10.0.0.192 - - fc00::181 + - 10.0.0.168 + - fc00::151 interfaces: Loopback0: - ipv4: 100.1.0.97/32 - ipv6: 2064:100::61/128 + ipv4: 100.1.0.85/32 + ipv6: 2064:100::55/128 Ethernet1: - ipv4: 10.0.0.193/31 - ipv6: fc00::182/126 + ipv4: 10.0.0.169/31 + ipv6: fc00::152/126 bp_interfaces: ipv4: 10.10.246.86/24 ipv6: fc0a::56/64 - ARISTA86T0: + ARISTA82T0: properties: - common bgp: - asn: 64002 + asn: 64082 peers: 65100: - - 10.0.0.194 - - fc00::185 + - 10.0.0.170 + - fc00::155 interfaces: Loopback0: - ipv4: 100.1.0.98/32 - ipv6: 2064:100::62/128 + ipv4: 100.1.0.86/32 + ipv6: 2064:100::56/128 Ethernet1: - ipv4: 10.0.0.195/31 - ipv6: fc00::186/126 + ipv4: 10.0.0.171/31 + ipv6: fc00::156/126 bp_interfaces: ipv4: 10.10.246.87/24 ipv6: 
fc0a::57/64 - ARISTA87T0: + ARISTA83T0: properties: - common bgp: - asn: 64002 + asn: 64083 peers: 65100: - - 10.0.0.196 - - fc00::189 + - 10.0.0.172 + - fc00::159 interfaces: Loopback0: - ipv4: 100.1.0.99/32 - ipv6: 2064:100::63/128 + ipv4: 100.1.0.87/32 + ipv6: 2064:100::57/128 Ethernet1: - ipv4: 10.0.0.197/31 - ipv6: fc00::18a/126 + ipv4: 10.0.0.173/31 + ipv6: fc00::15a/126 bp_interfaces: ipv4: 10.10.246.88/24 ipv6: fc0a::58/64 - ARISTA88T0: + ARISTA84T0: properties: - common bgp: - asn: 64002 + asn: 64084 peers: 65100: - - 10.0.0.198 - - fc00::18d + - 10.0.0.174 + - fc00::15d interfaces: Loopback0: - ipv4: 100.1.0.100/32 - ipv6: 2064:100::64/128 + ipv4: 100.1.0.88/32 + ipv6: 2064:100::58/128 Ethernet1: - ipv4: 10.0.0.199/31 - ipv6: fc00::18e/126 + ipv4: 10.0.0.175/31 + ipv6: fc00::15e/126 bp_interfaces: ipv4: 10.10.246.89/24 ipv6: fc0a::59/64 - ARISTA89T0: + ARISTA85T0: properties: - common bgp: - asn: 64002 + asn: 64085 peers: 65100: - - 10.0.0.200 - - fc00::191 + - 10.0.0.176 + - fc00::161 interfaces: Loopback0: - ipv4: 100.1.0.101/32 - ipv6: 2064:100::65/128 + ipv4: 100.1.0.89/32 + ipv6: 2064:100::59/128 Ethernet1: - ipv4: 10.0.0.201/31 - ipv6: fc00::192/126 + ipv4: 10.0.0.177/31 + ipv6: fc00::162/126 bp_interfaces: ipv4: 10.10.246.90/24 ipv6: fc0a::5a/64 - ARISTA90T0: + ARISTA86T0: properties: - common bgp: - asn: 64002 + asn: 64086 peers: 65100: - - 10.0.0.202 - - fc00::195 + - 10.0.0.178 + - fc00::165 interfaces: Loopback0: - ipv4: 100.1.0.102/32 - ipv6: 2064:100::66/128 + ipv4: 100.1.0.90/32 + ipv6: 2064:100::5a/128 Ethernet1: - ipv4: 10.0.0.203/31 - ipv6: fc00::196/126 + ipv4: 10.0.0.179/31 + ipv6: fc00::166/126 bp_interfaces: ipv4: 10.10.246.91/24 ipv6: fc0a::5b/64 - ARISTA91T0: + ARISTA87T0: properties: - common bgp: - asn: 64002 + asn: 64087 peers: 65100: - - 10.0.0.204 - - fc00::199 + - 10.0.0.180 + - fc00::169 interfaces: Loopback0: - ipv4: 100.1.0.103/32 - ipv6: 2064:100::67/128 + ipv4: 100.1.0.91/32 + ipv6: 2064:100::5b/128 Ethernet1: - ipv4: 
10.0.0.205/31 - ipv6: fc00::19a/126 + ipv4: 10.0.0.181/31 + ipv6: fc00::16a/126 bp_interfaces: ipv4: 10.10.246.92/24 ipv6: fc0a::5c/64 - ARISTA92T0: + ARISTA88T0: properties: - common bgp: - asn: 64002 + asn: 64088 peers: 65100: - - 10.0.0.206 - - fc00::19d + - 10.0.0.182 + - fc00::16d interfaces: Loopback0: - ipv4: 100.1.0.104/32 - ipv6: 2064:100::68/128 + ipv4: 100.1.0.92/32 + ipv6: 2064:100::5c/128 Ethernet1: - ipv4: 10.0.0.207/31 - ipv6: fc00::19e/126 + ipv4: 10.0.0.183/31 + ipv6: fc00::16e/126 bp_interfaces: ipv4: 10.10.246.93/24 ipv6: fc0a::5d/64 - ARISTA93T0: + ARISTA89T0: properties: - common bgp: - asn: 64002 + asn: 64089 peers: 65100: - - 10.0.0.208 - - fc00::1a1 + - 10.0.0.184 + - fc00::171 interfaces: Loopback0: - ipv4: 100.1.0.105/32 - ipv6: 2064:100::69/128 + ipv4: 100.1.0.93/32 + ipv6: 2064:100::5d/128 Ethernet1: - ipv4: 10.0.0.209/31 - ipv6: fc00::1a2/126 + ipv4: 10.0.0.185/31 + ipv6: fc00::172/126 bp_interfaces: ipv4: 10.10.246.94/24 ipv6: fc0a::5e/64 - ARISTA94T0: + ARISTA90T0: properties: - common bgp: - asn: 64002 + asn: 64090 peers: 65100: - - 10.0.0.210 - - fc00::1a5 + - 10.0.0.186 + - fc00::175 interfaces: Loopback0: - ipv4: 100.1.0.106/32 - ipv6: 2064:100::6a/128 + ipv4: 100.1.0.94/32 + ipv6: 2064:100::5e/128 Ethernet1: - ipv4: 10.0.0.211/31 - ipv6: fc00::1a6/126 + ipv4: 10.0.0.187/31 + ipv6: fc00::176/126 bp_interfaces: ipv4: 10.10.246.95/24 ipv6: fc0a::5f/64 - ARISTA95T0: + ARISTA91T0: properties: - common bgp: - asn: 64002 + asn: 64091 peers: 65100: - - 10.0.0.212 - - fc00::1a9 + - 10.0.0.188 + - fc00::179 interfaces: Loopback0: - ipv4: 100.1.0.107/32 - ipv6: 2064:100::6b/128 + ipv4: 100.1.0.95/32 + ipv6: 2064:100::5f/128 Ethernet1: - ipv4: 10.0.0.213/31 - ipv6: fc00::1aa/126 + ipv4: 10.0.0.189/31 + ipv6: fc00::17a/126 bp_interfaces: ipv4: 10.10.246.96/24 ipv6: fc0a::60/64 - ARISTA96T0: + ARISTA92T0: properties: - common bgp: - asn: 64002 + asn: 64092 peers: 65100: - - 10.0.0.214 - - fc00::1ad + - 10.0.0.190 + - fc00::17d interfaces: 
Loopback0: - ipv4: 100.1.0.108/32 - ipv6: 2064:100::6c/128 + ipv4: 100.1.0.96/32 + ipv6: 2064:100::60/128 Ethernet1: - ipv4: 10.0.0.215/31 - ipv6: fc00::1ae/126 + ipv4: 10.0.0.191/31 + ipv6: fc00::17e/126 bp_interfaces: ipv4: 10.10.246.97/24 ipv6: fc0a::61/64 - ARISTA97T0: + ARISTA93T0: properties: - common bgp: - asn: 64002 + asn: 64093 peers: 65100: - - 10.0.0.216 - - fc00::1b1 + - 10.0.0.192 + - fc00::181 interfaces: Loopback0: - ipv4: 100.1.0.109/32 - ipv6: 2064:100::6d/128 - Ethernet1: - ipv4: 10.0.0.217/31 - ipv6: fc00::1b2/126 + ipv4: 100.1.0.97/32 + ipv6: 2064:100::61/128 + Ethernet1: + ipv4: 10.0.0.193/31 + ipv6: fc00::182/126 bp_interfaces: ipv4: 10.10.246.98/24 ipv6: fc0a::62/64 - ARISTA98T0: + ARISTA94T0: properties: - common bgp: - asn: 64002 + asn: 64094 peers: 65100: - - 10.0.0.218 - - fc00::1b5 + - 10.0.0.194 + - fc00::185 interfaces: Loopback0: - ipv4: 100.1.0.110/32 - ipv6: 2064:100::6e/128 + ipv4: 100.1.0.98/32 + ipv6: 2064:100::62/128 Ethernet1: - ipv4: 10.0.0.219/31 - ipv6: fc00::1b6/126 + ipv4: 10.0.0.195/31 + ipv6: fc00::186/126 bp_interfaces: ipv4: 10.10.246.99/24 ipv6: fc0a::63/64 - ARISTA99T0: + ARISTA95T0: properties: - common bgp: - asn: 64002 + asn: 64095 peers: 65100: - - 10.0.0.220 - - fc00::1b9 + - 10.0.0.196 + - fc00::189 interfaces: Loopback0: - ipv4: 100.1.0.111/32 - ipv6: 2064:100::6f/128 + ipv4: 100.1.0.99/32 + ipv6: 2064:100::63/128 Ethernet1: - ipv4: 10.0.0.221/31 - ipv6: fc00::1ba/126 + ipv4: 10.0.0.197/31 + ipv6: fc00::18a/126 bp_interfaces: ipv4: 10.10.246.100/24 ipv6: fc0a::64/64 - ARISTA100T0: + ARISTA96T0: properties: - common bgp: - asn: 64002 + asn: 64096 peers: 65100: - - 10.0.0.222 - - fc00::1bd + - 10.0.0.198 + - fc00::18d interfaces: Loopback0: - ipv4: 100.1.0.112/32 - ipv6: 2064:100::70/128 + ipv4: 100.1.0.100/32 + ipv6: 2064:100::64/128 Ethernet1: - ipv4: 10.0.0.223/31 - ipv6: fc00::1be/126 + ipv4: 10.0.0.199/31 + ipv6: fc00::18e/126 bp_interfaces: ipv4: 10.10.246.101/24 ipv6: fc0a::65/64 - ARISTA101T0: + 
ARISTA97T0: properties: - common bgp: - asn: 64002 + asn: 64097 peers: 65100: - - 10.0.0.224 - - fc00::1c1 + - 10.0.0.200 + - fc00::191 interfaces: Loopback0: - ipv4: 100.1.0.113/32 - ipv6: 2064:100::71/128 + ipv4: 100.1.0.101/32 + ipv6: 2064:100::65/128 Ethernet1: - ipv4: 10.0.0.225/31 - ipv6: fc00::1c2/126 + ipv4: 10.0.0.201/31 + ipv6: fc00::192/126 bp_interfaces: ipv4: 10.10.246.102/24 ipv6: fc0a::66/64 - ARISTA102T0: + ARISTA98T0: properties: - common bgp: - asn: 64002 + asn: 64098 peers: 65100: - - 10.0.0.226 - - fc00::1c5 + - 10.0.0.202 + - fc00::195 interfaces: Loopback0: - ipv4: 100.1.0.114/32 - ipv6: 2064:100::72/128 + ipv4: 100.1.0.102/32 + ipv6: 2064:100::66/128 Ethernet1: - ipv4: 10.0.0.227/31 - ipv6: fc00::1c6/126 + ipv4: 10.0.0.203/31 + ipv6: fc00::196/126 bp_interfaces: ipv4: 10.10.246.103/24 ipv6: fc0a::67/64 - ARISTA103T0: + ARISTA99T0: properties: - common bgp: - asn: 64002 + asn: 64099 peers: 65100: - - 10.0.0.228 - - fc00::1c9 + - 10.0.0.204 + - fc00::199 interfaces: Loopback0: - ipv4: 100.1.0.115/32 - ipv6: 2064:100::73/128 + ipv4: 100.1.0.103/32 + ipv6: 2064:100::67/128 Ethernet1: - ipv4: 10.0.0.229/31 - ipv6: fc00::1ca/126 + ipv4: 10.0.0.205/31 + ipv6: fc00::19a/126 bp_interfaces: ipv4: 10.10.246.104/24 ipv6: fc0a::68/64 - ARISTA104T0: + ARISTA100T0: properties: - common bgp: - asn: 64002 + asn: 64100 peers: 65100: - - 10.0.0.230 - - fc00::1cd + - 10.0.0.206 + - fc00::19d interfaces: Loopback0: - ipv4: 100.1.0.116/32 - ipv6: 2064:100::74/128 + ipv4: 100.1.0.104/32 + ipv6: 2064:100::68/128 Ethernet1: - ipv4: 10.0.0.231/31 - ipv6: fc00::1ce/126 + ipv4: 10.0.0.207/31 + ipv6: fc00::19e/126 bp_interfaces: ipv4: 10.10.246.105/24 ipv6: fc0a::69/64 - ARISTA105T0: + ARISTA101T0: properties: - common bgp: - asn: 64002 + asn: 64101 peers: 65100: - - 10.0.0.232 - - fc00::1d1 + - 10.0.0.208 + - fc00::1a1 interfaces: Loopback0: - ipv4: 100.1.0.117/32 - ipv6: 2064:100::75/128 + ipv4: 100.1.0.105/32 + ipv6: 2064:100::69/128 Ethernet1: - ipv4: 10.0.0.233/31 - 
ipv6: fc00::1d2/126 + ipv4: 10.0.0.209/31 + ipv6: fc00::1a2/126 bp_interfaces: ipv4: 10.10.246.106/24 ipv6: fc0a::6a/64 - ARISTA106T0: + ARISTA102T0: properties: - common bgp: - asn: 64002 + asn: 64102 peers: 65100: - - 10.0.0.234 - - fc00::1d5 + - 10.0.0.210 + - fc00::1a5 interfaces: Loopback0: - ipv4: 100.1.0.118/32 - ipv6: 2064:100::76/128 + ipv4: 100.1.0.106/32 + ipv6: 2064:100::6a/128 Ethernet1: - ipv4: 10.0.0.235/31 - ipv6: fc00::1d6/126 + ipv4: 10.0.0.211/31 + ipv6: fc00::1a6/126 bp_interfaces: ipv4: 10.10.246.107/24 ipv6: fc0a::6b/64 - ARISTA107T0: + ARISTA103T0: properties: - common bgp: - asn: 64002 + asn: 64103 peers: 65100: - - 10.0.0.236 - - fc00::1d9 + - 10.0.0.212 + - fc00::1a9 interfaces: Loopback0: - ipv4: 100.1.0.119/32 - ipv6: 2064:100::77/128 + ipv4: 100.1.0.107/32 + ipv6: 2064:100::6b/128 Ethernet1: - ipv4: 10.0.0.237/31 - ipv6: fc00::1da/126 + ipv4: 10.0.0.213/31 + ipv6: fc00::1aa/126 bp_interfaces: ipv4: 10.10.246.108/24 ipv6: fc0a::6c/64 - ARISTA108T0: + ARISTA104T0: properties: - common bgp: - asn: 64002 + asn: 64104 peers: 65100: - - 10.0.0.238 - - fc00::1dd + - 10.0.0.214 + - fc00::1ad interfaces: Loopback0: - ipv4: 100.1.0.120/32 - ipv6: 2064:100::78/128 + ipv4: 100.1.0.108/32 + ipv6: 2064:100::6c/128 Ethernet1: - ipv4: 10.0.0.239/31 - ipv6: fc00::1de/126 + ipv4: 10.0.0.215/31 + ipv6: fc00::1ae/126 bp_interfaces: ipv4: 10.10.246.109/24 ipv6: fc0a::6d/64 - ARISTA109T0: + ARISTA105T0: properties: - common bgp: - asn: 64002 + asn: 64105 peers: 65100: - - 10.0.0.240 - - fc00::1e1 + - 10.0.0.216 + - fc00::1b1 interfaces: Loopback0: - ipv4: 100.1.0.121/32 - ipv6: 2064:100::79/128 + ipv4: 100.1.0.109/32 + ipv6: 2064:100::6d/128 Ethernet1: - ipv4: 10.0.0.241/31 - ipv6: fc00::1e2/126 + ipv4: 10.0.0.217/31 + ipv6: fc00::1b2/126 bp_interfaces: ipv4: 10.10.246.110/24 ipv6: fc0a::6e/64 - ARISTA110T0: + ARISTA106T0: properties: - common bgp: - asn: 64002 + asn: 64106 peers: 65100: - - 10.0.0.242 - - fc00::1e5 + - 10.0.0.218 + - fc00::1b5 interfaces: 
Loopback0: - ipv4: 100.1.0.122/32 - ipv6: 2064:100::7a/128 + ipv4: 100.1.0.110/32 + ipv6: 2064:100::6e/128 Ethernet1: - ipv4: 10.0.0.243/31 - ipv6: fc00::1e6/126 + ipv4: 10.0.0.219/31 + ipv6: fc00::1b6/126 bp_interfaces: ipv4: 10.10.246.111/24 ipv6: fc0a::6f/64 - ARISTA111T0: + ARISTA107T0: properties: - common bgp: - asn: 64002 + asn: 64107 peers: 65100: - - 10.0.0.244 - - fc00::1e9 + - 10.0.0.220 + - fc00::1b9 interfaces: Loopback0: - ipv4: 100.1.0.123/32 - ipv6: 2064:100::7b/128 + ipv4: 100.1.0.111/32 + ipv6: 2064:100::6f/128 Ethernet1: - ipv4: 10.0.0.245/31 - ipv6: fc00::1ea/126 + ipv4: 10.0.0.221/31 + ipv6: fc00::1ba/126 bp_interfaces: ipv4: 10.10.246.112/24 ipv6: fc0a::70/64 - ARISTA112T0: + ARISTA108T0: properties: - common bgp: - asn: 64002 + asn: 64108 peers: 65100: - - 10.0.0.246 - - fc00::1ed + - 10.0.0.222 + - fc00::1bd interfaces: Loopback0: - ipv4: 100.1.0.124/32 - ipv6: 2064:100::7c/128 + ipv4: 100.1.0.112/32 + ipv6: 2064:100::70/128 Ethernet1: - ipv4: 10.0.0.247/31 - ipv6: fc00::1ee/126 + ipv4: 10.0.0.223/31 + ipv6: fc00::1be/126 bp_interfaces: ipv4: 10.10.246.113/24 ipv6: fc0a::71/64 - ARISTA113T0: + ARISTA109T0: properties: - common bgp: - asn: 64002 + asn: 64109 peers: 65100: - - 10.0.0.248 - - fc00::1f1 + - 10.0.0.224 + - fc00::1c1 interfaces: Loopback0: - ipv4: 100.1.0.125/32 - ipv6: 2064:100::7d/128 + ipv4: 100.1.0.113/32 + ipv6: 2064:100::71/128 Ethernet1: - ipv4: 10.0.0.249/31 - ipv6: fc00::1f2/126 + ipv4: 10.0.0.225/31 + ipv6: fc00::1c2/126 bp_interfaces: ipv4: 10.10.246.114/24 ipv6: fc0a::72/64 - ARISTA114T0: + ARISTA110T0: properties: - common bgp: - asn: 64002 + asn: 64110 peers: 65100: - - 10.0.0.250 - - fc00::1f5 + - 10.0.0.226 + - fc00::1c5 interfaces: Loopback0: - ipv4: 100.1.0.126/32 - ipv6: 2064:100::7e/128 + ipv4: 100.1.0.114/32 + ipv6: 2064:100::72/128 Ethernet1: - ipv4: 10.0.0.251/31 - ipv6: fc00::1f6/126 + ipv4: 10.0.0.227/31 + ipv6: fc00::1c6/126 bp_interfaces: ipv4: 10.10.246.115/24 ipv6: fc0a::73/64 - ARISTA115T0: + 
ARISTA111T0: properties: - common bgp: - asn: 64002 + asn: 64111 peers: 65100: - - 10.0.0.252 - - fc00::1f9 + - 10.0.0.228 + - fc00::1c9 interfaces: Loopback0: - ipv4: 100.1.0.127/32 - ipv6: 2064:100::7f/128 + ipv4: 100.1.0.115/32 + ipv6: 2064:100::73/128 Ethernet1: - ipv4: 10.0.0.253/31 - ipv6: fc00::1fa/126 + ipv4: 10.0.0.229/31 + ipv6: fc00::1ca/126 bp_interfaces: ipv4: 10.10.246.116/24 ipv6: fc0a::74/64 - ARISTA116T0: + ARISTA112T0: properties: - common bgp: - asn: 64002 + asn: 64112 peers: 65100: - - 10.0.0.254 - - fc00::1fd + - 10.0.0.230 + - fc00::1cd interfaces: Loopback0: - ipv4: 100.1.0.128/32 - ipv6: 2064:100::80/128 + ipv4: 100.1.0.116/32 + ipv6: 2064:100::74/128 Ethernet1: - ipv4: 10.0.0.255/31 - ipv6: fc00::1fe/126 + ipv4: 10.0.0.231/31 + ipv6: fc00::1ce/126 bp_interfaces: ipv4: 10.10.246.117/24 ipv6: fc0a::75/64 - ARISTA117T0: + ARISTA113T0: properties: - common bgp: - asn: 64002 + asn: 64113 peers: 65100: - - 10.0.1.0 - - fc00::201 + - 10.0.0.232 + - fc00::1d1 interfaces: Loopback0: - ipv4: 100.1.0.129/32 - ipv6: 2064:100::81/128 + ipv4: 100.1.0.117/32 + ipv6: 2064:100::75/128 Ethernet1: - ipv4: 10.0.1.1/31 - ipv6: fc00::202/126 + ipv4: 10.0.0.233/31 + ipv6: fc00::1d2/126 bp_interfaces: ipv4: 10.10.246.118/24 ipv6: fc0a::76/64 - ARISTA118T0: + ARISTA114T0: properties: - common bgp: - asn: 64002 + asn: 64114 peers: 65100: - - 10.0.1.2 - - fc00::205 + - 10.0.0.234 + - fc00::1d5 interfaces: Loopback0: - ipv4: 100.1.0.130/32 - ipv6: 2064:100::82/128 + ipv4: 100.1.0.118/32 + ipv6: 2064:100::76/128 Ethernet1: - ipv4: 10.0.1.3/31 - ipv6: fc00::206/126 + ipv4: 10.0.0.235/31 + ipv6: fc00::1d6/126 bp_interfaces: ipv4: 10.10.246.119/24 ipv6: fc0a::77/64 - ARISTA119T0: + ARISTA115T0: properties: - common bgp: - asn: 64002 + asn: 64115 peers: 65100: - - 10.0.1.4 - - fc00::209 + - 10.0.0.236 + - fc00::1d9 interfaces: Loopback0: - ipv4: 100.1.0.131/32 - ipv6: 2064:100::83/128 + ipv4: 100.1.0.119/32 + ipv6: 2064:100::77/128 Ethernet1: - ipv4: 10.0.1.5/31 - ipv6: 
fc00::20a/126 + ipv4: 10.0.0.237/31 + ipv6: fc00::1da/126 bp_interfaces: ipv4: 10.10.246.120/24 ipv6: fc0a::78/64 - ARISTA120T0: + ARISTA116T0: properties: - common bgp: - asn: 64002 + asn: 64116 peers: 65100: - - 10.0.1.6 - - fc00::20d + - 10.0.0.238 + - fc00::1dd interfaces: Loopback0: - ipv4: 100.1.0.132/32 - ipv6: 2064:100::84/128 + ipv4: 100.1.0.120/32 + ipv6: 2064:100::78/128 Ethernet1: - ipv4: 10.0.1.7/31 - ipv6: fc00::20e/126 + ipv4: 10.0.0.239/31 + ipv6: fc00::1de/126 bp_interfaces: ipv4: 10.10.246.121/24 ipv6: fc0a::79/64 - ARISTA121T0: + ARISTA117T0: properties: - common bgp: - asn: 64002 + asn: 64117 peers: 65100: - - 10.0.1.8 - - fc00::211 + - 10.0.0.240 + - fc00::1e1 interfaces: Loopback0: - ipv4: 100.1.0.133/32 - ipv6: 2064:100::85/128 + ipv4: 100.1.0.121/32 + ipv6: 2064:100::79/128 Ethernet1: - ipv4: 10.0.1.9/31 - ipv6: fc00::212/126 + ipv4: 10.0.0.241/31 + ipv6: fc00::1e2/126 bp_interfaces: ipv4: 10.10.246.122/24 ipv6: fc0a::7a/64 - ARISTA122T0: + ARISTA118T0: properties: - common bgp: - asn: 64002 + asn: 64118 peers: 65100: - - 10.0.1.10 - - fc00::215 + - 10.0.0.242 + - fc00::1e5 interfaces: Loopback0: - ipv4: 100.1.0.134/32 - ipv6: 2064:100::86/128 + ipv4: 100.1.0.122/32 + ipv6: 2064:100::7a/128 Ethernet1: - ipv4: 10.0.1.11/31 - ipv6: fc00::216/126 + ipv4: 10.0.0.243/31 + ipv6: fc00::1e6/126 bp_interfaces: ipv4: 10.10.246.123/24 ipv6: fc0a::7b/64 - ARISTA123T0: + ARISTA119T0: properties: - common bgp: - asn: 64002 + asn: 64119 peers: 65100: - - 10.0.1.12 - - fc00::219 + - 10.0.0.244 + - fc00::1e9 interfaces: Loopback0: - ipv4: 100.1.0.135/32 - ipv6: 2064:100::87/128 + ipv4: 100.1.0.123/32 + ipv6: 2064:100::7b/128 Ethernet1: - ipv4: 10.0.1.13/31 - ipv6: fc00::21a/126 + ipv4: 10.0.0.245/31 + ipv6: fc00::1ea/126 bp_interfaces: ipv4: 10.10.246.124/24 ipv6: fc0a::7c/64 - ARISTA124T0: + ARISTA120T0: properties: - common bgp: - asn: 64002 + asn: 64120 peers: 65100: - - 10.0.1.14 - - fc00::21d + - 10.0.0.246 + - fc00::1ed interfaces: Loopback0: - ipv4: 
100.1.0.136/32 - ipv6: 2064:100::88/128 + ipv4: 100.1.0.124/32 + ipv6: 2064:100::7c/128 Ethernet1: - ipv4: 10.0.1.15/31 - ipv6: fc00::21e/126 + ipv4: 10.0.0.247/31 + ipv6: fc00::1ee/126 bp_interfaces: ipv4: 10.10.246.125/24 ipv6: fc0a::7d/64 - ARISTA125T0: + ARISTA121T0: properties: - common bgp: - asn: 64002 + asn: 64121 peers: 65100: - - 10.0.1.16 - - fc00::221 + - 10.0.0.248 + - fc00::1f1 interfaces: Loopback0: - ipv4: 100.1.0.137/32 - ipv6: 2064:100::89/128 + ipv4: 100.1.0.125/32 + ipv6: 2064:100::7d/128 Ethernet1: - ipv4: 10.0.1.17/31 - ipv6: fc00::222/126 + ipv4: 10.0.0.249/31 + ipv6: fc00::1f2/126 bp_interfaces: ipv4: 10.10.246.126/24 ipv6: fc0a::7e/64 - ARISTA126T0: + ARISTA122T0: properties: - common bgp: - asn: 64002 + asn: 64122 peers: 65100: - - 10.0.1.18 - - fc00::225 + - 10.0.0.250 + - fc00::1f5 interfaces: Loopback0: - ipv4: 100.1.0.138/32 - ipv6: 2064:100::8a/128 + ipv4: 100.1.0.126/32 + ipv6: 2064:100::7e/128 Ethernet1: - ipv4: 10.0.1.19/31 - ipv6: fc00::226/126 + ipv4: 10.0.0.251/31 + ipv6: fc00::1f6/126 bp_interfaces: ipv4: 10.10.246.127/24 ipv6: fc0a::7f/64 - ARISTA127T0: + ARISTA123T0: properties: - common bgp: - asn: 64002 + asn: 64123 peers: 65100: - - 10.0.1.20 - - fc00::229 + - 10.0.0.252 + - fc00::1f9 interfaces: Loopback0: - ipv4: 100.1.0.139/32 - ipv6: 2064:100::8b/128 + ipv4: 100.1.0.127/32 + ipv6: 2064:100::7f/128 Ethernet1: - ipv4: 10.0.1.21/31 - ipv6: fc00::22a/126 + ipv4: 10.0.0.253/31 + ipv6: fc00::1fa/126 bp_interfaces: ipv4: 10.10.246.128/24 ipv6: fc0a::80/64 - ARISTA128T0: + ARISTA124T0: properties: - common bgp: - asn: 64002 + asn: 64124 peers: 65100: - - 10.0.1.22 - - fc00::22d + - 10.0.0.254 + - fc00::1fd interfaces: Loopback0: - ipv4: 100.1.0.140/32 - ipv6: 2064:100::8c/128 + ipv4: 100.1.0.128/32 + ipv6: 2064:100::80/128 Ethernet1: - ipv4: 10.0.1.23/31 - ipv6: fc00::22e/126 + ipv4: 10.0.0.255/31 + ipv6: fc00::1fe/126 bp_interfaces: ipv4: 10.10.246.129/24 ipv6: fc0a::81/64 - ARISTA129T0: + ARISTA125T0: properties: - common 
bgp: - asn: 64002 + asn: 64125 peers: 65100: - - 10.0.1.24 - - fc00::231 + - 10.0.1.0 + - fc00::201 interfaces: Loopback0: - ipv4: 100.1.0.141/32 - ipv6: 2064:100::8d/128 + ipv4: 100.1.0.129/32 + ipv6: 2064:100::81/128 Ethernet1: - ipv4: 10.0.1.25/31 - ipv6: fc00::232/126 + ipv4: 10.0.1.1/31 + ipv6: fc00::202/126 bp_interfaces: ipv4: 10.10.246.130/24 ipv6: fc0a::82/64 - ARISTA130T0: + ARISTA126T0: properties: - common bgp: - asn: 64002 + asn: 64126 peers: 65100: - - 10.0.1.26 - - fc00::235 + - 10.0.1.2 + - fc00::205 interfaces: Loopback0: - ipv4: 100.1.0.142/32 - ipv6: 2064:100::8e/128 + ipv4: 100.1.0.130/32 + ipv6: 2064:100::82/128 Ethernet1: - ipv4: 10.0.1.27/31 - ipv6: fc00::236/126 + ipv4: 10.0.1.3/31 + ipv6: fc00::206/126 bp_interfaces: ipv4: 10.10.246.131/24 ipv6: fc0a::83/64 - ARISTA131T0: + ARISTA127T0: properties: - common bgp: - asn: 64002 + asn: 64127 peers: 65100: - - 10.0.1.28 - - fc00::239 + - 10.0.1.4 + - fc00::209 interfaces: Loopback0: - ipv4: 100.1.0.143/32 - ipv6: 2064:100::8f/128 - Ethernet1: - ipv4: 10.0.1.29/31 - ipv6: fc00::23a/126 + ipv4: 100.1.0.131/32 + ipv6: 2064:100::83/128 + Ethernet1: + ipv4: 10.0.1.5/31 + ipv6: fc00::20a/126 bp_interfaces: ipv4: 10.10.246.132/24 ipv6: fc0a::84/64 - ARISTA132T0: + ARISTA128T0: properties: - common bgp: - asn: 64002 + asn: 64128 peers: 65100: - - 10.0.1.30 - - fc00::23d + - 10.0.1.6 + - fc00::20d interfaces: Loopback0: - ipv4: 100.1.0.144/32 - ipv6: 2064:100::90/128 + ipv4: 100.1.0.132/32 + ipv6: 2064:100::84/128 Ethernet1: - ipv4: 10.0.1.31/31 - ipv6: fc00::23e/126 + ipv4: 10.0.1.7/31 + ipv6: fc00::20e/126 bp_interfaces: ipv4: 10.10.246.133/24 ipv6: fc0a::85/64 - ARISTA133T0: + ARISTA129T0: properties: - common bgp: - asn: 64002 + asn: 64129 peers: 65100: - - 10.0.1.32 - - fc00::241 + - 10.0.1.8 + - fc00::211 interfaces: Loopback0: - ipv4: 100.1.0.145/32 - ipv6: 2064:100::91/128 + ipv4: 100.1.0.133/32 + ipv6: 2064:100::85/128 Ethernet1: - ipv4: 10.0.1.33/31 - ipv6: fc00::242/126 + ipv4: 10.0.1.9/31 + 
ipv6: fc00::212/126 bp_interfaces: ipv4: 10.10.246.134/24 ipv6: fc0a::86/64 - ARISTA134T0: + ARISTA130T0: properties: - common bgp: - asn: 64002 + asn: 64130 peers: 65100: - - 10.0.1.34 - - fc00::245 + - 10.0.1.10 + - fc00::215 interfaces: Loopback0: - ipv4: 100.1.0.146/32 - ipv6: 2064:100::92/128 + ipv4: 100.1.0.134/32 + ipv6: 2064:100::86/128 Ethernet1: - ipv4: 10.0.1.35/31 - ipv6: fc00::246/126 + ipv4: 10.0.1.11/31 + ipv6: fc00::216/126 bp_interfaces: ipv4: 10.10.246.135/24 ipv6: fc0a::87/64 - ARISTA135T0: + ARISTA131T0: properties: - common bgp: - asn: 64002 + asn: 64131 peers: 65100: - - 10.0.1.36 - - fc00::249 + - 10.0.1.12 + - fc00::219 interfaces: Loopback0: - ipv4: 100.1.0.147/32 - ipv6: 2064:100::93/128 + ipv4: 100.1.0.135/32 + ipv6: 2064:100::87/128 Ethernet1: - ipv4: 10.0.1.37/31 - ipv6: fc00::24a/126 + ipv4: 10.0.1.13/31 + ipv6: fc00::21a/126 bp_interfaces: ipv4: 10.10.246.136/24 ipv6: fc0a::88/64 - ARISTA136T0: + ARISTA132T0: properties: - common bgp: - asn: 64002 + asn: 64132 peers: 65100: - - 10.0.1.38 - - fc00::24d + - 10.0.1.14 + - fc00::21d interfaces: Loopback0: - ipv4: 100.1.0.148/32 - ipv6: 2064:100::94/128 + ipv4: 100.1.0.136/32 + ipv6: 2064:100::88/128 Ethernet1: - ipv4: 10.0.1.39/31 - ipv6: fc00::24e/126 + ipv4: 10.0.1.15/31 + ipv6: fc00::21e/126 bp_interfaces: ipv4: 10.10.246.137/24 ipv6: fc0a::89/64 - ARISTA137T0: + ARISTA133T0: properties: - common bgp: - asn: 64002 + asn: 64133 peers: 65100: - - 10.0.1.40 - - fc00::251 + - 10.0.1.16 + - fc00::221 interfaces: Loopback0: - ipv4: 100.1.0.149/32 - ipv6: 2064:100::95/128 + ipv4: 100.1.0.137/32 + ipv6: 2064:100::89/128 Ethernet1: - ipv4: 10.0.1.41/31 - ipv6: fc00::252/126 + ipv4: 10.0.1.17/31 + ipv6: fc00::222/126 bp_interfaces: ipv4: 10.10.246.138/24 ipv6: fc0a::8a/64 - ARISTA138T0: + ARISTA134T0: properties: - common bgp: - asn: 64002 + asn: 64134 peers: 65100: - - 10.0.1.42 - - fc00::255 + - 10.0.1.18 + - fc00::225 interfaces: Loopback0: - ipv4: 100.1.0.150/32 - ipv6: 2064:100::96/128 + 
ipv4: 100.1.0.138/32 + ipv6: 2064:100::8a/128 Ethernet1: - ipv4: 10.0.1.43/31 - ipv6: fc00::256/126 + ipv4: 10.0.1.19/31 + ipv6: fc00::226/126 bp_interfaces: ipv4: 10.10.246.139/24 ipv6: fc0a::8b/64 - ARISTA139T0: + ARISTA135T0: properties: - common bgp: - asn: 64002 + asn: 64135 peers: 65100: - - 10.0.1.44 - - fc00::259 + - 10.0.1.20 + - fc00::229 interfaces: Loopback0: - ipv4: 100.1.0.151/32 - ipv6: 2064:100::97/128 + ipv4: 100.1.0.139/32 + ipv6: 2064:100::8b/128 Ethernet1: - ipv4: 10.0.1.45/31 - ipv6: fc00::25a/126 + ipv4: 10.0.1.21/31 + ipv6: fc00::22a/126 bp_interfaces: ipv4: 10.10.246.140/24 ipv6: fc0a::8c/64 - ARISTA140T0: + ARISTA136T0: properties: - common bgp: - asn: 64002 + asn: 64136 peers: 65100: - - 10.0.1.46 - - fc00::25d + - 10.0.1.22 + - fc00::22d interfaces: Loopback0: - ipv4: 100.1.0.152/32 - ipv6: 2064:100::98/128 + ipv4: 100.1.0.140/32 + ipv6: 2064:100::8c/128 Ethernet1: - ipv4: 10.0.1.47/31 - ipv6: fc00::25e/126 + ipv4: 10.0.1.23/31 + ipv6: fc00::22e/126 bp_interfaces: ipv4: 10.10.246.141/24 ipv6: fc0a::8d/64 - ARISTA141T0: + ARISTA137T0: properties: - common bgp: - asn: 64002 + asn: 64137 peers: 65100: - - 10.0.1.48 - - fc00::261 + - 10.0.1.24 + - fc00::231 interfaces: Loopback0: - ipv4: 100.1.0.153/32 - ipv6: 2064:100::99/128 + ipv4: 100.1.0.141/32 + ipv6: 2064:100::8d/128 Ethernet1: - ipv4: 10.0.1.49/31 - ipv6: fc00::262/126 + ipv4: 10.0.1.25/31 + ipv6: fc00::232/126 bp_interfaces: ipv4: 10.10.246.142/24 ipv6: fc0a::8e/64 - ARISTA142T0: + ARISTA138T0: properties: - common bgp: - asn: 64002 + asn: 64138 peers: 65100: - - 10.0.1.50 - - fc00::265 + - 10.0.1.26 + - fc00::235 interfaces: Loopback0: - ipv4: 100.1.0.154/32 - ipv6: 2064:100::9a/128 + ipv4: 100.1.0.142/32 + ipv6: 2064:100::8e/128 Ethernet1: - ipv4: 10.0.1.51/31 - ipv6: fc00::266/126 + ipv4: 10.0.1.27/31 + ipv6: fc00::236/126 bp_interfaces: ipv4: 10.10.246.143/24 ipv6: fc0a::8f/64 - ARISTA143T0: + ARISTA139T0: properties: - common bgp: - asn: 64002 + asn: 64139 peers: 65100: - - 
10.0.1.52 - - fc00::269 + - 10.0.1.28 + - fc00::239 interfaces: Loopback0: - ipv4: 100.1.0.155/32 - ipv6: 2064:100::9b/128 + ipv4: 100.1.0.143/32 + ipv6: 2064:100::8f/128 Ethernet1: - ipv4: 10.0.1.53/31 - ipv6: fc00::26a/126 + ipv4: 10.0.1.29/31 + ipv6: fc00::23a/126 bp_interfaces: ipv4: 10.10.246.144/24 ipv6: fc0a::90/64 - ARISTA144T0: + ARISTA140T0: properties: - common bgp: - asn: 64002 + asn: 64140 peers: 65100: - - 10.0.1.54 - - fc00::26d + - 10.0.1.30 + - fc00::23d interfaces: Loopback0: - ipv4: 100.1.0.156/32 - ipv6: 2064:100::9c/128 + ipv4: 100.1.0.144/32 + ipv6: 2064:100::90/128 Ethernet1: - ipv4: 10.0.1.55/31 - ipv6: fc00::26e/126 + ipv4: 10.0.1.31/31 + ipv6: fc00::23e/126 bp_interfaces: ipv4: 10.10.246.145/24 ipv6: fc0a::91/64 - ARISTA145T0: + ARISTA141T0: properties: - common bgp: - asn: 64002 + asn: 64141 peers: 65100: - - 10.0.1.56 - - fc00::271 + - 10.0.1.32 + - fc00::241 interfaces: Loopback0: - ipv4: 100.1.0.157/32 - ipv6: 2064:100::9d/128 + ipv4: 100.1.0.145/32 + ipv6: 2064:100::91/128 Ethernet1: - ipv4: 10.0.1.57/31 - ipv6: fc00::272/126 + ipv4: 10.0.1.33/31 + ipv6: fc00::242/126 bp_interfaces: ipv4: 10.10.246.146/24 ipv6: fc0a::92/64 - ARISTA146T0: + ARISTA142T0: properties: - common bgp: - asn: 64002 + asn: 64142 peers: 65100: - - 10.0.1.58 - - fc00::275 + - 10.0.1.34 + - fc00::245 interfaces: Loopback0: - ipv4: 100.1.0.158/32 - ipv6: 2064:100::9e/128 + ipv4: 100.1.0.146/32 + ipv6: 2064:100::92/128 Ethernet1: - ipv4: 10.0.1.59/31 - ipv6: fc00::276/126 + ipv4: 10.0.1.35/31 + ipv6: fc00::246/126 bp_interfaces: ipv4: 10.10.246.147/24 ipv6: fc0a::93/64 - ARISTA147T0: + ARISTA143T0: properties: - common bgp: - asn: 64002 + asn: 64143 peers: 65100: - - 10.0.1.60 - - fc00::279 + - 10.0.1.36 + - fc00::249 interfaces: Loopback0: - ipv4: 100.1.0.159/32 - ipv6: 2064:100::9f/128 + ipv4: 100.1.0.147/32 + ipv6: 2064:100::93/128 Ethernet1: - ipv4: 10.0.1.61/31 - ipv6: fc00::27a/126 + ipv4: 10.0.1.37/31 + ipv6: fc00::24a/126 bp_interfaces: ipv4: 
10.10.246.148/24 ipv6: fc0a::94/64 - ARISTA148T0: + ARISTA144T0: properties: - common bgp: - asn: 64002 + asn: 64144 peers: 65100: - - 10.0.1.62 - - fc00::27d + - 10.0.1.38 + - fc00::24d interfaces: Loopback0: - ipv4: 100.1.0.160/32 - ipv6: 2064:100::a0/128 + ipv4: 100.1.0.148/32 + ipv6: 2064:100::94/128 Ethernet1: - ipv4: 10.0.1.63/31 - ipv6: fc00::27e/126 + ipv4: 10.0.1.39/31 + ipv6: fc00::24e/126 bp_interfaces: ipv4: 10.10.246.149/24 ipv6: fc0a::95/64 - ARISTA149T0: + ARISTA145T0: properties: - common bgp: - asn: 64002 + asn: 64145 peers: 65100: - - 10.0.1.64 - - fc00::281 + - 10.0.1.40 + - fc00::251 interfaces: Loopback0: - ipv4: 100.1.0.161/32 - ipv6: 2064:100::a1/128 + ipv4: 100.1.0.149/32 + ipv6: 2064:100::95/128 Ethernet1: - ipv4: 10.0.1.65/31 - ipv6: fc00::282/126 + ipv4: 10.0.1.41/31 + ipv6: fc00::252/126 bp_interfaces: ipv4: 10.10.246.150/24 ipv6: fc0a::96/64 - ARISTA150T0: + ARISTA146T0: properties: - common bgp: - asn: 64002 + asn: 64146 peers: 65100: - - 10.0.1.66 - - fc00::285 + - 10.0.1.42 + - fc00::255 interfaces: Loopback0: - ipv4: 100.1.0.162/32 - ipv6: 2064:100::a2/128 + ipv4: 100.1.0.150/32 + ipv6: 2064:100::96/128 Ethernet1: - ipv4: 10.0.1.67/31 - ipv6: fc00::286/126 + ipv4: 10.0.1.43/31 + ipv6: fc00::256/126 bp_interfaces: ipv4: 10.10.246.151/24 ipv6: fc0a::97/64 - ARISTA151T0: + ARISTA147T0: properties: - common bgp: - asn: 64002 + asn: 64147 peers: 65100: - - 10.0.1.68 - - fc00::289 + - 10.0.1.44 + - fc00::259 interfaces: Loopback0: - ipv4: 100.1.0.163/32 - ipv6: 2064:100::a3/128 + ipv4: 100.1.0.151/32 + ipv6: 2064:100::97/128 Ethernet1: - ipv4: 10.0.1.69/31 - ipv6: fc00::28a/126 + ipv4: 10.0.1.45/31 + ipv6: fc00::25a/126 bp_interfaces: ipv4: 10.10.246.152/24 ipv6: fc0a::98/64 - ARISTA152T0: + ARISTA148T0: properties: - common bgp: - asn: 64002 + asn: 64148 peers: 65100: - - 10.0.1.70 - - fc00::28d + - 10.0.1.46 + - fc00::25d interfaces: Loopback0: - ipv4: 100.1.0.164/32 - ipv6: 2064:100::a4/128 + ipv4: 100.1.0.152/32 + ipv6: 
2064:100::98/128 Ethernet1: - ipv4: 10.0.1.71/31 - ipv6: fc00::28e/126 + ipv4: 10.0.1.47/31 + ipv6: fc00::25e/126 bp_interfaces: ipv4: 10.10.246.153/24 ipv6: fc0a::99/64 - ARISTA153T0: + ARISTA149T0: properties: - common bgp: - asn: 64002 + asn: 64149 peers: 65100: - - 10.0.1.72 - - fc00::291 + - 10.0.1.48 + - fc00::261 interfaces: Loopback0: - ipv4: 100.1.0.165/32 - ipv6: 2064:100::a5/128 + ipv4: 100.1.0.153/32 + ipv6: 2064:100::99/128 Ethernet1: - ipv4: 10.0.1.73/31 - ipv6: fc00::292/126 + ipv4: 10.0.1.49/31 + ipv6: fc00::262/126 bp_interfaces: ipv4: 10.10.246.154/24 ipv6: fc0a::9a/64 - ARISTA154T0: + ARISTA150T0: properties: - common bgp: - asn: 64002 + asn: 64150 peers: 65100: - - 10.0.1.74 - - fc00::295 + - 10.0.1.50 + - fc00::265 interfaces: Loopback0: - ipv4: 100.1.0.166/32 - ipv6: 2064:100::a6/128 + ipv4: 100.1.0.154/32 + ipv6: 2064:100::9a/128 Ethernet1: - ipv4: 10.0.1.75/31 - ipv6: fc00::296/126 + ipv4: 10.0.1.51/31 + ipv6: fc00::266/126 bp_interfaces: ipv4: 10.10.246.155/24 ipv6: fc0a::9b/64 - ARISTA155T0: + ARISTA151T0: properties: - common bgp: - asn: 64002 + asn: 64151 peers: 65100: - - 10.0.1.76 - - fc00::299 + - 10.0.1.52 + - fc00::269 interfaces: Loopback0: - ipv4: 100.1.0.167/32 - ipv6: 2064:100::a7/128 + ipv4: 100.1.0.155/32 + ipv6: 2064:100::9b/128 Ethernet1: - ipv4: 10.0.1.77/31 - ipv6: fc00::29a/126 + ipv4: 10.0.1.53/31 + ipv6: fc00::26a/126 bp_interfaces: ipv4: 10.10.246.156/24 ipv6: fc0a::9c/64 - ARISTA156T0: + ARISTA152T0: properties: - common bgp: - asn: 64002 + asn: 64152 peers: 65100: - - 10.0.1.78 - - fc00::29d + - 10.0.1.54 + - fc00::26d interfaces: Loopback0: - ipv4: 100.1.0.168/32 - ipv6: 2064:100::a8/128 + ipv4: 100.1.0.156/32 + ipv6: 2064:100::9c/128 Ethernet1: - ipv4: 10.0.1.79/31 - ipv6: fc00::29e/126 + ipv4: 10.0.1.55/31 + ipv6: fc00::26e/126 bp_interfaces: ipv4: 10.10.246.157/24 ipv6: fc0a::9d/64 - ARISTA157T0: + ARISTA153T0: properties: - common bgp: - asn: 64002 + asn: 64153 peers: 65100: - - 10.0.1.80 - - fc00::2a1 + - 
10.0.1.56 + - fc00::271 interfaces: Loopback0: - ipv4: 100.1.0.169/32 - ipv6: 2064:100::a9/128 + ipv4: 100.1.0.157/32 + ipv6: 2064:100::9d/128 Ethernet1: - ipv4: 10.0.1.81/31 - ipv6: fc00::2a2/126 + ipv4: 10.0.1.57/31 + ipv6: fc00::272/126 bp_interfaces: ipv4: 10.10.246.158/24 ipv6: fc0a::9e/64 - ARISTA158T0: + ARISTA154T0: properties: - common bgp: - asn: 64002 + asn: 64154 peers: 65100: - - 10.0.1.82 - - fc00::2a5 + - 10.0.1.58 + - fc00::275 interfaces: Loopback0: - ipv4: 100.1.0.170/32 - ipv6: 2064:100::aa/128 + ipv4: 100.1.0.158/32 + ipv6: 2064:100::9e/128 Ethernet1: - ipv4: 10.0.1.83/31 - ipv6: fc00::2a6/126 + ipv4: 10.0.1.59/31 + ipv6: fc00::276/126 bp_interfaces: ipv4: 10.10.246.159/24 ipv6: fc0a::9f/64 - ARISTA159T0: + ARISTA155T0: properties: - common bgp: - asn: 64002 + asn: 64155 peers: 65100: - - 10.0.1.84 - - fc00::2a9 + - 10.0.1.60 + - fc00::279 interfaces: Loopback0: - ipv4: 100.1.0.171/32 - ipv6: 2064:100::ab/128 + ipv4: 100.1.0.159/32 + ipv6: 2064:100::9f/128 Ethernet1: - ipv4: 10.0.1.85/31 - ipv6: fc00::2aa/126 + ipv4: 10.0.1.61/31 + ipv6: fc00::27a/126 bp_interfaces: ipv4: 10.10.246.160/24 ipv6: fc0a::a0/64 - ARISTA160T0: + ARISTA156T0: properties: - common bgp: - asn: 64002 + asn: 64156 peers: 65100: - - 10.0.1.86 - - fc00::2ad + - 10.0.1.62 + - fc00::27d interfaces: Loopback0: - ipv4: 100.1.0.172/32 - ipv6: 2064:100::ac/128 + ipv4: 100.1.0.160/32 + ipv6: 2064:100::a0/128 Ethernet1: - ipv4: 10.0.1.87/31 - ipv6: fc00::2ae/126 + ipv4: 10.0.1.63/31 + ipv6: fc00::27e/126 bp_interfaces: ipv4: 10.10.246.161/24 ipv6: fc0a::a1/64 - ARISTA161T0: + ARISTA157T0: properties: - common bgp: - asn: 64002 + asn: 64157 peers: 65100: - - 10.0.1.88 - - fc00::2b1 + - 10.0.1.64 + - fc00::281 interfaces: Loopback0: - ipv4: 100.1.0.173/32 - ipv6: 2064:100::ad/128 + ipv4: 100.1.0.161/32 + ipv6: 2064:100::a1/128 Ethernet1: - ipv4: 10.0.1.89/31 - ipv6: fc00::2b2/126 + ipv4: 10.0.1.65/31 + ipv6: fc00::282/126 bp_interfaces: ipv4: 10.10.246.162/24 ipv6: fc0a::a2/64 - 
ARISTA162T0: + ARISTA158T0: properties: - common bgp: - asn: 64002 + asn: 64158 peers: 65100: - - 10.0.1.90 - - fc00::2b5 + - 10.0.1.66 + - fc00::285 interfaces: Loopback0: - ipv4: 100.1.0.174/32 - ipv6: 2064:100::ae/128 + ipv4: 100.1.0.162/32 + ipv6: 2064:100::a2/128 Ethernet1: - ipv4: 10.0.1.91/31 - ipv6: fc00::2b6/126 + ipv4: 10.0.1.67/31 + ipv6: fc00::286/126 bp_interfaces: ipv4: 10.10.246.163/24 ipv6: fc0a::a3/64 - ARISTA163T0: + ARISTA159T0: properties: - common bgp: - asn: 64002 + asn: 64159 peers: 65100: - - 10.0.1.92 - - fc00::2b9 + - 10.0.1.68 + - fc00::289 interfaces: Loopback0: - ipv4: 100.1.0.175/32 - ipv6: 2064:100::af/128 + ipv4: 100.1.0.163/32 + ipv6: 2064:100::a3/128 Ethernet1: - ipv4: 10.0.1.93/31 - ipv6: fc00::2ba/126 + ipv4: 10.0.1.69/31 + ipv6: fc00::28a/126 bp_interfaces: ipv4: 10.10.246.164/24 ipv6: fc0a::a4/64 - ARISTA164T0: + ARISTA160T0: properties: - common bgp: - asn: 64002 + asn: 64160 peers: 65100: - - 10.0.1.94 - - fc00::2bd + - 10.0.1.70 + - fc00::28d interfaces: Loopback0: - ipv4: 100.1.0.176/32 - ipv6: 2064:100::b0/128 + ipv4: 100.1.0.164/32 + ipv6: 2064:100::a4/128 Ethernet1: - ipv4: 10.0.1.95/31 - ipv6: fc00::2be/126 + ipv4: 10.0.1.71/31 + ipv6: fc00::28e/126 bp_interfaces: ipv4: 10.10.246.165/24 ipv6: fc0a::a5/64 - ARISTA165T2: + ARISTA05T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.96 - - fc00::2c1 + - 10.0.1.72 + - fc00::291 interfaces: Loopback0: - ipv4: 100.1.0.177/32 - ipv6: 2064:100::b1/128 + ipv4: 100.1.0.165/32 + ipv6: 2064:100::a5/128 Ethernet1: - ipv4: 10.0.1.97/31 - ipv6: fc00::2c2/126 + ipv4: 10.0.1.73/31 + ipv6: fc00::292/126 bp_interfaces: ipv4: 10.10.246.166/24 ipv6: fc0a::a6/64 - ARISTA166T2: + ARISTA06T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.98 - - fc00::2c5 + - 10.0.1.74 + - fc00::295 interfaces: Loopback0: - ipv4: 100.1.0.178/32 - ipv6: 2064:100::b2/128 + ipv4: 100.1.0.166/32 + ipv6: 2064:100::a6/128 Ethernet1: - ipv4: 10.0.1.99/31 - ipv6: fc00::2c6/126 + ipv4: 
10.0.1.75/31 + ipv6: fc00::296/126 bp_interfaces: ipv4: 10.10.246.167/24 ipv6: fc0a::a7/64 - ARISTA167T0: + ARISTA161T0: properties: - common bgp: - asn: 64002 + asn: 64161 peers: 65100: - - 10.0.1.112 - - fc00::2e1 + - 10.0.1.76 + - fc00::299 interfaces: Loopback0: - ipv4: 100.1.0.185/32 - ipv6: 2064:100::b9/128 + ipv4: 100.1.0.167/32 + ipv6: 2064:100::a7/128 Ethernet1: - ipv4: 10.0.1.113/31 - ipv6: fc00::2e2/126 + ipv4: 10.0.1.77/31 + ipv6: fc00::29a/126 bp_interfaces: ipv4: 10.10.246.168/24 ipv6: fc0a::a8/64 - ARISTA168T0: + ARISTA162T0: properties: - common bgp: - asn: 64002 + asn: 64162 peers: 65100: - - 10.0.1.114 - - fc00::2e5 + - 10.0.1.78 + - fc00::29d interfaces: Loopback0: - ipv4: 100.1.0.186/32 - ipv6: 2064:100::ba/128 + ipv4: 100.1.0.168/32 + ipv6: 2064:100::a8/128 Ethernet1: - ipv4: 10.0.1.115/31 - ipv6: fc00::2e6/126 + ipv4: 10.0.1.79/31 + ipv6: fc00::29e/126 bp_interfaces: ipv4: 10.10.246.169/24 ipv6: fc0a::a9/64 - ARISTA169T0: + ARISTA163T0: properties: - common bgp: - asn: 64002 + asn: 64163 peers: 65100: - - 10.0.1.116 - - fc00::2e9 + - 10.0.1.80 + - fc00::2a1 interfaces: Loopback0: - ipv4: 100.1.0.187/32 - ipv6: 2064:100::bb/128 + ipv4: 100.1.0.169/32 + ipv6: 2064:100::a9/128 Ethernet1: - ipv4: 10.0.1.117/31 - ipv6: fc00::2ea/126 + ipv4: 10.0.1.81/31 + ipv6: fc00::2a2/126 bp_interfaces: ipv4: 10.10.246.170/24 ipv6: fc0a::aa/64 - ARISTA170T0: + ARISTA164T0: properties: - common bgp: - asn: 64002 + asn: 64164 peers: 65100: - - 10.0.1.118 - - fc00::2ed + - 10.0.1.82 + - fc00::2a5 interfaces: Loopback0: - ipv4: 100.1.0.188/32 - ipv6: 2064:100::bc/128 + ipv4: 100.1.0.170/32 + ipv6: 2064:100::aa/128 Ethernet1: - ipv4: 10.0.1.119/31 - ipv6: fc00::2ee/126 + ipv4: 10.0.1.83/31 + ipv6: fc00::2a6/126 bp_interfaces: ipv4: 10.10.246.171/24 ipv6: fc0a::ab/64 - ARISTA171T0: + ARISTA165T0: properties: - common bgp: - asn: 64002 + asn: 64165 peers: 65100: - - 10.0.1.120 - - fc00::2f1 + - 10.0.1.84 + - fc00::2a9 interfaces: Loopback0: - ipv4: 100.1.0.189/32 - 
ipv6: 2064:100::bd/128 + ipv4: 100.1.0.171/32 + ipv6: 2064:100::ab/128 Ethernet1: - ipv4: 10.0.1.121/31 - ipv6: fc00::2f2/126 + ipv4: 10.0.1.85/31 + ipv6: fc00::2aa/126 bp_interfaces: ipv4: 10.10.246.172/24 ipv6: fc0a::ac/64 - ARISTA172T0: + ARISTA166T0: properties: - common bgp: - asn: 64002 + asn: 64166 peers: 65100: - - 10.0.1.122 - - fc00::2f5 + - 10.0.1.86 + - fc00::2ad interfaces: Loopback0: - ipv4: 100.1.0.190/32 - ipv6: 2064:100::be/128 + ipv4: 100.1.0.172/32 + ipv6: 2064:100::ac/128 Ethernet1: - ipv4: 10.0.1.123/31 - ipv6: fc00::2f6/126 + ipv4: 10.0.1.87/31 + ipv6: fc00::2ae/126 bp_interfaces: ipv4: 10.10.246.173/24 ipv6: fc0a::ad/64 - ARISTA173T0: + ARISTA167T0: properties: - common bgp: - asn: 64002 + asn: 64167 peers: 65100: - - 10.0.1.124 - - fc00::2f9 + - 10.0.1.88 + - fc00::2b1 interfaces: Loopback0: - ipv4: 100.1.0.191/32 - ipv6: 2064:100::bf/128 + ipv4: 100.1.0.173/32 + ipv6: 2064:100::ad/128 Ethernet1: - ipv4: 10.0.1.125/31 - ipv6: fc00::2fa/126 + ipv4: 10.0.1.89/31 + ipv6: fc00::2b2/126 bp_interfaces: ipv4: 10.10.246.174/24 ipv6: fc0a::ae/64 - ARISTA174T0: + ARISTA168T0: properties: - common bgp: - asn: 64002 + asn: 64168 peers: 65100: - - 10.0.1.126 - - fc00::2fd + - 10.0.1.90 + - fc00::2b5 interfaces: Loopback0: - ipv4: 100.1.0.192/32 - ipv6: 2064:100::c0/128 + ipv4: 100.1.0.174/32 + ipv6: 2064:100::ae/128 Ethernet1: - ipv4: 10.0.1.127/31 - ipv6: fc00::2fe/126 + ipv4: 10.0.1.91/31 + ipv6: fc00::2b6/126 bp_interfaces: ipv4: 10.10.246.175/24 ipv6: fc0a::af/64 - ARISTA175T2: + ARISTA07T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.128 - - fc00::301 + - 10.0.1.92 + - fc00::2b9 interfaces: Loopback0: - ipv4: 100.1.0.193/32 - ipv6: 2064:100::c1/128 + ipv4: 100.1.0.175/32 + ipv6: 2064:100::af/128 Ethernet1: - ipv4: 10.0.1.129/31 - ipv6: fc00::302/126 + ipv4: 10.0.1.93/31 + ipv6: fc00::2ba/126 bp_interfaces: ipv4: 10.10.246.176/24 ipv6: fc0a::b0/64 - ARISTA176T2: + ARISTA08T2: properties: - common bgp: asn: 65200 peers: 65100: - - 
10.0.1.130 - - fc00::305 + - 10.0.1.94 + - fc00::2bd interfaces: Loopback0: - ipv4: 100.1.0.194/32 - ipv6: 2064:100::c2/128 + ipv4: 100.1.0.176/32 + ipv6: 2064:100::b0/128 Ethernet1: - ipv4: 10.0.1.131/31 - ipv6: fc00::306/126 + ipv4: 10.0.1.95/31 + ipv6: fc00::2be/126 bp_interfaces: ipv4: 10.10.246.177/24 ipv6: fc0a::b1/64 - ARISTA177T0: + ARISTA169T0: properties: - common bgp: - asn: 64002 + asn: 64169 peers: 65100: - - 10.0.1.144 - - fc00::321 + - 10.0.1.96 + - fc00::2c1 interfaces: Loopback0: - ipv4: 100.1.0.201/32 - ipv6: 2064:100::c9/128 + ipv4: 100.1.0.177/32 + ipv6: 2064:100::b1/128 Ethernet1: - ipv4: 10.0.1.145/31 - ipv6: fc00::322/126 + ipv4: 10.0.1.97/31 + ipv6: fc00::2c2/126 bp_interfaces: ipv4: 10.10.246.178/24 ipv6: fc0a::b2/64 - ARISTA178T0: + ARISTA170T0: properties: - common bgp: - asn: 64002 + asn: 64170 peers: 65100: - - 10.0.1.146 - - fc00::325 + - 10.0.1.98 + - fc00::2c5 interfaces: Loopback0: - ipv4: 100.1.0.202/32 - ipv6: 2064:100::ca/128 + ipv4: 100.1.0.178/32 + ipv6: 2064:100::b2/128 Ethernet1: - ipv4: 10.0.1.147/31 - ipv6: fc00::326/126 + ipv4: 10.0.1.99/31 + ipv6: fc00::2c6/126 bp_interfaces: ipv4: 10.10.246.179/24 ipv6: fc0a::b3/64 - ARISTA179T0: + ARISTA171T0: properties: - common bgp: - asn: 64002 + asn: 64171 peers: 65100: - - 10.0.1.148 - - fc00::329 + - 10.0.1.100 + - fc00::2c9 interfaces: Loopback0: - ipv4: 100.1.0.203/32 - ipv6: 2064:100::cb/128 + ipv4: 100.1.0.179/32 + ipv6: 2064:100::b3/128 Ethernet1: - ipv4: 10.0.1.149/31 - ipv6: fc00::32a/126 + ipv4: 10.0.1.101/31 + ipv6: fc00::2ca/126 bp_interfaces: ipv4: 10.10.246.180/24 ipv6: fc0a::b4/64 - ARISTA180T0: + ARISTA172T0: properties: - common bgp: - asn: 64002 + asn: 64172 peers: 65100: - - 10.0.1.150 - - fc00::32d + - 10.0.1.102 + - fc00::2cd interfaces: Loopback0: - ipv4: 100.1.0.204/32 - ipv6: 2064:100::cc/128 + ipv4: 100.1.0.180/32 + ipv6: 2064:100::b4/128 Ethernet1: - ipv4: 10.0.1.151/31 - ipv6: fc00::32e/126 + ipv4: 10.0.1.103/31 + ipv6: fc00::2ce/126 bp_interfaces: ipv4: 
10.10.246.181/24 ipv6: fc0a::b5/64 - ARISTA181T0: + ARISTA173T0: properties: - common bgp: - asn: 64002 + asn: 64173 peers: 65100: - - 10.0.1.152 - - fc00::331 + - 10.0.1.104 + - fc00::2d1 interfaces: Loopback0: - ipv4: 100.1.0.205/32 - ipv6: 2064:100::cd/128 + ipv4: 100.1.0.181/32 + ipv6: 2064:100::b5/128 Ethernet1: - ipv4: 10.0.1.153/31 - ipv6: fc00::332/126 + ipv4: 10.0.1.105/31 + ipv6: fc00::2d2/126 bp_interfaces: ipv4: 10.10.246.182/24 ipv6: fc0a::b6/64 - ARISTA182T0: + ARISTA174T0: properties: - common bgp: - asn: 64002 + asn: 64174 peers: 65100: - - 10.0.1.154 - - fc00::335 + - 10.0.1.106 + - fc00::2d5 interfaces: Loopback0: - ipv4: 100.1.0.206/32 - ipv6: 2064:100::ce/128 + ipv4: 100.1.0.182/32 + ipv6: 2064:100::b6/128 Ethernet1: - ipv4: 10.0.1.155/31 - ipv6: fc00::336/126 + ipv4: 10.0.1.107/31 + ipv6: fc00::2d6/126 bp_interfaces: ipv4: 10.10.246.183/24 ipv6: fc0a::b7/64 - ARISTA183T0: + ARISTA175T0: properties: - common bgp: - asn: 64002 + asn: 64175 peers: 65100: - - 10.0.1.156 - - fc00::339 + - 10.0.1.108 + - fc00::2d9 interfaces: Loopback0: - ipv4: 100.1.0.207/32 - ipv6: 2064:100::cf/128 + ipv4: 100.1.0.183/32 + ipv6: 2064:100::b7/128 Ethernet1: - ipv4: 10.0.1.157/31 - ipv6: fc00::33a/126 + ipv4: 10.0.1.109/31 + ipv6: fc00::2da/126 bp_interfaces: ipv4: 10.10.246.184/24 ipv6: fc0a::b8/64 - ARISTA184T0: + ARISTA176T0: properties: - common bgp: - asn: 64002 + asn: 64176 peers: 65100: - - 10.0.1.158 - - fc00::33d + - 10.0.1.110 + - fc00::2dd interfaces: Loopback0: - ipv4: 100.1.0.208/32 - ipv6: 2064:100::d0/128 + ipv4: 100.1.0.184/32 + ipv6: 2064:100::b8/128 Ethernet1: - ipv4: 10.0.1.159/31 - ipv6: fc00::33e/126 + ipv4: 10.0.1.111/31 + ipv6: fc00::2de/126 bp_interfaces: ipv4: 10.10.246.185/24 ipv6: fc0a::b9/64 - ARISTA185T0: + ARISTA177T0: properties: - common bgp: - asn: 64002 + asn: 64177 peers: 65100: - - 10.0.1.160 - - fc00::341 + - 10.0.1.112 + - fc00::2e1 interfaces: Loopback0: - ipv4: 100.1.0.209/32 - ipv6: 2064:100::d1/128 + ipv4: 100.1.0.185/32 + 
ipv6: 2064:100::b9/128 Ethernet1: - ipv4: 10.0.1.161/31 - ipv6: fc00::342/126 + ipv4: 10.0.1.113/31 + ipv6: fc00::2e2/126 bp_interfaces: ipv4: 10.10.246.186/24 ipv6: fc0a::ba/64 - ARISTA186T0: + ARISTA178T0: properties: - common bgp: - asn: 64002 + asn: 64178 peers: 65100: - - 10.0.1.162 - - fc00::345 + - 10.0.1.114 + - fc00::2e5 interfaces: Loopback0: - ipv4: 100.1.0.210/32 - ipv6: 2064:100::d2/128 + ipv4: 100.1.0.186/32 + ipv6: 2064:100::ba/128 Ethernet1: - ipv4: 10.0.1.163/31 - ipv6: fc00::346/126 + ipv4: 10.0.1.115/31 + ipv6: fc00::2e6/126 bp_interfaces: ipv4: 10.10.246.187/24 ipv6: fc0a::bb/64 - ARISTA187T0: + ARISTA179T0: properties: - common bgp: - asn: 64002 + asn: 64179 peers: 65100: - - 10.0.1.164 - - fc00::349 + - 10.0.1.116 + - fc00::2e9 interfaces: Loopback0: - ipv4: 100.1.0.211/32 - ipv6: 2064:100::d3/128 + ipv4: 100.1.0.187/32 + ipv6: 2064:100::bb/128 Ethernet1: - ipv4: 10.0.1.165/31 - ipv6: fc00::34a/126 + ipv4: 10.0.1.117/31 + ipv6: fc00::2ea/126 bp_interfaces: ipv4: 10.10.246.188/24 ipv6: fc0a::bc/64 - ARISTA188T0: + ARISTA180T0: properties: - common bgp: - asn: 64002 + asn: 64180 peers: 65100: - - 10.0.1.166 - - fc00::34d + - 10.0.1.118 + - fc00::2ed interfaces: Loopback0: - ipv4: 100.1.0.212/32 - ipv6: 2064:100::d4/128 + ipv4: 100.1.0.188/32 + ipv6: 2064:100::bc/128 Ethernet1: - ipv4: 10.0.1.167/31 - ipv6: fc00::34e/126 + ipv4: 10.0.1.119/31 + ipv6: fc00::2ee/126 bp_interfaces: ipv4: 10.10.246.189/24 ipv6: fc0a::bd/64 - ARISTA189T0: + ARISTA181T0: properties: - common bgp: - asn: 64002 + asn: 64181 peers: 65100: - - 10.0.1.168 - - fc00::351 + - 10.0.1.120 + - fc00::2f1 interfaces: Loopback0: - ipv4: 100.1.0.213/32 - ipv6: 2064:100::d5/128 + ipv4: 100.1.0.189/32 + ipv6: 2064:100::bd/128 Ethernet1: - ipv4: 10.0.1.169/31 - ipv6: fc00::352/126 + ipv4: 10.0.1.121/31 + ipv6: fc00::2f2/126 bp_interfaces: ipv4: 10.10.246.190/24 ipv6: fc0a::be/64 - ARISTA190T0: + ARISTA182T0: properties: - common bgp: - asn: 64002 + asn: 64182 peers: 65100: - - 
10.0.1.170 - - fc00::355 + - 10.0.1.122 + - fc00::2f5 interfaces: Loopback0: - ipv4: 100.1.0.214/32 - ipv6: 2064:100::d6/128 + ipv4: 100.1.0.190/32 + ipv6: 2064:100::be/128 Ethernet1: - ipv4: 10.0.1.171/31 - ipv6: fc00::356/126 + ipv4: 10.0.1.123/31 + ipv6: fc00::2f6/126 bp_interfaces: ipv4: 10.10.246.191/24 ipv6: fc0a::bf/64 - ARISTA191T0: + ARISTA183T0: properties: - common bgp: - asn: 64002 + asn: 64183 peers: 65100: - - 10.0.1.172 - - fc00::359 + - 10.0.1.124 + - fc00::2f9 interfaces: Loopback0: - ipv4: 100.1.0.215/32 - ipv6: 2064:100::d7/128 + ipv4: 100.1.0.191/32 + ipv6: 2064:100::bf/128 Ethernet1: - ipv4: 10.0.1.173/31 - ipv6: fc00::35a/126 + ipv4: 10.0.1.125/31 + ipv6: fc00::2fa/126 bp_interfaces: ipv4: 10.10.246.192/24 ipv6: fc0a::c0/64 - ARISTA192T0: + ARISTA184T0: properties: - common bgp: - asn: 64002 + asn: 64184 peers: 65100: - - 10.0.1.174 - - fc00::35d + - 10.0.1.126 + - fc00::2fd interfaces: Loopback0: - ipv4: 100.1.0.216/32 - ipv6: 2064:100::d8/128 + ipv4: 100.1.0.192/32 + ipv6: 2064:100::c0/128 Ethernet1: - ipv4: 10.0.1.175/31 - ipv6: fc00::35e/126 + ipv4: 10.0.1.127/31 + ipv6: fc00::2fe/126 bp_interfaces: ipv4: 10.10.246.193/24 ipv6: fc0a::c1/64 - ARISTA193T0: + ARISTA185T0: properties: - common bgp: - asn: 64002 + asn: 64185 peers: 65100: - - 10.0.1.176 - - fc00::361 + - 10.0.1.128 + - fc00::301 interfaces: Loopback0: - ipv4: 100.1.0.217/32 - ipv6: 2064:100::d9/128 + ipv4: 100.1.0.193/32 + ipv6: 2064:100::c1/128 Ethernet1: - ipv4: 10.0.1.177/31 - ipv6: fc00::362/126 + ipv4: 10.0.1.129/31 + ipv6: fc00::302/126 bp_interfaces: ipv4: 10.10.246.194/24 ipv6: fc0a::c2/64 - ARISTA194T0: + ARISTA186T0: properties: - common bgp: - asn: 64002 + asn: 64186 peers: 65100: - - 10.0.1.178 - - fc00::365 + - 10.0.1.130 + - fc00::305 interfaces: Loopback0: - ipv4: 100.1.0.218/32 - ipv6: 2064:100::da/128 + ipv4: 100.1.0.194/32 + ipv6: 2064:100::c2/128 Ethernet1: - ipv4: 10.0.1.179/31 - ipv6: fc00::366/126 + ipv4: 10.0.1.131/31 + ipv6: fc00::306/126 bp_interfaces: 
ipv4: 10.10.246.195/24 ipv6: fc0a::c3/64 - ARISTA195T0: + ARISTA187T0: properties: - common bgp: - asn: 64002 + asn: 64187 peers: 65100: - - 10.0.1.180 - - fc00::369 + - 10.0.1.132 + - fc00::309 interfaces: Loopback0: - ipv4: 100.1.0.219/32 - ipv6: 2064:100::db/128 + ipv4: 100.1.0.195/32 + ipv6: 2064:100::c3/128 Ethernet1: - ipv4: 10.0.1.181/31 - ipv6: fc00::36a/126 + ipv4: 10.0.1.133/31 + ipv6: fc00::30a/126 bp_interfaces: ipv4: 10.10.246.196/24 ipv6: fc0a::c4/64 - ARISTA196T0: + ARISTA188T0: properties: - common bgp: - asn: 64002 + asn: 64188 peers: 65100: - - 10.0.1.182 - - fc00::36d + - 10.0.1.134 + - fc00::30d interfaces: Loopback0: - ipv4: 100.1.0.220/32 - ipv6: 2064:100::dc/128 + ipv4: 100.1.0.196/32 + ipv6: 2064:100::c4/128 Ethernet1: - ipv4: 10.0.1.183/31 - ipv6: fc00::36e/126 + ipv4: 10.0.1.135/31 + ipv6: fc00::30e/126 bp_interfaces: ipv4: 10.10.246.197/24 ipv6: fc0a::c5/64 - ARISTA197T0: + ARISTA189T0: properties: - common bgp: - asn: 64002 + asn: 64189 peers: 65100: - - 10.0.1.184 - - fc00::371 + - 10.0.1.136 + - fc00::311 interfaces: Loopback0: - ipv4: 100.1.0.221/32 - ipv6: 2064:100::dd/128 + ipv4: 100.1.0.197/32 + ipv6: 2064:100::c5/128 Ethernet1: - ipv4: 10.0.1.185/31 - ipv6: fc00::372/126 + ipv4: 10.0.1.137/31 + ipv6: fc00::312/126 bp_interfaces: ipv4: 10.10.246.198/24 ipv6: fc0a::c6/64 - ARISTA198T0: + ARISTA190T0: properties: - common bgp: - asn: 64002 + asn: 64190 peers: 65100: - - 10.0.1.186 - - fc00::375 + - 10.0.1.138 + - fc00::315 interfaces: Loopback0: - ipv4: 100.1.0.222/32 - ipv6: 2064:100::de/128 + ipv4: 100.1.0.198/32 + ipv6: 2064:100::c6/128 Ethernet1: - ipv4: 10.0.1.187/31 - ipv6: fc00::376/126 + ipv4: 10.0.1.139/31 + ipv6: fc00::316/126 bp_interfaces: ipv4: 10.10.246.199/24 ipv6: fc0a::c7/64 - ARISTA199T0: + ARISTA191T0: properties: - common bgp: - asn: 64002 + asn: 64191 peers: 65100: - - 10.0.1.188 - - fc00::379 + - 10.0.1.140 + - fc00::319 interfaces: Loopback0: - ipv4: 100.1.0.223/32 - ipv6: 2064:100::df/128 + ipv4: 
100.1.0.199/32 + ipv6: 2064:100::c7/128 Ethernet1: - ipv4: 10.0.1.189/31 - ipv6: fc00::37a/126 + ipv4: 10.0.1.141/31 + ipv6: fc00::31a/126 bp_interfaces: ipv4: 10.10.246.200/24 ipv6: fc0a::c8/64 - ARISTA200T0: + ARISTA192T0: properties: - common bgp: - asn: 64002 + asn: 64192 peers: 65100: - - 10.0.1.190 - - fc00::37d + - 10.0.1.142 + - fc00::31d interfaces: Loopback0: - ipv4: 100.1.0.224/32 - ipv6: 2064:100::e0/128 + ipv4: 100.1.0.200/32 + ipv6: 2064:100::c8/128 Ethernet1: - ipv4: 10.0.1.191/31 - ipv6: fc00::37e/126 + ipv4: 10.0.1.143/31 + ipv6: fc00::31e/126 bp_interfaces: ipv4: 10.10.246.201/24 ipv6: fc0a::c9/64 - ARISTA201T0: + ARISTA193T0: properties: - common bgp: - asn: 64002 + asn: 64193 peers: 65100: - - 10.0.1.192 - - fc00::381 + - 10.0.1.144 + - fc00::321 interfaces: Loopback0: - ipv4: 100.1.0.225/32 - ipv6: 2064:100::e1/128 + ipv4: 100.1.0.201/32 + ipv6: 2064:100::c9/128 Ethernet1: - ipv4: 10.0.1.193/31 - ipv6: fc00::382/126 + ipv4: 10.0.1.145/31 + ipv6: fc00::322/126 bp_interfaces: ipv4: 10.10.246.202/24 ipv6: fc0a::ca/64 - ARISTA202T0: + ARISTA194T0: properties: - common bgp: - asn: 64002 + asn: 64194 peers: 65100: - - 10.0.1.194 - - fc00::385 + - 10.0.1.146 + - fc00::325 interfaces: Loopback0: - ipv4: 100.1.0.226/32 - ipv6: 2064:100::e2/128 + ipv4: 100.1.0.202/32 + ipv6: 2064:100::ca/128 Ethernet1: - ipv4: 10.0.1.195/31 - ipv6: fc00::386/126 + ipv4: 10.0.1.147/31 + ipv6: fc00::326/126 bp_interfaces: ipv4: 10.10.246.203/24 ipv6: fc0a::cb/64 - ARISTA203T0: + ARISTA195T0: properties: - common bgp: - asn: 64002 + asn: 64195 peers: 65100: - - 10.0.1.196 - - fc00::389 + - 10.0.1.148 + - fc00::329 interfaces: Loopback0: - ipv4: 100.1.0.227/32 - ipv6: 2064:100::e3/128 + ipv4: 100.1.0.203/32 + ipv6: 2064:100::cb/128 Ethernet1: - ipv4: 10.0.1.197/31 - ipv6: fc00::38a/126 + ipv4: 10.0.1.149/31 + ipv6: fc00::32a/126 bp_interfaces: ipv4: 10.10.246.204/24 ipv6: fc0a::cc/64 - ARISTA204T0: + ARISTA196T0: properties: - common bgp: - asn: 64002 + asn: 64196 peers: 
65100: - - 10.0.1.198 - - fc00::38d + - 10.0.1.150 + - fc00::32d interfaces: Loopback0: - ipv4: 100.1.0.228/32 - ipv6: 2064:100::e4/128 + ipv4: 100.1.0.204/32 + ipv6: 2064:100::cc/128 Ethernet1: - ipv4: 10.0.1.199/31 - ipv6: fc00::38e/126 + ipv4: 10.0.1.151/31 + ipv6: fc00::32e/126 bp_interfaces: ipv4: 10.10.246.205/24 ipv6: fc0a::cd/64 - ARISTA205T0: + ARISTA197T0: properties: - common bgp: - asn: 64002 + asn: 64197 peers: 65100: - - 10.0.1.200 - - fc00::391 + - 10.0.1.152 + - fc00::331 interfaces: Loopback0: - ipv4: 100.1.0.229/32 - ipv6: 2064:100::e5/128 - Ethernet1: - ipv4: 10.0.1.201/31 - ipv6: fc00::392/126 + ipv4: 100.1.0.205/32 + ipv6: 2064:100::cd/128 + Ethernet1: + ipv4: 10.0.1.153/31 + ipv6: fc00::332/126 bp_interfaces: ipv4: 10.10.246.206/24 ipv6: fc0a::ce/64 - ARISTA206T0: + ARISTA198T0: properties: - common bgp: - asn: 64002 + asn: 64198 peers: 65100: - - 10.0.1.202 - - fc00::395 + - 10.0.1.154 + - fc00::335 interfaces: Loopback0: - ipv4: 100.1.0.230/32 - ipv6: 2064:100::e6/128 + ipv4: 100.1.0.206/32 + ipv6: 2064:100::ce/128 Ethernet1: - ipv4: 10.0.1.203/31 - ipv6: fc00::396/126 + ipv4: 10.0.1.155/31 + ipv6: fc00::336/126 bp_interfaces: ipv4: 10.10.246.207/24 ipv6: fc0a::cf/64 - ARISTA207T0: + ARISTA199T0: properties: - common bgp: - asn: 64002 + asn: 64199 peers: 65100: - - 10.0.1.204 - - fc00::399 + - 10.0.1.156 + - fc00::339 interfaces: Loopback0: - ipv4: 100.1.0.231/32 - ipv6: 2064:100::e7/128 + ipv4: 100.1.0.207/32 + ipv6: 2064:100::cf/128 Ethernet1: - ipv4: 10.0.1.205/31 - ipv6: fc00::39a/126 + ipv4: 10.0.1.157/31 + ipv6: fc00::33a/126 bp_interfaces: ipv4: 10.10.246.208/24 ipv6: fc0a::d0/64 - ARISTA208T0: + ARISTA200T0: properties: - common bgp: - asn: 64002 + asn: 64200 peers: 65100: - - 10.0.1.206 - - fc00::39d + - 10.0.1.158 + - fc00::33d interfaces: Loopback0: - ipv4: 100.1.0.232/32 - ipv6: 2064:100::e8/128 + ipv4: 100.1.0.208/32 + ipv6: 2064:100::d0/128 Ethernet1: - ipv4: 10.0.1.207/31 - ipv6: fc00::39e/126 + ipv4: 10.0.1.159/31 + ipv6: 
fc00::33e/126 bp_interfaces: ipv4: 10.10.246.209/24 ipv6: fc0a::d1/64 - ARISTA209T0: + ARISTA201T0: properties: - common bgp: - asn: 64002 + asn: 64201 peers: 65100: - - 10.0.1.208 - - fc00::3a1 + - 10.0.1.160 + - fc00::341 interfaces: Loopback0: - ipv4: 100.1.0.233/32 - ipv6: 2064:100::e9/128 + ipv4: 100.1.0.209/32 + ipv6: 2064:100::d1/128 Ethernet1: - ipv4: 10.0.1.209/31 - ipv6: fc00::3a2/126 + ipv4: 10.0.1.161/31 + ipv6: fc00::342/126 bp_interfaces: ipv4: 10.10.246.210/24 ipv6: fc0a::d2/64 - ARISTA210T0: + ARISTA202T0: properties: - common bgp: - asn: 64002 + asn: 64202 peers: 65100: - - 10.0.1.210 - - fc00::3a5 + - 10.0.1.162 + - fc00::345 interfaces: Loopback0: - ipv4: 100.1.0.234/32 - ipv6: 2064:100::ea/128 + ipv4: 100.1.0.210/32 + ipv6: 2064:100::d2/128 Ethernet1: - ipv4: 10.0.1.211/31 - ipv6: fc00::3a6/126 + ipv4: 10.0.1.163/31 + ipv6: fc00::346/126 bp_interfaces: ipv4: 10.10.246.211/24 ipv6: fc0a::d3/64 - ARISTA211T0: + ARISTA203T0: properties: - common bgp: - asn: 64002 + asn: 64203 peers: 65100: - - 10.0.1.212 - - fc00::3a9 + - 10.0.1.164 + - fc00::349 interfaces: Loopback0: - ipv4: 100.1.0.235/32 - ipv6: 2064:100::eb/128 + ipv4: 100.1.0.211/32 + ipv6: 2064:100::d3/128 Ethernet1: - ipv4: 10.0.1.213/31 - ipv6: fc00::3aa/126 + ipv4: 10.0.1.165/31 + ipv6: fc00::34a/126 bp_interfaces: ipv4: 10.10.246.212/24 ipv6: fc0a::d4/64 - ARISTA212T0: + ARISTA204T0: properties: - common bgp: - asn: 64002 + asn: 64204 peers: 65100: - - 10.0.1.214 - - fc00::3ad + - 10.0.1.166 + - fc00::34d interfaces: Loopback0: - ipv4: 100.1.0.236/32 - ipv6: 2064:100::ec/128 + ipv4: 100.1.0.212/32 + ipv6: 2064:100::d4/128 Ethernet1: - ipv4: 10.0.1.215/31 - ipv6: fc00::3ae/126 + ipv4: 10.0.1.167/31 + ipv6: fc00::34e/126 bp_interfaces: ipv4: 10.10.246.213/24 ipv6: fc0a::d5/64 - ARISTA213T0: + ARISTA205T0: properties: - common bgp: - asn: 64002 + asn: 64205 peers: 65100: - - 10.0.1.216 - - fc00::3b1 + - 10.0.1.168 + - fc00::351 interfaces: Loopback0: - ipv4: 100.1.0.237/32 - ipv6: 
2064:100::ed/128 + ipv4: 100.1.0.213/32 + ipv6: 2064:100::d5/128 Ethernet1: - ipv4: 10.0.1.217/31 - ipv6: fc00::3b2/126 + ipv4: 10.0.1.169/31 + ipv6: fc00::352/126 bp_interfaces: ipv4: 10.10.246.214/24 ipv6: fc0a::d6/64 - ARISTA214T0: + ARISTA206T0: properties: - common bgp: - asn: 64002 + asn: 64206 peers: 65100: - - 10.0.1.218 - - fc00::3b5 + - 10.0.1.170 + - fc00::355 interfaces: Loopback0: - ipv4: 100.1.0.238/32 - ipv6: 2064:100::ee/128 + ipv4: 100.1.0.214/32 + ipv6: 2064:100::d6/128 Ethernet1: - ipv4: 10.0.1.219/31 - ipv6: fc00::3b6/126 + ipv4: 10.0.1.171/31 + ipv6: fc00::356/126 bp_interfaces: ipv4: 10.10.246.215/24 ipv6: fc0a::d7/64 - ARISTA215T0: + ARISTA207T0: properties: - common bgp: - asn: 64002 + asn: 64207 peers: 65100: - - 10.0.1.220 - - fc00::3b9 + - 10.0.1.172 + - fc00::359 interfaces: Loopback0: - ipv4: 100.1.0.239/32 - ipv6: 2064:100::ef/128 + ipv4: 100.1.0.215/32 + ipv6: 2064:100::d7/128 Ethernet1: - ipv4: 10.0.1.221/31 - ipv6: fc00::3ba/126 + ipv4: 10.0.1.173/31 + ipv6: fc00::35a/126 bp_interfaces: ipv4: 10.10.246.216/24 ipv6: fc0a::d8/64 - ARISTA216T0: + ARISTA208T0: properties: - common bgp: - asn: 64002 + asn: 64208 peers: 65100: - - 10.0.1.222 - - fc00::3bd + - 10.0.1.174 + - fc00::35d interfaces: Loopback0: - ipv4: 100.1.0.240/32 - ipv6: 2064:100::f0/128 + ipv4: 100.1.0.216/32 + ipv6: 2064:100::d8/128 Ethernet1: - ipv4: 10.0.1.223/31 - ipv6: fc00::3be/126 + ipv4: 10.0.1.175/31 + ipv6: fc00::35e/126 bp_interfaces: ipv4: 10.10.246.217/24 ipv6: fc0a::d9/64 - ARISTA217T0: + ARISTA209T0: properties: - common bgp: - asn: 64002 + asn: 64209 peers: 65100: - - 10.0.1.224 - - fc00::3c1 + - 10.0.1.176 + - fc00::361 interfaces: Loopback0: - ipv4: 100.1.0.241/32 - ipv6: 2064:100::f1/128 + ipv4: 100.1.0.217/32 + ipv6: 2064:100::d9/128 Ethernet1: - ipv4: 10.0.1.225/31 - ipv6: fc00::3c2/126 + ipv4: 10.0.1.177/31 + ipv6: fc00::362/126 bp_interfaces: ipv4: 10.10.246.218/24 ipv6: fc0a::da/64 - ARISTA218T0: + ARISTA210T0: properties: - common bgp: - asn: 
64002 + asn: 64210 peers: 65100: - - 10.0.1.226 - - fc00::3c5 + - 10.0.1.178 + - fc00::365 interfaces: Loopback0: - ipv4: 100.1.0.242/32 - ipv6: 2064:100::f2/128 + ipv4: 100.1.0.218/32 + ipv6: 2064:100::da/128 Ethernet1: - ipv4: 10.0.1.227/31 - ipv6: fc00::3c6/126 + ipv4: 10.0.1.179/31 + ipv6: fc00::366/126 bp_interfaces: ipv4: 10.10.246.219/24 ipv6: fc0a::db/64 - ARISTA219T0: + ARISTA211T0: properties: - common bgp: - asn: 64002 + asn: 64211 peers: 65100: - - 10.0.1.228 - - fc00::3c9 + - 10.0.1.180 + - fc00::369 interfaces: Loopback0: - ipv4: 100.1.0.243/32 - ipv6: 2064:100::f3/128 + ipv4: 100.1.0.219/32 + ipv6: 2064:100::db/128 Ethernet1: - ipv4: 10.0.1.229/31 - ipv6: fc00::3ca/126 + ipv4: 10.0.1.181/31 + ipv6: fc00::36a/126 bp_interfaces: ipv4: 10.10.246.220/24 ipv6: fc0a::dc/64 - ARISTA220T0: + ARISTA212T0: properties: - common bgp: - asn: 64002 + asn: 64212 peers: 65100: - - 10.0.1.230 - - fc00::3cd + - 10.0.1.182 + - fc00::36d interfaces: Loopback0: - ipv4: 100.1.0.244/32 - ipv6: 2064:100::f4/128 + ipv4: 100.1.0.220/32 + ipv6: 2064:100::dc/128 Ethernet1: - ipv4: 10.0.1.231/31 - ipv6: fc00::3ce/126 + ipv4: 10.0.1.183/31 + ipv6: fc00::36e/126 bp_interfaces: ipv4: 10.10.246.221/24 ipv6: fc0a::dd/64 - ARISTA221T0: + ARISTA213T0: properties: - common bgp: - asn: 64002 + asn: 64213 peers: 65100: - - 10.0.1.232 - - fc00::3d1 + - 10.0.1.184 + - fc00::371 interfaces: Loopback0: - ipv4: 100.1.0.245/32 - ipv6: 2064:100::f5/128 + ipv4: 100.1.0.221/32 + ipv6: 2064:100::dd/128 Ethernet1: - ipv4: 10.0.1.233/31 - ipv6: fc00::3d2/126 + ipv4: 10.0.1.185/31 + ipv6: fc00::372/126 bp_interfaces: ipv4: 10.10.246.222/24 ipv6: fc0a::de/64 - ARISTA222T0: + ARISTA214T0: properties: - common bgp: - asn: 64002 + asn: 64214 peers: 65100: - - 10.0.1.234 - - fc00::3d5 + - 10.0.1.186 + - fc00::375 interfaces: Loopback0: - ipv4: 100.1.0.246/32 - ipv6: 2064:100::f6/128 + ipv4: 100.1.0.222/32 + ipv6: 2064:100::de/128 Ethernet1: - ipv4: 10.0.1.235/31 - ipv6: fc00::3d6/126 + ipv4: 10.0.1.187/31 
+ ipv6: fc00::376/126 bp_interfaces: ipv4: 10.10.246.223/24 ipv6: fc0a::df/64 - ARISTA223T0: + ARISTA215T0: properties: - common bgp: - asn: 64002 + asn: 64215 peers: 65100: - - 10.0.1.236 - - fc00::3d9 + - 10.0.1.188 + - fc00::379 interfaces: Loopback0: - ipv4: 100.1.0.247/32 - ipv6: 2064:100::f7/128 + ipv4: 100.1.0.223/32 + ipv6: 2064:100::df/128 Ethernet1: - ipv4: 10.0.1.237/31 - ipv6: fc00::3da/126 + ipv4: 10.0.1.189/31 + ipv6: fc00::37a/126 bp_interfaces: ipv4: 10.10.246.224/24 ipv6: fc0a::e0/64 - ARISTA224T0: + ARISTA216T0: properties: - common bgp: - asn: 64002 + asn: 64216 peers: 65100: - - 10.0.1.238 - - fc00::3dd + - 10.0.1.190 + - fc00::37d interfaces: Loopback0: - ipv4: 100.1.0.248/32 - ipv6: 2064:100::f8/128 + ipv4: 100.1.0.224/32 + ipv6: 2064:100::e0/128 Ethernet1: - ipv4: 10.0.1.239/31 - ipv6: fc00::3de/126 + ipv4: 10.0.1.191/31 + ipv6: fc00::37e/126 bp_interfaces: ipv4: 10.10.246.225/24 ipv6: fc0a::e1/64 - ARISTA225T0: + ARISTA217T0: properties: - common bgp: - asn: 64002 + asn: 64217 peers: 65100: - - 10.0.1.240 - - fc00::3e1 + - 10.0.1.192 + - fc00::381 interfaces: Loopback0: - ipv4: 100.1.0.249/32 - ipv6: 2064:100::f9/128 + ipv4: 100.1.0.225/32 + ipv6: 2064:100::e1/128 Ethernet1: - ipv4: 10.0.1.241/31 - ipv6: fc00::3e2/126 + ipv4: 10.0.1.193/31 + ipv6: fc00::382/126 bp_interfaces: ipv4: 10.10.246.226/24 ipv6: fc0a::e2/64 - ARISTA226T0: + ARISTA218T0: properties: - common bgp: - asn: 64002 + asn: 64218 peers: 65100: - - 10.0.1.242 - - fc00::3e5 + - 10.0.1.194 + - fc00::385 interfaces: Loopback0: - ipv4: 100.1.0.250/32 - ipv6: 2064:100::fa/128 + ipv4: 100.1.0.226/32 + ipv6: 2064:100::e2/128 Ethernet1: - ipv4: 10.0.1.243/31 - ipv6: fc00::3e6/126 + ipv4: 10.0.1.195/31 + ipv6: fc00::386/126 bp_interfaces: ipv4: 10.10.246.227/24 ipv6: fc0a::e3/64 - ARISTA227T0: + ARISTA219T0: properties: - common bgp: - asn: 64002 + asn: 64219 peers: 65100: - - 10.0.1.244 - - fc00::3e9 + - 10.0.1.196 + - fc00::389 interfaces: Loopback0: - ipv4: 100.1.0.251/32 - ipv6: 
2064:100::fb/128 + ipv4: 100.1.0.227/32 + ipv6: 2064:100::e3/128 Ethernet1: - ipv4: 10.0.1.245/31 - ipv6: fc00::3ea/126 + ipv4: 10.0.1.197/31 + ipv6: fc00::38a/126 bp_interfaces: ipv4: 10.10.246.228/24 ipv6: fc0a::e4/64 - ARISTA228T0: + ARISTA220T0: properties: - common bgp: - asn: 64002 + asn: 64220 peers: 65100: - - 10.0.1.246 - - fc00::3ed + - 10.0.1.198 + - fc00::38d interfaces: Loopback0: - ipv4: 100.1.0.252/32 - ipv6: 2064:100::fc/128 + ipv4: 100.1.0.228/32 + ipv6: 2064:100::e4/128 Ethernet1: - ipv4: 10.0.1.247/31 - ipv6: fc00::3ee/126 + ipv4: 10.0.1.199/31 + ipv6: fc00::38e/126 bp_interfaces: ipv4: 10.10.246.229/24 ipv6: fc0a::e5/64 - ARISTA229T0: + ARISTA221T0: properties: - common bgp: - asn: 64002 + asn: 64221 peers: 65100: - - 10.0.1.248 - - fc00::3f1 + - 10.0.1.200 + - fc00::391 interfaces: Loopback0: - ipv4: 100.1.0.253/32 - ipv6: 2064:100::fd/128 + ipv4: 100.1.0.229/32 + ipv6: 2064:100::e5/128 Ethernet1: - ipv4: 10.0.1.249/31 - ipv6: fc00::3f2/126 + ipv4: 10.0.1.201/31 + ipv6: fc00::392/126 bp_interfaces: ipv4: 10.10.246.230/24 ipv6: fc0a::e6/64 - ARISTA230T0: + ARISTA222T0: properties: - common bgp: - asn: 64002 + asn: 64222 peers: 65100: - - 10.0.1.250 - - fc00::3f5 + - 10.0.1.202 + - fc00::395 interfaces: Loopback0: - ipv4: 100.1.0.254/32 - ipv6: 2064:100::fe/128 + ipv4: 100.1.0.230/32 + ipv6: 2064:100::e6/128 Ethernet1: - ipv4: 10.0.1.251/31 - ipv6: fc00::3f6/126 + ipv4: 10.0.1.203/31 + ipv6: fc00::396/126 bp_interfaces: ipv4: 10.10.246.231/24 ipv6: fc0a::e7/64 - ARISTA231T0: + ARISTA223T0: properties: - common bgp: - asn: 64002 + asn: 64223 peers: 65100: - - 10.0.1.252 - - fc00::3f9 + - 10.0.1.204 + - fc00::399 interfaces: Loopback0: - ipv4: 100.1.0.255/32 - ipv6: 2064:100::ff/128 + ipv4: 100.1.0.231/32 + ipv6: 2064:100::e7/128 Ethernet1: - ipv4: 10.0.1.253/31 - ipv6: fc00::3fa/126 + ipv4: 10.0.1.205/31 + ipv6: fc00::39a/126 bp_interfaces: ipv4: 10.10.246.232/24 ipv6: fc0a::e8/64 - ARISTA232T0: + ARISTA224T0: properties: - common bgp: - asn: 
64002 + asn: 64224 peers: 65100: - - 10.0.1.254 - - fc00::3fd + - 10.0.1.206 + - fc00::39d interfaces: Loopback0: - ipv4: 100.1.1.0/32 - ipv6: 2064:100::100/128 + ipv4: 100.1.0.232/32 + ipv6: 2064:100::e8/128 Ethernet1: - ipv4: 10.0.1.255/31 - ipv6: fc00::3fe/126 + ipv4: 10.0.1.207/31 + ipv6: fc00::39e/126 bp_interfaces: ipv4: 10.10.246.233/24 ipv6: fc0a::e9/64 diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 51f627dfc5c..1256f817404 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,11 +35,13 @@ stages: - job: validate_test_cases displayName: "Validate Test Cases" - timeoutInMinutes: 25 + timeoutInMinutes: 30 continueOnError: false pool: sonic-common steps: - template: .azure-pipelines/pytest-collect-only.yml + parameters: + MGMT_BRANCH: "" - stage: Test dependsOn: Pre_test diff --git a/docs/testplan/Smartswitch-test-plan.md b/docs/testplan/Smartswitch-test-plan.md index 3083f9f8a77..3e6d55028d6 100644 --- a/docs/testplan/Smartswitch-test-plan.md +++ b/docs/testplan/Smartswitch-test-plan.md @@ -2,6 +2,8 @@ - [Introduction](#introduction) - [Scope](#scope) +- [Testbed and Version](#testbed-and-version) +- [Topology](#topology) - [Definitions and Abbreviations](#definitions-and-abbreviations) - [Objectives of CLI Test Cases](#objectives-of-cli-test-cases) - [CLI Test Cases](#cli-test-cases) @@ -9,14 +11,23 @@ - [1.2 Check platform voltage](#12-check-platform-voltage) - [1.3 Check platform temperature](#13-check-platform-temperature) - [1.4 Check DPU console](#14-check-DPU-console) - - [1.5 Check midplane ip address between NPU and DPU](#15-check-midplane-ip-address-between-npu-and-dpu) + - [1.5 Check midplane ip address between Switch and DPU](#15-check-midplane-ip-address-between-switch-and-dpu) - [1.6 Check DPU shutdown and power up individually](#16-check-DPU-shutdown-and-power-up-individually) - - [1.7 Check removal of pcie link between NPU and DPU](#17-check-removal-of-pcie-link-between-npu-and-dpu) - - [1.8 Check the NTP date and 
timezone between DPU and NPU](#18-check-the-ntp-date-and-timezone-between-dpu-and-npu) + - [1.7 Check pcie link status between Switch and DPU](#17-check-pcie-link-status-between-switch-and-dpu) + - [1.8 Check the NTP date and timezone between DPU and Switch](#18-check-the-ntp-date-and-timezone-between-dpu-and-switch) - [1.9 Check the State of DPUs](#19-check-the-state-of-dpus) - [1.10 Check the Health of DPUs](#110-check-the-health-of-dpus) - [1.11 Check reboot cause history](#111-check-reboot-cause-history) - - [1.12 Check the DPU state after OS reboot](#112-check-the-dpu-state-after-os-reboot) + - [1.12 Check the DPU state after Switch reboot](#112-check-the-dpu-state-after-switch-reboot) + - [1.13 Check memory on DPU](#113-check-memory-on-dpu) + - [1.14 Check DPU status and pcie Link after memory exhaustion on Switch](#114-check-dpu-status-and-pcie-link-after-memory-exhaustion-on-switch) + - [1.15 Check DPU status and pcie Link after memory exhaustion on DPU](#115-check-dpu-status-and-pcie-link-after-memory-exhaustion-on-dpu) + - [1.16 Check DPU status and pcie Link after restart pmon on Switch](#116-check-dpu-status-and-pcie-link-after-restart-pmon-on-switch) + - [1.17 Check DPU status and pcie Link after reload of configuration on Switch](#117-check-dpu-status-and-pcie-link-after-reload-of-configuration-on-switch) + - [1.18 Check DPU status and pcie Link after kernel panic on Switch](#118-check-dpu-status-and-pcie-link-after-kernel-panic-on-switch) + - [1.19 Check DPU status and pcie Link after kernel panic on DPU](#119-check-dpu-status-and-pcie-link-after-kernel-panic-on-dpu) + - [1.20 Check DPU status and pcie Link after switch power cycle](#120-check-dpu-status-and-pcie-link-after-switch-power-cycle) + - [1.21 Check DPU status and pcie Link after SW trip by temperature trigger](#121-check-dpu-status-and-pcie-link-after-sw-trip-by-temperature-trigger) - [Objectives of API Test Cases](#objectives-of-api-test-cases) - [API Test Cases](#api-test-cases) - [1.1 
Check SmartSwitch specific ChassisClass APIs](#11-check-smartswitch-specific-chassisclass-apis) @@ -36,6 +47,19 @@ Purpose of the test is to verify smartswich platform related functionalities/fea For every test cases, all DPUs need to be powered on unless specified in any of the case. General convention of DPU0, DPU1, DPU2 and DPUX has been followed to represent DPU modules and the number of DPU modules can vary. +## Testbed and Version + +The test runs on the os versions 202411 and above. +Add a check to confirm that the test environment uses version 202411 or later; if the version is earlier, skip the test. +After the above check, it needs to check DPUs in the testbed are in dark mode or not. +If it is in dark mode, then power up all the DPUs. +Dark mode is one in which all the DPUs admin_status are down. + +## Topology + +New topology called smartswitch-t1 has been added for running smartswitch cases. +T1 cases also runs on the new topology. + ## Definitions and Abbreviations | **Term** | **Meaning** | @@ -54,14 +78,23 @@ General convention of DPU0, DPU1, DPU2 and DPUX has been followed to represent D | 1.2 | Check platform voltage | To verify the Voltage sensor values and and functionality of alarm by changing the threshold values | | | 1.3 | Check platform temperature | To Verify the Temperature sensor values and functionality of alarm by changing the threshold values | | | 1.4 | Check DPU console | To Verify console access for all DPUs | | -| 1.5 | Check midplane ip address between NPU and DPU | To Verify PCIe interface created between NPU and DPU according to bus number | | +| 1.5 | Check midplane ip address between Switch and DPU | To Verify PCIe interface created between Switch and DPU according to bus number | | | 1.6 | Check DPU shutdown and power up individually | To Verify DPU shutdown and DPUs power up | | -| 1.7 | Check removal of pcie link between NPU and DPU | To Verify the PCie hot plug functinality | | -| 1.8 | Check the NTP date and timezone 
between DPU and NPU | To Verify NPU and DPU are in sync with respect to timezone and logs timestamp | | +| 1.7 | Check pcie link status between Switch and DPU | To Verify the PCie hot plug functinality | | +| 1.8 | Check the NTP date and timezone between DPU and Switch | To Verify Switch and DPU are in sync with respect to timezone and logs timestamp | | | 1.9 | Check the State of DPUs | To Verify DPU state details during online and offline | | | 1.10 | Check the Health of DPUs | To Verify overall health (LED, process, docker, services and hw) of DPU | Phase:2 | | 1.11 | Check reboot cause history | To Verify reboot cause history cli | | | 1.12 | Check the DPU state after OS reboot | To Verify DPU state on host reboot | | +| 1.13 | Check memory on DPU | To verify Memory and its threshold on all the DPUs | +| 1.14 | Check DPU status and pcie Link after memory exhaustion on Switch | To verify dpu status and connectivity after memory exhaustion on Switch | +| 1.15 | Check DPU status and pcie Link after memory exhaustion on DPU | To verify dpu status and connectivity after memory exhaustion on DPU | +| 1.16 | Check DPU status and pcie Link after restart pmon on Switch | To verify dpu status and connectivity after restart of pmon on NPU | +| 1.17 | Check DPU status and pcie Link after reload of configuration on Switch | To verify dpu status and connectivity after reload of configuration on Switch | +| 1.18 | Check DPU status and pcie Link after kernel panic on Switch| To verify dpu status and connectivity after Kernel Panic on Switch | +| 1.19 | Check DPU status and pcie Link after kernel panic on DPU | To verify dpu status and connectivity after Kernel Panic on DPU | +| 1.20 | Check DPU status and pcie Link after switch power cycle | To verify dpu status and connectivity after switch power cycle | +| 1.21 | Check DPU status and pcie Link after SW trip by temperature trigger | To verify dpu status and connectivity after SW trip by temperature trigger | ## CLI Test Cases 
@@ -69,7 +102,7 @@ General convention of DPU0, DPU1, DPU2 and DPUX has been followed to represent D ### 1.1 Check DPU Status #### Steps - * Use command `show chassis modules status` to get DPU status + * Use command on Switch: `show chassis modules status` to get DPU status * Get the number of DPU modules from ansible inventory file for the testbed. #### Verify in @@ -93,7 +126,7 @@ root@sonic:/home/cisco# show chassis modules status ### 1.2 Check platform voltage #### Steps - * Use command `show platform voltage` to get platform voltage + * Use command on Switch: `show platform voltage` to get platform voltage #### Verify in * Switch @@ -197,7 +230,7 @@ root@sonic:/home/cisco# ### 1.3 Check platform temperature #### Steps - * Use command `show platform temperature` to get platform temperature + * Use command on Switch: `show platform temperature` to get platform temperature #### Verify in * Switch @@ -318,7 +351,7 @@ root@sonic:/home/cisco# * cntrl+a and then cntrl+x to come out of the DPU console. -### 1.5 Check midplane ip address between NPU and DPU +### 1.5 Check midplane ip address between Switch and DPU #### Steps * Get the number of DPU modules from from ansible inventory file for the testbed. @@ -347,10 +380,10 @@ root@sonic:/home/cisco# #### Steps * Get the number of DPU modules from Ansible inventory file for the testbed. 
- * Use command `config chassis modules shutdown ` to shut down individual DPU - * Use command `show chassis modules status` to show DPU status - * Use command `config chassis modules startup ` to power up individual DPU - * Use command `show chassis modules status` to show DPU status + * Use command on Switch: `config chassis modules shutdown ` to shut down individual DPU + * Use command on Switch: `show chassis modules status` to show DPU status + * Use command on Switch: `config chassis modules startup ` to power up individual DPU + * Use command on Switch: `show chassis modules status` to show DPU status #### Verify in * Switch @@ -384,14 +417,14 @@ root@sonic:/home/cisco# show chassis modules status * Verify DPU is shown in show chassis modules status after DPU powered on -### 1.7 Check removal of pcie link between NPU and DPU +### 1.7 Check pcie link status between Switch and DPU #### Steps - * Use `show platform pcieinfo -c` to run the pcie info test to check everything is passing - * Use command `config chassis modules shutdown DPU` to bring down the dpu (This will bring down the pcie link between npu and dpu) - * Use `show platform pcieinfo -c` to run the pcie info test to check pcie link has been removed - * Use command `config chassis modules startup DPU` to bring up the dpu (This will rescan pcie links) - * Use `show platform pcieinfo -c` to run the pcie info test to check everything is passing + * Use command on Switch: `show platform pcieinfo -c` to run the pcie info test to check everything is passing + * Use command on Switch: `config chassis modules shutdown DPU` to bring down the dpu (This will bring down the pcie link between npu and dpu) + * Use command on Switch: `show platform pcieinfo -c` to run the pcie info test to check pcie link has been removed + * Use command on Switch: `config chassis modules startup DPU` to bring up the dpu (This will rescan pcie links) + * Use command on Switch: `show platform pcieinfo -c` to run the pcie info test 
to check everything is passing * This test is to check the PCie hot plug functinality since there is no OIR possible #### Verify in @@ -404,9 +437,9 @@ On Switch: Showing example of one DPU pcie link root@sonic:/home/cisco# show platform pcieinfo -c -root@sonic:/home/cisco# echo 1 > /sys/bus/pci/devices/0000:1a:00.0/remove +root@sonic:/home/cisco# config chassis modules shutdown DPU root@sonic:/home/cisco# -root@sonic:/home/cisco# echo 1 > /sys/bus/pci/rescan +root@sonic:/home/cisco# config chassis modules startup DPU root@sonic:/home/cisco# root@sonic:/home/cisco# show platform pcieinfo -c @@ -416,12 +449,12 @@ root@sonic:/home/cisco# show platform pcieinfo -c * Verify pcieinfo test pass for all after bringing back up the link -### 1.8 Check the NTP date and timezone between DPU and NPU +### 1.8 Check the NTP date and timezone between DPU and Switch #### Steps - * Use command `date` to get date and time zone on Switch - * Use command `ssh admin@169.254.x.x` to enter into required dpu. - * Use command `date` to get date and time zone on DPU + * Use command on Switch: `date` to get date and time zone on Switch + * Use command on Switch: `ssh admin@169.254.x.x` to enter into required dpu. + * Use command on DPU: `date` to get date and time zone on DPU #### Verify in * Switch and DPU @@ -452,7 +485,7 @@ root@sonic:/home/cisco# ### 1.9 Check the State of DPUs #### Steps - * Use command `show system-health DPU all` to get DPU health status. + * Use command on Switch:`show system-health DPU all` to get DPU health status. #### Verify in * Switch and DPU @@ -507,7 +540,7 @@ DPU0 1 Partial Online dpu_midplane_link_state up * This Test case is to be covered in Phase 2 #### Steps - * Use command `show system-health detail ` to check the health of the DPU. + * Use command on Switch: `show system-health detail ` to check the health of the DPU. 
#### Verify in * Switch @@ -546,13 +579,13 @@ rsyslog OK Process ### 1.11 Check reboot cause history #### Steps - * The "show reboot-cause" CLI on the switch shows the most recent rebooted device, time and the cause. - * The "show reboot-cause history" CLI on the switch shows the history of the Switch and all DPUs - * The "show reboot-cause history module-name" CLI on the switch shows the history of the specified module - * Use `config chassis modules shutdown ` - * Use `config chassis modules startup ` + * Use command on Switch: "show reboot-cause" CLI to show the most recent rebooted device, time and the cause. + * Use command on Switch: "show reboot-cause history" CLI to show the history of the Switch and all DPUs + * Use command on Switch: "show reboot-cause history module-name" CLI to show the history of the specified module + * Use command on Switch: `config chassis modules shutdown ` + * Use command on Switch: `config chassis modules startup ` * Wait for 5 minutes for Pmon to update the DPU states - * Use `show reboot-cause ` to check the latest reboot is displayed + * Use command on Switch: `show reboot-cause ` to check the latest reboot is displayed #### Verify in * Switch @@ -600,7 +633,7 @@ DPU3 2023_10_02_17_23_46 Host Reset DPU Sun 02 Oct 2 #### Steps -Existing Test case for NPU: +Existing Test case for Switch: * Reboot using a particular command (sonic reboot, watchdog reboot, etc) * All the timeout and poll timings are read from platform.json * Wait for ssh to drop @@ -617,7 +650,7 @@ Reboot Test Case for DPU: * Save the configurations of all DPU state before reboot * Power on all the DPUs that were powered on before reboot using `config chassis modules startup ` * Wait for DPUs to be up - * Use command `show chassis modules status` to get DPU status + * Use command on Switch: `show chassis modules status` to get DPU status * Get the number of DPU modules from ansible inventory file for the testbed. 
#### Verify in @@ -645,6 +678,675 @@ root@sonic:/home/cisco# show chassis modules status * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. +### 1.13 Check Memory on DPU + +### Steps + + * Infrastructure support will be provided to choose from following clis or in combinations based on the vendor. + * Use command on DPU: `show system-memory` to get memory usage on each of those DPUs + * Use command on Switch: `show system-health dpu ` to check memory check service status + * Use command on DPU: `pdsctl show system --events` to check memory related events triggered. + (This is vendor specific event monitoring cli) + +#### Verify in + * DPU + +#### Sample Output +``` +On DPU: + +root@sonic:/home/admin# show system-memory + total used free shared buff/cache available +Mem: 6266 4198 1509 28 765 2067 +Swap: 0 0 0 +root@sonic:/home/admin# +root@sonic:/home/admin# + +On Switch: + +root@MtFuji:/home/cisco# show system-health dpu DPU0 +is_smartswitch returning True +Name ID Oper-Status State-Detail State-Value Time Reason +------ ---- ------------- ----------------------- ------------- ----------------- -------------------------------------------------------------------------------------------------------------------------- +DPU0 0 Online dpu_midplane_link_state UP 20241003 17:57:22 INTERNAL-MGMT : admin state - UP, oper_state - UP, status - OK, HOST-MGMT : admin state - UP, oper_state - UP, status - OK + dpu_control_plane_state UP 20241003 17:57:22 All containers are up and running, host-ethlink-status: Uplink1/1 is UP + dpu_data_plane_state UP 20241001 19:54:30 DPU container named polaris is running, pdsagent running : OK, pciemgrd running : OK +root@MtFuji:/home/cisco# + +On DPU: +# Note: This command is run on cisco specific testbed. 
+# HwSKU: Cisco-8102-28FH-DPU-O-T1 + +root@sonic:/home/admin# pdsctl show system --events +---------------------------------------------------------------------------------------------------- +Event Severity Timestamp +---------------------------------------------------------------------------------------------------- +DSE_SERVICE_STARTED DEBUG 2024-09-20 21:48:11.515551 +0000 UTC +DSE_SERVICE_STARTED DEBUG 2024-09-20 21:48:11.668685 +0000 UTC +DSE_SERVICE_STARTED DEBUG 2024-09-20 21:48:12.379261 +0000 UTC +DSE_SERVICE_STARTED DEBUG 2024-09-20 21:48:19.379819 +0000 UTC +root@sonic:/home/admin# + +``` + +#### Pass/Fail Criteria + + * Verify that used memory should not cross the specified threshold value (90) of total memory. + * Threshold can be set different based on platform. + * Verify that dpu_control_plane_state is up under system-health dpu cli. + * Verify no memory related events (MEM_FAILURE_EVENT) under pdsctl show system --events cli. This is vendor specific event montioring cli. + * Increase the memory to go beyond threshold (head -c /dev/zero | tail &) and verify it in pdsctl show system --events cli. + + +### 1.14 Check DPU status and pcie Link after memory exhaustion on Switch + +#### Steps + + * Use command on Switch: `sudo swapoff -a`. Swapping is turned off so the OOM is triggered in a shorter time. + * Use command on Switch: 'nohup bash -c "sleep 5 && tail /dev/zero" &' to run out of memory completely. + * It runs on the background and `nohup` is also necessary to protect the background process. + * Added `sleep 5` to ensure ansible receive the result first. + * Check the status and power on DPUs after switch goes for reboot and comes back + * Use command on Switch: `show chassis modules status` to check status of the DPUs. 
+ * Append to the existing test case: https://github.com/sonic-net/sonic-mgmt/blob/master/tests/platform_tests/test_memory_exhaustion.py + + #### Verify in + + * Switch + +#### Sample Output + +``` +root@sonic:/home/cisco# sudo swapoff -a +root@sonic:/home/cisco# +root@sonic:/home/cisco# nohup bash -c "sleep 5 && tail /dev/zero" & +root@sonic:/home/cisco# nohup: ignoring input and appending output to 'nohup.out' +root@sonic:/home/cisco# +. +(Going for reboot) +. +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. 
+64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +``` + +#### Pass/Fail Criteria + + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + + +### 1.15 Check DPU status and pcie Link after memory exhaustion on DPU + +#### Steps + +* Use command on DPU: `sudo swapoff -a`. Swapping is turned off so the OOM is triggered in a shorter time. +* Use command on DPU: 'nohup bash -c "sleep 5 && tail /dev/zero" &' to to run out of memory completely. +* It runs on the background and `nohup` is also necessary to protect the background process. +* Added `sleep 5` to ensure ansible receive the result first. +* Powercycling of DPU is to ensure that pcie link came up properly after the memory exhaustion test. +* Use command on Switch: `config chassis module shutdown ` to power off the DPUs. +* Wait for 3 mins. +* Use command on Switch: `config chassis module startup ` to power on the DPUs. + + #### Verify in + + * DPU and Switch + +#### Sample Output + +DPU: + +``` +root@sonic:/home/admin# sudo swapoff -a +root@sonic:/home/adminsco# +root@sonic:/home/admin# nohup bash -c "sleep 5 && tail /dev/zero" & +root@sonic:/home/admin# nohup: ignoring input and appending output to 'nohup.out' +root@sonic:/home/admin# +. +(Going for reboot) +. 
+``` + +Switch: + +``` +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +``` + +#### Pass/Fail Criteria + + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. 
+ + +### 1.16 Check DPU status and pcie Link after restart pmon on Switch + +#### Steps + * Use command on Switch: `docker ps` to check the status of all the dockers. + * Use command on Switch: `systemctl restart pmon` + * Wait for 3 mins + * Use command on Switch: `docker ps` to check the status of all the dockers. + * Use command on Switch: `show chassis modules status` to check status of the DPUs. + +#### Verify in + * Switch + +#### Sample Output + +``` +root@MtFuji:/home/cisco# docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a49fcf07beb8 docker-snmp:latest "/usr/local/bin/supe…" 3 days ago Up 3 days snmp +57ecc675292d docker-platform-monitor:latest "/usr/bin/docker_ini…" 3 days ago Up 3 days pmon +f1306072ba01 docker-sonic-mgmt-framework:latest "/usr/local/bin/supe…" 3 days ago Up 3 days mgmt-framework +571cc36585ae docker-lldp:latest "/usr/bin/docker-lld…" 3 days ago Up 3 days lldp +db4b1444e8a0 docker-sonic-gnmi:latest "/usr/local/bin/supe…" 3 days ago Up 3 days gnmi +a90702b9c541 d0a0fb621c53 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_server +4d2d79b77c66 2c214d2315a2 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_relay +90246d1e26d2 docker-fpm-frr:latest "/usr/bin/docker_ini…" 3 days ago Up 3 days bgp +42cf834770a8 docker-orchagent:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days swss +7eb9da209385 docker-router-advertiser:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days radv +66c4c8779e60 docker-syncd-cisco:latest "/usr/local/bin/supe…" 3 days ago Up 3 days syncd +5d542c98fb00 docker-teamd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days teamd +a5225d08bcf4 docker-eventd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days eventd +bd7555425d6d docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu5 +42fd04767a03 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu4 +a1633fc4a6ff docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu3 +32b4e9506827 
docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu0 +bb73239399e4 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu6 +e5281aba74de docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu7 +96032ebcb451 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu1 +45418ff0d88f docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu2 +39ddbddd3fb3 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database +f1cac669cd08 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database-chassis +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# systemctl restart pmon +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a49fcf07beb8 docker-snmp:latest "/usr/local/bin/supe…" 3 days ago Up 3 days snmp +57ecc675292d docker-platform-monitor:latest "/usr/bin/docker_ini…" 3 days ago Up 4 seconds pmon +f1306072ba01 docker-sonic-mgmt-framework:latest "/usr/local/bin/supe…" 3 days ago Up 3 days mgmt-framework +571cc36585ae docker-lldp:latest "/usr/bin/docker-lld…" 3 days ago Up 3 days lldp +db4b1444e8a0 docker-sonic-gnmi:latest "/usr/local/bin/supe…" 3 days ago Up 3 days gnmi +a90702b9c541 d0a0fb621c53 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_server +4d2d79b77c66 2c214d2315a2 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_relay +90246d1e26d2 docker-fpm-frr:latest "/usr/bin/docker_ini…" 3 days ago Up 3 days bgp +42cf834770a8 docker-orchagent:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days swss +7eb9da209385 docker-router-advertiser:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days radv +66c4c8779e60 docker-syncd-cisco:latest "/usr/local/bin/supe…" 3 days ago Up 3 days syncd +5d542c98fb00 docker-teamd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days teamd +a5225d08bcf4 
docker-eventd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days eventd +bd7555425d6d docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu5 +42fd04767a03 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu4 +a1633fc4a6ff docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu3 +32b4e9506827 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu0 +bb73239399e4 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu6 +e5281aba74de docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu7 +96032ebcb451 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu1 +45418ff0d88f docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu2 +39ddbddd3fb3 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database +f1cac669cd08 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database-chassis +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# systemctl status pmon +● pmon.service - Platform monitor container + Loaded: loaded (/lib/systemd/system/pmon.service; static) + Drop-In: /etc/systemd/system/pmon.service.d + └─auto_restart.conf + Active: active (running) since Sat 2024-10-05 00:22:29 UTC; 24s ago + Process: 3584922 ExecStartPre=/usr/bin/pmon.sh start (code=exited, status=0> + Main PID: 3584995 (pmon.sh) + Tasks: 2 (limit: 153342) + Memory: 27.6M + CGroup: /system.slice/pmon.service + ├─3584995 /bin/bash /usr/bin/pmon.sh wait + └─3585000 python3 /usr/local/bin/container wait pmon + +Oct 05 00:22:28 MtFuji container[3584943]: container_start: pmon: set_owner:loc> +Oct 05 00:22:29 MtFuji container[3584943]: docker cmd: start for pmon +Oct 05 00:22:29 MtFuji container[3584943]: container_start: END +Oct 05 00:22:29 MtFuji systemd[1]: Started pmon.service - Platform monitor cont> +Oct 05 00:22:29 MtFuji container[3585000]: 
container_wait: BEGIN +Oct 05 00:22:29 MtFuji container[3585000]: read_data: config:True feature:pmon > +Oct 05 00:22:29 MtFuji container[3585000]: read_data: config:False feature:pmon> +Oct 05 00:22:29 MtFuji container[3585000]: docker get image version for pmon +Oct 05 00:22:29 MtFuji container[3585000]: container_wait: pmon: set_owner:loca> +Oct 05 00:22:29 MtFuji container[3585000]: container_wait: END -- transitioning> + +root@MtFuji:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.200.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# + +``` +#### Pass/Fail Criteria + * Verify pmon and all the associated critical processes are up. + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + + +### 1.17 Check DPU status and pcie Link after reload of configuration on Switch + +#### Steps +* Use command on Switch: `config reload -y` to reload the configurations in the switch. +* Wait for 3 mins. +* Use command on Switch: `show chassis modules status` to check status of the DPUs. +* Use command on Switch: `config chassis module startup <module_name>` to power on the DPUs.
+* Use command on Switch: `show chassis modules status` to check status of the DPUs. + +#### Verify in + * Switch + +#### Sample Output + +``` +root@sonic:/home/cisco# config reload -y +Acquired lock on /etc/sonic/reload.lock +Disabling container monitoring ... +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /etc/sonic/init_cfg.json -j /etc/sonic/config_db.json --write-to-db +Running command: /usr/local/bin/db_migrator.py -o migrate +Running command: /usr/local/bin/sonic-cfggen -d -y /etc/sonic/sonic_version.yml -t /usr/share/sonic/templates/sonic-environment.j2,/etc/sonic/sonic-environment +Restarting SONiC target ... +Enabling container monitoring ... +Reloading Monit configuration ... +Reinitializing monit daemon +Released lock on /etc/sonic/reload.lock +root@MtFuji:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + 
+root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# + +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + + +### 1.18 Check DPU status and pcie Link after kernel panic on Switch + +#### Steps +* Use command on Switch: `nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" &` +* Use command on Switch: `show chassis modules status` to check status of the DPUs. +* Use command on Switch: `config chassis module startup ` to power on the DPUs. +* Use command on Switch: `show chassis modules status` to check status of the DPUs. +* Append to the existing test case: https://github.com/sonic-net/sonic-mgmt/blob/master/tests/platform_tests/test_kdump.py + +#### Verify in + * Switch + +#### Sample Output + +``` +root@sonic:/home/cisco# nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" & +. +(Going for reboot) +. 
+root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# show reboot-cause + +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + * Verify `show reboot-cause ` to check the reboot is caused by kernel panic. 
+ + +### 1.19 Check DPU status and pcie Link after kernel panic on DPU + +#### Steps +* Use command on DPU: `nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" &`. +* Powercycling of DPU is to ensure that pcie link came up properly after the memory exhaustion test. +* Use command on Switch: `config chassis module shutdown ` to power off the DPUs. +* Wait for 3 mins. +* Use command on Switch: `config chassis module startup ` to power on the DPUs. + +#### Verify in + * DPU and Switch + +#### Sample Output + +DPU: + +``` +root@sonic:/home/admin# nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" & +. +(Going for reboot) +. +``` + +Switch: + +``` +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. 
+64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +root@sonic:/home/cisco# show reboot-cause + +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + * Verify `show reboot-cause ` to check the reboot is caused by kernel panic. + + +### 1.20 Check DPU status and pcie Link after switch power cycle + +#### Steps + * Power cycle the testbed using PDU controller. + * Use command on Switch: `config chassis module startup ` to power on the DPUs. + * Use command on Switch: `show chassis modules status` to check status of the DPUs. + * Append to the existing test case: https://github.com/sonic-net/sonic-mgmt/blob/master/tests/platform_tests/test_power_off_reboot.py + +#### Verify in + * Switch + +#### Sample Output + +After power cycle of the testbed, + +``` +root@sonic:/home/cisco# +root@sonic:/home/cisco# +. +(Going for power off reboot using pdu controller) +. 
+root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis modules startup +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. 
+ + +### 1.21 Check DPU status and pcie Link after SW trip by temperature trigger + +#### Steps + + * Infrastructure will be provided to run the scripts that triggers the temperature trip based on vendor. + * The following is the example sequence to trigger temperature trip on the DPU + - Note: If Cisco setup, the following steps work. + - In DPU, Execute: `docker exec -it polaris /bin/bash` + - Create /tmp/temp_sim.json file with dictionary { "hbmtemp": 65, "dietemp": 85} + - Increase dietemp to 125 to trigger the trip. + +#### Verify in + * DPU + +#### Sample Output + +DPU: + +``` +# Note: This command is run on cisco specific testbed. +# HwSKU: Cisco-8102-28FH-DPU-O-T1 + +root@sonic:/home/admin# +root@sonic:/home/admin# docker exec -it polaris /bin/bash +bash-4.4# +bash-4.4# cat /tmp/temp_sim.json +{ "hbmtemp": 65, +"dietemp": 85 } +bash-4.4# +bash-4.4# +bash-4.4# +bash-4.4# cat /tmp/temp_sim.json +{ "hbmtemp": 65, +"dietemp": 125 } +bash-4.4# exit +exit +root@sonic:/home/admin# e** RESETTING: SW TRIP dietemBoot0 v19, Id 0x82 +Boot fwid 0 @ 0x74200000... OK + +. +(Going for reboot) +. 
+ +``` +Switch: + +``` +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis shutdown DPU0 +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.200.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +root@sonic:/home/cisco# show reboot-cause history dpu0 + + +``` + +#### Pass/Fail Criteria + * Verify Ping works to DPU mid plane ip listed in the ansible inventory file for the testbed. + * Verify the command on Switch: `show reboot-cause history` to check the cause as cattrip.
+ + ## Objectives of API Test Cases | | **Test Case** | **Intention** | **Comments** | diff --git a/docs/testplan/WoL-test-plan.md b/docs/testplan/WoL-test-plan.md index 5e48160dd48..b2fc70b8244 100644 --- a/docs/testplan/WoL-test-plan.md +++ b/docs/testplan/WoL-test-plan.md @@ -32,13 +32,19 @@ The test will issue `wol` commands with various parameter combinations on DUT, t #### Test case #1 - Verrify send a wol packet to a specific interface 1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. -1. Issue command on DUT host: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 255.255.255.255`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234`) 1. Stop `tcpdump` process in PTF. 1. Check if only one wol packet exists in `.pcap` file and the content is expected. #### Test case #2 - Verify send a wol packekt to each member of a vlan 1. Start multiple `tcpdump` processes in PTF to capture WoL packet on each interfaces. Save the captured packets to different `.pcap` files. -1. Issue command on DUT host: `wol `. (e.g., `wol Vlan1000 00:11:22:33:44:55`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol `. (e.g., `wol Vlan1000 00:11:22:33:44:55`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 255.255.255.255`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234`) 1. 
Stop all `tcpdump` processes in PTF. 1. *For each interface in vlan*, check if one wol packet exists in corresponding `.pcap` file and the content is expected. 1. *For each interface not in vlan*, check no wol packet exists in corresponding `.pcap` file. @@ -51,21 +57,43 @@ The test will issue `wol` commands with various parameter combinations on DUT, t #### Test case #4 - Verify send a wol packet with password 1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. -1. Issue command on DUT host: `wol -p ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -p 192.168.1.1`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol -p ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -p 192.168.1.1`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 255.255.255.255 -p 11:22:33:44:55:66`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234 -p 192.168.123.123`) 1. Stop `tcpdump` process in PTF. 1. Check if only one wol packet exists in `.pcap` file and the content is expected. Especially, verify the password in wol packet is same as command. #### Test case #5 - Verify send multiple wol packets with specific interval to a specific interface 1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. -1. Issue command on DUT host: `wol -c -i ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -c 3 -i 2000`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol -c -i ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -c 3 -i 2000`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 255.255.255.255 -c 4 -i 1000`) + 1.
Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234 -c 5 -i 1500`) 1. Stop `tcpdump` process in PTF. 1. Check if exact `` wol packets exist in `.pcap` file and the content is expected. Moreover, check the time interval between each wol packet in `.pcap` file is ALMOST SAME[^1] as input ``. #### Test case #6 - Verify send multiple wol packets with specific interval to each membor of a vlan 1. Start multiple `tcpdump` processes in PTF to capture WoL packet on each interfaces. Save the captured packets to different `.pcap` files. -1. Issue command on DUT host: `wol -c -i ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -c 3 -i 2000`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol -c -i ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -c 3 -i 2000`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 255.255.255.255 -c 4 -i 1000`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234 -c 5 -i 1500`) 1. Stop `tcpdump` process in PTF. 1. *For each interface in vlan*, check if exact `` wol packets exist in `.pcap` file and the content is expected. Moreover, check the time interval between each wol packet in `.pcap` file is ALMOST SAME[^1] as input ``. 1. *For each interface not in vlan*, check no wol packet exists in corresponding `.pcap` file. +#### Test case #7 - Verify constraints of parameters +1. Make sure count and interval are either both present or both absent. +1. Make sure udp flag is required when using ip address or udp port. +1. Make sure udp flag conflicts with mac broadcast flag. + +#### Test case #8 - Verify parameters can be set correctly by CLI +1. Make sure the interface receiving the packet and the command line parameter interface are the same. +1.
Make sure target_mac in payload and command line parameter target_mac are same. +1. Make sure ip address in header and command line parameter ip_address are same. (Test both ipv4 and ipv6 address with broadcase address or unicast address on VLAN interface or port interface, so there should be 8 combinations, maybe we can leverage pytest parametrize mark to realize that.) +1. Make sure when command line parameter ip_address is empty, ip address in header is default value: 255.255.255.255. +1. Make sure udp port in header and command line parameter udp port are same. +1. Make sure when command line parameter udp_port is empty, udp port in header is default value: 9. + [^1]: ALMOST SAME means we should tolerate small errors caused by electrical characteristics. diff --git a/tests/acl/custom_acl_table/test_custom_acl_table.py b/tests/acl/custom_acl_table/test_custom_acl_table.py index 617e9cac66f..7e1cddc252b 100644 --- a/tests/acl/custom_acl_table/test_custom_acl_table.py +++ b/tests/acl/custom_acl_table/test_custom_acl_table.py @@ -11,7 +11,6 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 logger = logging.getLogger(__name__) @@ -251,7 +250,7 @@ def build_exp_pkt(input_pkt): def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, setup_acl_rules, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_counterpoll_interval, remove_dataacl_table, skip_traffic_test): # noqa F811 + setup_counterpoll_interval, remove_dataacl_table): # noqa F811 """ The test case is to verify the functionality of custom ACL table Test steps @@ -263,6 +262,7 @@ def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, 6. 
Verify the counter of expected rule increases as expected """ mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + asic_type = rand_selected_dut.facts['asic_type'] if "dualtor" in tbinfo["topo"]["name"]: mg_facts_unselected_dut = rand_unselected_dut.get_extended_minigraph_facts(tbinfo) vlan_name = list(mg_facts['minigraph_vlans'].keys())[0] @@ -288,15 +288,17 @@ def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, exp_pkt = build_exp_pkt(pkt) # Send and verify packet clear_acl_counter(rand_selected_dut) - if not skip_traffic_test: - if "dualtor-aa" in tbinfo["topo"]["name"]: - clear_acl_counter(rand_unselected_dut) - ptfadapter.dataplane.flush() - testutils.send(ptfadapter, pkt=pkt, port_id=src_port_indice) - testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=dst_port_indices, timeout=5) - acl_counter = read_acl_counter(rand_selected_dut, rule) - if "dualtor-aa" in tbinfo["topo"]["name"]: - acl_counter_unselected_dut = read_acl_counter(rand_unselected_dut, rule) - acl_counter += acl_counter_unselected_dut - # Verify acl counter - pytest_assert(acl_counter == 1, "ACL counter for {} didn't increase as expected".format(rule)) + if "dualtor-aa" in tbinfo["topo"]["name"]: + clear_acl_counter(rand_unselected_dut) + if asic_type == 'vs': + logger.info("Skip ACL verification on VS platform") + continue + ptfadapter.dataplane.flush() + testutils.send(ptfadapter, pkt=pkt, port_id=src_port_indice) + testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=dst_port_indices, timeout=5) + acl_counter = read_acl_counter(rand_selected_dut, rule) + if "dualtor-aa" in tbinfo["topo"]["name"]: + acl_counter_unselected_dut = read_acl_counter(rand_unselected_dut, rule) + acl_counter += acl_counter_unselected_dut + # Verify acl counter + pytest_assert(acl_counter == 1, "ACL counter for {} didn't increase as expected".format(rule)) diff --git a/tests/acl/null_route/test_null_route_helper.py 
b/tests/acl/null_route/test_null_route_helper.py index e2f5da0a557..fbea5d5f1d7 100644 --- a/tests/acl/null_route/test_null_route_helper.py +++ b/tests/acl/null_route/test_null_route_helper.py @@ -9,7 +9,7 @@ from ptf.mask import Mask import ptf.packet as scapy -from tests.common.fixtures.ptfhost_utils import remove_ip_addresses, skip_traffic_test # noqa F401 +from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 import ptf.testutils as testutils from tests.common.helpers.assertions import pytest_require from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError @@ -229,12 +229,10 @@ def generate_packet(src_ip, dst_ip, dst_mac): return pkt, exp_pkt -def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_action, skip_traffic_test): # noqa F811 +def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_action): # noqa F811 """ Send packet with ptfadapter and verify if packet is forwarded or dropped as expected. """ - if skip_traffic_test: - return ptfadapter.dataplane.flush() testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) if expected_action == FORWARD: @@ -244,7 +242,7 @@ def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_ def test_null_route_helper(rand_selected_dut, tbinfo, ptfadapter, - apply_pre_defined_rules, setup_ptf, skip_traffic_test): # noqa F811 + apply_pre_defined_rules, setup_ptf): # noqa F811 """ Test case to verify script null_route_helper. 
Some packets are generated as defined in TEST_DATA and sent to DUT, @@ -280,4 +278,4 @@ def test_null_route_helper(rand_selected_dut, tbinfo, ptfadapter, time.sleep(1) send_and_verify_packet(ptfadapter, pkt, exp_pkt, random.choice(ptf_interfaces), - rx_port, expected_result, skip_traffic_test) + rx_port, expected_result) diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index e97d0fb176d..2908cfc2038 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -19,7 +19,6 @@ from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common.config_reload import config_reload from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr # noqa F401 from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.utilities import wait_until, get_upstream_neigh_type, get_downstream_neigh_type, check_msg_in_syslog @@ -634,7 +633,7 @@ def setup_rules(self, dut, acl_table, ip_version): """ pass - def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 + def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 """Perform actions after rules have been applied. Args: @@ -664,7 +663,7 @@ def teardown_rules(self, dut): @pytest.fixture(scope="class", autouse=True) def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, - ip_version, conn_graph_facts): # noqa F811 + ip_version, conn_graph_facts): # noqa F811 """Setup/teardown ACL rules for the current set of tests. 
Args: @@ -704,7 +703,7 @@ def tear_down_acl_rule_single_dut(self, duthost, loganalyzer): duthost, LOG_EXPECT_ACL_RULE_REMOVE_RE) def set_up_acl_rules_single_dut(self, acl_table, - conn_graph_facts, dut_to_analyzer_map, duthost, # noqa F811 + conn_graph_facts, dut_to_analyzer_map, duthost, # noqa F811 ip_version, localhost, populate_vlan_arp_entries, tbinfo): logger.info("{}: ACL rule application started".format(duthost.hostname)) @@ -807,10 +806,8 @@ def counters_sanity_check(self, duthosts, acl_rules, acl_table): continue counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT] counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT] - if (duthost.facts["hwsku"] == "Cisco-8111-O64" or - duthost.facts["hwsku"] == "Cisco-8111-O32" or - duthost.facts["hwsku"] == "Cisco-8111-C32" or - duthost.facts["hwsku"] == "Cisco-8111-O62C2"): + if duthost.facts["platform"] in ["x86_64-8111_32eh_o-r0", + "x86_64-8122_64eh_o-r0", "x86_64-8122_64ehf_o-r0"]: skip_byte_accounting = True logger.info("Counters for ACL rule \"{}\" after traffic:\n{}" @@ -967,57 +964,57 @@ def expected_mask_routed_packet(self, pkt, ip_version): return exp_pkt - def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage, skip_traffic_test): # noqa F811 + def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage): """Verify that unmatched packets are dropped for ingress.""" if stage == "egress": pytest.skip("Only run for ingress") pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) - def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage, skip_traffic_test): # noqa F811 + def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage): """Verify that 
default egress rule allow all traffics""" if stage == "ingress": pytest.skip("Only run for egress") pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward a packet on source IP.""" src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(1) def test_rules_priority_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we respect rule priorites in the forwarding case.""" src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(20) def test_rules_priority_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we respect rule priorites in the drop case.""" src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, 
ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(7) def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, vlan_name, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version, vlan_name): """Verify that we can match and forward a packet on destination IP.""" dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] \ if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version] pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) # Because m0_l3_scenario use differnet IPs, so need to verify different acl rules. if direction == "uplink->downlink": if setup["topo"] == "m0_l3": @@ -1037,13 +1034,13 @@ def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check.append(rule_id) def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, vlan_name, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version, vlan_name): """Verify that we can match and drop a packet on destination IP.""" dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] \ if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version] pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) # Because m0_l3_scenario use differnet IPs, so need to verify different acl rules. 
if direction == "uplink->downlink": if setup["topo"] == "m0_l3": @@ -1063,156 +1060,156 @@ def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check.append(rule_id) def test_source_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop a packet on source IP.""" src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(14) def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward a UDP packet on source IP.""" src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8" pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(13) def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop a UDP packet on source IP.""" src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2" pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) 
counters_sanity_check.append(26) def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop an ICMP packet on source IP.""" src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2" pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(25) def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward an ICMP packet on source IP.""" src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8" pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(12) def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on L4 destination port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(9) def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and 
forward on L4 source port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(4) def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on a range of L4 destination ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(11) def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on a range of L4 source ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(10) def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on a range of L4 destination ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(22) def 
test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on a range of L4 source ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(17) def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on the IP protocol.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(5) def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on the TCP flags.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(6) def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on L4 destination port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, 
skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(22) def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on L4 source port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(17) def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on the IP protocol.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(18) def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on the TCP flags.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(19) def test_icmp_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on the TCP flags.""" src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10" 
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(29) - def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version, skip_traffic_test): # noqa F811 + def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version): exp_pkt = self.expected_mask_routed_packet(pkt, ip_version) if ip_version == "ipv4": @@ -1220,9 +1217,6 @@ def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_ver else: downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst) - if skip_traffic_test: - return - ptfadapter.dataplane.flush() testutils.send(ptfadapter, self.src_port, pkt) if direction == "uplink->downlink" and downstream_dst_port: @@ -1299,7 +1293,7 @@ class TestAclWithReboot(TestBasicAcl): upon startup. """ - def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 + def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 """Save configuration and reboot after rules are applied. Args: @@ -1344,7 +1338,7 @@ class TestAclWithPortToggle(TestBasicAcl): Verify that ACLs still function as expected after links flap. """ - def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 + def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 """Toggle ports after rules are applied. 
Args: diff --git a/tests/acl/test_acl_outer_vlan.py b/tests/acl/test_acl_outer_vlan.py index a9a237b8a08..0c1d0288401 100644 --- a/tests/acl/test_acl_outer_vlan.py +++ b/tests/acl/test_acl_outer_vlan.py @@ -14,7 +14,7 @@ from tests.common.utilities import wait_until from tests.common.config_reload import config_reload from tests.common.helpers.assertions import pytest_assert, pytest_require -from tests.common.fixtures.ptfhost_utils import change_mac_addresses, skip_traffic_test # noqa F401 +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from abc import abstractmethod from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -515,7 +515,7 @@ def setup(self, rand_selected_dut, ptfhost, ip_version, vlan_setup_info): self.post_running_hook(rand_selected_dut, ptfhost, ip_version) def _do_verification(self, ptfadapter, duthost, tbinfo, vlan_setup_info, - ip_version, tagged_mode, action, skip_traffic_test): # noqa F811 + ip_version, tagged_mode, action): # noqa F811 vlan_setup, _, _, _ = vlan_setup_info test_setup_config = self.setup_cfg(duthost, tbinfo, vlan_setup, tagged_mode, ip_version) @@ -556,17 +556,21 @@ def _do_verification(self, ptfadapter, duthost, tbinfo, vlan_setup_info, testutils.send(ptfadapter, port, mac_pkt) table_name = ACL_TABLE_NAME_TEMPLATE.format(stage, ip_version) + asic_type = duthost.facts['asic_type'] + if asic_type == 'vs': + logger.info("Skip ACL verification on VS platform") + return try: self._setup_acl_rules(duthost, stage, ip_version, outer_vlan_id, action) - if not skip_traffic_test: - count_before = get_acl_counter(duthost, table_name, RULE_1, timeout=0) - send_and_verify_traffic(ptfadapter, pkt, exp_pkt, src_port, dst_port, pkt_action=action) - count_after = get_acl_counter(duthost, table_name, RULE_1) - - logger.info("Verify Acl counter incremented {} > 
{}".format(count_after, count_before)) - pytest_assert(count_after >= count_before + 1, - "Unexpected results, counter_after {} > counter_before {}" - .format(count_after, count_before)) + + count_before = get_acl_counter(duthost, table_name, RULE_1, timeout=0) + send_and_verify_traffic(ptfadapter, pkt, exp_pkt, src_port, dst_port, pkt_action=action) + count_after = get_acl_counter(duthost, table_name, RULE_1) + + logger.info("Verify Acl counter incremented {} > {}".format(count_after, count_before)) + pytest_assert(count_after >= count_before + 1, + "Unexpected results, counter_after {} > counter_before {}" + .format(count_after, count_before)) except Exception as e: raise (e) finally: @@ -574,83 +578,75 @@ def _do_verification(self, ptfadapter, duthost, tbinfo, vlan_setup_info, @pytest.mark.po2vlan def test_tagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on tagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_TAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_TAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_tagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on tagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_TAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_TAGGED, ACTION_DROP) @pytest.mark.po2vlan def test_untagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - 
ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on untagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_UNTAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_UNTAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_untagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on untagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_UNTAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_UNTAGGED, ACTION_DROP) @pytest.mark.po2vlan def test_combined_tagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on tagged interface, and the interface belongs to two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_TAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_COMBINE_TAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_combined_tagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on tagged interface, and the interface belongs to 
two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_TAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_COMBINE_TAGGED, ACTION_DROP) @pytest.mark.po2vlan def test_combined_untagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on untagged interface, and the interface belongs to two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_UNTAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_COMBINE_UNTAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_combined_untagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on untagged interface, and the interface belongs to two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_UNTAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_COMBINE_UNTAGGED, ACTION_DROP) @pytest.fixture(scope='module', autouse=True) diff --git a/tests/acl/test_stress_acl.py b/tests/acl/test_stress_acl.py index bae67cdb873..47244a30bc4 100644 --- a/tests/acl/test_stress_acl.py +++ b/tests/acl/test_stress_acl.py @@ -7,7 +7,6 @@ from collections import defaultdict from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa F401 from tests.common.utilities import wait_until -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology("t0", 
"t1", "m0", "mx"), @@ -119,7 +118,7 @@ def prepare_test_port(rand_selected_dut, tbinfo): def verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, del_rule_id, verity_status, skip_traffic_test): # noqa F811 + acl_rule_list, del_rule_id, verity_status): for acl_id in acl_rule_list: ip_addr1 = acl_id % 256 @@ -146,13 +145,12 @@ def verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, exp_pkt.set_do_not_care_scapy(packet.Ether, 'src') exp_pkt.set_do_not_care_scapy(packet.IP, "chksum") - if not skip_traffic_test: - ptfadapter.dataplane.flush() - testutils.send(test=ptfadapter, port_id=ptf_src_port, pkt=pkt) - if verity_status == "forward" or acl_id == del_rule_id: - testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) - elif verity_status == "drop" and acl_id != del_rule_id: - testutils.verify_no_packet_any(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) + ptfadapter.dataplane.flush() + testutils.send(test=ptfadapter, port_id=ptf_src_port, pkt=pkt) + if verity_status == "forward" or acl_id == del_rule_id: + testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) + elif verity_status == "drop" and acl_id != del_rule_id: + testutils.verify_no_packet_any(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) def acl_rule_loaded(rand_selected_dut, acl_rule_list): @@ -168,7 +166,7 @@ def acl_rule_loaded(rand_selected_dut, acl_rule_list): def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_file, prepare_test_port, get_function_completeness_level, - toggle_all_simulator_ports_to_rand_selected_tor, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 ptf_src_port, ptf_dst_ports, dut_port = prepare_test_port @@ -186,7 +184,7 @@ def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_ rand_selected_dut.shell(cmd_create_table) acl_rule_list = list(range(1, 
ACL_RULE_NUMS + 1)) verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, 0, "forward", skip_traffic_test) + acl_rule_list, 0, "forward") try: loops = 0 while loops <= loop_times: @@ -204,7 +202,7 @@ def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_ wait_until(wait_timeout, 2, 0, acl_rule_loaded, rand_selected_dut, acl_rule_list) verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, 0, "drop", skip_traffic_test) + acl_rule_list, 0, "drop") del_rule_id = random.choice(acl_rule_list) rand_selected_dut.shell('sonic-db-cli CONFIG_DB del "ACL_RULE|STRESS_ACL| RULE_{}"'.format(del_rule_id)) @@ -212,7 +210,7 @@ def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_ wait_until(wait_timeout, 2, 0, acl_rule_loaded, rand_selected_dut, acl_rule_list) verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, del_rule_id, "drop", skip_traffic_test) + acl_rule_list, del_rule_id, "drop") loops += 1 finally: diff --git a/tests/arp/test_arpall.py b/tests/arp/test_arpall.py index b885023b688..11637cacd26 100644 --- a/tests/arp/test_arpall.py +++ b/tests/arp/test_arpall.py @@ -26,7 +26,8 @@ def test_arp_unicast_reply(common_setup_teardown, intfs_for_test, enum_frontend_ clear_dut_arp_cache(duthost, asichost.cli_ns_option) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } log_file = "/tmp/arptest.VerifyUnicastARPReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) ptf_runner(ptfhost, 'ptftests', "arptest.VerifyUnicastARPReply", '/root/ptftests', @@ -45,7 +46,8 @@ def test_arp_expect_reply(common_setup_teardown, intfs_for_test, enum_frontend_a asichost = duthost.asic_instance(enum_frontend_asic_index) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } # Start PTF runner and send correct arp 
packets @@ -69,7 +71,8 @@ def test_arp_no_reply_other_intf(common_setup_teardown, intfs_for_test, enum_fro clear_dut_arp_cache(duthost, asichost.cli_ns_option) intf2_params = { 'acs_mac': router_mac, - 'port': intf2_indice + 'port': intf2_indice, + 'kvm_support': True } log_file = "/tmp/arptest.SrcOutRangeNoReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) ptf_runner(ptfhost, 'ptftests', "arptest.SrcOutRangeNoReply", '/root/ptftests', @@ -87,7 +90,8 @@ def test_arp_no_reply_src_out_range(common_setup_teardown, intfs_for_test, enum_ asichost = duthost.asic_instance(enum_frontend_asic_index) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } # Check DUT won't reply ARP and install ARP entry when src address is not in interface subnet range @@ -108,7 +112,8 @@ def test_arp_garp_no_update(common_setup_teardown, intfs_for_test, enum_frontend asichost = duthost.asic_instance(enum_frontend_asic_index) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } # Test Gratuitous ARP behavior, no Gratuitous ARP installed when arp was not resolved before diff --git a/tests/arp/test_stress_arp.py b/tests/arp/test_stress_arp.py index 9461448a4ce..c6dcd250261 100644 --- a/tests/arp/test_stress_arp.py +++ b/tests/arp/test_stress_arp.py @@ -9,7 +9,6 @@ in6_getnsma, inet_pton, inet_ntop, socket from ipaddress import ip_address, ip_network from tests.common.utilities import wait_until, increment_ipv6_addr -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.errors import RunAnsibleModuleFail ARP_BASE_IP = "172.16.0.1/16" @@ -86,7 +85,7 @@ def genrate_ipv4_ip(): def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, - ptfadapter, get_function_completeness_level, skip_traffic_test): # noqa F811 + ptfadapter, get_function_completeness_level): """ Send gratuitous ARP (GARP) packet sfrom the PTF to the DUT @@ 
-95,7 +94,7 @@ def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, normalized_level = get_function_completeness_level if normalized_level is None: normalized_level = "debug" - + asic_type = duthost.facts['asic_type'] ipv4_avaliable = get_crm_resources(duthost, "ipv4_neighbor", "available") fdb_avaliable = get_crm_resources(duthost, "fdb_entry", "available") pytest_assert(ipv4_avaliable > 0 and fdb_avaliable > 0, "Entries have been filled") @@ -113,7 +112,7 @@ def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, loop_times -= 1 try: add_arp(ptf_intf_ipv4_hosts, intf1_index, ptfadapter) - if not skip_traffic_test: + if asic_type != 'vs': # There is a certain probability of hash collision, we set the percentage as 1% here # The entries we add will not exceed 10000, so the number we tolerate is 100 logger.debug("Expected route number: {}, real route number {}" @@ -175,7 +174,7 @@ def add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable): def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, - ptfadapter, get_function_completeness_level, proxy_arp_enabled, skip_traffic_test): # noqa F811 + ptfadapter, get_function_completeness_level, proxy_arp_enabled): _, _, ptf_intf_ipv6_addr, _, ptf_intf_index = ip_and_intf_info ptf_intf_ipv6_addr = increment_ipv6_addr(ptf_intf_ipv6_addr) pytest_require(proxy_arp_enabled, 'Proxy ARP not enabled for all VLANs') @@ -184,7 +183,7 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, normalized_level = get_function_completeness_level if normalized_level is None: normalized_level = "debug" - + asic_type = duthost.facts['asic_type'] loop_times = LOOP_TIMES_LEVEL_MAP[normalized_level] ipv6_avaliable = get_crm_resources(duthost, "ipv6_neighbor", "available") fdb_avaliable = get_crm_resources(duthost, "fdb_entry", "available") @@ -196,7 +195,7 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, loop_times -= 1 try: 
add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable) - if not skip_traffic_test: + if asic_type != 'vs': # There is a certain probability of hash collision, we set the percentage as 1% here # The entries we add will not exceed 10000, so the number we tolerate is 100 logger.debug("Expected route number: {}, real route number {}" diff --git a/tests/arp/test_unknown_mac.py b/tests/arp/test_unknown_mac.py index 109addaa32a..99d53249265 100644 --- a/tests/arp/test_unknown_mac.py +++ b/tests/arp/test_unknown_mac.py @@ -14,7 +14,6 @@ from tests.common import constants from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert, pytest_require from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -260,7 +259,7 @@ class TrafficSendVerify(object): """ Send traffic and check interface counters and ptf ports """ @initClassVars def __init__(self, duthost, ptfadapter, dst_ip, ptf_dst_port, ptf_vlan_ports, - intfs, ptf_ports, arp_entry, dscp, skip_traffic_test): # noqa F811 + intfs, ptf_ports, arp_entry, dscp): # noqa F811 """ Args: duthost(AnsibleHost) : dut instance @@ -278,7 +277,6 @@ def __init__(self, duthost, ptfadapter, dst_ip, ptf_dst_port, ptf_vlan_ports, self.pkt_map = dict() self.pre_rx_drops = dict() self.dut_mac = duthost.facts['router_mac'] - self.skip_traffic_test = skip_traffic_test def _constructPacket(self): """ @@ -361,7 +359,8 @@ def runTest(self): self._constructPacket() logger.info("Clear all counters before test run") self.duthost.command("sonic-clear counters") - if not self.skip_traffic_test: + asic_type = self.duthost.facts["asic_type"] + if asic_type != "vs": time.sleep(1) 
logger.info("Collect drop counters before test run") self._verifyIntfCounters(pretest=True) @@ -378,7 +377,7 @@ def runTest(self): class TestUnknownMac(object): @pytest.mark.parametrize("dscp", ["dscp-3", "dscp-4", "dscp-8"]) - def test_unknown_mac(self, unknownMacSetup, dscp, duthosts, rand_one_dut_hostname, ptfadapter, skip_traffic_test): # noqa F811 + def test_unknown_mac(self, unknownMacSetup, dscp, duthosts, rand_one_dut_hostname, ptfadapter): """ Verify unknown mac behavior for lossless and lossy priority @@ -404,7 +403,6 @@ def test_unknown_mac(self, unknownMacSetup, dscp, duthosts, rand_one_dut_hostnam self.ptf_vlan_ports = setup['ptf_vlan_ports'] self.intfs = setup['intfs'] self.ptf_ports = setup['ptf_ports'] - self.skip_traffic_test = skip_traffic_test self.validateEntries() self.run() @@ -422,5 +420,5 @@ def run(self): thandle = TrafficSendVerify(self.duthost, self.ptfadapter, self.dst_ip, self.ptf_dst_port, self.ptf_vlan_ports, self.intfs, self.ptf_ports, - self.arp_entry, self.dscp, self.skip_traffic_test) + self.arp_entry, self.dscp) thandle.runTest() diff --git a/tests/arp/test_wr_arp.py b/tests/arp/test_wr_arp.py index 92dd027220d..edc163d8ad3 100644 --- a/tests/arp/test_wr_arp.py +++ b/tests/arp/test_wr_arp.py @@ -4,7 +4,6 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.utilities import wait_until @@ -68,7 +67,7 @@ def warmRebootSystemFlag(duthost): duthost.shell(cmd='sonic-db-cli STATE_DB hset "WARM_RESTART_ENABLE_TABLE|system" enable false') -def test_wr_arp(request, duthost, ptfhost, creds, 
skip_traffic_test): # noqa F811 +def test_wr_arp(request, duthost, ptfhost, creds): ''' Control Plane Assistant test for Warm-Reboot. @@ -85,10 +84,10 @@ def test_wr_arp(request, duthost, ptfhost, creds, skip_traffic_test): # noqa F Returns: None ''' - testWrArp(request, duthost, ptfhost, creds, skip_traffic_test) + testWrArp(request, duthost, ptfhost, creds) -def test_wr_arp_advance(request, duthost, ptfhost, creds, skip_traffic_test): # noqa F811 +def test_wr_arp_advance(request, duthost, ptfhost, creds): testDuration = request.config.getoption('--test_duration', default=DEFAULT_TEST_DURATION) ptfIp = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] dutIp = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] @@ -96,8 +95,7 @@ def test_wr_arp_advance(request, duthost, ptfhost, creds, skip_traffic_test): logger.info('Warm-Reboot Control-Plane assist feature') sonicadmin_alt_password = duthost.host.options['variable_manager'].\ _hostvars[duthost.hostname]['sonic_default_passwords'] - if skip_traffic_test is True: - return + ptf_runner( ptfhost, 'ptftests', diff --git a/tests/bfd/bfd_base.py b/tests/bfd/bfd_base.py index 08b8a39b9ff..e801cbfa870 100644 --- a/tests/bfd/bfd_base.py +++ b/tests/bfd/bfd_base.py @@ -3,10 +3,10 @@ import pytest -from tests.bfd.bfd_helpers import modify_all_bfd_sessions, find_bfd_peers_with_given_state -from tests.common import config_reload -from tests.common.platform.processes_utils import wait_critical_processes -from tests.common.utilities import wait_until +from tests.bfd.bfd_helpers import prepare_bfd_state, selecting_route_to_delete, \ + extract_ip_addresses_for_backend_portchannels, get_dut_asic_static_routes, extract_backend_portchannels, \ + get_src_dst_asic_next_hops +from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor logger = logging.getLogger(__name__) @@ -25,55 +25,21 @@ def modify_bfd_sessions(self, duthosts): c. 
If expected state is "Up" and no. of down peers is 0, output is True d. If expected state is "Down" and no. of up peers is 0, output is True """ + duts = duthosts.frontend_nodes try: - duts = duthosts.frontend_nodes - for dut in duts: - modify_all_bfd_sessions(dut, "false") - for dut in duts: - # config reload - config_reload(dut) - wait_critical_processes(dut) - # Verification that all BFD sessions are deleted - for dut in duts: - asics = [ - asic.split("asic")[1] for asic in dut.get_asic_namespace_list() - ] - for asic in asics: - assert wait_until( - 600, - 10, - 0, - lambda: find_bfd_peers_with_given_state( - dut, asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for dut in duts: + executor.submit(prepare_bfd_state, dut, "false", "No BFD sessions found") yield finally: - duts = duthosts.frontend_nodes - for dut in duts: - modify_all_bfd_sessions(dut, "true") - for dut in duts: - config_reload(dut) - wait_critical_processes(dut) - # Verification that all BFD sessions are added - for dut in duts: - asics = [ - asic.split("asic")[1] for asic in dut.get_asic_namespace_list() - ] - for asic in asics: - assert wait_until( - 600, - 10, - 0, - lambda: find_bfd_peers_with_given_state( - dut, asic, "Up" - ), - ) - - @pytest.fixture(scope="class", name="select_src_dst_dut_and_asic", params=(["multi_dut"])) - def select_src_dst_dut_and_asic(self, duthosts, request, tbinfo): + with SafeThreadPoolExecutor(max_workers=8) as executor: + for dut in duts: + executor.submit(prepare_bfd_state, dut, "true", "Up") + + @pytest.fixture(scope="class", name="select_src_dst_dut_and_asic") + def select_src_dst_dut_and_asic(self, duthosts, tbinfo): if (len(duthosts.frontend_nodes)) < 2: pytest.skip("Don't have 2 frontend nodes - so can't run multi_dut tests") # Random selection of dut indices based on number of front end nodes @@ -131,6 +97,80 @@ def get_src_dst_asic_and_duts(self, duthosts, select_src_dst_dut_and_asic): 
rtn_dict.update(select_src_dst_dut_and_asic) yield rtn_dict + @pytest.fixture(scope="class", params=["ipv4", "ipv6"]) + def select_src_dst_dut_with_asic(self, request, get_src_dst_asic_and_duts): + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + + version = request.param + logger.info("Version: %s", version) + + # Random selection of dut & asic. + src_asic = get_src_dst_asic_and_duts["src_asic"] + dst_asic = get_src_dst_asic_and_duts["dst_asic"] + src_dut = get_src_dst_asic_and_duts["src_dut"] + dst_dut = get_src_dst_asic_and_duts["dst_dut"] + + logger.info("Source Asic: %s", src_asic) + logger.info("Destination Asic: %s", dst_asic) + logger.info("Source dut: %s", src_dut) + logger.info("Destination dut: %s", dst_dut) + + request.config.src_asic = src_asic + request.config.dst_asic = dst_asic + request.config.src_dut = src_dut + request.config.dst_dut = dst_dut + + src_asic_routes = get_dut_asic_static_routes(version, src_dut) + dst_asic_routes = get_dut_asic_static_routes(version, dst_dut) + + # Extracting nexthops + dst_dut_nexthops = ( + extract_ip_addresses_for_backend_portchannels( + src_dut, src_asic, version + ) + ) + logger.info("Destination nexthops, {}".format(dst_dut_nexthops)) + assert len(dst_dut_nexthops) != 0, "Destination Nexthops are empty" + + src_dut_nexthops = ( + extract_ip_addresses_for_backend_portchannels( + dst_dut, dst_asic, version + ) + ) + logger.info("Source nexthops, {}".format(src_dut_nexthops)) + assert len(src_dut_nexthops) != 0, "Source Nexthops are empty" + + # Picking a static route to delete corresponding BFD session + src_prefix = selecting_route_to_delete( + src_asic_routes, src_dut_nexthops.values() + ) + logger.info("Source prefix: %s", src_prefix) + request.config.src_prefix = src_prefix + assert src_prefix is not None and src_prefix != "", "Source prefix not found" + + dst_prefix = selecting_route_to_delete( + dst_asic_routes, 
dst_dut_nexthops.values() + ) + logger.info("Destination prefix: %s", dst_prefix) + request.config.dst_prefix = dst_prefix + assert ( + dst_prefix is not None and dst_prefix != "" + ), "Destination prefix not found" + + yield { + "src_asic": src_asic, + "dst_asic": dst_asic, + "src_dut": src_dut, + "dst_dut": dst_dut, + "src_dut_nexthops": src_dut_nexthops, + "dst_dut_nexthops": dst_dut_nexthops, + "src_prefix": src_prefix, + "dst_prefix": dst_prefix, + "version": version, + } + @pytest.fixture(scope="class") def select_dut_and_src_dst_asic_index(self, duthosts): if not duthosts.frontend_nodes: @@ -179,3 +219,44 @@ def get_src_dst_asic(self, request, duthosts, select_dut_and_src_dst_asic_index) rtn_dict.update(select_dut_and_src_dst_asic_index) yield rtn_dict + + @pytest.fixture(scope="class", params=["ipv4", "ipv6"]) + def prepare_traffic_test_variables(self, get_src_dst_asic, request): + version = request.param + logger.info("Version: %s", version) + + dut = get_src_dst_asic["dut"] + src_asic = get_src_dst_asic["src_asic"] + src_asic_index = get_src_dst_asic["src_asic_index"] + dst_asic = get_src_dst_asic["dst_asic"] + dst_asic_index = get_src_dst_asic["dst_asic_index"] + logger.info( + "DUT: {}, src_asic_index: {}, dst_asic_index: {}".format(dut.hostname, src_asic_index, dst_asic_index) + ) + + backend_port_channels = extract_backend_portchannels(dut) + src_asic_next_hops, dst_asic_next_hops, src_prefix, dst_prefix = get_src_dst_asic_next_hops( + version, + dut, + src_asic, + dst_asic, + request, + backend_port_channels, + ) + + src_asic_router_mac = src_asic.get_router_mac() + + yield { + "dut": dut, + "src_asic": src_asic, + "src_asic_index": src_asic_index, + "dst_asic": dst_asic, + "dst_asic_index": dst_asic_index, + "src_asic_next_hops": src_asic_next_hops, + "dst_asic_next_hops": dst_asic_next_hops, + "src_prefix": src_prefix, + "dst_prefix": dst_prefix, + "src_asic_router_mac": src_asic_router_mac, + "backend_port_channels": backend_port_channels, + 
"version": version, + } diff --git a/tests/bfd/bfd_helpers.py b/tests/bfd/bfd_helpers.py index a867614baf0..1744545f38c 100644 --- a/tests/bfd/bfd_helpers.py +++ b/tests/bfd/bfd_helpers.py @@ -7,12 +7,57 @@ import pytest from ptf import testutils +from tests.common import config_reload from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor from tests.common.utilities import wait_until logger = logging.getLogger(__name__) +def prepare_bfd_state(dut, flag, expected_bfd_state): + modify_all_bfd_sessions(dut, flag) + config_reload(dut, safe_reload=True) + # Verification that all BFD sessions are deleted + asics = [asic.split("asic")[1] for asic in dut.get_asic_namespace_list()] + for asic in asics: + assert wait_until( + 600, + 10, + 0, + lambda: find_bfd_peers_with_given_state(dut, asic, expected_bfd_state), + ) + + +def verify_bfd_only(dut, nexthops, asic, expected_bfd_state): + logger.info("BFD verifications") + assert wait_until( + 300, + 10, + 0, + lambda: verify_bfd_state(dut, nexthops.values(), asic, expected_bfd_state), + ) + + +def create_and_verify_bfd_state(asic, prefix, dut, dut_nexthops): + logger.info("BFD addition on dut") + add_bfd(asic.asic_index, prefix, dut) + verify_bfd_only(dut, dut_nexthops, asic, "Up") + + +def verify_bfd_and_static_route(dut, dut_nexthops, asic, expected_bfd_state, request, prefix, + expected_prefix_state, version): + logger.info("BFD & Static route verifications") + verify_bfd_only(dut, dut_nexthops, asic, expected_bfd_state) + verify_static_route( + request, + asic, + prefix, + dut, + expected_prefix_state, + version, + ) + + def get_dut_asic_static_routes(version, dut): if version == "ipv4": static_route_command = "show ip route static" @@ -33,75 +78,6 @@ def get_dut_asic_static_routes(version, dut): return asic_static_routes -def select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version -): - logger.debug("Selecting source and destination DUTs with ASICs...") - # Random selection of dut 
& asic. - src_asic = get_src_dst_asic_and_duts["src_asic"] - dst_asic = get_src_dst_asic_and_duts["dst_asic"] - src_dut = get_src_dst_asic_and_duts["src_dut"] - dst_dut = get_src_dst_asic_and_duts["dst_dut"] - - logger.info("Source Asic: %s", src_asic) - logger.info("Destination Asic: %s", dst_asic) - logger.info("Source dut: %s", src_dut) - logger.info("Destination dut: %s", dst_dut) - - request.config.src_asic = src_asic - request.config.dst_asic = dst_asic - request.config.src_dut = src_dut - request.config.dst_dut = dst_dut - - src_asic_routes = get_dut_asic_static_routes(version, src_dut) - dst_asic_routes = get_dut_asic_static_routes(version, dst_dut) - - # Extracting nexthops - dst_dut_nexthops = ( - extract_ip_addresses_for_backend_portchannels( - src_dut, src_asic, version - ) - ) - logger.info("Destination nexthops, {}".format(dst_dut_nexthops)) - assert len(dst_dut_nexthops) != 0, "Destination Nexthops are empty" - - src_dut_nexthops = ( - extract_ip_addresses_for_backend_portchannels( - dst_dut, dst_asic, version - ) - ) - logger.info("Source nexthops, {}".format(src_dut_nexthops)) - assert len(src_dut_nexthops) != 0, "Source Nexthops are empty" - - # Picking a static route to delete correspinding BFD session - src_prefix = selecting_route_to_delete( - src_asic_routes, src_dut_nexthops.values() - ) - logger.info("Source prefix: %s", src_prefix) - request.config.src_prefix = src_prefix - assert src_prefix is not None and src_prefix != "", "Source prefix not found" - - dst_prefix = selecting_route_to_delete( - dst_asic_routes, dst_dut_nexthops.values() - ) - logger.info("Destination prefix: %s", dst_prefix) - request.config.dst_prefix = dst_prefix - assert ( - dst_prefix is not None and dst_prefix != "" - ), "Destination prefix not found" - - return ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) - - def verify_bfd_state(dut, dut_nexthops, dut_asic, expected_bfd_state): 
logger.info("Verifying BFD state on {} ".format(dut)) for nexthop in dut_nexthops: @@ -491,56 +467,6 @@ def ensure_interfaces_are_up(dut, asic, interfaces): toggle_interfaces_in_parallel(cmds, dut, asic, interfaces, "up") -def prepare_traffic_test_variables(get_src_dst_asic, request, version): - dut = get_src_dst_asic["dut"] - src_asic = get_src_dst_asic["src_asic"] - src_asic_index = get_src_dst_asic["src_asic_index"] - dst_asic = get_src_dst_asic["dst_asic"] - dst_asic_index = get_src_dst_asic["dst_asic_index"] - logger.info( - "DUT: {}, src_asic_index: {}, dst_asic_index: {}".format(dut.hostname, src_asic_index, dst_asic_index) - ) - - backend_port_channels = extract_backend_portchannels(dut) - src_asic_next_hops, dst_asic_next_hops, src_prefix, dst_prefix = get_src_dst_asic_next_hops( - version, - dut, - src_asic, - dst_asic, - request, - backend_port_channels, - ) - - add_bfd(src_asic_index, src_prefix, dut) - add_bfd(dst_asic_index, dst_prefix, dut) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, src_asic_next_hops.values(), src_asic, "Up"), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, dst_asic_next_hops.values(), dst_asic, "Up"), - ) - - src_asic_router_mac = src_asic.get_router_mac() - - return ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) - - def clear_bfd_configs(dut, asic_index, prefix): logger.info("Clearing BFD configs on {}".format(dut)) command = ( @@ -794,27 +720,12 @@ def verify_given_bfd_state(asic_next_hops, port_channel, asic_index, dut, expect return current_state == expected_state -def wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel, - src_asic_index, - dst_asic_next_hops, - dst_port_channel, - dst_asic_index, - dut, -): - assert wait_until( - 180, - 10, - 0, - lambda: verify_given_bfd_state(src_asic_next_hops, dst_port_channel, src_asic_index, dut, 
"Down"), - ) - +def wait_until_given_bfd_down(next_hops, port_channel, asic_index, dut): assert wait_until( - 180, + 300, 10, 0, - lambda: verify_given_bfd_state(dst_asic_next_hops, src_port_channel, dst_asic_index, dut, "Down"), + lambda: verify_given_bfd_state(next_hops, port_channel, asic_index, dut, "Down"), ) @@ -859,19 +770,3 @@ def assert_traffic_switching( dst_asic_index, dut.hostname, ) - - -def wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic): - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, src_asic_next_hops.values(), src_asic, "Up"), - ) - - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, dst_asic_next_hops.values(), dst_asic, "Up"), - ) diff --git a/tests/bfd/test_bfd_static_route.py b/tests/bfd/test_bfd_static_route.py index 28c56228e0a..85e2e0bc607 100644 --- a/tests/bfd/test_bfd_static_route.py +++ b/tests/bfd/test_bfd_static_route.py @@ -4,12 +4,12 @@ import pytest from tests.bfd.bfd_base import BfdBase -from tests.bfd.bfd_helpers import verify_static_route, select_src_dst_dut_with_asic, check_bgp_status, \ - add_bfd, verify_bfd_state, delete_bfd, extract_backend_portchannels, batch_control_interface_state +from tests.bfd.bfd_helpers import check_bgp_status, add_bfd, delete_bfd, extract_backend_portchannels, \ + batch_control_interface_state, create_and_verify_bfd_state, verify_bfd_and_static_route, verify_bfd_only from tests.common.config_reload import config_reload +from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor from tests.common.platform.processes_utils import wait_critical_processes from tests.common.reboot import reboot -from tests.common.utilities import wait_until pytestmark = [ pytest.mark.topology("t2"), @@ -28,91 +28,39 @@ class TestBfdStaticRoute(BfdBase): 'diagnose': 200, } - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_with_lc_reboot( - self, - localhost, - duthost, - request, - tbinfo, - 
get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + def test_bfd_with_lc_reboot(self, localhost, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs 
src_dut.shell("sudo config save -y") # Perform a cold reboot on source dut - reboot(src_dut, localhost) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + reboot(src_dut, localhost, safe_reboot=True) check_bgp_status(request) - # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -122,41 +70,16 @@ def test_bfd_with_lc_reboot( src_dut.shell("sudo config save -y") # Config reload of Source dut - reboot(src_dut, localhost) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + reboot(src_dut, localhost, safe_reboot=True) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_static_route_deletion( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + def test_bfd_static_route_deletion(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com @@ -169,133 +92,64 @@ def test_bfd_static_route_deletion( 4. Delete BFD on Destination dut. 5. Verify that on Destination dut BFD gets cleaned up and static route will be added back. """ - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) logger.info("BFD deletion on source dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "No BFD sessions found" if 
target == "src" else "Down", + request, + prefix, + "Route Addition" if target == "src" else "Route Removal", + version, + ) logger.info("BFD deletion on destination dut") delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "No BFD sessions found", + request, + prefix, + "Route Addition", + version, + ) logger.info("BFD deletion did not influence static routes and test completed successfully") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_flap( self, - duthost, request, - duthosts, - tbinfo, - get_src_dst_asic_and_duts, + select_src_dst_dut_with_asic, bfd_cleanup_db, get_function_completeness_level, - version, ): """ Author: Harsha Golla @@ -311,45 +165,23 @@ def test_bfd_flap( 6. Verify that on destination dut BFD is up and static route is added back. 7. Repeat above steps 100 times. 
""" - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) completeness_level = get_function_completeness_level if completeness_level is None: @@ -367,78 +199,36 @@ def test_bfd_flap( time.sleep(5) logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, 
dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, - src_dut_nexthops.values(), - src_asic, - "No BFD sessions found", - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "No BFD sessions found" if target == "src" else "Down", + request, + prefix, + "Route Addition" if target == "src" else "Route Removal", + version, + ) logger.info("BFD addition on source dut") add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) # Check if both iterations were successful and increment the counter successful_iterations += 1 @@ -455,18 +245,14 @@ def test_bfd_flap( logger.info("test_bfd_flap completed") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_with_rp_reboot( self, localhost, - duthost, request, duthosts, - tbinfo, - 
get_src_dst_asic_and_duts, enum_supervisor_dut_hostname, + select_src_dst_dut_with_asic, bfd_cleanup_db, - version, ): """ Author: Harsha Golla @@ -474,78 +260,40 @@ def test_bfd_with_rp_reboot( """ rp = duthosts[enum_supervisor_dut_hostname] - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) 
# Savings the configs src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Perform a cold reboot on source dut - reboot(rp, localhost) + # Perform a cold reboot on RP + reboot(rp, localhost, safe_reboot=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) - # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -555,89 +303,44 @@ def test_bfd_with_rp_reboot( src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Config reload of Source dut - reboot(rp, localhost) + # Perform a cold reboot on RP + reboot(rp, localhost, safe_reboot=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) - # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_remote_link_flap( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + def test_bfd_remote_link_flap(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ request.config.interface_shutdown = True - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Extract portchannel interfaces on dst list_of_portchannels_on_dst = src_dut_nexthops.keys() @@ -648,121 +351,56 @@ def test_bfd_remote_link_flap( batch_control_interface_state(dst_dut, dst_asic, list_of_portchannels_on_dst, "shutdown") # Verification of BFD session state on src dut - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) - - # Verify that corresponding static route has been removed on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, + verify_bfd_and_static_route( + src_dut, + src_dut_nexthops, src_asic, + "Down", + request, src_prefix, - src_dut, "Route Removal", version, ) batch_control_interface_state(dst_dut, dst_asic, list_of_portchannels_on_dst, "startup") - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - - # Verify that corresponding static route has been added on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) - - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_lc_asic_shutdown( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) + + def test_bfd_lc_asic_shutdown(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ request.config.interface_shutdown = True - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Extract portchannel interfaces on src list_of_portchannels_on_src = dst_dut_nexthops.keys() @@ -772,138 +410,62 @@ def test_bfd_lc_asic_shutdown( # Shutdown PortChannels batch_control_interface_state(src_dut, src_asic, list_of_portchannels_on_src, "shutdown") - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) - - # Verify that corresponding static route has been removed on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Removal", - version, - ) + # Verify BFD and static routes + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Down", + request, + prefix, + "Route Removal", + version, + ) batch_control_interface_state(src_dut, src_asic, list_of_portchannels_on_src, "startup") - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - - # Verify that corresponding static route has been added on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) - - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_portchannel_member_flap( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + # Verify BFD and static routes. 
+ with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) + + def test_bfd_portchannel_member_flap(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ request.config.interface_shutdown = True - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Extract portchannel interfaces on src list_of_portchannels_on_src = dst_dut_nexthops.keys() @@ -922,167 +484,73 @@ def test_bfd_portchannel_member_flap( request.config.selected_portchannel_members = port_channel_members_on_src batch_control_interface_state(src_dut, src_asic, port_channel_members_on_src, "shutdown") - # Verification of BFD session state. 
- assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) - - # Verify that corresponding static route has been removed on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Removal", - version, - ) + # Verify BFD and static routes + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Down", + request, + prefix, + "Route Removal", + version, + ) # Bring up of PortChannel members batch_control_interface_state(src_dut, src_asic, port_channel_members_on_src, "startup") - # Verification of BFD session state. 
- assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - - # Verify that corresponding static route has been added on both duts - logger.info("Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) - - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_config_reload( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + # Verify BFD and static routes + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) + + def test_bfd_config_reload(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config save -y") # Config reload of Source dut - config_reload(src_dut) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + config_reload(src_dut, safe_reload=True) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -1092,43 +560,22 @@ def test_bfd_config_reload( src_dut.shell("sudo config save -y") # Config reload of Source dut - config_reload(src_dut) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + config_reload(src_dut, safe_reload=True) check_bgp_status(request) # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_with_rp_config_reload( self, - localhost, - duthost, request, duthosts, - tbinfo, - get_src_dst_asic_and_duts, + select_src_dst_dut_with_asic, enum_supervisor_dut_hostname, bfd_cleanup_db, - version, ): """ Author: Harsha Golla @@ -1136,78 +583,41 @@ def test_bfd_with_rp_config_reload( """ rp = duthosts[enum_supervisor_dut_hostname] - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - 
src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Perform a cold reboot on source dut - config_reload(rp) + # Config reload of RP + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + 
executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -1217,45 +627,28 @@ def test_bfd_with_rp_config_reload( src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Config reload of Source dut - config_reload(rp) + # Config reload of RP + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_with_bad_fc_asic( self, - localhost, - duthost, request, duthosts, - tbinfo, - get_src_dst_asic_and_duts, + select_src_dst_dut_with_asic, enum_supervisor_dut_hostname, bfd_cleanup_db, - version, ): """ Author: Harsha Golla @@ -1263,47 +656,22 @@ def test_bfd_with_bad_fc_asic( """ rp = duthosts[enum_supervisor_dut_hostname] - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config save -y") @@ -1316,53 +684,29 @@ def test_bfd_with_bad_fc_asic( asic_ids = [int(element.split("swss")[1]) for element in docker_output] # Shut down corresponding asic on supervisor to simulate bad asic - for asic_id in asic_ids: - rp.shell("systemctl stop swss@{}".format(asic_id)) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for asic_id in asic_ids: + executor.submit(rp.shell, "systemctl stop swss@{}".format(asic_id)) # Verify that BFD sessions are down - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + 
executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Down") # Config reload RP to bring up the swss containers - config_reload(rp) + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -1373,28 +717,16 @@ def test_bfd_with_bad_fc_asic( dst_dut.shell("sudo config save -y") # Config reload RP - config_reload(rp) + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") diff --git a/tests/bfd/test_bfd_traffic.py b/tests/bfd/test_bfd_traffic.py index a49b09193ce..fd3aa77d614 100644 --- a/tests/bfd/test_bfd_traffic.py +++ b/tests/bfd/test_bfd_traffic.py @@ -4,8 +4,9 @@ from tests.bfd.bfd_base import BfdBase from tests.bfd.bfd_helpers import get_ptf_src_port, get_backend_interface_in_use_by_counter, \ - prepare_traffic_test_variables, get_random_bgp_neighbor_ip_of_asic, toggle_port_channel_or_member, \ - get_port_channel_by_member, wait_until_bfd_up, wait_until_given_bfd_down, assert_traffic_switching + get_random_bgp_neighbor_ip_of_asic, toggle_port_channel_or_member, get_port_channel_by_member, \ + wait_until_given_bfd_down, assert_traffic_switching, create_and_verify_bfd_state, verify_bfd_only +from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor pytestmark = [ pytest.mark.topology("t2"), @@ -18,27 +19,34 @@ class TestBfdTraffic(BfdBase): PACKET_COUNT = 10000 - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_traffic_remote_port_channel_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + 
src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -78,15 +86,12 @@ def test_bfd_traffic_remote_port_channel_shutdown( src_bp_iface_before_shutdown, ) - wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, - dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -121,29 +126,38 @@ def test_bfd_traffic_remote_port_channel_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, 
dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_traffic_local_port_channel_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -183,15 +197,12 @@ def test_bfd_traffic_local_port_channel_shutdown( dst_bp_iface_before_shutdown, ) - 
wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, - dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -226,29 +237,38 @@ def test_bfd_traffic_local_port_channel_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_traffic_remote_port_channel_member_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = 
prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -288,15 +308,12 @@ def test_bfd_traffic_remote_port_channel_member_shutdown( if not src_port_channel_before_shutdown or not dst_port_channel_before_shutdown: pytest.fail("No port channel found with interface in use") - wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, - dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -331,29 +348,38 @@ def test_bfd_traffic_remote_port_channel_member_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def 
test_bfd_traffic_local_port_channel_member_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -393,15 +419,12 @@ def test_bfd_traffic_local_port_channel_member_shutdown( if not src_port_channel_before_shutdown or not dst_port_channel_before_shutdown: pytest.fail("No port channel found with interface in use") - wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, 
- dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -436,4 +459,6 @@ def test_bfd_traffic_local_port_channel_member_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") diff --git a/tests/bgp/test_bgp_speaker.py b/tests/bgp/test_bgp_speaker.py index 6082f19fc90..28c6e26b1db 100644 --- a/tests/bgp/test_bgp_speaker.py +++ b/tests/bgp/test_bgp_speaker.py @@ -334,7 +334,8 @@ def bgp_speaker_announce_routes_common(common_setup_teardown, tbinfo, duthost, "ipv6": ipv6, "testbed_mtu": mtu, "asic_type": asic_type, - "test_balancing": False}, + "test_balancing": False, + "kvm_support": True}, log_file="/tmp/bgp_speaker_test.FibTest.log", socket_recv_size=16384, is_python3=True) diff --git a/tests/bgp/test_reliable_tsa.py b/tests/bgp/test_reliable_tsa.py index e956d6ef26a..928e9590d97 100644 --- a/tests/bgp/test_reliable_tsa.py +++ b/tests/bgp/test_reliable_tsa.py @@ -850,7 +850,7 @@ def test_sup_tsa_act_with_sup_reboot(duthosts, localhost, enum_supervisor_dut_ho logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in the same 
maintenance state like before supervisor reboot @@ -1043,7 +1043,7 @@ def test_dut_tsa_act_with_reboot_when_sup_dut_on_tsb_init(duthosts, localhost, e logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify startup_tsa_tsb service is not started and in exited due to manual TSA pytest_assert(wait_until(tsa_tsb_timer[linecard], 20, 0, get_tsa_tsb_service_status, linecard, 'exited'), @@ -1355,7 +1355,7 @@ def test_sup_tsa_when_startup_tsa_tsb_service_running(duthosts, localhost, enum_ logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify startup_tsa_tsb service is started and running pytest_assert(wait_until(tsa_tsb_timer[linecard], 20, 0, get_tsa_tsb_service_status, linecard, 'running'), @@ -1464,7 +1464,7 @@ def test_sup_tsb_when_startup_tsa_tsb_service_running(duthosts, localhost, enum_ logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify startup_tsa_tsb service is started and running pytest_assert(wait_until(tsa_tsb_timer[linecard], 20, 0, get_tsa_tsb_service_status, linecard, 'running'), diff --git a/tests/bgp/test_seq_idf_isolation.py b/tests/bgp/test_seq_idf_isolation.py 
index bdaf5dac62f..9d236b0ecb2 100644 --- a/tests/bgp/test_seq_idf_isolation.py +++ b/tests/bgp/test_seq_idf_isolation.py @@ -210,7 +210,7 @@ def test_idf_isolation_no_export_with_config_reload(rand_one_downlink_duthost, # Issue command to isolate with no export community on DUT duthost.shell("sudo idf_isolation isolated_no_export") duthost.shell('sudo config save -y') - config_reload(duthost, safe_reload=True, check_intf_up_ports=True) + config_reload(duthost, safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) # Verify DUT is in isolated-no-export state. pytest_assert(IDF_ISOLATED_NO_EXPORT == get_idf_isolation_state(duthost), @@ -235,7 +235,7 @@ def test_idf_isolation_no_export_with_config_reload(rand_one_downlink_duthost, """ duthost.shell("sudo idf_isolation unisolated") duthost.shell('sudo config save -y') - config_reload(duthost, safe_reload=True, check_intf_up_ports=True) + config_reload(duthost, safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) pytest_assert(IDF_UNISOLATED == get_idf_isolation_state(duthost), "DUT is not isolated_no_export state") @@ -276,7 +276,7 @@ def test_idf_isolation_withdraw_all_with_config_reload(duthosts, rand_one_downli # Issue command to isolate with no export community on DUT duthost.shell("sudo idf_isolation isolated_withdraw_all") duthost.shell('sudo config save -y') - config_reload(duthost, safe_reload=True, check_intf_up_ports=True) + config_reload(duthost, safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) # Verify DUT is in isolated-withdraw-all state. 
pytest_assert(IDF_ISOLATED_WITHDRAW_ALL == get_idf_isolation_state(duthost), diff --git a/tests/bgp/test_startup_tsa_tsb_service.py b/tests/bgp/test_startup_tsa_tsb_service.py index 2b3e779b328..4170fdb766a 100644 --- a/tests/bgp/test_startup_tsa_tsb_service.py +++ b/tests/bgp/test_startup_tsa_tsb_service.py @@ -19,7 +19,7 @@ logger = logging.getLogger(__name__) - +KERNEL_PANIC_REBOOT_CAUSE = "Kernel Panic" COLD_REBOOT_CAUSE = 'cold' UNKNOWN_REBOOT_CAUSE = "Unknown" SUP_REBOOT_CAUSE = 'Reboot from Supervisor' @@ -209,7 +209,7 @@ def test_tsa_tsb_service_with_dut_cold_reboot(duthosts, localhost, enum_rand_one logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. @@ -325,7 +325,7 @@ def test_tsa_tsb_service_with_dut_abnormal_reboot(duthosts, localhost, enum_rand service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() logger.info("Time difference between dut up-time & tsa_tsb_service up-time is {}".format(int(time_diff))) - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Make sure BGP containers are running properly before verifying @@ -384,8 +384,17 @@ def test_tsa_tsb_service_with_dut_abnormal_reboot(duthosts, localhost, enum_rand # Make sure the dut's reboot cause is as expected logger.info("Check reboot cause of the dut") reboot_cause = get_reboot_cause(duthost) - pytest_assert(reboot_cause == UNKNOWN_REBOOT_CAUSE, - "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE)) + out = duthost.command('show kdump config') + if "Enabled" not in 
out["stdout"]: + pytest_assert( + reboot_cause == UNKNOWN_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE) + ) + else: + pytest_assert( + reboot_cause == KERNEL_PANIC_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, KERNEL_PANIC_REBOOT_CAUSE) + ) @pytest.mark.disable_loganalyzer @@ -442,7 +451,7 @@ def test_tsa_tsb_service_with_supervisor_cold_reboot(duthosts, localhost, enum_s logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. @@ -592,7 +601,7 @@ def test_tsa_tsb_service_with_supervisor_abnormal_reboot(duthosts, localhost, en logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Make sure BGP containers are running properly before verifying @@ -669,8 +678,17 @@ def test_tsa_tsb_service_with_supervisor_abnormal_reboot(duthosts, localhost, en # Make sure the Supervisor's reboot cause is as expected logger.info("Check reboot cause of the supervisor") reboot_cause = get_reboot_cause(suphost) - pytest_assert(reboot_cause == UNKNOWN_REBOOT_CAUSE, - "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE)) + out = suphost.command('show kdump config') + if "Enabled" not in out["stdout"]: + pytest_assert( + reboot_cause == UNKNOWN_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, 
UNKNOWN_REBOOT_CAUSE) + ) + else: + pytest_assert( + reboot_cause == KERNEL_PANIC_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, KERNEL_PANIC_REBOOT_CAUSE) + ) @pytest.mark.disable_loganalyzer @@ -718,7 +736,7 @@ def test_tsa_tsb_service_with_user_init_tsa(duthosts, localhost, enum_rand_one_p logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Ensure startup_tsa_tsb service is in exited state after dut reboot @@ -825,7 +843,7 @@ def test_user_init_tsa_while_service_run_on_dut(duthosts, localhost, enum_rand_o logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. @@ -941,7 +959,7 @@ def test_user_init_tsb_while_service_run_on_dut(duthosts, localhost, enum_rand_o logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. 
@@ -1059,7 +1077,7 @@ def test_user_init_tsb_on_sup_while_service_run_on_dut(duthosts, localhost, logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. @@ -1184,7 +1202,7 @@ def test_tsa_tsb_timer_efficiency(duthosts, localhost, enum_rand_one_per_hwsku_f logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") logging.info("Wait until all critical services are fully started") @@ -1309,7 +1327,7 @@ def test_tsa_tsb_service_with_tsa_on_sup(duthosts, localhost, logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. 
diff --git a/tests/common/arp_utils.py b/tests/common/arp_utils.py index 280819ae413..05186267f84 100644 --- a/tests/common/arp_utils.py +++ b/tests/common/arp_utils.py @@ -179,7 +179,7 @@ def tear_down(duthost, route, ptfIp, gwIp): teardownRouteToPtfhost(duthost, route, ptfIp, gwIp) -def testWrArp(request, duthost, ptfhost, creds, skip_traffic_test): +def testWrArp(request, duthost, ptfhost, creds): testDuration = request.config.getoption('--test_duration', default=DEFAULT_TEST_DURATION) ptfIp = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] dutIp = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] @@ -187,8 +187,7 @@ def testWrArp(request, duthost, ptfhost, creds, skip_traffic_test): logger.info('Warm-Reboot Control-Plane assist feature') sonicadmin_alt_password = duthost.host.options['variable_manager']. \ _hostvars[duthost.hostname]['sonic_default_passwords'] - if skip_traffic_test is True: - return + ptf_runner( ptfhost, 'ptftests', diff --git a/tests/common/config_reload.py b/tests/common/config_reload.py index ae43156288f..0b0fe7c2768 100644 --- a/tests/common/config_reload.py +++ b/tests/common/config_reload.py @@ -108,7 +108,8 @@ def config_reload_minigraph_with_rendered_golden_config_override( def pfcwd_feature_enabled(duthost): device_metadata = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']['DEVICE_METADATA'] pfc_status = device_metadata['localhost']["default_pfcwd_status"] - return pfc_status == 'enable' + switch_role = device_metadata['localhost'].get('type', '') + return pfc_status == 'enable' and switch_role not in ['MgmtToRRouter', 'BmcMgmtToRRouter'] @ignore_loganalyzer diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py index df1eaf1b7b7..6f541c201af 100644 --- a/tests/common/devices/multi_asic.py +++ b/tests/common/devices/multi_asic.py @@ -286,6 +286,12 @@ def get_linux_ip_cmd_for_namespace(self, 
cmd, namespace): ns_cmd = cmd.replace('ip', 'ip -n {}'.format(namespace)) return ns_cmd + def get_cli_cmd_for_namespace(self, cmd, namespace): + if not namespace: + return cmd + ns_cmd = cmd.replace('sonic-db-cli', 'sonic-db-cli -n {}'.format(namespace)) + return ns_cmd + @property def ttl_decr_value(self): """ @@ -520,9 +526,10 @@ def modify_syslog_rate_limit(self, feature, rl_option='disable'): cmds.append(cmd_reload.format(docker)) self.sonichost.shell_cmds(cmds=cmds) - def get_bgp_neighbors(self): + def get_bgp_neighbors(self, namespace=None): """ - Get a diction of BGP neighbor states + Get a diction of BGP neighbor states. If namespace is not None + will get a dictionary of BGP neighbor states for that namespace Args: None @@ -531,8 +538,9 @@ def get_bgp_neighbors(self): """ bgp_neigh = {} for asic in self.asics: - bgp_info = asic.bgp_facts() - bgp_neigh.update(bgp_info["ansible_facts"]["bgp_neighbors"]) + if namespace is None or asic.namespace == namespace: + bgp_info = asic.bgp_facts() + bgp_neigh.update(bgp_info["ansible_facts"]["bgp_neighbors"]) return bgp_neigh diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 892535c94fb..83a6d52ed31 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -1378,17 +1378,22 @@ def get_intf_link_local_ipv6_addr(self, intf): addr = self.shell(cmd)["stdout"] return addr - def get_bgp_neighbor_info(self, neighbor_ip): + def get_bgp_neighbor_info(self, neighbor_ip, asic_id=None): """ @summary: return bgp neighbor info @param neighbor_ip: bgp neighbor IP """ nbip = ipaddress.ip_address(neighbor_ip) + vtysh = "vtysh" + if asic_id is not None: + vtysh = "vtysh -n {}".format(asic_id) + if nbip.version == 4: - out = self.command("vtysh -c \"show ip bgp neighbor {} json\"".format(neighbor_ip)) + out = self.command("{} -c \"show ip bgp neighbor {} json\"".format(vtysh, neighbor_ip)) else: - out = self.command("vtysh -c \"show bgp ipv6 neighbor {} json\"".format(neighbor_ip)) + 
out = self.command("{} -c \"show bgp ipv6 neighbor {} json\"".format(vtysh, neighbor_ip)) + nbinfo = json.loads(re.sub(r"\\\"", '"', re.sub(r"\\n", "", out['stdout']))) logging.info("bgp neighbor {} info {}".format(neighbor_ip, nbinfo)) diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py index cb15313a9e7..febaa97d841 100644 --- a/tests/common/dualtor/data_plane_utils.py +++ b/tests/common/dualtor/data_plane_utils.py @@ -241,7 +241,7 @@ def save_pcap(request, pytestconfig): @pytest.fixture def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 """ Starts IO test from T1 router to server. As part of IO test the background thread sends and sniffs packets. @@ -263,8 +263,7 @@ def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, def t1_to_server_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.1, - stop_after=None, allow_disruption_before_traffic=False, - skip_traffic_test=False): + stop_after=None, allow_disruption_before_traffic=False): """ Helper method for `send_t1_to_server_with_action`. Starts sender and sniffer before performing the action on the tor host. 
@@ -299,9 +298,6 @@ def t1_to_server_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") - return return verify_and_report(tor_IO, verify, delay, allowed_disruption, allow_disruption_before_traffic) yield t1_to_server_io_test @@ -311,7 +307,7 @@ def t1_to_server_io_test(activehost, tor_vlan_port=None, @pytest.fixture def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 """ Starts IO test from server to T1 router. As part of IO test the background thread sends and sniffs packets. @@ -334,7 +330,7 @@ def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, def server_to_t1_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None, skip_traffic_test=False): + stop_after=None): """ Helper method for `send_server_to_t1_with_action`. Starts sender and sniffer before performing the action on the tor host. 
@@ -368,8 +364,9 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) @@ -380,13 +377,13 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, @pytest.fixture def send_soc_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 arp_setup(ptfhost) def soc_to_t1_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None, skip_traffic_test=False): + stop_after=None): tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, tor_vlan_port, send_interval, @@ -396,8 +393,9 @@ def soc_to_t1_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) @@ -408,13 +406,13 @@ def soc_to_t1_io_test(activehost, tor_vlan_port=None, @pytest.fixture def send_t1_to_soc_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 arp_setup(ptfhost) def t1_to_soc_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None, skip_traffic_test=False): + stop_after=None): tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, 
tor_vlan_port, send_interval, @@ -426,8 +424,9 @@ def t1_to_soc_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) @@ -454,13 +453,13 @@ def _select_test_mux_ports(cable_type, count): @pytest.fixture def send_server_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 arp_setup(ptfhost) def server_to_server_io_test(activehost, test_mux_ports, delay=0, allowed_disruption=0, action=None, - verify=False, send_interval=0.01, stop_after=None, skip_traffic_test=False): + verify=False, send_interval=0.01, stop_after=None): tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, test_mux_ports, send_interval, traffic_direction="server_to_server", stop_after=stop_after, @@ -471,8 +470,9 @@ def server_to_server_io_test(activehost, test_mux_ports, delay=0, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index bb028a1144c..27b4819d447 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -883,7 +883,7 @@ def mux_cable_server_ip(dut): def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, standby_tor_ip, selected_port, target_server_ip, target_server_ipv6, target_server_port, 
ptf_portchannel_indices, - completeness_level, check_ipv6=False, skip_traffic_test=False): + completeness_level, check_ipv6=False): """ Function for testing traffic distribution among all avtive T1. A test script will be running on ptf to generate traffic to standby interface, and the traffic will be forwarded to @@ -901,9 +901,6 @@ def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, Returns: None. """ - if skip_traffic_test is True: - logging.info("Skip checking tunnel balance due to traffic test was skipped") - return HASH_KEYS = ["src-port", "dst-port", "src-ip"] params = { "server_ip": target_server_ip, @@ -1158,7 +1155,7 @@ def check_nexthops_balance(rand_selected_dut, ptfadapter, dst_server_addr, pc)) -def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num, skip_traffic_test=False): +def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num): for pc, intfs in portchannel_ports.items(): count = 0 # Collect the packets count within a single portchannel @@ -1167,16 +1164,14 @@ def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_pa count = count + port_packet_count.get(uplink_int, 0) logging.info("Packets received on portchannel {}: {}".format(pc, count)) - if skip_traffic_test is True: - logging.info("Skip checking single uplink balance due to traffic test was skipped") - continue if count > 0 and count != expect_packet_num: pytest.fail("Packets not sent up single standby port {}".format(pc)) # verify nexthops are only sent to single active or standby mux def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, downlink_ints, skip_traffic_test=False): + tbinfo, downlink_ints): + asic_type = rand_selected_dut.facts["asic_type"] HASH_KEYS = ["src-port", "dst-port", "src-ip"] expect_packet_num = 1000 expect_packet_num_high = expect_packet_num * (0.90) @@ -1189,11 +1184,13 @@ def 
check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add ptf_t1_intf = random.choice(get_t1_ptf_ports(rand_selected_dut, tbinfo)) port_packet_count = dict() + + if asic_type == "vs": + logging.info("Skipping validation on VS platform") + return packets_to_send = generate_hashed_packet_to_server(ptfadapter, rand_selected_dut, HASH_KEYS, dst_server_addr, expect_packet_num) - if skip_traffic_test is True: - logging.info("Skip checking single downlink balance due to traffic test was skipped") - return + for send_packet, exp_pkt, exp_tunnel_pkt in packets_to_send: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), send_packet, count=1) # expect multi-mux nexthops to focus packets to one downlink @@ -1215,11 +1212,11 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add if len(downlink_ints) == 0: # All nexthops are now connected to standby mux, and the packets will be sent towards a single portchanel int # Check if uplink distribution is towards a single portchannel - check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num, skip_traffic_test) + check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num) def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, - pkt_num=100, drop=False, skip_traffic_test=False): + pkt_num=100, drop=False): """ @summary: Helper function for verifying upstream packets @param host: The dut host @@ -1272,9 +1269,7 @@ def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, logger.info("Verifying upstream traffic. 
packet number = {} interface = {} \ server_ip = {} expect_drop = {}".format(pkt_num, itfs, server_ip, drop)) - if skip_traffic_test is True: - logger.info("Skip verifying upstream traffic due to traffic test was skipped") - return + for i in range(0, pkt_num): ptfadapter.dataplane.flush() testutils.send(ptfadapter, tx_port, pkt, count=1) diff --git a/tests/common/dualtor/server_traffic_utils.py b/tests/common/dualtor/server_traffic_utils.py index 33e0293b3ce..a0be517354d 100644 --- a/tests/common/dualtor/server_traffic_utils.py +++ b/tests/common/dualtor/server_traffic_utils.py @@ -56,8 +56,7 @@ class ServerTrafficMonitor(object): VLAN_INTERFACE_TEMPLATE = "{external_port}.{vlan_id}" def __init__(self, duthost, ptfhost, vmhost, tbinfo, dut_iface, - conn_graph_facts, exp_pkt, existing=True, is_mocked=False, - skip_traffic_test=False): + conn_graph_facts, exp_pkt, existing=True, is_mocked=False): """ @summary: Initialize the monitor. @@ -82,7 +81,7 @@ def __init__(self, duthost, ptfhost, vmhost, tbinfo, dut_iface, self.conn_graph_facts = conn_graph_facts self.captured_packets = [] self.matched_packets = [] - self.skip_traffic_test = skip_traffic_test + if is_mocked: mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo) ptf_iface = "eth%s" % mg_facts['minigraph_ptf_indices'][self.dut_iface] @@ -128,8 +127,9 @@ def __exit__(self, exc_type, exc_value, traceback): logging.info("the expected packet:\n%s", str(self.exp_pkt)) self.matched_packets = [p for p in self.captured_packets if match_exp_pkt(self.exp_pkt, p)] logging.info("received %d matched packets", len(self.matched_packets)) - if self.skip_traffic_test is True: - logging.info("Skip matched_packets verify due to traffic test was skipped.") + asic_type = self.duthost.facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping matched_packets verify on VS platform.") return if self.matched_packets: logging.info( diff --git a/tests/common/dualtor/tunnel_traffic_utils.py 
b/tests/common/dualtor/tunnel_traffic_utils.py index 7e59f3c2d8e..059ca8ad703 100644 --- a/tests/common/dualtor/tunnel_traffic_utils.py +++ b/tests/common/dualtor/tunnel_traffic_utils.py @@ -250,7 +250,7 @@ def _disassemble_ip_tos(tos): return " ,".join(check_res) def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=None, - check_items=("ttl", "tos", "queue"), packet_count=10, skip_traffic_test=False): + check_items=("ttl", "tos", "queue"), packet_count=10): """ Init the tunnel traffic monitor. @@ -262,7 +262,6 @@ def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=Non self.listen_ports = sorted(self._get_t1_ptf_port_indexes(standby_tor, tbinfo)) self.ptfadapter = ptfadapter self.packet_count = packet_count - self.skip_traffic_test = skip_traffic_test standby_tor_cfg_facts = self.standby_tor.config_facts( host=self.standby_tor.hostname, source="running" @@ -293,15 +292,17 @@ def __enter__(self): def __exit__(self, *exc_info): if exc_info[0]: return - if self.skip_traffic_test is True: - logging.info("Skip tunnel traffic verify due to traffic test was skipped.") - return try: - port_index, rec_pkt = testutils.verify_packet_any_port( + result = testutils.verify_packet_any_port( ptfadapter, self.exp_pkt, ports=self.listen_ports ) + if isinstance(result, tuple): + port_index, rec_pkt = result + elif isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test.") + return except AssertionError as detail: logging.debug("Error occurred in polling for tunnel traffic", exc_info=True) if "Did not receive expected packet on any of ports" in str(detail): diff --git a/tests/common/errors.py b/tests/common/errors.py index 7f7d3f7cb98..d162ac26d5c 100644 --- a/tests/common/errors.py +++ b/tests/common/errors.py @@ -2,19 +2,31 @@ Customize exceptions """ from ansible.errors import AnsibleError -from ansible.plugins.loader import callback_loader class UnsupportedAnsibleModule(Exception): pass -def 
dump_ansible_results(results, stdout_callback='json'): - try: - cb = callback_loader.get(stdout_callback) - return cb._dump_results(results) if cb else results - except Exception: - return str(results) +def dump_ansible_results(results): + """Dump ansible results in a clean format. + Simple attributes are printed first, followed by the stdout and stderr.""" + simple_attrs = "" + stdout = "stdout =\n" + stderr = "stderr =\n" + for key in results: + if key in ['stdout', 'stderr']: + # Use stdout_lines and stderr_lines instead + continue + if '_lines' in key: + text = "\n".join(results[key]) + if key == 'stdout_lines': + stdout += text + else: + stderr += text + else: + simple_attrs += "{} = {}\n".format(key, results[key]) + return "{}{}{}".format(simple_attrs, stdout, stderr) class RunAnsibleModuleFail(AnsibleError): diff --git a/tests/common/gu_utils.py b/tests/common/gu_utils.py index 3bf7da0d5bf..cb5b0f96a37 100644 --- a/tests/common/gu_utils.py +++ b/tests/common/gu_utils.py @@ -14,7 +14,7 @@ DEFAULT_CHECKPOINT_NAME = "test" GCU_FIELD_OPERATION_CONF_FILE = "gcu_field_operation_validators.conf.json" GET_HWSKU_CMD = "sonic-cfggen -d -v DEVICE_METADATA.localhost.hwsku" -GCUTIMEOUT = 240 +GCUTIMEOUT = 600 BASE_DIR = os.path.dirname(os.path.realpath(__file__)) FILES_DIR = os.path.join(BASE_DIR, "files") diff --git a/tests/common/helpers/console_helper.py b/tests/common/helpers/console_helper.py index 58bf82326aa..37f4f95997e 100644 --- a/tests/common/helpers/console_helper.py +++ b/tests/common/helpers/console_helper.py @@ -1,5 +1,6 @@ import pytest import pexpect +import re def assert_expect_text(client, text, target_line, timeout_sec=0.1): @@ -22,3 +23,40 @@ def create_ssh_client(ip, user, pwd): def ensure_console_session_up(client, line): client.expect_exact('Successful connection to line [{}]'.format(line)) client.expect_exact('Press ^A ^X to disconnect') + + +def get_target_lines(duthost): + """ + Retrieve the indices of online line cards.
+ Returns a list of indices of the line cards that are online. + """ + result = duthost.shell("show chassis module status", module_ignore_errors=True) + lines = result['stdout'].splitlines() + linecards = [] + + # Pattern to match lines that have a "LINE-CARD" entry and "Online" in the Oper-Status column + linecard_pattern = re.compile(r"^\s*(LINE-CARD\d+)\s+.*?\s+\d+\s+Online\s+up\s+\S+") + + for line in lines: + match = linecard_pattern.match(line) + if match: + linecard_name = match.group(1) + index = linecard_name.split("LINE-CARD")[1] + linecards.append(index) + + if not linecards: + pytest.fail("No line cards are online.") + + return linecards + + +def handle_pexpect_exceptions(target_line): + """Handle pexpect exceptions during console interactions.""" + try: + yield + except pexpect.exceptions.EOF: + pytest.fail(f"EOF reached during console interaction for line {target_line}.") + except pexpect.exceptions.TIMEOUT: + pytest.fail(f"Timeout reached during console interaction for line {target_line}.") + except Exception as e: + pytest.fail(f"Error occurred during console interaction for line {target_line}: {e}") diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 34c09025b31..d91c871a263 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1259,12 +1259,6 @@ pfc_asym/test_pfc_asym.py: conditions: - "asic_type not in ['barefoot']" -pfc_asym/test_pfc_asym.py::test_pfc_asym_off_rx_pause_frames: - skip: - reason: "skipped for Barefoot platform" - conditions: - - "asic_type in ['barefoot']" - ####################################### ##### pfcwd ##### ####################################### @@ -1516,7 +1510,7 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiLossyQueueVoqMultiSrc: reason: "Lossy Queue Voq multiple source test is not supported / M0/MX topo does not support qos"
conditions_logical_operator: or conditions: - - "asic_type not in ['cisco-8000']" + - "asic_type not in ['cisco-8000'] or platform in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" qos/test_qos_sai.py::TestQosSai::testQosSaiPGDrop: @@ -1746,10 +1740,11 @@ snmp/test_snmp_queue.py: snmp/test_snmp_queue_counters.py: skip: - reason: "Have an known issue on kvm testbed" + conditions_logical_operator: OR + reason: "Have a known issue on kvm testbed / Unsupported in MGFX topos" conditions: - - asic_type in ['vs'] - - https://github.com/sonic-net/sonic-mgmt/issues/14007 + - "asic_type in ['vs'] and https://github.com/sonic-net/sonic-mgmt/issues/14007" + - "topo_type in ['m0', 'mx']" ####################################### ##### span ##### @@ -1894,9 +1889,12 @@ telemetry/test_telemetry.py: telemetry/test_telemetry.py::test_telemetry_queue_buffer_cnt: skip: - reason: "Testcase ignored due to switch type is voq" + conditions_logical_operator: or + reason: "Testcase ignored due to switch type is voq / Unsupported in MGFX topos / multi-asic issue 15393" conditions: - "(switch_type=='voq')" + - "topo_type in ['m0', 'mx']" + - "(is_multi_asic==True) and https://github.com/sonic-net/sonic-mgmt/issues/15393" ####################################### ##### pktgen ##### diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml index b1bd6251861..222014ab10b 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml @@ -5,11 +5,12 @@ #Hence, it is not dropped by default in Cisco-8000.
For dropping link local address, it should be done through security/DATA ACL drop_packets/test_configurable_drop_counters.py::test_dip_link_local: skip: - reason: "Cisco 8000 platform and some mlx platforms does not drop DIP link local packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform and some mlx platforms does not drop DIP link local packets" conditions_logical_operator: or conditions: - "'Mellanox' in hwsku" - asic_type=='cisco-8000' + - "topo_type in ['m0', 'mx']" drop_packets/test_configurable_drop_counters.py::test_neighbor_link_down: skip: @@ -19,31 +20,38 @@ drop_packets/test_configurable_drop_counters.py::test_neighbor_link_down: drop_packets/test_configurable_drop_counters.py::test_sip_link_local: skip: - reason: "Cisco 8000 platform and some MLX platforms does not drop SIP link local packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform and some MLX platforms does not drop SIP link local packets" conditions_logical_operator: or conditions: - asic_type=="cisco-8000" - "'Mellanox' in hwsku" + - "topo_type in ['m0', 'mx']" ####################################### ##### test_drop_counters.py ##### ####################################### drop_packets/test_drop_counters.py::test_absent_ip_header: skip: - reason: "Test case not supported on Broadcom DNX platform" + reason: "Test case not supported on Broadcom DNX platform and MGFX topos" + conditions_logical_operator: or conditions: - "asic_subtype in ['broadcom-dnx']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_acl_egress_drop: skip: - reason: "Not supported on Broadcom platforms" + reason: "Not supported on Broadcom platforms and MGFX topos" + conditions_logical_operator: or conditions: - "asic_type in ['broadcom']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_dst_ip_absent: skip: - reason: "Test case not supported on Broadcom DNX platform and Cisco 8000 platform" + reason: "Test case not 
supported on Broadcom DNX platform and Cisco 8000 platform and MGFX topos" + conditions_logical_operator: or conditions: - "asic_subtype in ['broadcom-dnx'] or asic_type in ['cisco-8000']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_dst_ip_absent[vlan_members]: skip: @@ -55,9 +63,11 @@ drop_packets/test_drop_counters.py::test_dst_ip_absent[vlan_members]: drop_packets/test_drop_counters.py::test_dst_ip_is_loopback_addr: skip: - reason: "Cisco 8000 platform does not drop DIP loopback packets. Test also not supported on Broadcom DNX" + reason: "Cisco 8000 platform does not drop DIP loopback packets. Test also not supported on Broadcom DNX and MGFX topos" + conditions_logical_operator: or conditions: - "(asic_type=='cisco-8000') or (asic_subtype in ['broadcom-dnx'])" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_dst_ip_is_loopback_addr[vlan_members]: skip: @@ -69,23 +79,28 @@ drop_packets/test_drop_counters.py::test_dst_ip_is_loopback_addr[vlan_members]: drop_packets/test_drop_counters.py::test_dst_ip_link_local: skip: - reason: "Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop DIP linklocal packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop DIP linklocal packets" conditions_logical_operator: or conditions: - "(asic_type=='cisco-8000') or (asic_subtype in ['broadcom-dnx'])" - "'Mellanox' in hwsku" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_equal_smac_dmac_drop: skip: - reason: "Drop not enabled on chassis since internal traffic uses same smac & dmac" + conditions_logical_operator: or + reason: "MGFX topos doesn't support drop packets / Drop not enabled on chassis since internal traffic uses same smac & dmac" conditions: - "asic_subtype in ['broadcom-dnx']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_ip_is_zero_addr: skip: - reason: "Cisco 8000 platform does 
not drop packets with 0.0.0.0 source or destination IP address" + conditions_logical_operator: or + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform does not drop packets with 0.0.0.0 source or destination IP address" conditions: - "asic_type=='cisco-8000'" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_ip_is_zero_addr[vlan_members-ipv4-dst]: skip: @@ -128,9 +143,11 @@ drop_packets/test_drop_counters.py::test_ip_is_zero_addr[vlan_members-ipv6-src]: drop_packets/test_drop_counters.py::test_ip_pkt_with_expired_ttl: skip: - reason: "Not supported on Mellanox devices" + reason: "Not supported on Mellanox devices and MGFX topos" + conditions_logical_operator: or conditions: - "asic_type in ['mellanox']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_loopback_filter: # Test case is skipped, because SONiC does not have a control to adjust loop-back filter settings. @@ -143,9 +160,11 @@ drop_packets/test_drop_counters.py::test_loopback_filter: drop_packets/test_drop_counters.py::test_no_egress_drop_on_down_link: skip: - reason: "VS platform do not support fanout configuration" + reason: "MGFX topos doesn't support drop packets / VS platform do not support fanout configuration" + conditions_logical_operator: or conditions: - "asic_type in ['vs']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_not_expected_vlan_tag_drop[vlan_members]: skip: @@ -163,15 +182,19 @@ drop_packets/test_drop_counters.py::test_not_expected_vlan_tag_drop[vlan_members drop_packets/test_drop_counters.py::test_src_ip_is_class_e: skip: - reason: "Cisco 8000 platform does not drop packets with source IP address in class E" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform does not drop packets with source IP address in class E" + conditions_logical_operator: or conditions: - "asic_type=='cisco-8000'" + - "topo_type in ['m0', 'mx']" 
drop_packets/test_drop_counters.py::test_src_ip_is_loopback_addr: skip: - reason: "Test currently not supported on broadcom DNX platform" + conditions_logical_operator: or + reason: "MGFX topos doesn't support drop packets / Test currently not supported on broadcom DNX platform" conditions: - "asic_subtype in ['broadcom-dnx']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_src_ip_is_loopback_addr[vlan_members]: skip: @@ -199,8 +222,9 @@ drop_packets/test_drop_counters.py::test_src_ip_is_multicast_addr[vlan_members-i drop_packets/test_drop_counters.py::test_src_ip_link_local: skip: - reason: "Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop SIP linklocal packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop SIP linklocal packets" conditions_logical_operator: or conditions: - "(asic_type=='cisco-8000') or (asic_subtype in ['broadcom-dnx'])" - "'Mellanox' in hwsku" + - "topo_type in ['m0', 'mx']" diff --git a/tests/common/plugins/ptfadapter/dummy_testutils.py b/tests/common/plugins/ptfadapter/dummy_testutils.py new file mode 100644 index 00000000000..f88b087556b --- /dev/null +++ b/tests/common/plugins/ptfadapter/dummy_testutils.py @@ -0,0 +1,28 @@ +import ptf.testutils as testutils +import inspect +import logging + +logger = logging.getLogger(__name__) + + +def wrapped(*args, **kwargs): + return True + + +class DummyTestUtils: + def __init__(self, *args, **kwargs): + func_dict = {} + for name, func in inspect.getmembers(testutils, inspect.isfunction): + if name.startswith("verify"): + func_dict[name] = func + self.func_dict = func_dict + + def __enter__(self, *args, **kwargs): + """ enter in 'with' block """ + for name, func in self.func_dict.items(): + setattr(testutils, name, wrapped) + + def __exit__(self, *args, **kwargs): + """ exit from 'with' block """ + for name, func in self.func_dict.items(): + setattr(testutils, name, 
self.func_dict[name]) diff --git a/tests/common/snappi_tests/read_pcap.py b/tests/common/snappi_tests/read_pcap.py index 1cd49f62131..f0a522b9576 100644 --- a/tests/common/snappi_tests/read_pcap.py +++ b/tests/common/snappi_tests/read_pcap.py @@ -3,6 +3,7 @@ from dpkt.utils import mac_to_str from tests.common.snappi_tests.pfc_packet import PFCPacket +from tests.snappi_tests.pfc.files.cisco_pfc_packet import CiscoPFCPacket logger = logging.getLogger(__name__) @@ -62,6 +63,63 @@ def validate_pfc_frame(pfc_pcap_file, SAMPLE_SIZE=15000, UTIL_THRESHOLD=0.8): return True, None +def validate_pfc_frame_cisco(pfc_pcap_file, SAMPLE_SIZE=15000, UTIL_THRESHOLD=0.8, peer_mac_addr=None): + """ + Validate PFC frame by checking the CBFC opcode, class enable vector and class pause times. + + Args: + pfc_cap: PFC pcap file + SAMPLE_SIZE: number of packets to sample + UTIL_THRESHOLD: threshold for PFC utilization to check if enough PFC frames were sent + + Returns: + True if valid PFC frame, False otherwise + """ + f = open(pfc_pcap_file, "rb") + pcap = dpkt.pcapng.Reader(f) + seen_non_zero_cev = False # Flag for checking if any PFC frame has non-zero class enable vector + + curPktCount = 0 + curPFCXoffPktCount = 0 + for _, buf in pcap: + if curPFCXoffPktCount >= SAMPLE_SIZE: + break + eth = dpkt.ethernet.Ethernet(buf) + if eth.type == PFC_MAC_CONTROL_CODE: + dest_mac = mac_to_str(eth.dst) + if dest_mac.lower() != PFC_DEST_MAC: + return False, "Destination MAC address is not 01:80:c2:00:00:01" + if peer_mac_addr: + src_mac = mac_to_str(eth.src) + if src_mac.lower() != peer_mac_addr: + return False, "Source MAC address is not the peer's mac address" + pfc_packet = CiscoPFCPacket(pfc_frame_bytes=bytes(eth.data)) + if not pfc_packet.is_valid(): + logger.info("PFC frame {} is not valid. 
Please check the capture file.".format(curPktCount)) + return False, "PFC frame is not valid" + cev = [int(i) for i in pfc_packet.class_enable_vec] + seen_non_zero_cev = True if sum(cev) > 0 else seen_non_zero_cev + if seen_non_zero_cev: + curPFCXoffPktCount += 1 + curPktCount += 1 + + if not seen_non_zero_cev: + logger.info("No PFC frames with non-zero class enable vector found in the capture file.") + return False, "No PFC frames with non-zero class enable vector found" + + f.close() + pfc_util = curPktCount / SAMPLE_SIZE + + if curPktCount == 0: + logger.info("No PFC frames found in the capture file.") + return False, "No PFC frames found in the capture file" + elif pfc_util < UTIL_THRESHOLD: + logger.info("PFC utilization is too low. Please check the capture file.") + return False, "PFC utilization is too low" + + return True, None + + def get_ipv4_pkts(pcap_file_name, protocol_num=61): """ Get IPv4 packets from the pcap/pcapng file diff --git a/tests/common/snappi_tests/traffic_generation.py b/tests/common/snappi_tests/traffic_generation.py index 2d3a1755c42..5acf21c90fd 100644 --- a/tests/common/snappi_tests/traffic_generation.py +++ b/tests/common/snappi_tests/traffic_generation.py @@ -12,6 +12,7 @@ from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp, fetch_snappi_flow_metrics from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.cisco_data import is_cisco_device logger = logging.getLogger(__name__) @@ -619,9 +620,14 @@ def verify_pause_frame_count_dut(rx_dut, pytest_assert(pfc_pause_rx_frames == 0, "PFC pause frames with no bit set in the class enable vector should be dropped") else: - pytest_assert(pfc_pause_rx_frames > 0, - "PFC pause frames should be received and counted in RX PFC counters for priority {}" - .format(prio)) + if len(prios) > 1 and is_cisco_device(tx_dut) and not test_traffic_pause: + 
pytest_assert(pfc_pause_rx_frames == 0, + "PFC pause frames should not be counted in RX PFC counters for priority {}" + .format(prios)) + else: + pytest_assert(pfc_pause_rx_frames > 0, + "PFC pause frames should be received and counted in RX PFC counters for priority {}" + .format(prio)) for peer_port, prios in dut_port_config[0].items(): # PFC pause frames sent by DUT's ingress port to TGEN for prio in prios: diff --git a/tests/common/templates/pfc_storm_sonic_t2.j2 b/tests/common/templates/pfc_storm_sonic_t2.j2 index fb6945ee2dc..7881a510804 100644 --- a/tests/common/templates/pfc_storm_sonic_t2.j2 +++ b/tests/common/templates/pfc_storm_sonic_t2.j2 @@ -1,6 +1,6 @@ cd {{pfc_gen_dir}} {% if (pfc_asym is defined) and (pfc_asym == True) %} -nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python3 {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}" > /dev/null 2>&1 & {% else %} -nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python3 {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}" > /dev/null 2>&1 & {% endif %} diff --git a/tests/common/templates/pfc_storm_stop_sonic_t2.j2 
b/tests/common/templates/pfc_storm_stop_sonic_t2.j2 index 1f29691ca5f..597417abb6b 100755 --- a/tests/common/templates/pfc_storm_stop_sonic_t2.j2 +++ b/tests/common/templates/pfc_storm_stop_sonic_t2.j2 @@ -1,6 +1,6 @@ cd {{pfc_gen_dir}} {% if (pfc_asym is defined) and (pfc_asym == True) %} -nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}'" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python3 {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}'" > /dev/null 2>&1 & {% else %} -nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}'" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python3 {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}'" > /dev/null 2>&1 & {% endif %} diff --git a/tests/common/utilities.py b/tests/common/utilities.py index 760e885deb7..8c8e2d70410 100644 --- a/tests/common/utilities.py +++ b/tests/common/utilities.py @@ -1358,3 +1358,31 @@ def run_show_features(duthosts, enum_dut_hostname): .format(cmd_key), module_ignore_errors=False)['stdout'] pytest_assert(redis_value.lower() == cmd_value.lower(), "'{}' is '{}' which does not match with config_db".format(cmd_key, cmd_value)) + + +def kill_process_by_pid(duthost, 
container_name, program_name, program_pid): + """Kills a process in the specified container by its pid. + + Args: + duthost: Hostname of DUT. + container_name: A string shows container name. + program_name: A string shows process name. + program_pid: An integer represents the PID of a process. + + Returns: + None. + """ + if "20191130" in duthost.os_version: + kill_cmd_result = duthost.shell("docker exec {} supervisorctl stop {}".format(container_name, program_name)) + else: + # If we used the command `supervisorctl stop ' to stop process, + # Supervisord will treat the exit code of process as expected and it will not generate + # alerting message. + kill_cmd_result = duthost.shell("docker exec {} kill -SIGKILL {}".format(container_name, program_pid)) + + # Get the exit code of 'kill' or 'supervisorctl stop' command + exit_code = kill_cmd_result["rc"] + pytest_assert(exit_code == 0, "Failed to stop program '{}' before test".format(program_name)) + + logger.info("Program '{}' in container '{}' was stopped successfully" + .format(program_name, container_name)) diff --git a/tests/conftest.py b/tests/conftest.py index 8e9e7ce2674..b9ba2c76de9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -66,6 +66,7 @@ from tests.common.helpers.assertions import pytest_assert as pt_assert from tests.common.helpers.inventory_utils import trim_inventory from tests.common.utilities import InterruptableThread +from tests.common.plugins.ptfadapter.dummy_testutils import DummyTestUtils try: from tests.macsec import MacsecPluginT2, MacsecPluginT0 @@ -991,6 +992,21 @@ def pytest_runtest_makereport(item, call): setattr(item, "rep_" + rep.when, rep) +# This function is a pytest hook implementation that is called in runtest call stage. +# We are using this hook to set ptf.testutils to DummyTestUtils if the test is marked with "skip_traffic_test", +# DummyTestUtils would always return True for all verify function in ptf.testutils. 
+@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_call(item): + if "skip_traffic_test" in item.keywords: + logger.info("Got skip_traffic_test marker, will skip traffic test") + with DummyTestUtils(): + logger.info("Set ptf.testutils to DummyTestUtils to skip traffic test") + yield + logger.info("Reset ptf.testutils") + else: + yield + + def collect_techsupport_on_dut(request, a_dut): # request.node is an "item" because we use the default # "function" scope diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py index d96526e2c59..324a3e6679e 100644 --- a/tests/copp/test_copp.py +++ b/tests/copp/test_copp.py @@ -41,7 +41,6 @@ # Module-level fixtures from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology("t0", "t1", "t2", "m0", "mx") @@ -84,7 +83,7 @@ class TestCOPP(object): "LLDP", "UDLD"]) def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfhost, copp_testbed, dut_type, skip_traffic_test): # noqa F811 + ptfhost, copp_testbed, dut_type): """ Validates that rate-limited COPP groups work as expected. 
@@ -96,13 +95,11 @@ def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_host ptfhost, protocol, copp_testbed, - dut_type, - skip_traffic_test=skip_traffic_test) + dut_type) @pytest.mark.disable_loganalyzer def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfhost, check_image_version, copp_testbed, dut_type, backup_restore_config_db, - skip_traffic_test): # noqa F811 + ptfhost, check_image_version, copp_testbed, dut_type, backup_restore_config_db): """ Validates that one new trap(bgp) can be installed @@ -125,16 +122,14 @@ def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, self.trap_id.upper(), copp_testbed, dut_type, - has_trap=False, - skip_traffic_test=skip_traffic_test) + has_trap=False) logger.info("Set always_enabled of {} to true".format(self.trap_id)) copp_utils.configure_always_enabled_for_trap(duthost, self.trap_id, "true") logger.info("Verify {} trap status is installed by sending traffic".format(self.trap_id)) pytest_assert( - wait_until(60, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type, - skip_traffic_test=skip_traffic_test), + wait_until(60, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type), "Installing {} trap fail".format(self.trap_id)) @pytest.mark.disable_loganalyzer @@ -142,7 +137,7 @@ def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, "disable_feature_status"]) def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, check_image_version, copp_testbed, dut_type, - backup_restore_config_db, remove_trap_type, skip_traffic_test): # noqa F811 + backup_restore_config_db, remove_trap_type): """ Validates that The trap(bgp) can be uninstalled after deleting the corresponding entry from the feature table @@ -160,7 +155,7 @@ def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, copp_utils.uninstall_trap(duthost, 
"ip2me", "ip2me") logger.info("Pre condition: make trap {} is installed".format(self.feature_name)) - pre_condition_install_trap(ptfhost, duthost, copp_testbed, self.trap_id, self.feature_name, skip_traffic_test) + pre_condition_install_trap(ptfhost, duthost, copp_testbed, self.trap_id, self.feature_name) if remove_trap_type == "delete_feature_entry": logger.info("Remove feature entry: {}".format(self.feature_name)) @@ -172,13 +167,13 @@ def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, logger.info("Verify {} trap status is uninstalled by sending traffic".format(self.trap_id)) pytest_assert( wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), - copp_testbed, dut_type, has_trap=False, skip_traffic_test=skip_traffic_test), + copp_testbed, dut_type, has_trap=False), "uninstalling {} trap fail".format(self.trap_id)) @pytest.mark.disable_loganalyzer def test_trap_config_save_after_reboot(self, duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, check_image_version, copp_testbed, dut_type, - backup_restore_config_db, request, skip_traffic_test): # noqa F811 + backup_restore_config_db, request): # noqa F811 """ Validates that the trap configuration is saved or not after reboot(reboot, fast-reboot, warm-reboot) @@ -207,8 +202,7 @@ def test_trap_config_save_after_reboot(self, duthosts, localhost, enum_rand_one_ copp_utils.verify_always_enable_value(duthost, self.trap_id, "true") logger.info("Verify {} trap status is installed by sending traffic".format(self.trap_id)) pytest_assert( - wait_until(200, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type, - skip_traffic_test=skip_traffic_test), + wait_until(200, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type), "Installing {} trap fail".format(self.trap_id)) @@ -279,7 +273,7 @@ def ignore_expected_loganalyzer_exceptions(enum_rand_one_per_hwsku_frontend_host 
loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignoreRegex) -def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True, skip_traffic_test=False): # noqa F811 +def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True): """ Configures and runs the PTF test cases. """ @@ -299,9 +293,6 @@ def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True, skip_ device_sockets = ["0-{}@tcp://127.0.0.1:10900".format(test_params.nn_target_port), "1-{}@tcp://{}:10900".format(test_params.nn_target_port, dut_ip)] - if skip_traffic_test is True: - logger.info("Skipping traffic test.") - return True # NOTE: debug_level can actually slow the PTF down enough to fail the test cases # that are not rate limited. Until this is addressed, do not use this flag as part of # nightly test runs. @@ -497,15 +488,14 @@ def backup_restore_config_db(duthosts, enum_rand_one_per_hwsku_frontend_hostname copp_utils.restore_config_db(duthost) -def pre_condition_install_trap(ptfhost, duthost, copp_testbed, trap_id, feature_name, skip_traffic_test): # noqa F811 +def pre_condition_install_trap(ptfhost, duthost, copp_testbed, trap_id, feature_name): # noqa F811 copp_utils.install_trap(duthost, feature_name) logger.info("Set always_enabled of {} to false".format(trap_id)) copp_utils.configure_always_enabled_for_trap(duthost, trap_id, "false") logger.info("Verify {} trap status is installed by sending traffic in pre_condition".format(trap_id)) pytest_assert( - wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, trap_id.upper(), copp_testbed, dut_type, - skip_traffic_test=skip_traffic_test), + wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, trap_id.upper(), copp_testbed, dut_type), "Installing {} trap fail".format(trap_id)) diff --git a/tests/dash/dash_acl.py b/tests/dash/dash_acl.py index 1ce0abe5f7d..0248d4d6b91 100644 --- a/tests/dash/dash_acl.py +++ b/tests/dash/dash_acl.py @@ -1132,11 +1132,6 @@ def 
_check_tcp_rst_pkt_acl_permit(pkt): def _check_tcp_rst_pkt_acl_deny(pkt): def _set_do_not_care_fields(expected_rst_packt, bit_length_after_inner_tcp_falg): - expected_rst_packt.set_do_not_care(128, 16) # external packet total length - expected_rst_packt.set_do_not_care(304, 16) # udp length - expected_rst_packt.set_do_not_care(336, 16) # vxlan flags - expected_rst_packt.set_do_not_care(352, 16) # vxlan group policy id - expected_rst_packt.set_do_not_care(528, 16) # inner ip total length expected_rst_packt.set_do_not_care(592, 16) # checksum in inner packet # it includes the fields after inner tcp flag expected_rst_packt.set_do_not_care(784, bit_length_after_inner_tcp_falg) @@ -1146,11 +1141,12 @@ def _get_expected_rst_packet_to_receiver(): inner_extra_conf_to_receiver = copy.deepcopy(pkt.inner_extra_conf) inner_extra_conf_to_receiver["tcp_flags"] = "R" inner_extra_conf_to_receiver["ip_id"] = 0x0000 + inner_extra_conf_to_receiver["pktlen"] = 54 _, _, _, expected_rst_packet_to_receiver = packets.inbound_vnet_packets(pkt.dash_config_info, inner_extra_conf_to_receiver, inner_packet_type='tcp') logger.info("Set ignore fields for expected rst packet sent to receiver") - _set_do_not_care_fields(expected_rst_packet_to_receiver, 416) + _set_do_not_care_fields(expected_rst_packet_to_receiver, 48) return expected_rst_packet_to_receiver @@ -1185,7 +1181,7 @@ def _get_expected_rst_packet_to_sender(): # Verify packet(no syn) is dropped # verify packet RST packet is sent to two ends - len_expected_rst_packet_to_receiver = 150 + len_expected_rst_packet_to_receiver = 104 len_expected_rst_packet_to_sender = 104 packets.verify_tcp_packet_drop_rst_packet_sent( ptfadapter, diff --git a/tests/decap/test_decap.py b/tests/decap/test_decap.py index 9464ce15a5b..41264bb894a 100644 --- a/tests/decap/test_decap.py +++ b/tests/decap/test_decap.py @@ -21,7 +21,6 @@ from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import 
copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.fixtures.ptfhost_utils import ptf_test_port_map_active_active from tests.common.fixtures.fib_utils import fib_info_files # noqa F401 from tests.common.fixtures.fib_utils import single_fib_for_duts # noqa F401 @@ -193,8 +192,7 @@ def simulate_vxlan_teardown(duthosts, ptfhost, tbinfo): def test_decap(tbinfo, duthosts, ptfhost, setup_teardown, mux_server_url, # noqa F811 toggle_all_simulator_ports_to_random_side, supported_ttl_dscp_params, ip_ver, loopback_ips, # noqa F811 - duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator, # noqa F811 - skip_traffic_test): # noqa F811 + duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator): # noqa F811 setup_info = setup_teardown asic_type = duthosts[0].facts["asic_type"] ecn_mode = "copy_from_outer" @@ -214,9 +212,6 @@ def test_decap(tbinfo, duthosts, ptfhost, setup_teardown, mux_server_url, else: apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, 'SET') - if skip_traffic_test: - return - if 'dualtor' in tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') diff --git a/tests/decap/test_subnet_decap.py b/tests/decap/test_subnet_decap.py index bed66fbe68a..7c2b48486b6 100644 --- a/tests/decap/test_subnet_decap.py +++ b/tests/decap/test_subnet_decap.py @@ -9,7 +9,6 @@ import ptf.testutils as testutils from ptf.mask import Mask from tests.common.dualtor.dual_tor_utils import rand_selected_interface # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa F401 from tests.common.config_reload import config_reload @@ -192,23 +191,20 @@ def 
build_expected_vlan_subnet_packet(encapsulated_packet, ip_version, stage, de def verify_packet_with_expected(ptfadapter, stage, pkt, exp_pkt, send_port, - recv_ports=[], recv_port=None, timeout=10, skip_traffic_test=False): # noqa F811 - if skip_traffic_test is True: - logger.info("Skip traffic test") - return + recv_ports=[], recv_port=None, timeout=10): # noqa F811 ptfadapter.dataplane.flush() testutils.send(ptfadapter, send_port, pkt) if stage == "positive": - testutils.verify_packet_any_port(ptfadapter, exp_pkt, recv_ports, timeout=10) + testutils.verify_packet_any_port(ptfadapter, exp_pkt, recv_ports, timeout=timeout) elif stage == "negative": - testutils.verify_packet(ptfadapter, exp_pkt, recv_port, timeout=10) + testutils.verify_packet(ptfadapter, exp_pkt, recv_port, timeout=timeout) @pytest.mark.parametrize("ip_version", ["IPv4", "IPv6"]) @pytest.mark.parametrize("stage", ["positive", "negative"]) def test_vlan_subnet_decap(request, rand_selected_dut, tbinfo, ptfhost, ptfadapter, ip_version, stage, prepare_subnet_decap_config, prepare_vlan_subnet_test_port, - prepare_negative_ip_port_map, setup_arp_responder, skip_traffic_test): # noqa F811 + prepare_negative_ip_port_map, setup_arp_responder): # noqa F811 ptf_src_port, _, upstream_port_ids = prepare_vlan_subnet_test_port encapsulated_packet = build_encapsulated_vlan_subnet_packet(ptfadapter, rand_selected_dut, ip_version, stage) @@ -221,5 +217,4 @@ def test_vlan_subnet_decap(request, rand_selected_dut, tbinfo, ptfhost, ptfadapt ptf_target_port = None verify_packet_with_expected(ptfadapter, stage, encapsulated_packet, exp_pkt, - ptf_src_port, recv_ports=upstream_port_ids, recv_port=ptf_target_port, - skip_traffic_test=skip_traffic_test) + ptf_src_port, recv_ports=upstream_port_ids, recv_port=ptf_target_port) diff --git a/tests/dhcp_relay/test_dhcp_relay.py b/tests/dhcp_relay/test_dhcp_relay.py index 99e622dbe8c..f14db90766d 100644 --- a/tests/dhcp_relay/test_dhcp_relay.py +++ 
b/tests/dhcp_relay/test_dhcp_relay.py @@ -254,7 +254,8 @@ def test_dhcp_relay_default(ptfhost, dut_dhcp_relay_data, validate_dut_routes_ex "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) if not skip_dhcpmon: time.sleep(36) # dhcpmon debug counter prints every 18 seconds @@ -343,7 +344,8 @@ def test_dhcp_relay_with_source_port_ip_in_relay_enabled(ptfhost, dut_dhcp_relay "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), "testing_mode": testing_mode, - "enable_source_port_ip_in_relay": True}, + "enable_source_port_ip_in_relay": True, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) if not skip_dhcpmon: time.sleep(36) # dhcpmon debug counter prints every 18 seconds @@ -404,7 +406,8 @@ def test_dhcp_relay_after_link_flap(ptfhost, dut_dhcp_relay_data, validate_dut_r "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -460,7 +463,8 @@ def test_dhcp_relay_start_with_uplinks_down(ptfhost, dut_dhcp_relay_data, valida "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -495,7 +499,8 @@ def test_dhcp_relay_unicast_mac(ptfhost, dut_dhcp_relay_data, validate_dut_route "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": 
dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -529,7 +534,8 @@ def test_dhcp_relay_random_sport(ptfhost, dut_dhcp_relay_data, validate_dut_rout "client_udp_src_port": RANDOM_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -597,7 +603,8 @@ def test_dhcp_relay_counter(ptfhost, dut_dhcp_relay_data, validate_dut_routes_ex "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test_counter.DHCPTest.log", is_python3=True) for type in dhcp_message_types: if type in ["Discover", "Request"]: diff --git a/tests/dhcp_relay/test_dhcp_relay_stress.py b/tests/dhcp_relay/test_dhcp_relay_stress.py index edfd94761ef..d7a69d16ffc 100644 --- a/tests/dhcp_relay/test_dhcp_relay_stress.py +++ b/tests/dhcp_relay/test_dhcp_relay_stress.py @@ -55,7 +55,8 @@ def test_dhcp_relay_restart_with_stress(ptfhost, dut_dhcp_relay_data, validate_d "uplink_mac": str(dut_dhcp_relay_data[0]['uplink_mac']), "testing_mode": testing_mode, "duration": duration, - "pps": pps}, + "pps": pps, + "kvm_support": True}, log_file="/tmp/dhcp_relay_stress_test.DHCPContinuousStressTest.log", is_python3=True, async_mode=True) @@ -95,7 +96,8 @@ def _check_socket_buffer(): "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dut_dhcp_relay_data[0]['switch_loopback_ip'], "uplink_mac": str(dut_dhcp_relay_data[0]['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + 
"kvm_support": True}, log_file="/tmp/dhcp_relay_test.stress.DHCPTest.log", is_python3=True) @@ -162,7 +164,8 @@ def test_dhcp_relay_stress(ptfhost, ptfadapter, dut_dhcp_relay_data, validate_du "uplink_mac": str(dhcp_relay['uplink_mac']), "packets_send_duration": packets_send_duration, "client_packets_per_sec": client_packets_per_sec, - "testing_mode": testing_mode + "testing_mode": testing_mode, + "kvm_support": True } count_file = '/tmp/dhcp_stress_test_{}.json'.format(dhcp_type) diff --git a/tests/dhcp_relay/test_dhcpv6_relay.py b/tests/dhcp_relay/test_dhcpv6_relay.py index e06a6d7057d..2446727f710 100644 --- a/tests/dhcp_relay/test_dhcpv6_relay.py +++ b/tests/dhcp_relay/test_dhcpv6_relay.py @@ -324,7 +324,8 @@ def test_dhcpv6_relay_counter(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp "dut_mac": str(dhcp_relay['uplink_mac']), "vlan_ip": str(dhcp_relay['downlink_vlan_iface']['addr']), "loopback_ipv6": str(dhcp_relay['loopback_ipv6']), - "is_dualtor": str(dhcp_relay['is_dualtor'])}, + "is_dualtor": str(dhcp_relay['is_dualtor']), + "kvm_support": True}, log_file="/tmp/dhcpv6_relay_test.DHCPCounterTest.log", is_python3=True) for type in message_types: diff --git a/tests/drop_packets/drop_packets.py b/tests/drop_packets/drop_packets.py index 9948968af0c..6cfa62c922a 100644 --- a/tests/drop_packets/drop_packets.py +++ b/tests/drop_packets/drop_packets.py @@ -15,7 +15,6 @@ from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common import config_reload -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.dut_utils import is_mellanox_fanout RX_DRP = "RX_DRP" @@ -516,7 +515,7 @@ def send_packets(pkt, ptfadapter, ptf_tx_port_id, num_packets=1): def test_equal_smac_dmac_drop(do_test, ptfadapter, setup, fanouthost, - pkt_fields, ports_info, enum_fanout_graph_facts, skip_traffic_test): # noqa F811 + pkt_fields, 
ports_info, enum_fanout_graph_facts): # noqa F811 """ @summary: Create a packet with equal SMAC and DMAC. """ @@ -555,7 +554,7 @@ def test_equal_smac_dmac_drop(do_test, ptfadapter, setup, fanouthost, group = "L2" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - comparable_pkt=comparable_pkt, skip_traffic_test=skip_traffic_test) + comparable_pkt=comparable_pkt) def test_multicast_smac_drop(do_test, ptfadapter, setup, fanouthost, @@ -599,11 +598,11 @@ def test_multicast_smac_drop(do_test, ptfadapter, setup, fanouthost, group = "L2" do_test(group, pkt, ptfadapter, ports_info, - setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt, skip_traffic_test=skip_traffic_test) + setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt) def test_not_expected_vlan_tag_drop(do_test, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, setup, pkt_fields, ports_info, skip_traffic_test): + ptfadapter, setup, pkt_fields, ports_info): """ @summary: Create a VLAN tagged packet which VLAN ID does not match ingress port VLAN ID. """ @@ -636,11 +635,10 @@ def test_not_expected_vlan_tag_drop(do_test, duthosts, enum_rand_one_per_hwsku_f ) group = "L2" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) -def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ports, ports_info, skip_traffic_test): +def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ports, ports_info): """ @summary: Create a packet with loopback destination IP adress. 
""" @@ -658,11 +656,10 @@ def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ tcp_dport=pkt_fields["tcp_dport"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): +def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with loopback source IP adress. """ @@ -680,11 +677,10 @@ def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_f tcp_dport=pkt_fields["tcp_dport"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): +def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with absent destination IP address. """ @@ -712,13 +708,12 @@ def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, por group = "L3" print(("msm group {}, setup {}".format(group, setup))) - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("ip_addr", ["ipv4", "ipv6"]) def test_src_ip_is_multicast_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ip_addr, - ports_info, skip_traffic_test): + ports_info): """ @summary: Create a packet with multicast source IP adress. 
""" @@ -752,11 +747,11 @@ def test_src_ip_is_multicast_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_ group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, ip_ver=ip_addr, skip_traffic_test=skip_traffic_test) + tx_dut_ports, ip_ver=ip_addr) def test_src_ip_is_class_e(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with source IP address in class E. """ @@ -779,14 +774,12 @@ def test_src_ip_is_class_e(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsk tcp_dport=pkt_fields["tcp_dport"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("addr_type, addr_direction", [("ipv4", "src"), ("ipv6", "src"), ("ipv4", "dst"), ("ipv6", "dst")]) -def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, addr_type, addr_direction, - ports_info, skip_traffic_test): +def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, addr_type, addr_direction, ports_info): """ @summary: Create a packet with "0.0.0.0" source or destination IP address. 
""" @@ -833,11 +826,11 @@ def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, a pytest.skip("Src IP zero packets are not dropped on Broadcom DNX platform currently") do_test(group, pkt, ptfadapter, ports_info, list(setup["dut_to_ptf_port_map"].values()), tx_dut_ports, - ip_ver=addr_type, skip_traffic_test=skip_traffic_test) + ip_ver=addr_type) def test_dst_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with link-local address "169.254.0.0/16". """ @@ -860,11 +853,10 @@ def test_dst_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsk group = "L3" logger.info(pkt_params) - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): +def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet drops by loopback-filter. 
Loop-back filter means that route to the host with DST IP of received packet exists on received interface @@ -892,13 +884,11 @@ def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, p group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_ip_pkt_with_expired_ttl(duthost, do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, - ports_info, sai_acl_drop_adj_enabled, configure_copp_drop_for_ttl_error, - skip_traffic_test): + ports_info, sai_acl_drop_adj_enabled, configure_copp_drop_for_ttl_error): """ @summary: Create an IP packet with TTL=0. """ @@ -916,12 +906,12 @@ def test_ip_pkt_with_expired_ttl(duthost, do_test, ptfadapter, setup, tx_dut_por group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled, skip_traffic_test=skip_traffic_test) + tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled) @pytest.mark.parametrize("pkt_field, value", [("version", 1), ("chksum", 10), ("ihl", 1)]) def test_broken_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, pkt_field, - value, ports_info, sai_acl_drop_adj_enabled, skip_traffic_test): + value, ports_info, sai_acl_drop_adj_enabled): """ @summary: Create a packet with broken IP header. 
""" @@ -940,11 +930,11 @@ def test_broken_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled, skip_traffic_test=skip_traffic_test) + tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled) def test_absent_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, - sai_acl_drop_adj_enabled, skip_traffic_test): + sai_acl_drop_adj_enabled): """ @summary: Create packets with absent IP header. """ @@ -967,12 +957,12 @@ def test_absent_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled, skip_traffic_test=skip_traffic_test) + tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled) @pytest.mark.parametrize("eth_dst", ["01:00:5e:00:01:02", "ff:ff:ff:ff:ff:ff"]) def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, setup, tx_dut_ports, - pkt_fields, eth_dst, ports_info, skip_traffic_test): + pkt_fields, eth_dst, ports_info): """ @summary: Create packets with multicast/broadcast ethernet dst. 
""" @@ -992,15 +982,14 @@ def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, setup, tx_dut_ports, ) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("igmp_version,msg_type", [("v1", "general_query"), ("v3", "general_query"), ("v1", "membership_report"), ("v2", "membership_report"), ("v3", "membership_report"), ("v2", "leave_group")]) def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_ports, - pkt_fields, igmp_version, msg_type, ports_info, skip_traffic_test): + pkt_fields, igmp_version, msg_type, ports_info): """ @summary: Create an IGMP non-routable packets. """ @@ -1085,12 +1074,11 @@ def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_p pkt.getlayer("IP").dst, pkt_fields["ipv4_src"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, list(setup["dut_to_ptf_port_map"].values()), - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, list(setup["dut_to_ptf_port_map"].values()), tx_dut_ports) def test_acl_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, acl_ingress, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, acl_ingress, ports_info): """ @summary: Verify that DUT drops packet with SRC IP 20.0.0.0/24 matched by ingress ACL """ @@ -1114,12 +1102,11 @@ def test_acl_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_fronten tcp_dport=pkt_fields["tcp_dport"] ) - do_test("ACL", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test("ACL", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_acl_egress_drop(do_test, ptfadapter, duthosts, 
enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, acl_egress, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, acl_egress, ports_info): """ @summary: Verify that DUT drops packet with DST IP 192.168.144.1/24 matched by egress ACL and ACL drop counter incremented @@ -1145,5 +1132,4 @@ def test_acl_egress_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_ ip_ttl=64 ) do_test(discard_group="ACL", pkt=pkt, ptfadapter=ptfadapter, ports_info=ports_info, - sniff_ports=setup["neighbor_sniff_ports"], tx_dut_ports=tx_dut_ports, drop_information="OUTDATAACL", - skip_traffic_test=skip_traffic_test) + sniff_ports=setup["neighbor_sniff_ports"], tx_dut_ports=tx_dut_ports, drop_information="OUTDATAACL") diff --git a/tests/drop_packets/test_drop_counters.py b/tests/drop_packets/test_drop_counters.py index c1835fb8e89..ff12f8ee865 100755 --- a/tests/drop_packets/test_drop_counters.py +++ b/tests/drop_packets/test_drop_counters.py @@ -22,7 +22,6 @@ test_acl_egress_drop # noqa F401 from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.fixtures.conn_graph_facts import enum_fanout_graph_facts # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology("any") @@ -141,8 +140,7 @@ def handle_backend_acl(duthost, tbinfo): def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, # noqa F811 - tx_dut_ports=None, skip_counter_check=False, drop_information=None, # noqa F811 - skip_traffic_test=False): # noqa F811 + tx_dut_ports=None, skip_counter_check=False, drop_information=None): # noqa F811 """ Base test function for verification of L2 or L3 packet drops. Verification type depends on 'discard_group' value. 
Supported 'discard_group' values: 'L2', 'L3', 'ACL', 'NO_DROPS' @@ -162,9 +160,6 @@ def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, port if skip_counter_check: logger.info("Skipping counter check") return None - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return None if discard_group == "L2": verify_drop_counters(duthosts, asic_index, ports_info["dut_iface"], @@ -297,8 +292,7 @@ def check_if_skip(): @pytest.fixture(scope='module') def do_test(duthosts): def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx_dut_ports=None, # noqa F811 - comparable_pkt=None, skip_counter_check=False, drop_information=None, ip_ver='ipv4', - skip_traffic_test=False): # noqa F811 + comparable_pkt=None, skip_counter_check=False, drop_information=None, ip_ver='ipv4'): """ Execute test - send packet, check that expected discard counters were incremented and packet was dropped @param discard_group: Supported 'discard_group' values: 'L2', 'L3', 'ACL', 'NO_DROPS' @@ -310,24 +304,24 @@ def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx @param ip_ver: A string, ipv4 or ipv6 """ check_if_skip() + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + skip_counter_check = True + asic_index = ports_info["asic_index"] base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports, - skip_counter_check=skip_counter_check, drop_information=drop_information, - skip_traffic_test=skip_traffic_test) + skip_counter_check=skip_counter_check, drop_information=drop_information) # Verify packets were not egresed the DUT if discard_group != "NO_DROPS": exp_pkt = expected_packet_mask(pkt, ip_ver=ip_ver) - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=sniff_ports) return do_counters_test def test_reserved_dmac_drop(do_test, ptfadapter, duthosts, 
enum_rand_one_per_hwsku_frontend_hostname, - setup, fanouthost, pkt_fields, ports_info, skip_traffic_test): # noqa F811 + setup, fanouthost, pkt_fields, ports_info): # noqa F811 """ @summary: Verify that packet with reserved DMAC is dropped and L2 drop counter incremented @used_mac_address: @@ -361,12 +355,11 @@ def test_reserved_dmac_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hws ) group = "L2" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) -def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, # noqa F811 - pkt_fields, rif_port_down, ports_info, skip_traffic_test): # noqa F811 +def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, # noqa F811 + pkt_fields, rif_port_down, ports_info): # noqa F811 """ @summary: Verify that packets on ingress port are not dropped when egress RIF link is down and check that drop counters not incremented @@ -384,12 +377,11 @@ def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, tcp_dport=pkt_fields["tcp_dport"] ) - do_test("NO_DROPS", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test("NO_DROPS", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_src_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): # noqa F811 + setup, tx_dut_ports, pkt_fields, ports_info): # noqa F811 """ @summary: Verify that packet with link-local address "169.254.0.0/16" is dropped and L3 drop counter incremented """ @@ -412,12 +404,11 @@ def test_src_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsk pkt = testutils.simple_tcp_packet(**pkt_params) logger.info(pkt_params) - do_test("L3", pkt, ptfadapter, 
ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_ip_pkt_with_exceeded_mtu(do_test, ptfadapter, setup, tx_dut_ports, # noqa F811 - pkt_fields, mtu_config, ports_info, skip_traffic_test): # noqa F811 + pkt_fields, mtu_config, ports_info): # noqa F811 """ @summary: Verify that IP packet with exceeded MTU is dropped and L3 drop counter incremented """ @@ -447,7 +438,6 @@ def test_ip_pkt_with_exceeded_mtu(do_test, ptfadapter, setup, tx_dut_ports, ) L2_COL_KEY = RX_ERR try: - do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - skip_traffic_test=skip_traffic_test) + do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) finally: L2_COL_KEY = RX_DRP diff --git a/tests/dualtor/test_ipinip.py b/tests/dualtor/test_ipinip.py index d5e0b15476b..33d62946d75 100644 --- a/tests/dualtor/test_ipinip.py +++ b/tests/dualtor/test_ipinip.py @@ -28,7 +28,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 from tests.common.dualtor.dual_tor_utils import validate_active_active_dualtor_setup # noqa F401 @@ -105,7 +104,7 @@ def build_expected_packet_to_server(encapsulated_packet, decrease_ttl=False): def test_decap_active_tor( build_encapsulated_packet, request, ptfhost, rand_selected_interface, ptfadapter, # noqa F401 - tbinfo, rand_selected_dut, tunnel_traffic_monitor, skip_traffic_test): # noqa F811 + tbinfo, rand_selected_dut, tunnel_traffic_monitor): # noqa F811 
@contextlib.contextmanager def stop_garp(ptfhost): @@ -129,9 +128,6 @@ def stop_garp(ptfhost): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following traffic test") - return with stop_garp(ptfhost): ptfadapter.dataplane.flush() testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet) @@ -141,7 +137,7 @@ def stop_garp(ptfhost): def test_decap_standby_tor( build_encapsulated_packet, request, rand_selected_interface, ptfadapter, # noqa F401 - tbinfo, rand_selected_dut, tunnel_traffic_monitor, skip_traffic_test # noqa F401 + tbinfo, rand_selected_dut, tunnel_traffic_monitor # noqa F401 ): def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt): @@ -170,9 +166,6 @@ def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following traffic test") - return with tunnel_traffic_monitor(tor, existing=False): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet, count=10) time.sleep(2) @@ -302,7 +295,7 @@ def setup_active_active_ports(active_active_ports, rand_selected_dut, rand_unsel def test_encap_with_mirror_session(rand_selected_dut, rand_selected_interface, # noqa F811 ptfadapter, tbinfo, setup_mirror_session, toggle_all_simulator_ports_to_rand_unselected_tor, # noqa F811 - tunnel_traffic_monitor, skip_traffic_test, # noqa F811 + tunnel_traffic_monitor, # noqa F811 setup_standby_ports_on_rand_selected_tor): # noqa F811 """ A test case to verify the bounced back packet from Standby ToR to T1 doesn't have an unexpected vlan id (4095) @@ -321,8 +314,5 @@ def test_encap_with_mirror_session(rand_selected_dut, rand_selected_interface, logging.info("Sending packet from ptf t1 
interface {}".format(src_port_id)) inner_packet = pkt_to_server[scapy.all.IP].copy() inner_packet[IP].ttl -= 1 - if skip_traffic_test is True: - logging.info("Skip following traffic test") - return with tunnel_traffic_monitor(rand_selected_dut, inner_packet=inner_packet, check_items=()): testutils.send(ptfadapter, src_port_id, pkt_to_server) diff --git a/tests/dualtor/test_orchagent_active_tor_downstream.py b/tests/dualtor/test_orchagent_active_tor_downstream.py index 45d3506eaf8..ddb6854a34a 100644 --- a/tests/dualtor/test_orchagent_active_tor_downstream.py +++ b/tests/dualtor/test_orchagent_active_tor_downstream.py @@ -5,7 +5,6 @@ from ipaddress import ip_address from ptf import testutils -from tests.common.dualtor.dual_tor_mock import * # noqa F403 from tests.common.dualtor.dual_tor_utils import dualtor_info from tests.common.dualtor.dual_tor_utils import flush_neighbor from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports @@ -15,13 +14,13 @@ from tests.common.dualtor.dual_tor_utils import check_nexthops_single_downlink from tests.common.dualtor.dual_tor_utils import add_nexthop_routes, remove_static_routes from tests.common.dualtor.dual_tor_mock import set_mux_state +from tests.common.dualtor.dual_tor_mock import is_mocked_dualtor from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports # noqa F401 from tests.common.dualtor.server_traffic_utils import ServerTrafficMonitor from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -68,7 +67,7 @@ def neighbor_reachable(duthost, 
neighbor_ip): def test_active_tor_remove_neighbor_downstream_active( conn_graph_facts, ptfadapter, ptfhost, testbed_setup, rand_selected_dut, tbinfo, set_crm_polling_interval, - tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 + tunnel_traffic_monitor, vmhost # noqa F811 ): """ @Verify those two scenarios: @@ -103,9 +102,9 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s from ptf t1 interface %s", server_ip, ptf_t1_intf) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10) @@ -113,7 +112,7 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s after removing neighbor entry", server_ip) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) remove_neighbor_ct = remove_neighbor(ptfhost, tor, server_ip, ip_version, removed_neighbor) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ @@ -126,7 +125,7 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s after neighbor entry is restored", server_ip) server_traffic_monitor = 
ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ tunnel_monitor, server_traffic_monitor: @@ -146,10 +145,10 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): def test_downstream_ecmp_nexthops( ptfadapter, rand_selected_dut, tbinfo, - toggle_all_simulator_ports, tor_mux_intfs, ip_version, skip_traffic_test # noqa F811 + toggle_all_simulator_ports, tor_mux_intfs, ip_version # noqa F811 ): nexthops_count = 4 - set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports) # noqa F405 + set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports) iface_server_map = get_interface_server_map(rand_selected_dut, nexthops_count) if ip_version == "ipv4": @@ -172,7 +171,7 @@ def test_downstream_ecmp_nexthops( try: logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces, skip_traffic_test) + tbinfo, nexthop_interfaces) nexthop_interfaces_copy = nexthop_interfaces.copy() @@ -183,7 +182,7 @@ def test_downstream_ecmp_nexthops( nexthop_interfaces_copy.remove(interface) logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces_copy, skip_traffic_test) + tbinfo, nexthop_interfaces_copy) # Revert two mux states to active for index, interface in reversed(list(enumerate(nexthop_interfaces))): @@ -192,7 +191,7 @@ def test_downstream_ecmp_nexthops( nexthop_interfaces_copy.append(interface) logging.info("Verify traffic to this route 
destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces_copy, skip_traffic_test) + tbinfo, nexthop_interfaces_copy) finally: # Remove the nexthop route remove_static_routes(rand_selected_dut, dst_server_addr) diff --git a/tests/dualtor/test_orchagent_mac_move.py b/tests/dualtor/test_orchagent_mac_move.py index 7aa0a25e39d..93c18c4d14b 100644 --- a/tests/dualtor/test_orchagent_mac_move.py +++ b/tests/dualtor/test_orchagent_mac_move.py @@ -3,7 +3,8 @@ import random from ptf import testutils -from tests.common.dualtor.dual_tor_mock import * # noqa F403 +from tests.common.dualtor.dual_tor_mock import is_mocked_dualtor +from tests.common.dualtor.dual_tor_mock import set_dual_tor_state_to_orchagent from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports from tests.common.dualtor.dual_tor_utils import crm_neighbor_checker from tests.common.dualtor.dual_tor_utils import build_packet_to_server @@ -13,7 +14,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output @@ -85,7 +85,7 @@ def test_mac_move( announce_new_neighbor, apply_active_state_to_orchagent, conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, set_crm_polling_interval, - tbinfo, tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 + tbinfo, tunnel_traffic_monitor, vmhost # noqa F811 ): tor = rand_selected_dut ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) @@ -96,23 +96,23 @@ def test_mac_move( announce_new_neighbor.send(None) logging.info("let new neighbor learnt on active port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, 
NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) # mac move to a standby port test_port = next(announce_new_neighbor) - announce_new_neighbor.send(lambda iface: set_dual_tor_state_to_orchagent(tor, "standby", [iface])) # noqa F405 + announce_new_neighbor.send(lambda iface: set_dual_tor_state_to_orchagent(tor, "standby", [iface])) logging.info("mac move to a standby port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=True, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=True) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -121,7 +121,7 @@ def test_mac_move( tor.shell("fdbclear") server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: 
testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -131,10 +131,10 @@ def test_mac_move( announce_new_neighbor.send(None) logging.info("mac move to another active port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -145,7 +145,7 @@ def test_mac_move( tor.shell("fdbclear") server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) diff --git a/tests/dualtor/test_orchagent_slb.py b/tests/dualtor/test_orchagent_slb.py index 4b0aeb89627..19e88996746 100644 --- a/tests/dualtor/test_orchagent_slb.py +++ b/tests/dualtor/test_orchagent_slb.py @@ -2,7 +2,6 @@ import pytest import random import time -import logging import scapy.all as scapyall from ptf import testutils @@ -20,7 +19,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from 
tests.common.helpers import bgp from tests.common.utilities import is_ipv4_address @@ -217,7 +215,7 @@ def test_orchagent_slb( force_active_tor, upper_tor_host, lower_tor_host, # noqa F811 ptfadapter, ptfhost, setup_interfaces, toggle_all_simulator_ports_to_upper_tor, tbinfo, # noqa F811 - tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 + tunnel_traffic_monitor, vmhost # noqa F811 ): def verify_bgp_session(duthost, bgp_neighbor): @@ -235,11 +233,8 @@ def verify_route(duthost, route, existing=True): else: assert len(existing_route["nexthops"]) == 0 - def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True, - skip_traffic_test=skip_traffic_test): - if skip_traffic_test is True: - logging.info("Skip traffic test.") - return + def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True): + prefix = ipaddress.ip_network(route["prefix"]) dst_host = str(next(prefix.hosts())) pkt, exp_pkt = build_packet_to_server(duthost, ptfadapter, dst_host) @@ -295,11 +290,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 3: verify the route by sending some downstream traffic verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=True, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=True, is_route_existed=True ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=False, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=False, is_route_existed=True ) # STEP 4: withdraw the announced route to both ToRs @@ -314,11 +309,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 5: verify the route is removed by verifying that downstream traffic is dropped verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=True, is_route_existed=False, 
skip_traffic_test=skip_traffic_test + is_duthost_active=True, is_route_existed=False ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=False, is_route_existed=False, skip_traffic_test=skip_traffic_test + is_duthost_active=False, is_route_existed=False ) # STEP 6: toggle mux state change @@ -341,11 +336,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 8: verify the route by sending some downstream traffic verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=False, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=False, is_route_existed=True ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=True, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=True, is_route_existed=True ) # STEP 9: verify teardown diff --git a/tests/dualtor/test_orchagent_standby_tor_downstream.py b/tests/dualtor/test_orchagent_standby_tor_downstream.py index 1d26d74187b..b59e6f4cc1b 100644 --- a/tests/dualtor/test_orchagent_standby_tor_downstream.py +++ b/tests/dualtor/test_orchagent_standby_tor_downstream.py @@ -19,7 +19,6 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert as pt_assert from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor # noqa F401 from tests.common.dualtor.server_traffic_utils import ServerTrafficMonitor @@ -62,13 +61,12 @@ def get_function_completeness_level(pytestconfig): @pytest.fixture def get_testbed_params(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - ip_version, 
setup_testbed_ipv6, get_function_completeness_level, skip_traffic_test): # noqa F811 + ip_version, setup_testbed_ipv6, get_function_completeness_level): # noqa F811 """Return a function to get testbed params.""" def _get_testbed_params(): params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, get_function_completeness_level) params["check_ipv6"] = (ip_version == "ipv6") - params["skip_traffic_test"] = skip_traffic_test return params return _get_testbed_params @@ -275,8 +273,7 @@ def test_standby_tor_remove_neighbor_downstream_standby( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, set_crm_polling_interval, tunnel_traffic_monitor, # noqa: F811 - vmhost, get_testbed_params, - ip_version, skip_traffic_test # noqa: F811 + vmhost, get_testbed_params, ip_version ): """ @summary: Verify that after removing neighbor entry for a server over standby @@ -307,7 +304,7 @@ def stop_neighbor_advertiser(ptfhost, ip_version): pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, target_server) ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send traffic to server %s from ptf t1 interface %s", target_server, ptf_t1_intf) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=True, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=True) with tunnel_monitor: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10) @@ -315,7 +312,7 @@ def stop_neighbor_advertiser(ptfhost, ip_version): tunnel_monitor.existing = False server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_params["selected_port"], conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) # for real dualtor testbed, leave the neighbor restoration to garp service flush_neighbor_ct = flush_neighbor(tor, target_server, 
restore=is_t0_mocked_dualtor) @@ -334,7 +331,7 @@ def test_downstream_standby_mux_toggle_active( rand_selected_dut, rand_unselected_dut, tbinfo, tunnel_traffic_monitor, vmhost, # noqa: F811 toggle_all_simulator_ports, tor_mux_intfs, # noqa: F811 - ip_version, get_testbed_params, skip_traffic_test # noqa: F811 + ip_version, get_testbed_params ): # set rand_selected_dut as standby and rand_unselected_dut to active tor test_params = get_testbed_params() @@ -349,9 +346,8 @@ def test_downstream_standby_mux_toggle_active( ptf_t1_intf = random.choice(get_t1_ptf_ports(rand_selected_dut, tbinfo)) def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, - expect_server_traffic=True, skip_traffic_test=False): - if skip_traffic_test is True: - return + expect_server_traffic=True): + tunnel_monitor = tunnel_traffic_monitor(rand_selected_dut, existing=True) server_traffic_monitor = ServerTrafficMonitor( torhost, ptfhost, vmhost, tbinfo, test_params["selected_port"], @@ -370,7 +366,7 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, logger.info("Step 1.2: Verify traffic to this route dst is forwarded to Active ToR and equally distributed") check_tunnel_balance(**test_params) monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, - expect_tunnel_traffic=True, skip_traffic_test=skip_traffic_test) + expect_tunnel_traffic=True) logger.info("Stage 2: Verify Active Forwarding") logger.info("Step 2.1: Simulate Mux state change to active") @@ -378,7 +374,7 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, time.sleep(30) logger.info("Step 2.2: Verify traffic to this route dst is forwarded directly to server") monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=True, - expect_tunnel_traffic=False, skip_traffic_test=skip_traffic_test) + expect_tunnel_traffic=False) logger.info("Stage 3: Verify Standby Forwarding Again") logger.info("Step 3.1: Simulate Mux state change 
to standby") @@ -387,7 +383,7 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, logger.info("Step 3.2: Verify traffic to this route dst \ is now redirected back to Active ToR and equally distributed") monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, - expect_tunnel_traffic=True, skip_traffic_test=skip_traffic_test) + expect_tunnel_traffic=True) check_tunnel_balance(**test_params) remove_static_routes(rand_selected_dut, random_dst_ip) diff --git a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py index e7765412292..99ff89f88b6 100644 --- a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py +++ b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py @@ -10,7 +10,7 @@ from tests.common.config_reload import config_reload from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, \ - run_icmp_responder, skip_traffic_test # noqa F401 + run_icmp_responder # noqa F401 logger = logging.getLogger(__file__) @@ -34,7 +34,7 @@ def test_cleanup(rand_selected_dut): def test_standby_tor_upstream_mux_toggle( rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface, # noqa F811 - toggle_all_simulator_ports, set_crm_polling_interval, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports, set_crm_polling_interval): # noqa F811 itfs, ip = rand_selected_interface PKT_NUM = 100 # Step 1. Set mux state to standby and verify traffic is dropped by ACL rule and drop counters incremented @@ -49,8 +49,7 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=True, - skip_traffic_test=skip_traffic_test) + drop=True) time.sleep(5) # Step 2. 
Toggle mux state to active, and verify traffic is not dropped by ACL and fwd-ed to uplinks; @@ -65,8 +64,7 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=False, - skip_traffic_test=skip_traffic_test) + drop=False) # Step 3. Toggle mux state to standby, and verify traffic is dropped by ACL; # verify CRM show and no nexthop objects are stale @@ -80,8 +78,7 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=True, - skip_traffic_test=skip_traffic_test) + drop=True) crm_facts1 = rand_selected_dut.get_crm_facts() unmatched_crm_facts = compare_crm_facts(crm_facts0, crm_facts1) pt_assert(len(unmatched_crm_facts) == 0, 'Unmatched CRM facts: {}' diff --git a/tests/dualtor/test_tor_ecn.py b/tests/dualtor/test_tor_ecn.py index 5e965dde253..07e5aafc710 100644 --- a/tests/dualtor/test_tor_ecn.py +++ b/tests/dualtor/test_tor_ecn.py @@ -28,7 +28,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output from tests.common.dualtor.tunnel_traffic_utils import derive_queue_id_from_dscp, derive_out_dscp_from_inner_dscp from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 @@ -254,7 +253,12 @@ def verify_ecn_on_received_packet( """ Verify ECN value on the received packet w.r.t expected packet """ - _, rec_pkt = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + result = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + if isinstance(result, tuple): + _, rec_pkt = result + elif 
isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test, skip following verify steps.") + return rec_pkt = Ether(rec_pkt) logging.info("received packet:\n%s", dump_scapy_packet_show_output(rec_pkt)) @@ -276,7 +280,7 @@ def test_dscp_to_queue_during_decap_on_active( inner_dscp, ptfhost, setup_dualtor_tor_active, request, rand_selected_interface, ptfadapter, # noqa F811 tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 - duthosts, rand_one_dut_hostname, skip_traffic_test # noqa F811 + duthosts, rand_one_dut_hostname ): """ Test if DSCP to Q mapping for inner header is matching with outer header during decap on active @@ -296,9 +300,6 @@ def test_dscp_to_queue_during_decap_on_active( duthost.shell('sonic-clear queuecounters') logging.info("Clearing queue counters before starting traffic") - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with stop_garp(ptfhost): ptfadapter.dataplane.flush() ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) @@ -309,7 +310,12 @@ def test_dscp_to_queue_during_decap_on_active( exp_dscp = exp_tos >> 2 exp_queue = derive_queue_id_from_dscp(duthost, exp_dscp, False) - _, rec_pkt = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + result = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + if isinstance(result, tuple): + _, rec_pkt = result + elif isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test, skip following verify steps.") + return rec_pkt = Ether(rec_pkt) logging.info("received decap packet:\n%s", dump_scapy_packet_show_output(rec_pkt)) @@ -351,7 +357,6 @@ def test_dscp_to_queue_during_encap_on_standby( rand_one_dut_hostname, write_standby, setup_standby_ports_on_rand_selected_tor, # noqa F811 - skip_traffic_test # noqa F811 ): """ Test if DSCP to Q mapping for outer header is matching with inner header during 
encap on standby @@ -372,9 +377,6 @@ def test_dscp_to_queue_during_encap_on_standby( ptfadapter.dataplane.flush() ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send IP packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with tunnel_traffic_monitor(tor, existing=True, packet_count=PACKET_NUM): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), non_encapsulated_packet, count=PACKET_NUM) @@ -384,7 +386,6 @@ def test_ecn_during_decap_on_active( inner_dscp, ptfhost, setup_dualtor_tor_active, request, rand_selected_interface, ptfadapter, # noqa F811 tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 - skip_traffic_test # noqa F811 ): """ Test if the ECN stamping on inner header is matching with outer during decap on active @@ -405,9 +406,6 @@ def test_ecn_during_decap_on_active( exp_tos = encapsulated_packet[IP].payload[IP].tos exp_ecn = exp_tos & 3 - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with stop_garp(ptfhost): tor.shell("portstat -c") tor.shell("show arp") @@ -425,7 +423,6 @@ def test_ecn_during_encap_on_standby( tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 write_standby, setup_standby_ports_on_rand_selected_tor, # noqa F811 - skip_traffic_test # noqa F811 ): """ Test if the ECN stamping on outer header is matching with inner during encap on standby @@ -440,8 +437,5 @@ def test_ecn_during_encap_on_standby( ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send IP packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with tunnel_traffic_monitor(tor, existing=True, packet_count=PACKET_NUM): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), non_encapsulated_packet, count=PACKET_NUM) diff --git 
a/tests/dualtor/test_tunnel_memory_leak.py b/tests/dualtor/test_tunnel_memory_leak.py index dbb46638433..357554cc00a 100644 --- a/tests/dualtor/test_tunnel_memory_leak.py +++ b/tests/dualtor/test_tunnel_memory_leak.py @@ -22,7 +22,6 @@ from tests.common.dualtor.dual_tor_utils import delete_neighbor from tests.common.helpers.dut_utils import get_program_info from tests.common.fixtures.ptfhost_utils import run_garp_service, run_icmp_responder # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import wait_until @@ -118,8 +117,7 @@ def _check_memory(duthost): def test_tunnel_memory_leak(toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost, run_arp_responder, # noqa F811 - skip_traffic_test): # noqa F811 + ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost, run_arp_responder): # noqa F811 """ Test if there is memory leak for service tunnel_packet_handler. 
Send ip packets from standby TOR T1 to Server, standby TOR will @@ -155,6 +153,7 @@ def prepare_services(ptfhost): all_servers_ips = mux_cable_server_ip(upper_tor_host) unexpected_count = 0 expected_count = 0 + asic_type = upper_tor_host.facts["asic_type"] with prepare_services(ptfhost): # Delete the neighbors @@ -173,9 +172,10 @@ def prepare_services(ptfhost): pkt, exp_pkt = build_packet_to_server(lower_tor_host, ptfadapter, server_ipv4) - if skip_traffic_test is True: - logging.info("Skip traffic test.") - continue + if asic_type == "vs": + logging.info("ServerTrafficMonitor do not support on KVM dualtor, skip following steps.") + return + server_traffic_monitor = ServerTrafficMonitor( upper_tor_host, ptfhost, vmhost, tbinfo, iface, conn_graph_facts, exp_pkt, existing=True, is_mocked=False diff --git a/tests/dualtor_io/test_heartbeat_failure.py b/tests/dualtor_io/test_heartbeat_failure.py index 49afb7994ad..eddcf71e51b 100644 --- a/tests/dualtor_io/test_heartbeat_failure.py +++ b/tests/dualtor_io/test_heartbeat_failure.py @@ -1,5 +1,4 @@ import pytest -import logging from tests.common.dualtor.control_plane_utils import verify_tor_states from tests.common.dualtor.data_plane_utils import send_t1_to_server_with_action, \ @@ -10,7 +9,6 @@ from tests.common.dualtor.tor_failure_utils import shutdown_tor_heartbeat # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 from tests.common.dualtor.dual_tor_common import CableType @@ -36,18 +34,16 @@ def ignore_expected_loganalyzer_exception(loganalyzer, duthosts): def test_active_tor_heartbeat_failure_upstream( - toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - 
send_server_to_t1_with_action, shutdown_tor_heartbeat, cable_type, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 + send_server_to_t1_with_action, shutdown_tor_heartbeat, cable_type # noqa F811 ): """ Send upstream traffic and stop the LinkProber module on the active ToR. Confirm switchover and disruption lasts < 1 second. """ - logging.info("skip_traffic_test: {}".format(skip_traffic_test)) send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_tor_heartbeat(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(upper_tor_host) ) if cable_type == CableType.active_standby: @@ -68,7 +64,7 @@ def test_active_tor_heartbeat_failure_upstream( @pytest.mark.enable_active_active def test_active_tor_heartbeat_failure_downstream_active( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, cable_type, skip_traffic_test # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat, cable_type # noqa F811 ): """ Send downstream traffic from T1 to the active ToR and stop the LinkProber module on the active ToR. 
@@ -76,8 +72,7 @@ def test_active_tor_heartbeat_failure_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_tor_heartbeat(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(upper_tor_host) ) if cable_type == CableType.active_standby: @@ -97,15 +92,14 @@ def test_active_tor_heartbeat_failure_downstream_active( def test_active_tor_heartbeat_failure_downstream_standby( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send downstream traffic from T1 to the standby ToR and stop the LinkProber module on the active ToR. Confirm switchover and disruption lasts < 1 second. """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_tor_heartbeat(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(upper_tor_host) ) verify_tor_states( expected_active_host=lower_tor_host, @@ -115,15 +109,14 @@ def test_active_tor_heartbeat_failure_downstream_standby( def test_standby_tor_heartbeat_failure_upstream( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_server_to_t1_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_server_to_t1_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send upstream traffic and stop the LinkProber module on the standby ToR. Confirm no switchover and no disruption. 
""" send_server_to_t1_with_action( upper_tor_host, verify=True, - action=lambda: shutdown_tor_heartbeat(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, @@ -133,15 +126,14 @@ def test_standby_tor_heartbeat_failure_upstream( def test_standby_tor_heartbeat_failure_downstream_active( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send downstream traffic from T1 to the active ToR and stop the LinkProber module on the standby ToR. Confirm no switchover and no disruption. """ send_t1_to_server_with_action( upper_tor_host, verify=True, - action=lambda: shutdown_tor_heartbeat(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, @@ -151,15 +143,14 @@ def test_standby_tor_heartbeat_failure_downstream_active( def test_standby_tor_heartbeat_failure_downstream_standby( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send downstream traffic from T1 to the standby ToR and stop the LinkProber module on the standby ToR. Confirm no switchover and no disruption. 
""" send_t1_to_server_with_action( lower_tor_host, verify=True, - action=lambda: shutdown_tor_heartbeat(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, diff --git a/tests/dualtor_io/test_link_drop.py b/tests/dualtor_io/test_link_drop.py index 27909f13026..5330af7d6a8 100644 --- a/tests/dualtor_io/test_link_drop.py +++ b/tests/dualtor_io/test_link_drop.py @@ -18,7 +18,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import ActiveActivePortID from tests.common.dualtor.dual_tor_common import active_active_ports # noqa F401 @@ -97,7 +96,7 @@ def _drop_flow_upper_tor_active_active(): def test_active_link_drop_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all, # noqa F811 - drop_flow_upper_tor_active_active, cable_type, skip_traffic_test # noqa F811 + drop_flow_upper_tor_active_active, cable_type # noqa F811 ): """ Send traffic from servers to T1 and remove the flow between the servers and the active ToR. 
@@ -109,8 +108,7 @@ def test_active_link_drop_upstream( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=3, - action=drop_flow_upper_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_all ) verify_tor_states( expected_active_host=lower_tor_host, @@ -125,8 +123,7 @@ def test_active_link_drop_upstream( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=1, - action=drop_flow_upper_tor_active_active, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_active_active ) verify_tor_states( expected_active_host=lower_tor_host, @@ -141,7 +138,7 @@ def test_active_link_drop_upstream( def test_active_link_drop_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all, # noqa F811 - drop_flow_upper_tor_active_active, cable_type, skip_traffic_test # noqa F811 + drop_flow_upper_tor_active_active, cable_type # noqa F811 ): """ Send traffic from the T1s to the servers via the active Tor and remove the flow between the @@ -154,8 +151,7 @@ def test_active_link_drop_downstream_active( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=3, - action=drop_flow_upper_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_all ) verify_tor_states( expected_active_host=lower_tor_host, @@ -170,8 +166,7 @@ def test_active_link_drop_downstream_active( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=1, - action=drop_flow_upper_tor_active_active, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_active_active ) verify_tor_states( expected_active_host=lower_tor_host, @@ -184,8 +179,7 @@ def test_active_link_drop_downstream_active( def test_active_link_drop_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all, # noqa F811 - 
skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all # noqa F811 ): """ Send traffic from the T1s to the servers via the standby Tor and remove the flow between the @@ -197,8 +191,7 @@ def test_active_link_drop_downstream_standby( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=3, - action=drop_flow_upper_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_all ) verify_tor_states( expected_active_host=lower_tor_host, @@ -210,7 +203,7 @@ def test_active_link_drop_downstream_standby( def test_standby_link_drop_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 check_simulator_flap_counter, drop_flow_lower_tor_all, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor # noqa F811 ): """ Send traffic from servers to T1 and remove the flow between the servers and the standby ToR. @@ -221,8 +214,7 @@ def test_standby_link_drop_upstream( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=2, - action=drop_flow_lower_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_lower_tor_all ) verify_tor_states( expected_active_host=upper_tor_host, @@ -235,7 +227,7 @@ def test_standby_link_drop_upstream( def test_standby_link_drop_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 check_simulator_flap_counter, drop_flow_lower_tor_all, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor # noqa F811 ): """ Send traffic from the T1s to the servers via the active Tor and remove the flow between the @@ -247,8 +239,7 @@ def test_standby_link_drop_downstream_active( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=2, - action=drop_flow_lower_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_lower_tor_all ) 
verify_tor_states( expected_active_host=upper_tor_host, @@ -261,7 +252,7 @@ def test_standby_link_drop_downstream_active( def test_standby_link_drop_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 check_simulator_flap_counter, drop_flow_lower_tor_all, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor # noqa F811 ): """ Send traffic from the T1s to the servers via the standby Tor and remove the flow between the @@ -273,8 +264,7 @@ def test_standby_link_drop_downstream_standby( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=2, - action=drop_flow_lower_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_lower_tor_all ) verify_tor_states( expected_active_host=upper_tor_host, diff --git a/tests/dualtor_io/test_link_failure.py b/tests/dualtor_io/test_link_failure.py index 580f73d805d..54aada394b5 100644 --- a/tests/dualtor_io/test_link_failure.py +++ b/tests/dualtor_io/test_link_failure.py @@ -11,7 +11,6 @@ from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_upper_tor # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import active_active_ports # noqa F401 from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 @@ -28,7 +27,7 @@ def test_active_link_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_upper_tor_intfs, cable_type, skip_traffic_test # noqa F811 + shutdown_fanout_upper_tor_intfs, cable_type # noqa F811 ): """ Send traffic from server to T1 and 
shutdown the active ToR link. @@ -37,8 +36,7 @@ def test_active_link_down_upstream( if cable_type == CableType.active_active: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -51,8 +49,7 @@ def test_active_link_down_upstream( if cable_type == CableType.active_standby: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( @@ -67,7 +64,7 @@ def test_active_link_down_upstream( def test_active_link_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_upper_tor_intfs, cable_type, skip_traffic_test # noqa F811 + shutdown_fanout_upper_tor_intfs, cable_type # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the active ToR link. 
@@ -76,8 +73,7 @@ def test_active_link_down_downstream_active( if cable_type == CableType.active_standby: send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -88,8 +84,7 @@ def test_active_link_down_downstream_active( if cable_type == CableType.active_active: send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -103,7 +98,7 @@ def test_active_link_down_downstream_active( def test_active_link_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_upper_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_upper_tor_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdown the active ToR link. 
@@ -111,8 +106,7 @@ def test_active_link_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -124,7 +118,7 @@ def test_active_link_down_downstream_standby( def test_standby_link_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_lower_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_lower_tor_intfs # noqa F811 ): """ Send traffic from server to T1 and shutdown the standby ToR link. @@ -132,8 +126,7 @@ def test_standby_link_down_upstream( """ send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -145,7 +138,7 @@ def test_standby_link_down_upstream( def test_standby_link_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_lower_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_lower_tor_intfs # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the standby ToR link. 
@@ -153,8 +146,7 @@ def test_standby_link_down_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -166,7 +158,7 @@ def test_standby_link_down_downstream_active( def test_standby_link_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_lower_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_lower_tor_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdwon the standby ToR link. @@ -174,8 +166,7 @@ def test_standby_link_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -187,7 +178,7 @@ def test_standby_link_down_downstream_standby( def test_active_tor_downlink_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_upper_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_upper_tor_downlink_intfs # noqa F811 ): """ Send traffic from server to T1 and shutdown the active ToR downlink on DUT. 
@@ -195,8 +186,7 @@ def test_active_tor_downlink_down_upstream( """ send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -208,7 +198,7 @@ def test_active_tor_downlink_down_upstream( def test_active_tor_downlink_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_upper_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_upper_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the active ToR downlink on DUT. @@ -216,8 +206,7 @@ def test_active_tor_downlink_down_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -229,7 +218,7 @@ def test_active_tor_downlink_down_downstream_active( def test_active_tor_downlink_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_upper_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_upper_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdown the active ToR downlink on DUT. 
@@ -237,8 +226,7 @@ def test_active_tor_downlink_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -250,7 +238,7 @@ def test_active_tor_downlink_down_downstream_standby( def test_standby_tor_downlink_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_lower_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_lower_tor_downlink_intfs # noqa F811 ): """ Send traffic from server to T1 and shutdown the standby ToR downlink on DUT. @@ -258,8 +246,7 @@ def test_standby_tor_downlink_down_upstream( """ send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -271,7 +258,7 @@ def test_standby_tor_downlink_down_upstream( def test_standby_tor_downlink_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_lower_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_lower_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the standby ToR downlink on DUT. 
@@ -279,8 +266,7 @@ def test_standby_tor_downlink_down_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -292,7 +278,7 @@ def test_standby_tor_downlink_down_downstream_active( def test_standby_tor_downlink_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_lower_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_lower_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdwon the standby ToR downlink on DUT. @@ -300,8 +286,7 @@ def test_standby_tor_downlink_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs ) verify_tor_states( expected_active_host=upper_tor_host, diff --git a/tests/dualtor_io/test_normal_op.py b/tests/dualtor_io/test_normal_op.py index 20781585e6e..e71df5097e6 100644 --- a/tests/dualtor_io/test_normal_op.py +++ b/tests/dualtor_io/test_normal_op.py @@ -14,7 +14,6 @@ from tests.common.dualtor.dual_tor_utils import check_simulator_flap_counter # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC, CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC from tests.common.utilities import wait_until from tests.common.helpers.assertions 
import pytest_assert @@ -29,19 +28,16 @@ def test_normal_op_upstream(upper_tor_host, lower_tor_host, # noqa F811 send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """Send upstream traffic and confirm no disruption or switchover occurs""" if cable_type == CableType.active_standby: - send_server_to_t1_with_action(upper_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_t1_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host, skip_tunnel_route=False) if cable_type == CableType.active_active: - send_server_to_t1_with_action(upper_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_t1_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type, @@ -52,21 +48,18 @@ def test_normal_op_upstream(upper_tor_host, lower_tor_host, # noqa F def test_normal_op_downstream_upper_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the upper ToR and confirm no disruption or switchover occurs """ if cable_type == CableType.active_standby: - send_t1_to_server_with_action(upper_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) if cable_type == CableType.active_active: - send_t1_to_server_with_action(upper_tor_host, verify=True, - stop_after=60, 
skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -76,21 +69,18 @@ def test_normal_op_downstream_upper_tor(upper_tor_host, lower_tor_host, def test_normal_op_downstream_lower_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the lower ToR and confirm no disruption or switchover occurs """ if cable_type == CableType.active_standby: - send_t1_to_server_with_action(lower_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(lower_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) if cable_type == CableType.active_active: - send_t1_to_server_with_action(lower_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(lower_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -101,8 +91,7 @@ def test_normal_op_active_server_to_active_server(upper_tor_host, lower_tor_host send_server_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 cable_type, # noqa F811 - select_test_mux_ports, # noqa F811 - skip_traffic_test): # noqa F811 + select_test_mux_ports): # noqa F811 """ Send server to server traffic in active-active setup and confirm no disruption or switchover occurs. 
""" @@ -110,15 +99,13 @@ def test_normal_op_active_server_to_active_server(upper_tor_host, lower_tor_host test_mux_ports = select_test_mux_ports(cable_type, 2) if cable_type == CableType.active_standby: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host, skip_tunnel_route=False) if cable_type == CableType.active_active: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type, @@ -130,8 +117,7 @@ def test_normal_op_active_server_to_standby_server(upper_tor_host, lower_tor_hos send_server_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 cable_type, force_standby_tor, # noqa F811 - select_test_mux_ports, # noqa F811 - skip_traffic_test): # noqa F811 + select_test_mux_ports): # noqa F811 """ Send server to server traffic in active-standby setup and confirm no disruption or switchover occurs. 
""" @@ -147,12 +133,10 @@ def _is_mux_port_standby(duthost, mux_port): "failed to toggle mux port %s to standby on DUT %s" % (tx_mux_port, upper_tor_host.hostname)) if cable_type == CableType.active_standby: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) if cable_type == CableType.active_active: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) # TODO: Add per-port db check @@ -162,8 +146,7 @@ def _is_mux_port_standby(duthost, mux_port): def test_upper_tor_config_reload_upstream(upper_tor_host, lower_tor_host, # noqa F811 send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send upstream traffic and `config reload` the active ToR. Confirm switchover occurs and disruption lasted < 1 second for active-standby ports. 
@@ -171,15 +154,13 @@ def test_upper_tor_config_reload_upstream(upper_tor_host, lower_tor_host, """ if cable_type == CableType.active_standby: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC, - action=lambda: config_reload(upper_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(upper_tor_host, wait=0)) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: config_reload(upper_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(upper_tor_host, wait=0)) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -189,16 +170,14 @@ def test_upper_tor_config_reload_upstream(upper_tor_host, lower_tor_host, def test_lower_tor_config_reload_upstream(upper_tor_host, lower_tor_host, # noqa F811 send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send upstream traffic and `config reload` the lower ToR. Confirm no switchover occurs and no disruption. 
""" if cable_type == CableType.active_standby: send_server_to_t1_with_action(upper_tor_host, verify=True, - action=lambda: config_reload(lower_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(lower_tor_host, wait=0)) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) @@ -208,23 +187,20 @@ def test_lower_tor_config_reload_upstream(upper_tor_host, lower_tor_host, def test_lower_tor_config_reload_downstream_upper_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the upper ToR and `config reload` the lower ToR. Confirm no switchover occurs and no disruption """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(upper_tor_host, verify=True, - action=lambda: config_reload(lower_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(lower_tor_host, wait=0)) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) if cable_type == CableType.active_active: send_t1_to_server_with_action(upper_tor_host, verify=True, - action=lambda: config_reload(lower_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(lower_tor_host, wait=0)) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -234,8 +210,7 @@ def test_lower_tor_config_reload_downstream_upper_tor(upper_tor_host, lower_tor_ def test_upper_tor_config_reload_downstream_lower_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the 
lower ToR and `config reload` the upper ToR. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. @@ -243,8 +218,7 @@ def test_upper_tor_config_reload_downstream_lower_tor(upper_tor_host, lower_tor_ """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(lower_tor_host, verify=True, delay=CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC, - action=lambda: config_reload(upper_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(upper_tor_host, wait=0)) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) @@ -254,8 +228,7 @@ def test_tor_switch_upstream(upper_tor_host, lower_tor_host, # no send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 force_active_tor, force_standby_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send upstream traffic and perform switchover via CLI. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. 
@@ -263,15 +236,13 @@ def test_tor_switch_upstream(upper_tor_host, lower_tor_host, # no """ if cable_type == CableType.active_standby: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_active_tor(lower_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_active_tor(lower_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_standby_tor(upper_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_standby_tor(upper_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host, expected_standby_health="healthy", @@ -283,8 +254,7 @@ def test_tor_switch_downstream_active(upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 force_active_tor, force_standby_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the upper ToR and perform switchover via CLI. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. 
@@ -292,15 +262,13 @@ def test_tor_switch_downstream_active(upper_tor_host, lower_tor_host, """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_active_tor(lower_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_active_tor(lower_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_t1_to_server_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_standby_tor(upper_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_standby_tor(upper_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host, expected_standby_health="healthy", @@ -312,8 +280,7 @@ def test_tor_switch_downstream_standby(upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 force_active_tor, force_standby_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the lower ToR and perform switchover via CLI. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. 
@@ -321,15 +288,13 @@ def test_tor_switch_downstream_standby(upper_tor_host, lower_tor_host, """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_active_tor(lower_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_active_tor(lower_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_t1_to_server_with_action(lower_tor_host, verify=True, - action=lambda: force_standby_tor(upper_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_standby_tor(upper_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host, expected_standby_health="healthy", diff --git a/tests/dualtor_io/test_tor_bgp_failure.py b/tests/dualtor_io/test_tor_bgp_failure.py index 91783bd14fe..c6643a08134 100644 --- a/tests/dualtor_io/test_tor_bgp_failure.py +++ b/tests/dualtor_io/test_tor_bgp_failure.py @@ -11,7 +11,6 @@ from tests.common.dualtor.tor_failure_utils import shutdown_bgp_sessions_on_duthost from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 @@ -80,7 +79,7 @@ def ignore_expected_loganalyzer_exception(loganalyzer, duthosts): def test_active_tor_kill_bgpd_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, kill_bgpd, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_upper_tor, 
kill_bgpd): # noqa F811 ''' Case: Server -> ToR -> T1 (Active ToR BGP Down) Action: Shutdown all BGP sessions on the active ToR @@ -92,8 +91,7 @@ def test_active_tor_kill_bgpd_upstream( ''' send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: kill_bgpd(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(upper_tor_host) ) verify_tor_states( expected_active_host=lower_tor_host, @@ -103,7 +101,7 @@ def test_active_tor_kill_bgpd_upstream( def test_standby_tor_kill_bgpd_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, kill_bgpd, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_upper_tor, kill_bgpd): # noqa F811 ''' Case: Server -> ToR -> T1 (Standby ToR BGP Down) Action: Shutdown all BGP sessions on the standby ToR @@ -114,8 +112,7 @@ def test_standby_tor_kill_bgpd_upstream( ''' send_server_to_t1_with_action( upper_tor_host, verify=True, - action=lambda: kill_bgpd(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, @@ -126,7 +123,7 @@ def test_standby_tor_kill_bgpd_upstream( def test_standby_tor_kill_bgpd_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, kill_bgpd, # noqa F811 - tunnel_traffic_monitor, skip_traffic_test): # noqa F811 + tunnel_traffic_monitor): # noqa F811 ''' Case: T1 -> Active ToR -> Server (Standby ToR BGP Down) Action: Shutdown all BGP sessions on the standby ToR @@ -137,8 +134,7 @@ def test_standby_tor_kill_bgpd_downstream_active( with tunnel_traffic_monitor(lower_tor_host, existing=False): send_t1_to_server_with_action( upper_tor_host, verify=True, - action=lambda: kill_bgpd(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(lower_tor_host) ) 
verify_tor_states( expected_active_host=upper_tor_host, @@ -149,7 +145,7 @@ def test_standby_tor_kill_bgpd_downstream_active( def test_active_tor_kill_bgpd_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, kill_bgpd, # noqa F811 - tunnel_traffic_monitor, skip_traffic_test): # noqa F811 + tunnel_traffic_monitor): # noqa F811 ''' Case: T1 -> Standby ToR -> Server (Active ToR BGP Down) Action: Shutdown all BGP sessions on the active ToR @@ -160,8 +156,7 @@ def test_active_tor_kill_bgpd_downstream_standby( ''' send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: kill_bgpd(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(upper_tor_host) ) verify_tor_states( expected_active_host=lower_tor_host, @@ -173,7 +168,7 @@ def test_active_tor_kill_bgpd_downstream_standby( def test_active_tor_shutdown_bgp_sessions_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_bgp_sessions, cable_type, skip_traffic_test # noqa F811 + shutdown_bgp_sessions, cable_type # noqa F811 ): """ Case: Server -> ToR -> T1 (Active ToR BGP Down) @@ -187,15 +182,13 @@ def test_active_tor_shutdown_bgp_sessions_upstream( if cable_type == CableType.active_standby: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_bgp_sessions(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_bgp_sessions(upper_tor_host) ) if cable_type == CableType.active_active: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_bgp_sessions(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_bgp_sessions(upper_tor_host) ) if cable_type == CableType.active_active: 
diff --git a/tests/dualtor_mgmt/test_ingress_drop.py b/tests/dualtor_mgmt/test_ingress_drop.py index c98be9db041..75169847743 100644 --- a/tests/dualtor_mgmt/test_ingress_drop.py +++ b/tests/dualtor_mgmt/test_ingress_drop.py @@ -16,8 +16,6 @@ from tests.common.dualtor.nic_simulator_control import mux_status_from_nic_simulator # noqa F401 from tests.common.dualtor.nic_simulator_control import stop_nic_simulator # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 - from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -104,7 +102,7 @@ def selected_mux_port(cable_type, active_active_ports, active_standby_ports): @pytest.mark.enable_active_active -def test_ingress_drop(cable_type, ptfadapter, setup_mux, tbinfo, selected_mux_port, upper_tor_host, skip_traffic_test): # noqa F811 +def test_ingress_drop(cable_type, ptfadapter, setup_mux, tbinfo, selected_mux_port, upper_tor_host): # noqa F811 """ Aims to verify if orchagent installs ingress drop ACL when the port comes to standby. 
@@ -131,7 +129,7 @@ def test_ingress_drop(cable_type, ptfadapter, setup_mux, tbinfo, selected_mux_po if cable_type == CableType.active_active: verify_upstream_traffic(upper_tor_host, ptfadapter, tbinfo, selected_mux_port, - server_ip, pkt_num=10, drop=False, skip_traffic_test=skip_traffic_test) + server_ip, pkt_num=10, drop=False) elif cable_type == CableType.active_standby: verify_upstream_traffic(upper_tor_host, ptfadapter, tbinfo, selected_mux_port, - server_ip, pkt_num=10, drop=True, skip_traffic_test=skip_traffic_test) + server_ip, pkt_num=10, drop=True) diff --git a/tests/dut_console/test_console_baud_rate.py b/tests/dut_console/test_console_baud_rate.py index 6c974edfc95..a156c6f77d4 100644 --- a/tests/dut_console/test_console_baud_rate.py +++ b/tests/dut_console/test_console_baud_rate.py @@ -22,7 +22,7 @@ def is_sonic_console(conn_graph_facts, dut_hostname): def get_expected_baud_rate(duthost): - DEFAULT_BAUDRATE = "9600" + DEFAULT_BAUDRATE = 9600 hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] return hostvars.get('console_baudrate', DEFAULT_BAUDRATE) @@ -31,7 +31,7 @@ def test_console_baud_rate_config(duthost): expected_baud_rate = get_expected_baud_rate(duthost) res = duthost.shell("cat /proc/cmdline | grep -Eo 'console=ttyS[0-9]+,[0-9]+' | cut -d ',' -f2") pytest_require(res["stdout"] != "", "Cannot get baud rate") - if res["stdout"] != expected_baud_rate: + if res["stdout"] != str(expected_baud_rate): global pass_config_test pass_config_test = False pytest.fail("Device baud rate is {}, expected {}".format(res["stdout"], expected_baud_rate)) diff --git a/tests/dut_console/test_console_chassis_conn.py b/tests/dut_console/test_console_chassis_conn.py new file mode 100644 index 00000000000..49e05babb41 --- /dev/null +++ b/tests/dut_console/test_console_chassis_conn.py @@ -0,0 +1,82 @@ +import pexpect +import pytest +import time + +from tests.common.helpers.assertions import pytest_assert +from 
tests.common.helpers.console_helper import get_target_lines, handle_pexpect_exceptions + +pytestmark = [ + pytest.mark.topology("t2") # Test is only for T2 Chassis +] + + +def test_console_availability_serial_ports(duthost, duthosts, creds, enum_supervisor_dut_hostname): + + duthost = duthosts[enum_supervisor_dut_hostname] + dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + dutuser, dutpass = creds['sonicadmin_user'], creds['sonicadmin_password'] + + target_lines = get_target_lines(duthost) # List of Serial port numbers connected from supervisor to linecards + + for target_line in target_lines: + if 'arista' in duthost.facts['hwsku'].lower(): + console_command = f"sudo /usr/bin/picocom /dev/ttySCD{target_line}" + try: + client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' + .format(dutuser, dutip)) + client.expect('[Pp]assword:') + client.sendline(dutpass) + client.sendline(console_command) + time.sleep(5) + client.sendline('\n') + client.expect(['login:'], timeout=20) + client.sendline(dutuser) + client.expect(['[Pp]assword:'], timeout=10) + client.sendline(dutpass) + + i = client.expect([r'.*Software\s+for\s+Open\s+Networking\s+in\s+the\s+Cloud.*', + 'Login incorrect'], timeout=100) + pytest_assert(i == 0, + f"Failed to connect to line card {target_line} " + "on Arista device. 
Please check credentials.") + + client.sendline('exit') + time.sleep(2) + client.sendcontrol('a') + time.sleep(2) + client.sendcontrol('x') + except Exception as e: + handle_pexpect_exceptions(target_line)(e) + + elif 'cisco' in duthost.facts['hwsku'].lower(): + console_command = f"sudo /opt/cisco/bin/rconsole.py -s {target_line}" + try: + client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' + .format(dutuser, dutip)) + client.expect('[Pp]assword:') + client.sendline(dutpass) + time.sleep(10) + client.sendline(console_command) + time.sleep(10) + client.sendline(dutuser) + client.expect(['[Pp]assword:'], timeout=10) + time.sleep(10) + client.sendline(dutpass) + time.sleep(10) + + i = client.expect([r'.*Software\s+for\s+Open\s+Networking\s+in\s+the\s+Cloud.*', + 'Login incorrect'], timeout=100) + pytest_assert(i == 0, + f"Failed to connect to line card {target_line} on Cisco device.Please check credentials.") + + client.sendline('exit') + time.sleep(2) + client.sendcontrol('\\') + time.sleep(2) + client.sendline('quit') + + except Exception as e: + handle_pexpect_exceptions(target_line)(e) + + else: + pytest.skip("Skipping test because test is not supported on this hwsku.") diff --git a/tests/ecmp/inner_hashing/test_inner_hashing.py b/tests/ecmp/inner_hashing/test_inner_hashing.py index fe45fe66169..896520b26a5 100644 --- a/tests/ecmp/inner_hashing/test_inner_hashing.py +++ b/tests/ecmp/inner_hashing/test_inner_hashing.py @@ -13,7 +13,6 @@ from tests.ptf_runner import ptf_runner from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST,\ VXLAN_PORT, PTF_QLEN, check_pbh_counters, OUTER_ENCAP_FORMATS, NVGRE_TNI, IP_VERSIONS_LIST, config_pbh -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -35,7 +34,7 @@ def setup_dynamic_pbh(self, duthost, vlan_ptf_ports, tbinfo): def test_inner_hashing(self, request, hash_keys, ptfhost, 
outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, duthost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing dynamic inner hash test for outer {} and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) with allure.step('Run ptf test InnerHashTest'): @@ -73,22 +72,22 @@ def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipv "symmetric_hashing": symmetric_hashing} duthost.shell("sonic-clear pbh statistics") - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) - retry_call(check_pbh_counters, - fargs=[duthost, outer_ipver, inner_ipver, balancing_test_times, - symmetric_hashing, hash_keys, lag_mem_ptf_ports_groups], - tries=5, - delay=5) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) + + retry_call(check_pbh_counters, + fargs=[duthost, outer_ipver, inner_ipver, balancing_test_times, + symmetric_hashing, hash_keys, lag_mem_ptf_ports_groups], + tries=5, + delay=5) if update_outer_ipver == outer_ipver and update_inner_ipver == inner_ipver: logging.info("Validate dynamic inner hash Edit Flow for outer {} and inner {} ip versions with" @@ -105,8 +104,7 @@ def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipv with allure.step('Run again the ptf test InnerHashTest after updating the rules'): logging.info('Run again the ptf test InnerHashTest after updating the rules') duthost.shell("sonic-clear pbh statistics") - if skip_traffic_test is True: - return + ptf_runner(ptfhost, "ptftests", "inner_hash_test.InnerHashTest", @@ 
-128,7 +126,7 @@ def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipv class TestStaticInnerHashing(): def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, - vlan_ptf_ports, symmetric_hashing, lag_mem_ptf_ports_groups, skip_traffic_test): # noqa F811 + vlan_ptf_ports, symmetric_hashing, lag_mem_ptf_ports_groups): logging.info("Executing static inner hash test for outer {} and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') @@ -138,8 +136,6 @@ def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, route outer_src_ip_range, outer_dst_ip_range = get_src_dst_ip_range(outer_ipver) inner_src_ip_range, inner_dst_ip_range = get_src_dst_ip_range(inner_ipver) - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "inner_hash_test.InnerHashTest", diff --git a/tests/ecmp/inner_hashing/test_inner_hashing_lag.py b/tests/ecmp/inner_hashing/test_inner_hashing_lag.py index ed616569865..7c1ccc2f00f 100644 --- a/tests/ecmp/inner_hashing/test_inner_hashing_lag.py +++ b/tests/ecmp/inner_hashing/test_inner_hashing_lag.py @@ -12,7 +12,6 @@ from tests.ptf_runner import ptf_runner from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST,\ VXLAN_PORT, PTF_QLEN, check_pbh_counters, OUTER_ENCAP_FORMATS, NVGRE_TNI, setup_lag_config, config_pbh_lag -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -33,7 +32,7 @@ def setup_dynamic_pbh(self, duthost, lag_port_map, lag_ip_map): def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, duthost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing dynamic inner hash test for outer {} 
and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) with allure.step('Run ptf test InnerHashTest'): @@ -54,8 +53,6 @@ def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, route balancing_test_times = 20 balancing_range = 0.5 - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "inner_hash_test.InnerHashTest", diff --git a/tests/ecmp/inner_hashing/test_wr_inner_hashing.py b/tests/ecmp/inner_hashing/test_wr_inner_hashing.py index 02d697cea33..28325423dc3 100644 --- a/tests/ecmp/inner_hashing/test_wr_inner_hashing.py +++ b/tests/ecmp/inner_hashing/test_wr_inner_hashing.py @@ -9,7 +9,6 @@ from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST, VXLAN_PORT,\ PTF_QLEN, OUTER_ENCAP_FORMATS, NVGRE_TNI, config_pbh from tests.ptf_runner import ptf_runner -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -29,7 +28,7 @@ def setup_dynamic_pbh(self, duthost, vlan_ptf_ports, tbinfo): def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing warm boot dynamic inner hash test for outer {} and inner {} with symmetric_hashing" " set to {}".format(outer_ipver, inner_ipver, str(symmetric_hashing))) with allure.step('Run ptf test InnerHashTest and warm-reboot in parallel'): @@ -57,30 +56,29 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv reboot_thr = threading.Thread(target=reboot, args=(duthost, localhost, 'warm', 10, 0, 0, True, True,)) reboot_thr.start() - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params={"fib_info": FIB_INFO_FILE_DST, - 
"router_mac": router_mac, - "src_ports": vlan_ptf_ports, - "exp_port_groups": lag_mem_ptf_ports_groups, - "hash_keys": hash_keys, - "vxlan_port": VXLAN_PORT, - "inner_src_ip_range": ",".join(inner_src_ip_range), - "inner_dst_ip_range": ",".join(inner_dst_ip_range), - "outer_src_ip_range": ",".join(outer_src_ip_range), - "outer_dst_ip_range": ",".join(outer_dst_ip_range), - "balancing_test_times": balancing_test_times, - "balancing_range": balancing_range, - "outer_encap_formats": outer_encap_format, - "nvgre_tni": NVGRE_TNI, - "symmetric_hashing": symmetric_hashing}, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params={"fib_info": FIB_INFO_FILE_DST, + "router_mac": router_mac, + "src_ports": vlan_ptf_ports, + "exp_port_groups": lag_mem_ptf_ports_groups, + "hash_keys": hash_keys, + "vxlan_port": VXLAN_PORT, + "inner_src_ip_range": ",".join(inner_src_ip_range), + "inner_dst_ip_range": ",".join(inner_dst_ip_range), + "outer_src_ip_range": ",".join(outer_src_ip_range), + "outer_dst_ip_range": ",".join(outer_dst_ip_range), + "balancing_test_times": balancing_test_times, + "balancing_range": balancing_range, + "outer_encap_formats": outer_encap_format, + "nvgre_tni": NVGRE_TNI, + "symmetric_hashing": symmetric_hashing}, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) reboot_thr.join() @@ -88,7 +86,7 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv class TestWRStaticInnerHashing(): def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, - vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups, skip_traffic_test): # noqa F811 + vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups): logging.info("Executing static inner hash test for outer {} and inner {} with symmetric_hashing set to {}" 
.format(outer_ipver, inner_ipver, str(symmetric_hashing))) timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') @@ -102,25 +100,24 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv reboot_thr = threading.Thread(target=reboot, args=(duthost, localhost, 'warm', 10, 0, 0, True, True,)) reboot_thr.start() - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params={"fib_info": FIB_INFO_FILE_DST, - "router_mac": router_mac, - "src_ports": vlan_ptf_ports, - "exp_port_groups": lag_mem_ptf_ports_groups, - "hash_keys": hash_keys, - "vxlan_port": VXLAN_PORT, - "inner_src_ip_range": ",".join(inner_src_ip_range), - "inner_dst_ip_range": ",".join(inner_dst_ip_range), - "outer_src_ip_range": ",".join(outer_src_ip_range), - "outer_dst_ip_range": ",".join(outer_dst_ip_range), - "outer_encap_formats": OUTER_ENCAP_FORMATS, - "symmetric_hashing": symmetric_hashing}, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params={"fib_info": FIB_INFO_FILE_DST, + "router_mac": router_mac, + "src_ports": vlan_ptf_ports, + "exp_port_groups": lag_mem_ptf_ports_groups, + "hash_keys": hash_keys, + "vxlan_port": VXLAN_PORT, + "inner_src_ip_range": ",".join(inner_src_ip_range), + "inner_dst_ip_range": ",".join(inner_dst_ip_range), + "outer_src_ip_range": ",".join(outer_src_ip_range), + "outer_dst_ip_range": ",".join(outer_dst_ip_range), + "outer_encap_formats": OUTER_ENCAP_FORMATS, + "symmetric_hashing": symmetric_hashing}, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) reboot_thr.join() diff --git a/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py b/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py index 6ce69b57d71..e6371e9a06d 100644 --- a/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py +++ 
b/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py @@ -9,7 +9,6 @@ from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST, VXLAN_PORT,\ PTF_QLEN, OUTER_ENCAP_FORMATS, NVGRE_TNI, setup_lag_config, config_pbh_lag from tests.ptf_runner import ptf_runner -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -31,7 +30,7 @@ def setup_dynamic_pbh(self, duthost, lag_port_map, lag_ip_map): def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing warm boot dynamic inner hash test for outer {} and inner {} with symmetric_hashing" " set to {}".format(outer_ipver, inner_ipver, str(symmetric_hashing))) timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') @@ -58,28 +57,27 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv reboot_thr = threading.Thread(target=reboot, args=(duthost, localhost, 'warm', 10, 0, 0, True, True,)) reboot_thr.start() - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params={"fib_info": FIB_INFO_FILE_DST, - "router_mac": router_mac, - "src_ports": vlan_ptf_ports, - "exp_port_groups": lag_mem_ptf_ports_groups, - "hash_keys": hash_keys, - "vxlan_port": VXLAN_PORT, - "inner_src_ip_range": ",".join(inner_src_ip_range), - "inner_dst_ip_range": ",".join(inner_dst_ip_range), - "outer_src_ip_range": ",".join(outer_src_ip_range), - "outer_dst_ip_range": ",".join(outer_dst_ip_range), - "balancing_test_times": balancing_test_times, - "balancing_range": balancing_range, - "outer_encap_formats": outer_encap_format, - "nvgre_tni": NVGRE_TNI, - "symmetric_hashing": symmetric_hashing}, - log_file=log_file, - qlen=PTF_QLEN, - 
socket_recv_size=16384, - is_python3=True) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params={"fib_info": FIB_INFO_FILE_DST, + "router_mac": router_mac, + "src_ports": vlan_ptf_ports, + "exp_port_groups": lag_mem_ptf_ports_groups, + "hash_keys": hash_keys, + "vxlan_port": VXLAN_PORT, + "inner_src_ip_range": ",".join(inner_src_ip_range), + "inner_dst_ip_range": ",".join(inner_dst_ip_range), + "outer_src_ip_range": ",".join(outer_src_ip_range), + "outer_dst_ip_range": ",".join(outer_dst_ip_range), + "balancing_test_times": balancing_test_times, + "balancing_range": balancing_range, + "outer_encap_formats": outer_encap_format, + "nvgre_tni": NVGRE_TNI, + "symmetric_hashing": symmetric_hashing}, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) reboot_thr.join() diff --git a/tests/everflow/everflow_test_utilities.py b/tests/everflow/everflow_test_utilities.py index 97e80743fea..8377cb40548 100644 --- a/tests/everflow/everflow_test_utilities.py +++ b/tests/everflow/everflow_test_utilities.py @@ -753,8 +753,7 @@ def send_and_check_mirror_packets(self, src_port=None, dest_ports=None, expect_recv=True, - valid_across_namespace=True, - skip_traffic_test=False): + valid_across_namespace=True): # In Below logic idea is to send traffic in such a way so that mirror traffic # will need to go across namespaces and within namespace. 
If source and mirror destination @@ -789,9 +788,6 @@ def send_and_check_mirror_packets(self, src_port_set.add(dest_ports[0]) src_port_metadata_map[dest_ports[0]] = (None, 2) - if skip_traffic_test is True: - logging.info("Skipping traffic test") - return # Loop through Source Port Set and send traffic on each source port of the set for src_port in src_port_set: expected_mirror_packet = BaseEverflowTest.get_expected_mirror_packet(mirror_session, @@ -810,10 +806,15 @@ def send_and_check_mirror_packets(self, if expect_recv: time.sleep(STABILITY_BUFFER) - _, received_packet = testutils.verify_packet_any_port(ptfadapter, - expected_mirror_packet, - ports=dest_ports) + result = testutils.verify_packet_any_port(ptfadapter, + expected_mirror_packet, + ports=dest_ports) + if isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test, skip following checks") + return + + _, received_packet = result logging.info("Received packet: %s", packet.Ether(received_packet).summary()) inner_packet = self._extract_mirror_payload(received_packet, len(mirror_packet_sent)) diff --git a/tests/everflow/test_everflow_ipv6.py b/tests/everflow/test_everflow_ipv6.py index 6ffd720f287..df3a4b0e3a3 100644 --- a/tests/everflow/test_everflow_ipv6.py +++ b/tests/everflow/test_everflow_ipv6.py @@ -13,7 +13,6 @@ # Module-level fixtures from .everflow_test_utilities import setup_info # noqa: F401 from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 pytestmark = [ pytest.mark.topology("t0", "t1", "t2", "m0") @@ -155,8 +154,7 @@ def background_traffic(run_count=None): def test_src_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - 
skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on Source IPv6 addresses.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -170,13 +168,11 @@ def test_src_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_dst_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on Destination IPv6 addresses.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -190,13 +186,11 @@ def test_dst_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_next_header_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on the Next Header field.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, next_header=0x7E) @@ -205,13 +199,11 @@ def test_next_header_mirroring(self, 
setup_info, setup_mirror_session, ptfadapte ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_src_port_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on the L4 Source Port.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, sport=9000) @@ -220,13 +212,11 @@ def test_l4_src_port_mirroring(self, setup_info, setup_mirror_session, ptfadapte ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_dst_port_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on the L4 Destination Port.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, dport=9001) @@ -235,14 +225,12 @@ def test_l4_dst_port_mirroring(self, setup_info, setup_mirror_session, ptfadapte ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + 
dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_src_port_range_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on a range of L4 Source Ports.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, sport=10200) @@ -251,14 +239,12 @@ def test_l4_src_port_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_dst_port_range_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on a range of L4 Destination Ports.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, dport=10700) @@ -267,13 +253,11 @@ def test_l4_dst_port_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_tcp_flags_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, 
toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on TCP Flags.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, flags=0x1B) @@ -282,13 +266,11 @@ def test_tcp_flags_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_dscp_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on DSCP.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, dscp=37) @@ -297,13 +279,11 @@ def test_dscp_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ever ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match from a source port to a range of destination ports and vice-versa.""" test_packet = 
self._base_tcpv6_packet( everflow_direction, @@ -320,8 +300,7 @@ def test_l4_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_tcpv6_packet( everflow_direction, @@ -338,13 +317,11 @@ def test_l4_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_tcp_response_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match a SYN -> SYN-ACK pattern.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -360,8 +337,7 @@ def test_tcp_response_mirroring(self, setup_info, setup_mirror_session, ptfadapt ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_tcpv6_packet( everflow_direction, @@ -377,14 +353,12 @@ def test_tcp_response_mirroring(self, setup_info, setup_mirror_session, ptfadapt ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def 
test_tcp_application_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match a TCP handshake between a client and server.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -402,8 +376,7 @@ def test_tcp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_tcpv6_packet( everflow_direction, @@ -421,14 +394,12 @@ def test_tcp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_udp_application_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match UDP traffic between a client and server application.""" test_packet = self._base_udpv6_packet( everflow_direction, @@ -446,8 +417,7 @@ def test_udp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + 
dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, ptfadapter, @@ -464,13 +434,11 @@ def test_udp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that the protocol number is ignored if it is not specified in the ACL rule.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -485,8 +453,7 @@ def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, @@ -501,8 +468,7 @@ def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, @@ -518,14 +484,12 @@ def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - 
skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_any_transport_protocol(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that src port and dst port rules match regardless of whether TCP or UDP traffic is sent.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -542,8 +506,7 @@ def test_any_transport_protocol(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, @@ -560,13 +523,11 @@ def test_any_transport_protocol(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_invalid_tcp_rule(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that the ASIC does not reject rules with TCP flags if the protocol is not TCP.""" pass @@ -577,8 +538,7 @@ def test_invalid_tcp_rule(self, setup_info, setup_mirror_session, ptfadapter, ev def test_source_subnet(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 
setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with a Source IPv6 Subnet.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -595,13 +555,11 @@ def test_source_subnet(self, setup_info, setup_mirror_session, ptfadapter, everf ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_dest_subnet(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with a Destination IPv6 Subnet.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -618,13 +576,11 @@ def test_dest_subnet(self, setup_info, setup_mirror_session, ptfadapter, everflo ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_both_subnets(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with both source and 
destination subnets.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -641,13 +597,11 @@ def test_both_subnets(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_fuzzy_subnets(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with non-standard subnet sizes.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -664,8 +618,7 @@ def test_fuzzy_subnets(self, setup_info, setup_mirror_session, ptfadapter, everf ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def _base_tcpv6_packet(self, direction, diff --git a/tests/everflow/test_everflow_per_interface.py b/tests/everflow/test_everflow_per_interface.py index 820513d6675..8bef2b5ed78 100644 --- a/tests/everflow/test_everflow_per_interface.py +++ b/tests/everflow/test_everflow_per_interface.py @@ -13,7 +13,6 @@ from .everflow_test_utilities import setup_info, EVERFLOW_DSCP_RULES # noqa: F401 from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa: F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 pytestmark = [ pytest.mark.topology("any") @@ -181,8 +180,7 @@ def send_and_verify_packet(ptfadapter, packet, expected_packet, tx_port, rx_port def 
test_everflow_per_interface(ptfadapter, setup_info, apply_acl_rule, tbinfo, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, ip_ver, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, ip_ver): # noqa F811 """Verify packet ingress from candidate ports are captured by EVERFLOW, while packets ingress from unselected ports are not captured """ @@ -192,9 +190,6 @@ def test_everflow_per_interface(ptfadapter, setup_info, apply_acl_rule, tbinfo, setup_info[UP_STREAM]['ingress_router_mac'], setup_info, ip_ver) uplink_ports = everflow_config["monitor_port_ptf_ids"] - if skip_traffic_test: - return - # Verify that packet ingressed from INPUT_PORTS (candidate ports) are mirrored for port, ptf_idx in list(everflow_config['candidate_ports'].items()): logger.info("Verifying packet ingress from {} is mirrored".format(port)) diff --git a/tests/everflow/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py index 1e0777dcd58..cfe3c8f109e 100644 --- a/tests/everflow/test_everflow_testbed.py +++ b/tests/everflow/test_everflow_testbed.py @@ -15,7 +15,6 @@ # Module-level fixtures from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa: F401 from tests.common.fixtures.ptfhost_utils import copy_acstests_directory # noqa: F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 from .everflow_test_utilities import setup_info, setup_arp_responder, EVERFLOW_DSCP_RULES # noqa: F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa: F401 from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa: F401 @@ -135,8 +134,7 @@ def add_dest_routes(self, setup_info, tbinfo, dest_port_type): # noqa F811 def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - 
setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """ Verify basic forwarding scenarios for the Everflow feature. @@ -170,8 +168,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Add a (better) unresolved route to the mirror session destination IP @@ -188,8 +185,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the unresolved route @@ -212,8 +208,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the better route. 
@@ -230,8 +225,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) remote_dut.shell(remote_dut.get_vtysh_cmd_for_namespace( @@ -241,8 +235,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """Verify that session destination MAC address is changed after neighbor MAC address update.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -265,8 +258,7 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Update the MAC on the neighbor interface for the route we installed @@ -286,8 +278,7 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) finally: @@ -308,15 +299,13 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + 
setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """Verify that session is still active after removal of next hop from ECMP route that was not in use.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -348,8 +337,7 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses everflow_dut, rx_port_ptf_id, tx_port_ptf_ids, - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remaining Scenario not applicable for this topology @@ -374,8 +362,7 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses [tx_port_ptf_id], dest_port_type, expect_recv=False, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Remove the extra hop @@ -393,8 +380,7 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses [tx_port_ptf_id], dest_port_type, expect_recv=False, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Verify that mirrored traffic is still sent to one of the original next hops @@ -405,15 +391,13 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses everflow_dut, rx_port_ptf_id, tx_port_ptf_ids, - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """Verify that session is still active after removal of next hop from ECMP route that was in use.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -444,8 +428,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi 
everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Add two new ECMP next hops @@ -469,8 +452,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi rx_port_ptf_id, [tx_port_ptf_id], dest_port_type, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Verify that traffic is not sent along either of the new next hops @@ -487,8 +469,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi tx_port_ptf_ids, dest_port_type, expect_recv=False, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Remove the original next hop @@ -505,8 +486,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi rx_port_ptf_id, [tx_port_ptf_id], dest_port_type, - expect_recv=False, - skip_traffic_test=skip_traffic_test + expect_recv=False ) # Verify that mirrored traffis is now sent along either of the new next hops @@ -517,8 +497,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi everflow_dut, rx_port_ptf_id, tx_port_ptf_ids, - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) def test_everflow_dscp_with_policer( @@ -530,8 +509,7 @@ def test_everflow_dscp_with_policer( config_method, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally # noqa F811 ): """Verify that we can rate-limit mirrored traffic from the MIRROR_DSCP table. 
This tests single rate three color policer mode and specifically checks CIR value @@ -617,9 +595,6 @@ def test_everflow_dscp_with_policer( config_method, rules=EVERFLOW_DSCP_RULES) - if skip_traffic_test is True: - return - # Run test with expected CIR/CBS in packets/sec and tolerance % partial_ptf_runner(setup_info, dest_port_type, @@ -635,8 +610,7 @@ def test_everflow_dscp_with_policer( cir=rate_limit, cbs=rate_limit, send_time=send_time, - tolerance=everflow_tolerance, - skip_traffic_test=skip_traffic_test) + tolerance=everflow_tolerance) finally: # Clean up ACL rules and routes BaseEverflowTest.remove_acl_rule_config(everflow_dut, table_name, config_method) @@ -651,8 +625,7 @@ def test_everflow_dscp_with_policer( def test_everflow_frwd_with_bkg_trf(self, setup_info, # noqa F811 setup_mirror_session, - dest_port_type, ptfadapter, tbinfo, - skip_traffic_test # noqa F811 + dest_port_type, ptfadapter, tbinfo ): """ Verify basic forwarding scenarios for the Everflow feature with background traffic. @@ -743,8 +716,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Add a (better) unresolved route to the mirror session destination IP @@ -762,8 +734,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the unresolved route @@ -786,8 +757,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the better route. 
@@ -804,8 +774,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) remote_dut.shell(remote_dut.get_vtysh_cmd_for_namespace( @@ -820,8 +789,7 @@ def background_traffic(run_count=None): background_traffic(run_count=1) def _run_everflow_test_scenarios(self, ptfadapter, setup, mirror_session, duthost, rx_port, - tx_ports, direction, expect_recv=True, valid_across_namespace=True, - skip_traffic_test=False): # noqa F811 + tx_ports, direction, expect_recv=True, valid_across_namespace=True): # FIXME: In the ptf_runner version of these tests, LAGs were passed down to the tests # as comma-separated strings of LAG member port IDs (e.g. portchannel0001 -> "2,3"). # Because the DSCP test is still using ptf_runner we will preserve this for now, @@ -859,8 +827,7 @@ def _run_everflow_test_scenarios(self, ptfadapter, setup, mirror_session, duthos src_port=rx_port, dest_ports=tx_port_ids, expect_recv=expect_recv, - valid_across_namespace=valid_across_namespace, - skip_traffic_test=skip_traffic_test, + valid_across_namespace=valid_across_namespace ) def _base_tcp_packet( diff --git a/tests/fdb/test_fdb_flush.py b/tests/fdb/test_fdb_flush.py index 5c7670bb355..82594dcf620 100644 --- a/tests/fdb/test_fdb_flush.py +++ b/tests/fdb/test_fdb_flush.py @@ -324,6 +324,7 @@ def dynamic_fdb_oper(self, duthost, tbinfo, ptfhost, create_or_clear): "router_mac": duthost.facts["router_mac"], "fdb_info": self.FDB_INFO_FILE, "dummy_mac_prefix": self.DUMMY_MAC_PREFIX, + "kvm_support": True } self.__runPtfTest(ptfhost, "fdb_flush_test.FdbFlushTest", testParams) elif 'clear' == create_or_clear: diff --git a/tests/fdb/test_fdb_mac_expire.py b/tests/fdb/test_fdb_mac_expire.py index 4f340f46c1f..e98e86c2d7a 100644 --- a/tests/fdb/test_fdb_mac_expire.py +++ b/tests/fdb/test_fdb_mac_expire.py @@ -222,7 +222,8 @@ def testFdbMacExpire(self, request, tbinfo, rand_selected_dut, ptfhost, refresh_ 
"fdb_info": self.FDB_INFO_FILE, "dummy_mac_prefix": self.DUMMY_MAC_PREFIX, "refresh_type": refresh_type, - "aging_time": fdbAgingTime + "aging_time": fdbAgingTime, + "kvm_support": True } self.__runPtfTest(ptfhost, "fdb_mac_expire_test.FdbMacExpireTest", testParams) diff --git a/tests/fdb/test_fdb_mac_learning.py b/tests/fdb/test_fdb_mac_learning.py index a8bf4243e8e..e8f192243b4 100644 --- a/tests/fdb/test_fdb_mac_learning.py +++ b/tests/fdb/test_fdb_mac_learning.py @@ -167,6 +167,7 @@ def dynamic_fdb_oper(self, duthost, tbinfo, ptfhost, dut_ptf_ports): "router_mac": duthost.facts["router_mac"], "dut_ptf_ports": dut_ptf_ports, "dummy_mac_prefix": self.DUMMY_MAC_PREFIX, + "kvm_support": True } self.__runPtfTest(ptfhost, "fdb_mac_learning_test.FdbMacLearningTest", testParams) diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index c1e8f47bfbb..e65b90d81e2 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -10,7 +10,6 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # noqa F401 from tests.common.fixtures.ptfhost_utils import ptf_test_port_map_active_active, ptf_test_port_map -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.dualtor.mux_simulator_control import mux_server_url # noqa F401 @@ -84,8 +83,7 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, mux_status_from_nic_simulator, ignore_ttl, single_fib_for_duts, # noqa F401 duts_running_config_facts, duts_minigraph_facts, - validate_active_active_dualtor_setup, # noqa F401 - skip_traffic_test): # noqa F811 + validate_active_active_dualtor_setup): # noqa F811 if 'dualtor' in updated_tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -105,8 +103,6 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, log_file = 
"/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format( ipv4, ipv6, timestamp) logging.info("PTF log file: %s" % log_file) - if skip_traffic_test is True: - return ptf_runner( ptfhost, "ptftests", @@ -319,7 +315,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s hash_keys, ptfhost, ipver, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 updated_tbinfo, mux_server_url, mux_status_from_nic_simulator, ignore_ttl, # noqa F811 single_fib_for_duts, duts_running_config_facts, duts_minigraph_facts, # noqa F811 - setup_active_active_ports, active_active_ports, skip_traffic_test): # noqa F811 + setup_active_active_ports, active_active_ports): # noqa F811 if 'dualtor' in updated_tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -335,8 +331,6 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE - if skip_traffic_test is True: - return ptf_runner( ptfhost, "ptftests", @@ -371,7 +365,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files_per_function, # noqa F811 hash_keys, ptfhost, ipver, tbinfo, mux_server_url, # noqa F811 ignore_ttl, single_fib_for_duts, duts_running_config_facts, # noqa F811 - duts_minigraph_facts, skip_traffic_test): # noqa F811 + duts_minigraph_facts): # noqa F811 # Skip test on none T1 testbed pytest_require('t1' == tbinfo['topo']['type'], "The test case runs on T1 topology") @@ -385,8 +379,6 @@ def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "hash_test.IPinIPHashTest", @@ -413,8 +405,7 @@ def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, 
fib_info_files def test_ipinip_hash_negative(add_default_route_to_dut, duthosts, fib_info_files_per_function, # noqa F811 ptfhost, ipver, tbinfo, mux_server_url, ignore_ttl, single_fib_for_duts, # noqa F811 - duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator, - skip_traffic_test): # noqa F811 + duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator): hash_keys = ['inner_length'] timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') log_file = "/tmp/hash_test.IPinIPHashTest.{}.{}.log".format( @@ -426,8 +417,6 @@ def test_ipinip_hash_negative(add_default_route_to_dut, duthosts, fib_info_files else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "hash_test.IPinIPHashTest", diff --git a/tests/generic_config_updater/test_dynamic_acl.py b/tests/generic_config_updater/test_dynamic_acl.py index f7c86f056b9..2b5a3b2ed1d 100644 --- a/tests/generic_config_updater/test_dynamic_acl.py +++ b/tests/generic_config_updater/test_dynamic_acl.py @@ -25,7 +25,6 @@ from ipaddress import ip_network, IPv6Network, IPv4Network from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.gu_utils import expect_op_success, expect_op_failure from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import apply_formed_json_patch @@ -846,8 +845,7 @@ def dynamic_acl_create_dhcp_forward_rule(duthost, setup): expect_acl_rule_match(duthost, "DHCPV6_RULE", expected_v6_rule_content, setup) -def dynamic_acl_verify_packets(setup, ptfadapter, packets, packets_dropped, src_port=None, - skip_traffic_test=False): # noqa F811 +def dynamic_acl_verify_packets(setup, ptfadapter, packets, packets_dropped, src_port=None): """Verify that the given packets are either dropped/forwarded correctly Args: 
@@ -862,9 +860,6 @@ def dynamic_acl_verify_packets(setup, ptfadapter, packets, packets_dropped, src_ if src_port is None: src_port = setup["blocked_src_port_indice"] - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return for rule, pkt in list(packets.items()): logger.info("Testing that {} packets are correctly {}".format(rule, action_type)) exp_pkt = build_exp_pkt(pkt) @@ -1069,8 +1064,7 @@ def test_gcu_acl_arp_rule_creation(rand_selected_dut, setup, dynamic_acl_create_table, prepare_ptf_intf_and_ip, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that we can create a blanket ARP/NDP packet forwarding rule with GCU, and that ARP/NDP packets are correctly forwarded while all others are dropped.""" @@ -1105,8 +1099,7 @@ def test_gcu_acl_arp_rule_creation(rand_selected_dut, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=True, - src_port=ptf_intf_index, - skip_traffic_test=skip_traffic_test) + src_port=ptf_intf_index) def test_gcu_acl_dhcp_rule_creation(rand_selected_dut, @@ -1115,8 +1108,7 @@ def test_gcu_acl_dhcp_rule_creation(rand_selected_dut, setup, dynamic_acl_create_table, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor): # noqa F811 """Verify that DHCP and DHCPv6 forwarding rules can be created, and that dhcp packets are properly forwarded whereas others are dropped""" @@ -1131,8 +1123,7 @@ def test_gcu_acl_dhcp_rule_creation(rand_selected_dut, dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), - packets_dropped=True, - skip_traffic_test=skip_traffic_test) + packets_dropped=True) def test_gcu_acl_drop_rule_creation(rand_selected_dut, @@ -1140,8 +1131,7 @@ def 
test_gcu_acl_drop_rule_creation(rand_selected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that we can create a drop rule via GCU, and that once this drop rule is in place packets that match the drop rule are dropped and packets that do not match the drop rule are forwarded""" @@ -1150,14 +1140,12 @@ def test_gcu_acl_drop_rule_creation(rand_selected_dut, dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), - packets_dropped=True, - skip_traffic_test=skip_traffic_test) + packets_dropped=True) dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=False, - src_port=setup["unblocked_src_port_indice"], - skip_traffic_test=skip_traffic_test) + src_port=setup["unblocked_src_port_indice"]) def test_gcu_acl_drop_rule_removal(rand_selected_dut, @@ -1165,8 +1153,7 @@ def test_gcu_acl_drop_rule_removal(rand_selected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that once a drop rule is removed, packets that were previously being dropped are now forwarded""" dynamic_acl_create_three_drop_rules(rand_selected_dut, setup) @@ -1176,8 +1163,7 @@ def test_gcu_acl_drop_rule_removal(rand_selected_dut, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=False, - src_port=setup["scale_port_indices"][2], - skip_traffic_test=skip_traffic_test) + src_port=setup["scale_port_indices"][2]) def test_gcu_acl_forward_rule_priority_respected(rand_selected_dut, @@ -1185,8 +1171,7 @@ def test_gcu_acl_forward_rule_priority_respected(rand_selected_dut, ptfadapter, setup, 
dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that forward rules and drop rules can be created at the same time, with the forward rules having higher priority than drop. Then, perform a traffic test to confirm that packets that match both the forward and drop rules are correctly forwarded, as the forwarding rules have higher priority""" @@ -1195,10 +1180,10 @@ def test_gcu_acl_forward_rule_priority_respected(rand_selected_dut, dynamic_acl_create_secondary_drop_rule(rand_selected_dut, setup) dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup), - packets_dropped=False, skip_traffic_test=skip_traffic_test) + packets_dropped=False) dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), - packets_dropped=True, skip_traffic_test=skip_traffic_test) + packets_dropped=True) def test_gcu_acl_forward_rule_replacement(rand_selected_dut, @@ -1206,8 +1191,7 @@ def test_gcu_acl_forward_rule_replacement(rand_selected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that forward rules can be created, and then afterwards can have their match pattern updated to a new value. 
Confirm that packets sent that match this new value are correctly forwarded, and that packets that are sent that match the old, replaced value are correctly dropped.""" @@ -1221,10 +1205,8 @@ def test_gcu_acl_forward_rule_replacement(rand_selected_dut, packets=generate_packets(setup, DST_IP_FORWARDED_REPLACEMENT, DST_IPV6_FORWARDED_REPLACEMENT), - packets_dropped=False, - skip_traffic_test=skip_traffic_test) - dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup), packets_dropped=True, - skip_traffic_test=skip_traffic_test) + packets_dropped=False) + dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup), packets_dropped=True) @pytest.mark.parametrize("ip_type", ["IPV4", "IPV6"]) @@ -1234,8 +1216,7 @@ def test_gcu_acl_forward_rule_removal(rand_selected_dut, setup, ip_type, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that if a forward rule is created, and then removed, that packets associated with that rule are properly no longer forwarded, and packets associated with the remaining rule are forwarded""" @@ -1252,15 +1233,12 @@ def test_gcu_acl_forward_rule_removal(rand_selected_dut, # generate_packets returns ipv4 and ipv6 packets. 
remove vals from two dicts so that only correct packets remain drop_packets.pop(other_type) forward_packets.pop(ip_type) - dynamic_acl_verify_packets(setup, ptfadapter, drop_packets, packets_dropped=True, - skip_traffic_test=skip_traffic_test) - dynamic_acl_verify_packets(setup, ptfadapter, forward_packets, packets_dropped=False, - skip_traffic_test=skip_traffic_test) + dynamic_acl_verify_packets(setup, ptfadapter, drop_packets, packets_dropped=True) + dynamic_acl_verify_packets(setup, ptfadapter, forward_packets, packets_dropped=False) def test_gcu_acl_scale_rules(rand_selected_dut, rand_unselected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Perform a scale test, creating 150 forward rules with top priority, and then creating a drop rule for every single VLAN port on our device. Select any one of our blocked ports, as well as the ips for two of our forward rules, @@ -1280,27 +1258,23 @@ def test_gcu_acl_scale_rules(rand_selected_dut, rand_unselected_dut, ptfadapter, ptfadapter, generate_packets(setup, v4_dest, v6_dest), packets_dropped=False, - src_port=blocked_scale_port, - skip_traffic_test=skip_traffic_test) + src_port=blocked_scale_port) dynamic_acl_verify_packets(setup, ptfadapter, generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=True, - src_port=blocked_scale_port, - skip_traffic_test=skip_traffic_test) + src_port=blocked_scale_port) def test_gcu_acl_nonexistent_rule_replacement(rand_selected_dut, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup, - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup): """Confirm that replacing a nonexistent rule results in operation failure""" dynamic_acl_replace_nonexistent_rule(rand_selected_dut, setup) def 
test_gcu_acl_nonexistent_table_removal(rand_selected_dut, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup, - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup): """Confirm that removing a nonexistent table results in operation failure""" dynamic_acl_remove_nonexistent_table(rand_selected_dut, setup) diff --git a/tests/hash/test_generic_hash.py b/tests/hash/test_generic_hash.py index 7e4db3fdadb..fd9c0191d0d 100644 --- a/tests/hash/test_generic_hash.py +++ b/tests/hash/test_generic_hash.py @@ -15,7 +15,6 @@ from tests.common.utilities import wait_until from tests.ptf_runner import ptf_runner from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer from tests.common.reboot import reboot from tests.common.config_reload import config_reload @@ -130,8 +129,8 @@ def test_hash_capability(duthost, global_hash_capabilities): # noqa:F811 'The lag hash capability is not as expected.') -def test_ecmp_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_capabilities, # noqa:F811 - restore_vxlan_port, toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 +def test_ecmp_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_capabilities, # noqa:F811 + restore_vxlan_port, toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the ecmp hash. The hash field to test is randomly chosen from the supported hash fields. 
Args: @@ -174,23 +173,22 @@ def test_ecmp_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_ # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_lag_hash(duthost, ptfhost, tbinfo, fine_params, mg_facts, restore_configuration, # noqa:F811 restore_vxlan_port, global_hash_capabilities, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 + toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the lag hash. The hash field to test is randomly chosen from the supported hash fields. When hash field is in [DST_MAC, ETHERTYPE, VLAN_ID], need to re-configure the dut for L2 traffic. 
@@ -247,18 +245,17 @@ def test_lag_hash(duthost, ptfhost, tbinfo, fine_params, mg_facts, restore_confi if not is_l2_test: pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def config_all_hash_fields(duthost, global_hash_capabilities): # noqa:F811 @@ -273,7 +270,7 @@ def config_all_hash_algorithm(duthost, ecmp_algorithm, lag_algorithm): # noqa:F def test_ecmp_and_lag_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_capabilities, # noqa:F811 restore_vxlan_port, get_supported_hash_algorithms, # noqa:F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 + toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the hash behavior when both ecmp and lag hash are configured with a same field. The hash field to test is randomly chosen from the supported hash fields. 
@@ -312,23 +309,22 @@ def test_ecmp_and_lag_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, glob # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_interfaces, # noqa:F811 restore_vxlan_port, global_hash_capabilities, get_supported_hash_algorithms, # noqa:F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 + toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the ecmp hash when there is nexthop flapping. The hash field to test is randomly chosen from the supported hash fields. 
@@ -368,18 +364,17 @@ def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_i # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Randomly shutdown 1 nexthop interface'): interface = random.choice(list(uplink_interfaces.keys())) remaining_uplink_interfaces = uplink_interfaces.copy() @@ -389,18 +384,17 @@ def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_i mg_facts, downlink_interfaces=[], uplink_interfaces=remaining_uplink_interfaces) shutdown_interface(duthost, interface) with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Startup the interface, and then flap it 3 more times'): startup_interface(duthost, interface) flap_interfaces(duthost, [interface], times=3) @@ -408,24 +402,22 @@ def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_i 'The default route is not restored after the flapping.') 
ptf_params['expected_port_groups'] = origin_ptf_expected_port_groups with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_lag_member_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_configuration, # noqa F811 restore_interfaces, global_hash_capabilities, restore_vxlan_port, # noqa F811 - get_supported_hash_algorithms, toggle_all_simulator_ports_to_upper_tor, # noqa F811 - skip_traffic_test): # noqa F811 + get_supported_hash_algorithms, toggle_all_simulator_ports_to_upper_tor): # noqa F811 """ Test case to validate the lag hash when there is lag member flapping. The hash field to test is randomly chosen from the supported hash fields. 
@@ -482,18 +474,17 @@ def test_lag_member_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restor if not is_l2_test: pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Randomly select one member in each portchannel and flap them 3 times'): # Randomly choose the members to flap @@ -509,24 +500,22 @@ def test_lag_member_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restor pytest_assert(wait_until(30, 5, 0, check_default_route, duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_lag_member_remove_add(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_configuration, # noqa F811 restore_interfaces, global_hash_capabilities, restore_vxlan_port, # noqa F811 - get_supported_hash_algorithms, toggle_all_simulator_ports_to_upper_tor, # noqa F811 - skip_traffic_test): # noqa F811 + get_supported_hash_algorithms, 
toggle_all_simulator_ports_to_upper_tor): # noqa F811 """ Test case to validate the lag hash when a lag member is removed from the lag and added back for a few times. @@ -584,18 +573,17 @@ def test_lag_member_remove_add(duthost, tbinfo, ptfhost, fine_params, mg_facts, if not is_l2_test: pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Randomly select one member in each portchannel and remove it from the lag and add it back'): # Randomly choose the members to remove/add @@ -609,23 +597,22 @@ def test_lag_member_remove_add(duthost, tbinfo, ptfhost, fine_params, mg_facts, 'The default route is not available or some nexthops are missing.') with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_reboot(duthost, tbinfo, ptfhost, localhost, fine_params, mg_facts, restore_vxlan_port, # noqa F811 global_hash_capabilities, reboot_type, get_supported_hash_algorithms, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, 
skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_upper_tor): # noqa F811 """ Test case to validate the hash behavior after fast/warm/cold reboot. The hash field to test is randomly chosen from the supported hash fields. @@ -665,18 +652,17 @@ def test_reboot(duthost, tbinfo, ptfhost, localhost, fine_params, mg_facts, rest # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step(f'Randomly choose a reboot type: {reboot_type}, and reboot'): # Save config if reboot type is config reload or cold reboot @@ -698,18 +684,17 @@ def test_reboot(duthost, tbinfo, ptfhost, localhost, fine_params, mg_facts, rest pytest_assert(wait_until(60, 10, 0, check_default_route, duthost, uplink_interfaces.keys()), "The default route is not established after the cold reboot.") with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) @pytest.mark.disable_loganalyzer diff --git a/tests/ipfwd/test_mtu.py 
b/tests/ipfwd/test_mtu.py index b80c2489379..b8351a3bd59 100644 --- a/tests/ipfwd/test_mtu.py +++ b/tests/ipfwd/test_mtu.py @@ -35,7 +35,8 @@ def test_mtu(tbinfo, ptfhost, mtu, gather_facts): "src_router_ipv6": gather_facts['src_router_ipv6'], "dst_host_ipv6": gather_facts['dst_host_ipv6'], "src_ptf_port_list": gather_facts['src_port_ids'], - "dst_ptf_port_list": gather_facts['dst_port_ids'] + "dst_ptf_port_list": gather_facts['dst_port_ids'], + "kvm_support": True }, log_file=log_file, socket_recv_size=16384, diff --git a/tests/ipfwd/test_nhop_group.py b/tests/ipfwd/test_nhop_group.py index d9f5e683a4f..31711c18098 100644 --- a/tests/ipfwd/test_nhop_group.py +++ b/tests/ipfwd/test_nhop_group.py @@ -324,6 +324,17 @@ def build_pkt(dest_mac, ip_addr, ttl, flow_count): return pkt, exp_packet +def validate_asic_route(duthost, route, exist=True): + logger.info(f"Checking ip route: {route}") + asic_info = duthost.shell(f'redis-cli -n 1 keys "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:*{route}*"', + module_ignore_errors=True)["stdout"] + if route in asic_info: + logger.info(f"Matched ASIC route: {asic_info}") + return exist is True + else: + return exist is False + + def test_nhop_group_member_count(duthost, tbinfo, loganalyzer): """ Test next hop group resource count. 
Steps: @@ -534,8 +545,9 @@ def built_and_send_tcp_ip_packet(): nhop.add_ip_route(ip_prefix, ips) nhop.program_routes() - # wait for routes to be synced and programmed - time.sleep(15) + + pytest_assert(wait_until(60, 5, 0, validate_asic_route, duthost, ip_prefix), + f"Static route: {ip_prefix} is failed to be programmed!") ptfadapter.dataplane.flush() @@ -565,6 +577,8 @@ def built_and_send_tcp_ip_packet(): asic.start_service("bgp") time.sleep(15) nhop.delete_routes() + pytest_assert(wait_until(60, 5, 0, validate_asic_route, duthost, ip_prefix, False), + f"Static route: {ip_prefix} is failed to be removed!") arplist.clean_up() th_asic_flow_map = {0: 'c0:ff:ee:00:00:12', 1: 'c0:ff:ee:00:00:10', diff --git a/tests/l2/test_l2_configure.py b/tests/l2/test_l2_configure.py new file mode 100644 index 00000000000..bdb392b9e7f --- /dev/null +++ b/tests/l2/test_l2_configure.py @@ -0,0 +1,161 @@ +""" +Tests related to L2 configuration +""" + +import logging +import pytest + +from tests.common import config_reload +from tests.common.platform.processes_utils import wait_critical_processes +from tests.common.helpers.assertions import pytest_assert + +CONFIG_DB = "/etc/sonic/config_db.json" +CONFIG_DB_BAK = "/etc/sonic/config_db.json.bak" +DUT_IMG_PATH = "/tmp/dut-sonic-img.bin" +LOCALHOST_IMG_PATH = "/tmp/localhost-sonic-img.bin" + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.topology("t0"), + pytest.mark.sanity_check(skip_sanity=True), + pytest.mark.disable_loganalyzer, + pytest.mark.skip_check_dut_health, +] + + +@pytest.fixture(autouse=True) +def setup_env(duthosts, rand_one_dut_hostname): + """ + Setup/teardown fixture for each loopback interface test. + rollback to check if it goes back to starting config + + Args: + duthosts: list of DUTs. + rand_selected_dut: The fixture returns a randomly selected DuT. 
+ """ + duthost = duthosts[rand_one_dut_hostname] + duthost.shell("sudo cp {} {}".format(CONFIG_DB, CONFIG_DB_BAK)) + + yield + + duthost.shell("sudo cp {} {}".format(CONFIG_DB_BAK, CONFIG_DB)) + duthost.shell("sudo rm -f {}".format(CONFIG_DB_BAK)) + config_reload(duthost) + wait_critical_processes(duthost) + + +def is_table_empty(duthost, table): + """ + @summary: Verify a table is empty. + + Args: + duthost: DUT host object. + table: Table name to verify. + """ + # grep returns 1 when there is no match, use || true to override that. + count = int( + duthost.shell( + 'sonic-db-cli CONFIG_DB KEYS "{}|*" | grep -c {} || true'.format( + table, table + ) + )["stdout"] + ) + return count == 0 + + +def get_db_version(duthost): + """ + @summary: Get the current database version. + + Args: + duthost: DUT host object. + """ + try: + return duthost.shell('sonic-db-cli CONFIG_DB HGET "VERSIONS|DATABASE" VERSION')[ + "stdout" + ] + except Exception as e: + logger.error("Failed to get database version: {}".format(e)) + return "" + + +def test_no_hardcoded_minigraph(duthosts, rand_one_dut_hostname, tbinfo): + """ + @summary: A testcase asserts no hardcoded minigraph config is imported to config_db during L2 configuration. + + Args: + duthosts: list of DUTs. + rand_one_dut_hostname: The fixture returns a randomly selected DuT. + tbinfo: The testbed information. Needed for configuring management interface. + + """ + # Setup. + duthost = duthosts[rand_one_dut_hostname] + if is_table_empty(duthost, "TELEMETRY") or is_table_empty(duthost, "RESTAPI"): + pytest.skip("TELEMETRY or RESTAPI table is empty. Please load minigraph first.") + + hwsku = duthost.facts["hwsku"] + mgmt_fact = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_mgmt_interface"] + + # Step 2: Configure DUT into L2 mode. 
+ # Save original config + duthost.shell("sudo cp {} {}".format(CONFIG_DB, CONFIG_DB_BAK)) + # Perform L2 configuration + L2_INIT_CFG_FILE = "/tmp/init_l2_cfg.json" + MGMT_CFG_FILE = "/tmp/mgmt_cfg.json" + L2_CFG_FILE = "/tmp/l2_cfg.json" + gen_l2_cfg = "sudo sonic-cfggen --preset l2 -p -H -k {} > {}".format( + hwsku, L2_INIT_CFG_FILE + ) + duthost.shell(gen_l2_cfg) + gen_mgmt_cfg = """ + echo ' + {{ + "MGMT_INTERFACE": {{ + "eth0|{}/{}": {{ + "gwaddr": "{}" + }} + }}, + "DEVICE_METADATA": {{ + "localhost": {{ + "hostname": "{}" + }} + }}, + "MGMT_PORT": {{ + "eth0": {{ + "admin_status": "up", + "alias": "eth0" + }} + }} + }}' > {} + """.format( + mgmt_fact["addr"], + mgmt_fact["prefixlen"], + mgmt_fact["gwaddr"], + duthost.hostname, + MGMT_CFG_FILE, + ) + duthost.shell(gen_mgmt_cfg) + duthost.shell( + "jq -s '.[0] * .[1]' {} {} > {}".format( + L2_INIT_CFG_FILE, MGMT_CFG_FILE, L2_CFG_FILE + ) + ) + duthost.shell("sudo cp {} {}".format(L2_CFG_FILE, CONFIG_DB)) + db_version_before = get_db_version(duthost) + logger.info( + "Database version before L2 configuration reload: {}".format(db_version_before) + ) + config_reload(duthost) + wait_critical_processes(duthost) + db_version_after = get_db_version(duthost) + logger.info( + "Database version after L2 configuration reload: {}".format(db_version_after) + ) + + # Verify no minigraph config is present. 
+ for table in ["TELEMETRY", "RESTAPI"]: + pytest_assert( + is_table_empty(duthost, table), "{} table is not empty!".format(table) + ) diff --git a/tests/lldp/test_lldp_syncd.py b/tests/lldp/test_lldp_syncd.py index 7cce1ec3b14..361188f0dc1 100644 --- a/tests/lldp/test_lldp_syncd.py +++ b/tests/lldp/test_lldp_syncd.py @@ -46,6 +46,7 @@ def get_lldp_entry_keys(dbs): for db in dbs: items = db.get_keys("LLDP_ENTRY_TABLE*") lldp_entries.extend([key.split(":")[1] for key in items]) + logger.debug("lldp entry keys: {}".format(lldp_entries)) return lldp_entries @@ -54,6 +55,7 @@ def get_lldp_entry_content(dbs, interface): lldp_content = {} for db in dbs: lldp_content.update(db.hget_all("LLDP_ENTRY_TABLE:{}".format(interface))) + logger.debug("lldp entry content: {}".format(lldp_content)) return lldp_content @@ -288,7 +290,7 @@ def test_lldp_entry_table_after_lldp_restart( for asic in duthost.asics: duthost.shell("sudo systemctl restart {}".format(asic.get_service_name('lldp'))) result = wait_until( - 60, 2, 5, verify_lldp_table, duthost + 60, 2, 20, verify_lldp_table, duthost ) # Adjust based on LLDP service restart time pytest_assert(result, "no output for show lldp table after restarting lldp") for asic in duthost.asics: @@ -320,10 +322,6 @@ def test_lldp_entry_table_after_reboot( localhost, duthosts, enum_rand_one_per_hwsku_frontend_hostname, db_instance ): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - lldp_entry_keys = get_lldp_entry_keys(db_instance) - show_lldp_table_int_list = get_show_lldp_table_output(duthost) - lldpctl_output = get_lldpctl_output(duthost) - # reboot logging.info("Run cold reboot on DUT") reboot( @@ -335,6 +333,9 @@ def test_lldp_entry_table_after_reboot( safe_reboot=True, check_intf_up_ports=True ) + lldp_entry_keys = get_lldp_entry_keys(db_instance) + lldpctl_output = get_lldpctl_output(duthost) + show_lldp_table_int_list = get_show_lldp_table_output(duthost) lldpctl_interfaces = lldpctl_output["lldp"]["interface"] 
assert_lldp_interfaces( lldp_entry_keys, show_lldp_table_int_list, lldpctl_interfaces diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py index cff700c924b..f50e1b1cd1a 100644 --- a/tests/pc/test_lag_2.py +++ b/tests/pc/test_lag_2.py @@ -100,7 +100,8 @@ def __verify_lag_lacp_timing(self, lacp_timer, exp_iface): 'timeout': 35, 'packet_timing': lacp_timer, 'ether_type': 0x8809, - 'interval_count': 3 + 'interval_count': 3, + 'kvm_support': True } ptf_runner(self.ptfhost, 'acstests', "lag_test.LacpTimingTest", '/root/ptftests', params=params, is_python3=True) diff --git a/tests/pc/test_lag_member.py b/tests/pc/test_lag_member.py index 7f53c358d9f..b39009adb2c 100644 --- a/tests/pc/test_lag_member.py +++ b/tests/pc/test_lag_member.py @@ -452,7 +452,8 @@ def run_lag_member_traffic_test(duthost, dut_vlan, ptf_ports, ptfhost): "dut_mac": duthost.facts["router_mac"], "dut_vlan": dut_vlan, "ptf_lag": ptf_lag, - ATTR_PORT_NOT_BEHIND_LAG: ptf_not_lag + ATTR_PORT_NOT_BEHIND_LAG: ptf_not_lag, + "kvm_support": True } ptf_runner(ptfhost, 'acstests', "lag_test.LagMemberTrafficTest", "/root/ptftests", params=params, is_python3=True) diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py index fc6e3086119..2c5592bbcde 100644 --- a/tests/pfcwd/conftest.py +++ b/tests/pfcwd/conftest.py @@ -35,7 +35,7 @@ def pytest_addoption(parser): @pytest.fixture(scope="module") -def two_queues(request): +def two_queues(request, duthosts, enum_rand_one_per_hwsku_frontend_hostname, fanouthosts): """ Enable/Disable sending traffic to queues [4, 3] By default send to queue 4 @@ -48,6 +48,14 @@ def two_queues(request): Returns: two_queues: False/True """ + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + dut_asic_type = duthost.facts["asic_type"].lower() + # On Mellanox devices, if the leaf-fanout is running EOS, then only one queue is supported + if dut_asic_type == "mellanox": + for fanouthost in list(fanouthosts.values()): + fanout_os = fanouthost.get_fanout_os() + if 
fanout_os == 'eos': + return False return request.config.getoption('--two-queues') diff --git a/tests/platform_tests/api/platform_api_test_base.py b/tests/platform_tests/api/platform_api_test_base.py index 0550242eaa6..cb183d1adc6 100644 --- a/tests/platform_tests/api/platform_api_test_base.py +++ b/tests/platform_tests/api/platform_api_test_base.py @@ -30,3 +30,6 @@ def assert_expectations(self): # TODO: When we move to Python 3.3+, we can use self.failed_expectations.clear() instead del self.failed_expectations[:] pytest_assert(False, err_msg) + + def get_len_failed_expectations(self): + return len(self.failed_expectations) diff --git a/tests/platform_tests/api/test_psu.py b/tests/platform_tests/api/test_psu.py index 877187b40d5..b9ad83b1ea2 100644 --- a/tests/platform_tests/api/test_psu.py +++ b/tests/platform_tests/api/test_psu.py @@ -6,7 +6,7 @@ from tests.common.utilities import skip_release from tests.platform_tests.cli.util import get_skip_mod_list from .platform_api_test_base import PlatformApiTestBase -from tests.common.utilities import skip_release_for_platform +from tests.common.utilities import skip_release_for_platform, wait_until ################################################### @@ -89,6 +89,16 @@ def skip_absent_psu(self, psu_num, platform_api_conn): return True return False + def get_psu_parameter(self, psu_info, psu_parameter, get_data, message): + data = None + is_supported = self.get_psu_facts(psu_info["duthost"], psu_info["psu_id"], True, psu_parameter) + if is_supported: + data = get_data(psu_info["api"], psu_info["psu_id"]) + if self.expect(data is not None, "Failed to retrieve {} of PSU {}".format(message, psu_info["psu_id"])): + self.expect(isinstance(data, float), "PSU {} {} appears incorrect".format(psu_info["psu_id"], message)) + + return data + # # Functions to test methods inherited from DeviceBase class # @@ -204,68 +214,61 @@ def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, plat ''' PSU power test ''' 
duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["arista"]) + voltage = current = power = None + psu_info = { + "duthost": duthost, + "api": platform_api_conn, + "psu_id": None + } + + def check_psu_power(failure_count): + nonlocal voltage + nonlocal current + nonlocal power + voltage = self.get_psu_parameter(psu_info, "voltage", psu.get_voltage, "voltage") + current = self.get_psu_parameter(psu_info, "current", psu.get_current, "current") + power = self.get_psu_parameter(psu_info, "power", psu.get_power, "power") + + failure_occured = self.get_len_failed_expectations() > failure_count + if current and voltage and power: + is_within_tolerance = abs(power - (voltage*current)) < power*0.1 + if not failure_occured and not is_within_tolerance: + return False + + self.expect(is_within_tolerance, "PSU {} reading does not make sense \ + (power:{}, voltage:{}, current:{})".format(psu_id, power, voltage, current)) + + return True for psu_id in range(self.num_psus): + failure_count = self.get_len_failed_expectations() + psu_info['psu_id'] = psu_id name = psu.get_name(platform_api_conn, psu_id) if name in self.psu_skip_list: logger.info("skipping check for {}".format(name)) else: - voltage = None - voltage_supported = self.get_psu_facts(duthost, psu_id, True, "voltage") - if voltage_supported: - voltage = psu.get_voltage(platform_api_conn, psu_id) - if self.expect(voltage is not None, "Failed to retrieve voltage of PSU {}".format(psu_id)): - self.expect(isinstance(voltage, float), "PSU {} voltage appears incorrect".format(psu_id)) - current = None - current_supported = self.get_psu_facts(duthost, psu_id, True, "current") - if current_supported: - current = psu.get_current(platform_api_conn, psu_id) - if self.expect(current is not None, "Failed to retrieve current of PSU {}".format(psu_id)): - self.expect(isinstance(current, float), "PSU {} current appears incorrect".format(psu_id)) - power = None - 
power_supported = self.get_psu_facts(duthost, psu_id, True, "power") - if power_supported: - power = psu.get_power(platform_api_conn, psu_id) - if self.expect(power is not None, "Failed to retrieve power of PSU {}".format(psu_id)): - self.expect(isinstance(power, float), "PSU {} power appears incorrect".format(psu_id)) - max_supp_power = None - max_power_supported = self.get_psu_facts(duthost, psu_id, True, "max_power") - if max_power_supported: - max_supp_power = psu.get_maximum_supplied_power(platform_api_conn, psu_id) - if self.expect(max_supp_power is not None, - "Failed to retrieve maximum supplied power power of PSU {}".format(psu_id)): - self.expect(isinstance(max_supp_power, float), - "PSU {} maximum supplied power appears incorrect".format(psu_id)) - - if current is not None and voltage is not None and power is not None: - self.expect(abs(power - (voltage*current)) < power*0.1, "PSU {} reading does not make sense \ - (power:{}, voltage:{}, current:{})".format(psu_id, power, voltage, current)) + check_result = wait_until(30, 10, 0, check_psu_power, failure_count) + self.expect(check_result, "PSU {} reading does not make sense \ + (power:{}, voltage:{}, current:{})".format(psu_id, power, voltage, current)) + + self.get_psu_parameter(psu_info, "max_power", psu.get_maximum_supplied_power, + "maximum supplied power") powergood_status = psu.get_powergood_status(platform_api_conn, psu_id) if self.expect(powergood_status is not None, "Failed to retrieve operational status of PSU {}".format(psu_id)): self.expect(powergood_status is True, "PSU {} is not operational".format(psu_id)) - high_threshold = None - voltage_high_threshold_supported = self.get_psu_facts(duthost, psu_id, True, "voltage_high_threshold") - if voltage_high_threshold_supported: - high_threshold = psu.get_voltage_high_threshold(platform_api_conn, psu_id) - if self.expect(high_threshold is not None, - "Failed to retrieve the high voltage threshold of PSU {}".format(psu_id)): - 
self.expect(isinstance(high_threshold, float), - "PSU {} voltage high threshold appears incorrect".format(psu_id)) - low_threshold = None - voltage_low_threshold_supported = self.get_psu_facts(duthost, psu_id, True, "voltage_low_threshold") - if voltage_low_threshold_supported: - low_threshold = psu.get_voltage_low_threshold(platform_api_conn, psu_id) - if self.expect(low_threshold is not None, - "Failed to retrieve the low voltage threshold of PSU {}".format(psu_id)): - self.expect(isinstance(low_threshold, float), - "PSU {} voltage low threshold appears incorrect".format(psu_id)) - if high_threshold is not None and low_threshold is not None: + high_threshold = self.get_psu_parameter(psu_info, "voltage_high_threshold", + psu.get_voltage_high_threshold, "high voltage threshold") + low_threshold = self.get_psu_parameter(psu_info, "voltage_low_threshold", + psu.get_voltage_low_threshold, "low voltage threshold") + + if high_threshold and low_threshold: self.expect(voltage < high_threshold and voltage > low_threshold, "Voltage {} of PSU {} is not in between {} and {}" .format(voltage, psu_id, low_threshold, high_threshold)) + self.assert_expectations() def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): diff --git a/tests/platform_tests/sfp/test_show_intf_xcvr.py b/tests/platform_tests/sfp/test_show_intf_xcvr.py index 928720e0a34..962e58dd762 100644 --- a/tests/platform_tests/sfp/test_show_intf_xcvr.py +++ b/tests/platform_tests/sfp/test_show_intf_xcvr.py @@ -83,7 +83,19 @@ def test_check_show_lpmode(duthosts, enum_rand_one_per_hwsku_frontend_hostname, return assert sfp_lpmode['rc'] == 0, "Run command '{}' failed".format(cmd_sfp_presence) + sfp_lpmode_data = sfp_lpmode["stdout_lines"] + + # Check if the header is present + header = sfp_lpmode_data[0] + logging.info(f"The header is: {header}") + if header.replace(" ", "") != "Port Low-power Mode".replace(" ", ""): + logging.error("Invalid output format: Header missing") 
+ return False + + # Check interface lpmode + sfp_lpmode_info = parse_output(sfp_lpmode_data[2:]) + logging.info(f"The interface sfp lpmode info is: {sfp_lpmode_info}") for intf in dev_conn: if intf not in xcvr_skip_list[duthost.hostname]: assert validate_transceiver_lpmode( - sfp_lpmode['stdout']), "Interface mode incorrect in 'show interface transceiver lpmode'" + sfp_lpmode_info, intf), "Interface mode incorrect in 'show interface transceiver lpmode'" diff --git a/tests/platform_tests/sfp/util.py b/tests/platform_tests/sfp/util.py index 50065ceb421..c793291b432 100644 --- a/tests/platform_tests/sfp/util.py +++ b/tests/platform_tests/sfp/util.py @@ -47,15 +47,14 @@ def get_dev_conn(duthost, conn_graph_facts, asic_index): return portmap, dev_conn -def validate_transceiver_lpmode(output): - lines = output.strip().split('\n') - # Check if the header is present - if lines[0].replace(" ", "") != "Port Low-power Mode".replace(" ", ""): - logging.error("Invalid output format: Header missing") +def validate_transceiver_lpmode(sfp_lpmode, port): + lpmode = sfp_lpmode.get(port) + if lpmode is None: + logging.error(f"Interface {port} does not present in the show command") return False - for line in lines[2:]: - port, lpmode = line.strip().split() - if lpmode not in ["Off", "On"]: - logging.error("Invalid low-power mode {} for port {}".format(lpmode, port)) - return False + + if lpmode not in ["Off", "On"]: + logging.error("Invalid low-power mode {} for port {}".format(lpmode, port)) + return False + return True diff --git a/tests/platform_tests/test_chassis_reboot.py b/tests/platform_tests/test_chassis_reboot.py index 30d2bc5d255..9d872818529 100644 --- a/tests/platform_tests/test_chassis_reboot.py +++ b/tests/platform_tests/test_chassis_reboot.py @@ -5,6 +5,7 @@ import random import logging import time +import concurrent.futures from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until from tests.common.reboot import 
wait_for_startup,\ @@ -59,15 +60,16 @@ def test_parallel_reboot(duthosts, localhost, conn_graph_facts, xcvr_skip_list): core_dumps = {} # Perform reboot on multiple LCs within 30sec - for dut in duthosts: - if dut.is_supervisor_node(): - continue + executor = concurrent.futures.ThreadPoolExecutor(max_workers=8) + for dut in duthosts.frontend_nodes: # collect core dump before reboot core_dumps[dut.hostname] = get_core_dump(dut) # Perform cold reboot on all linecards, with an internal within 30sec to mimic a parallel reboot scenario - chassis_cold_reboot(dut, localhost) + # Change this to threaded reboot, to avoid ansible command timeout in 60sec, we have seen some T2 platform + # reboot exceed 60 sec, and causes test to error out + executor.submit(chassis_cold_reboot, dut, localhost) # Wait for 0 ~ 30sec rand_interval = random.randint(0, 30) @@ -88,9 +90,7 @@ def test_parallel_reboot(duthosts, localhost, conn_graph_facts, xcvr_skip_list): "Not all BGP sessions are established on DUT") # Check if new core dumps are generated - for dut in duthosts: - if dut.is_supervisor_node(): - continue + for dut in duthosts.frontend_nodes: post_core_dump = get_core_dump(dut) new_core_dumps = (set(post_core_dump) - set(core_dumps[dut.hostname])) diff --git a/tests/platform_tests/test_link_down.py b/tests/platform_tests/test_link_down.py index 75426c219ef..7cad169c6a2 100644 --- a/tests/platform_tests/test_link_down.py +++ b/tests/platform_tests/test_link_down.py @@ -26,7 +26,6 @@ MAX_TIME_TO_REBOOT = 120 -@pytest.fixture(scope='function') def set_max_to_reboot(duthost): """ For chassis testbeds, we need to specify plt_reboot_ctrl in inventory file, @@ -148,12 +147,13 @@ def check_interfaces_and_services_all_LCs(duthosts, conn_graph_facts, xcvr_skip_ def test_link_down_on_sup_reboot(duthosts, localhost, enum_supervisor_dut_hostname, - conn_graph_facts, set_max_to_reboot, + conn_graph_facts, fanouthosts, xcvr_skip_list): if len(duthosts.nodes) == 1: pytest.skip("Skip single-host dut 
for this test") duthost = duthosts[enum_supervisor_dut_hostname] + set_max_to_reboot(duthost) # There are some errors due to reboot happened before this test file for some reason, # and SUP may not have enough time to recover all dockers and the wait for process wait for 300 secs in @@ -203,9 +203,10 @@ def test_link_down_on_sup_reboot(duthosts, localhost, enum_supervisor_dut_hostna def test_link_status_on_host_reboot(duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, - conn_graph_facts, set_max_to_reboot, + conn_graph_facts, fanouthosts, xcvr_skip_list): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + set_max_to_reboot(duthost) hostname = duthost.hostname # Before test, check all interfaces and services are up diff --git a/tests/process_monitoring/test_critical_process_monitoring.py b/tests/process_monitoring/test_critical_process_monitoring.py index 6626c7f5260..b99fcf190f8 100755 --- a/tests/process_monitoring/test_critical_process_monitoring.py +++ b/tests/process_monitoring/test_critical_process_monitoring.py @@ -18,7 +18,7 @@ from tests.common.helpers.dut_utils import get_group_program_info from tests.common.helpers.dut_utils import is_container_running from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer -from tests.common.utilities import wait_until +from tests.common.utilities import wait_until, kill_process_by_pid logger = logging.getLogger(__name__) @@ -371,34 +371,6 @@ def get_containers_namespace_ids(duthost, skip_containers): return containers_in_namespaces -def kill_process_by_pid(duthost, container_name, program_name, program_pid): - """Kills a process in the specified container by its pid. - - Args: - duthost: Hostname of DUT. - container_name: A string shows container name. - program_name: A string shows process name. - program_pid: An integer represents the PID of a process. - - Returns: - None. 
- """ - if "20191130" in duthost.os_version: - kill_cmd_result = duthost.shell("docker exec {} supervisorctl stop {}".format(container_name, program_name)) - else: - # If we used the command `supervisorctl stop ' to stop process, - # Supervisord will treat the exit code of process as expected and it will not generate - # alerting message. - kill_cmd_result = duthost.shell("docker exec {} kill -SIGKILL {}".format(container_name, program_pid)) - - # Get the exit code of 'kill' or 'supervisorctl stop' command - exit_code = kill_cmd_result["rc"] - pytest_assert(exit_code == 0, "Failed to stop program '{}' before test".format(program_name)) - - logger.info("Program '{}' in container '{}' was stopped successfully" - .format(program_name, container_name)) - - def check_and_kill_process(duthost, container_name, program_name, program_status, program_pid): """Checks the running status of a critical process. If it is running, kill it. Otherwise, fail this test. diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index 7fc6e5a2471..ab87aa8e4b3 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -105,7 +105,8 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={}, module_ignore_errors=False, is_python3=None, async_mode=False, pdb=False): dut_type = get_dut_type(host) - if dut_type == "kvm" and params.get("kvm_support", True) is False: + kvm_support = params.get("kvm_support", False) + if dut_type == "kvm" and kvm_support is False: logger.info("Skip test case {} for not support on KVM DUT".format(testname)) return True diff --git a/tests/qos/files/cisco/qos_param_generator.py b/tests/qos/files/cisco/qos_param_generator.py index 3f109ee69e5..b38a30d33c1 100644 --- a/tests/qos/files/cisco/qos_param_generator.py +++ b/tests/qos/files/cisco/qos_param_generator.py @@ -462,6 +462,8 @@ def __define_buffer_pool_watermark(self): "pkts_num_trig_pfc": self.lossless_drop_thr // self.buffer_size // packet_buffs, "cell_size": self.buffer_size, "packet_size": 
packet_size} + if self.dutAsic == "gr2": + lossless_params["pkts_num_margin"] = 8 self.write_params("wm_buf_pool_lossless", lossless_params) if self.should_autogen(["wm_buf_pool_lossy"]): lossy_params = {"dscp": self.dscp_queue0, @@ -472,6 +474,8 @@ def __define_buffer_pool_watermark(self): "pkts_num_fill_egr_min": 0, "cell_size": self.buffer_size, "packet_size": packet_size} + if self.dutAsic == "gr2": + lossy_params["pkts_num_margin"] = 8 self.write_params("wm_buf_pool_lossy", lossy_params) def __define_q_shared_watermark(self): diff --git a/tests/qos/files/qos_params.j2c.yaml b/tests/qos/files/qos_params.j2c.yaml index aa88cee420c..9b569efaca0 100644 --- a/tests/qos/files/qos_params.j2c.yaml +++ b/tests/qos/files/qos_params.j2c.yaml @@ -481,7 +481,7 @@ qos_params: dscp: 8 ecn: 1 pg: 0 - pkts_num_trig_egr_drp: 2179900 + pkts_num_trig_egr_drp: 2179770 pkts_num_margin: 100 wm_pg_shared_lossless: dscp: 3 @@ -497,7 +497,7 @@ qos_params: ecn: 1 pg: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 2179900 + pkts_num_trig_egr_drp: 2179770 packet_size: 64 cell_size: 4096 pkts_num_margin: 40 @@ -523,7 +523,7 @@ qos_params: ecn: 1 queue: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 2179900 + pkts_num_trig_egr_drp: 2179770 cell_size: 4096 wm_buf_pool_lossy: dscp: 8 diff --git a/tests/qos/files/qos_params.spc3.yaml b/tests/qos/files/qos_params.spc3.yaml index b132af4c1ae..e4b91b88e8f 100644 --- a/tests/qos/files/qos_params.spc3.yaml +++ b/tests/qos/files/qos_params.spc3.yaml @@ -99,6 +99,39 @@ qos_params: pkts_num_trig_pfc: 176064 pkts_num_trig_ingr_drp: 177916 pkts_num_margin: 4 + topo-dualtor-aa: + 100000_40m: + pkts_num_leak_out: 0 + pcbb_xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 + pcbb_xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 + pcbb_xoff_3: + outer_dscp: 2 + dscp: 3 + ecn: 1 + pg: 2 + pkts_num_trig_pfc: 176064 + 
pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 + pcbb_xoff_4: + outer_dscp: 6 + dscp: 4 + ecn: 1 + pg: 6 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 topo-dualtor-aa-64-breakout: 200000_40m: pkts_num_leak_out: 0 diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 06152d3300b..270799e8f02 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -1839,7 +1839,8 @@ def testQosSaiQSharedWatermark( "pkts_num_fill_min": qosConfig[queueProfile]["pkts_num_fill_min"], "pkts_num_trig_drp": triggerDrop, "cell_size": qosConfig[queueProfile]["cell_size"], - "hwsku": dutTestParams['hwsku'] + "hwsku": dutTestParams['hwsku'], + "dut_asic": dutConfig["dutAsic"] }) if "platform_asic" in dutTestParams["basicParams"]: diff --git a/tests/radv/test_radv_ipv6_ra.py b/tests/radv/test_radv_ipv6_ra.py index 8f453bb68fe..aff361fa3aa 100644 --- a/tests/radv/test_radv_ipv6_ra.py +++ b/tests/radv/test_radv_ipv6_ra.py @@ -134,7 +134,8 @@ def test_radv_router_advertisement( "downlink_vlan_mac": vlan_intf['downlink_vlan_intf']['mac'], "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], - "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS}, + "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS, + "kvm_support": True}, log_file="/tmp/radv_ipv6_ra_test.RadvUnSolicitedRATest.log", is_python3=True) @@ -161,7 +162,8 @@ def test_solicited_router_advertisement( "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], "ptf_port_ip6": vlan_intf['ptf_port']['ip6'], - "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS}, + "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS, + "kvm_support": True}, log_file="/tmp/radv_ipv6_ra_test.RadvSolicitedRATest.log", is_python3=True) @@ -187,7 +189,8 @@ def test_unsolicited_router_advertisement_with_m_flag( "downlink_vlan_mac": vlan_intf['downlink_vlan_intf']['mac'], "downlink_vlan_ip6": 
vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], - "max_ra_interval": 180}, + "max_ra_interval": 180, + "kvm_support": True}, log_file="/tmp/router_adv_mflag_test.RadvUnSolicitedRATest.log", is_python3=True) @@ -214,5 +217,6 @@ def test_solicited_router_advertisement_with_m_flag( "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], "ptf_port_ip6": vlan_intf['ptf_port']['ip6'], - "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS}, + "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS, + "kvm_support": True}, log_file="/tmp/router_adv_mflag_test.RadvSolicitedRATest.log", is_python3=True) diff --git a/tests/route/test_route_consistency.py b/tests/route/test_route_consistency.py index f36cfdf0935..f106951688d 100644 --- a/tests/route/test_route_consistency.py +++ b/tests/route/test_route_consistency.py @@ -1,8 +1,14 @@ import pytest import logging +import threading +import queue import re import time import math +from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.dut_utils import get_program_info +from tests.common.config_reload import config_reload +from tests.common.utilities import kill_process_by_pid, wait_until pytestmark = [ pytest.mark.topology('any') @@ -11,6 +17,38 @@ logger = logging.getLogger(__name__) +def check_and_kill_process(duthost, container_name, program_name): + """Checks the running status of a critical process. If it is running, kill it. Otherwise, + fail this test. + + Args: + duthost: Hostname of DUT. + container_name: A string shows container name. + program_name: A string shows process name. + + Returns: + None. 
+ """ + program_status, program_pid = get_program_info(duthost, container_name, program_name) + if program_status == "RUNNING": + kill_process_by_pid(duthost, container_name, program_name, program_pid) + elif program_status in ["EXITED", "STOPPED", "STARTING"]: + pytest.fail("Program '{}' in container '{}' is in the '{}' state, expected 'RUNNING'" + .format(program_name, container_name, program_status)) + else: + pytest.fail("Failed to find program '{}' in container '{}'" + .format(program_name, container_name)) + + +def is_all_neighbor_session_established(duthost): + # handle both multi-asic and single-asic + bgp_facts = duthost.bgp_facts(num_npus=duthost.sonichost.num_asics())["ansible_facts"] + for neighbor in bgp_facts["bgp_neighbors"]: + if bgp_facts["bgp_neighbors"][neighbor]["state"] != "established": + return False + return True + + class TestRouteConsistency(): """ TestRouteConsistency class for testing route consistency across all the Frontend DUTs in the testbed It verifies route consistency by taking a snapshot of route table from ASIC_DB from all the DUTs before the test @@ -28,16 +66,34 @@ def extract_dest_ips(self, route_entries): def get_route_prefix_snapshot_from_asicdb(self, duthosts): prefix_snapshot = {} max_prefix_cnt = 0 + + def retrieve_route_snapshot(asic, prefix_snapshot, dut_instance_name, signal_queue): + prefix_snapshot[dut_instance_name] = \ + set(self.extract_dest_ips(asic.run_sonic_db_cli_cmd('ASIC_DB KEYS *ROUTE_ENTRY*')['stdout_lines'])) + logger.debug("snapshot of route table from {}: {}".format(dut_instance_name, + len(prefix_snapshot[dut_instance_name]))) + signal_queue.put(1) + + thread_count = 0 + signal_queue = queue.Queue() for idx, dut in enumerate(duthosts.frontend_nodes): for asic in dut.asics: dut_instance_name = dut.hostname + '-' + str(asic.asic_index) if dut.facts['switch_type'] == "voq" and idx == 0: dut_instance_name = dut_instance_name + "UpstreamLc" - prefix_snapshot[dut_instance_name] = \ - 
set(self.extract_dest_ips(asic.run_sonic_db_cli_cmd('ASIC_DB KEYS *ROUTE_ENTRY*')['stdout_lines'])) - logger.debug("snapshot of route table from {}: {}".format(dut_instance_name, - len(prefix_snapshot[dut_instance_name]))) - max_prefix_cnt = max(max_prefix_cnt, len(prefix_snapshot[dut_instance_name])) + threading.Thread(target=retrieve_route_snapshot, args=(asic, prefix_snapshot, + dut_instance_name, signal_queue)).start() + thread_count += 1 + + ts1 = time.time() + while signal_queue.qsize() < thread_count: + ts2 = time.time() + if (ts2 - ts1) > 60: + raise TimeoutError("Get route prefix snapshot from asicdb Timeout!") + continue + + for dut_instance_name in prefix_snapshot.keys(): + max_prefix_cnt = max(max_prefix_cnt, len(prefix_snapshot[dut_instance_name])) return prefix_snapshot, max_prefix_cnt @pytest.fixture(scope="class", autouse=True) @@ -49,7 +105,7 @@ def setup(self, duthosts): withdraw and advertise the routes by peers. """ self.__class__.sleep_interval = math.ceil(max_prefix_cnt/3000) + 120 - logger.debug("max_no_of_prefix: {} sleep_interval: {}".format(max_prefix_cnt, self.sleep_interval)) + logger.info("max_no_of_prefix: {} sleep_interval: {}".format(max_prefix_cnt, self.sleep_interval)) def test_route_withdraw_advertise(self, duthosts, tbinfo, localhost): @@ -137,3 +193,58 @@ def test_bgp_shut_noshut(self, duthosts, enum_rand_one_per_hwsku_frontend_hostna # startup bgp back in case of any exception duthost.shell("sudo config bgp startup all") time.sleep(self.sleep_interval) + + @pytest.mark.disable_loganalyzer + @pytest.mark.parametrize("container_name, program_name", [ + ("bgp", "bgpd"), + ("syncd", "syncd"), + ("swss", "orchagent") + ]) + def test_critical_process_crash_and_recover(self, duthosts, container_name, program_name): + duthost = None + for idx, dut in enumerate(duthosts.frontend_nodes): + if dut.facts['switch_type'] == "voq" and idx == 0: + # pick a UpstreamLC to get higher route churn in VoQ chassis + duthost = dut + if duthost is 
None: + duthost = duthosts[0] + logger.info("test_{}_crash_and_recover: DUT{}".format(program_name, duthost.hostname)) + + namespace_ids, succeeded = duthost.get_namespace_ids(container_name) + pytest_assert(succeeded, "Failed to get namespace ids of container '{}'".format(container_name)) + logger.info("namespace_ids: {}".format(namespace_ids)) + + try: + logger.info("kill {}(s) for {}".format(program_name, duthost.hostname)) + for id in namespace_ids: + if id is None: + id = "" + check_and_kill_process(duthost, container_name + str(id), program_name) + time.sleep(30) + + post_withdraw_route_snapshot, _ = self.get_route_prefix_snapshot_from_asicdb(duthosts) + num_routes_withdrawn = 0 + for dut_instance_name in self.pre_test_route_snapshot.keys(): + if num_routes_withdrawn == 0: + num_routes_withdrawn = len(self.pre_test_route_snapshot[dut_instance_name] - + post_withdraw_route_snapshot[dut_instance_name]) + logger.info("num_routes_withdrawn: {}".format(num_routes_withdrawn)) + else: + assert num_routes_withdrawn == len(self.pre_test_route_snapshot[dut_instance_name] - + post_withdraw_route_snapshot[dut_instance_name]) + + logger.info("Recover containers on {}".format(duthost.hostname)) + config_reload(duthost) + wait_until(300, 10, 0, is_all_neighbor_session_established, duthost) + time.sleep(self.sleep_interval) + + # take the snapshot of route table from all the DUTs + post_test_route_snapshot, _ = self.get_route_prefix_snapshot_from_asicdb(duthosts) + for dut_instance_name in self.pre_test_route_snapshot.keys(): + assert self.pre_test_route_snapshot[dut_instance_name] == post_test_route_snapshot[dut_instance_name] + logger.info("Route table is consistent across all the DUTs") + except Exception: + # startup bgpd back in case of any exception + logger.info("Encountered error. 
Perform a config reload to recover!") + config_reload(duthost) + time.sleep(self.sleep_interval) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 86369ffff56..2d649a4221e 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -1193,11 +1193,11 @@ def runTest(self): print(list(map(operator.sub, pg_cntrs, pg_cntrs_base)), file=sys.stderr) for i in range(0, PG_NUM): - # DNX/Chassis: - # pg = 0 => Some extra packets with unmarked TC - # pg = 4 => Extra packets for LACP/BGP packets - # pg = 7 => packets from cpu to front panel ports - if platform_asic and platform_asic in ["broadcom-dnx", "cisco-8000"]: + if platform_asic and platform_asic == "broadcom-dnx": + # DNX/Chassis: + # pg = 0 => Some extra packets with unmarked TC + # pg = 4 => Extra packets for LACP/BGP packets + # pg = 7 => packets from cpu to front panel ports if i == pg: if i == 3: assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) @@ -1210,9 +1210,19 @@ def runTest(self): assert (pg_cntrs[i] == pg_cntrs_base[i]) else: if i == pg: - assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) + if i == 0 or i == 4: + assert (pg_cntrs[pg] >= + pg_cntrs_base[pg] + len(dscps)) + else: + assert (pg_cntrs[pg] == + pg_cntrs_base[pg] + len(dscps)) else: - assert (pg_cntrs[i] == pg_cntrs_base[i]) + # LACP packets are mapped to queue0 and tcp syn packets for BGP to queue4 + # So for those queues the count could be more + if i == 0 or i == 4: + assert (pg_cntrs[i] >= pg_cntrs_base[i]) + else: + assert (pg_cntrs[i] == pg_cntrs_base[i]) # confirm that dscp pkts are received total_recv_cnt = 0 dscp_recv_cnt = 0 @@ -3494,16 +3504,7 @@ def get_pfc_tx_cnt(src_port_id, pg_cntr_idx): pfc_tx_cnt_base = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) time.sleep(2) xoff_txd = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) - pfc_tx_cnt_base - print("Verifying XOFF TX, count {}".format(xoff_txd), file=sys.stderr) - assert xoff_txd != 0, "Expected XOFF" - - # 
TODO: Revisit when TX counter in this case is correctly handled - send_packet(self, src_port_id, pkt, 1) - time.sleep(2) - pfc_tx_cnt_base = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) - time.sleep(2) - xoff_txd = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) - pfc_tx_cnt_base - print("Verifying XOFF TX stopped, count {}".format(xoff_txd), file=sys.stderr) + print("Verifying no XOFF TX, count {}".format(xoff_txd), file=sys.stderr) assert xoff_txd == 0, "Unexpected XOFF" finally: @@ -5003,6 +5004,7 @@ def runTest(self): cell_size = int(self.test_params['cell_size']) hwsku = self.test_params['hwsku'] platform_asic = self.test_params['platform_asic'] + dut_asic = self.test_params['dut_asic'] if 'packet_size' in list(self.test_params.keys()): packet_length = int(self.test_params['packet_size']) @@ -5056,6 +5058,9 @@ def runTest(self): recv_counters_base, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) + if 'cisco-8000' in asic_type: + fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type) + pg_cntrs_base = sai_thrift_read_pg_counters(self.src_client, port_list['src'][src_port_id]) dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.dst_client, port_list['dst'][dst_port_id]) q_wm_res_base, pg_shared_wm_res_base, pg_headroom_wm_res_base = sai_thrift_read_port_watermarks( @@ -5133,6 +5138,7 @@ def runTest(self): else: pkts_num = 1 + margin fragment = 0 + refill_queue = 'cisco-8000' in asic_type and dut_asic != 'gr2' while (expected_wm < total_shared - fragment): expected_wm += pkts_num * cell_occupancy if (expected_wm > total_shared): @@ -5142,9 +5148,9 @@ def runTest(self): expected_wm -= diff * cell_occupancy fragment = total_shared - expected_wm - if 'cisco-8000' in asic_type: + if refill_queue: 
self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) - assert (fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) + fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type) pkts_total += pkts_num pkts_num = pkts_total - 1 @@ -5153,7 +5159,7 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num) - if 'cisco-8000' in asic_type: + if refill_queue: self.sai_thrift_port_tx_enable( self.dst_client, asic_type, [dst_port_id]) @@ -5205,7 +5211,7 @@ def runTest(self): pkts_num = pkts_inc - if 'cisco-8000' in asic_type: + if refill_queue: self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) fill_leakout_plus_one( self, src_port_id, dst_port_id, pkt, queue, asic_type) @@ -5215,7 +5221,7 @@ def runTest(self): # overflow the shared pool send_packet(self, src_port_id, pkt, pkts_num) - if 'cisco-8000' in asic_type: + if refill_queue: self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) @@ -5243,7 +5249,7 @@ def runTest(self): logging.info("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + "stat - so ignoring this step for now") else: - assert (expected_wm * cell_size <= q_wm_res[queue]) + assert ((expected_wm - margin) * cell_size <= q_wm_res[queue]) assert (q_wm_res[queue] <= (expected_wm + margin) * cell_size) finally: diff --git a/tests/smartswitch/common/device_utils_dpu.py b/tests/smartswitch/common/device_utils_dpu.py index 1367eff53f5..9b80882dd66 100644 --- a/tests/smartswitch/common/device_utils_dpu.py +++ b/tests/smartswitch/common/device_utils_dpu.py @@ -8,7 +8,6 @@ from tests.common.helpers.platform_api import chassis, module from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert -from pkg_resources import parse_version @pytest.fixture(scope='function') @@ -23,10 +22,10 @@ def num_dpu_modules(platform_api_conn): return num_modules 
-@pytest.fixture(scope='function') +@pytest.fixture(scope='function', autouse=True) def check_smartswitch_and_dark_mode(duthosts, enum_rand_one_per_hwsku_hostname, - platform_api_conn): + platform_api_conn, num_dpu_modules): """ Checks whether given testbed is running 202405 image or below versions @@ -38,17 +37,17 @@ def check_smartswitch_and_dark_mode(duthosts, duthost = duthosts[enum_rand_one_per_hwsku_hostname] - if not duthost.facts["DPUS"] and \ - parse_version(duthost.os_version) <= parse_version("202405"): - pytest.skip("Test is not supported for this testbed and os version") + if "DPUS" not in duthost.facts: + pytest.skip("Test is not supported for this testbed") - darkmode = is_dark_mode_enabled(duthost, platform_api_conn) + darkmode = is_dark_mode_enabled(duthost, platform_api_conn, + num_dpu_modules) if darkmode: - dpu_power_on(duthost, platform_api_conn) + dpu_power_on(duthost, platform_api_conn, num_dpu_modules) -def is_dark_mode_enabled(duthost, platform_api_conn): +def is_dark_mode_enabled(duthost, platform_api_conn, num_dpu_modules): """ Checks the liveliness of DPU Returns: @@ -56,10 +55,9 @@ def is_dark_mode_enabled(duthost, platform_api_conn): else False """ - num_modules = num_dpu_modules(platform_api_conn) count_admin_down = 0 - for index in range(num_modules): + for index in range(num_dpu_modules): dpu = module.get_name(platform_api_conn, index) output_config_db = duthost.command( 'redis-cli -p 6379 -h 127.0.0.1 \ @@ -70,7 +68,7 @@ def is_dark_mode_enabled(duthost, platform_api_conn): if 'down' in output_config_db['stdout']: count_admin_down += 1 - if count_admin_down == num_modules: + if count_admin_down == num_dpu_modules: logging.info("Smartswitch is in dark mode") return True @@ -78,17 +76,16 @@ def is_dark_mode_enabled(duthost, platform_api_conn): return False -def dpu_power_on(duthost, platform_api_conn): +def dpu_power_on(duthost, platform_api_conn, num_dpu_modules): """ Executes power on all DPUs Returns: Returns True or False based 
on all DPUs powered on or not """ - num_modules = num_dpu_modules(platform_api_conn) ip_address_list = [] - for index in range(num_modules): + for index in range(num_dpu_modules): dpu = module.get_name(platform_api_conn, index) ip_address_list.append( module.get_midplane_ip(platform_api_conn, index)) @@ -129,7 +126,7 @@ def check_dpu_module_status(duthost, power_status, dpu_name): Returns True or False based on status of given DPU module """ - output_dpu_status = duthost.command( + output_dpu_status = duthost.shell( 'show chassis module status | grep %s' % (dpu_name)) if "Offline" in output_dpu_status["stdout"]: @@ -158,7 +155,7 @@ def check_dpu_reboot_cause(duthost, dpu_name): Returns True or False based on reboot cause of all DPU modules """ - output_reboot_cause = duthost.command( + output_reboot_cause = duthost.shell( 'show reboot-cause all | grep %s' % (dpu_name)) if 'Unknown' in output_reboot_cause["stdout"]: diff --git a/tests/smartswitch/platform_tests/test_reload_dpu.py b/tests/smartswitch/platform_tests/test_reload_dpu.py index 4c4d7b61d20..1e8e7518f33 100644 --- a/tests/smartswitch/platform_tests/test_reload_dpu.py +++ b/tests/smartswitch/platform_tests/test_reload_dpu.py @@ -17,18 +17,18 @@ from tests.platform_tests.api.conftest import * # noqa: F401,F403 pytestmark = [ - pytest.mark.topology('t1') + pytest.mark.topology('smartswitch') ] def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, num_dpu_modules): + localhost, platform_api_conn, + num_dpu_modules): """ @summary: Verify output of `config chassis modules startup ` """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] ip_address_list = [] - num_modules = num_dpu_modules(platform_api_conn) logging.info("Starting switch reboot...") reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, @@ -39,7 +39,7 @@ def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, "Not all ports that are admin up on are operationally up") 
logging.info("Interfaces are up") - for index in range(num_modules): + for index in range(num_dpu_modules): ip_address_list.append( module.get_midplane_ip(platform_api_conn, index)) dpu = module.get_name(platform_api_conn, index) @@ -52,16 +52,16 @@ def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, def test_show_ping_int_after_reload(duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, num_dpu_modules): + localhost, platform_api_conn, + num_dpu_modules): """ @summary: To Check Ping between NPU and DPU after configuration reload on NPU """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] - num_modules = num_dpu_modules(platform_api_conn) ip_address_list = [] - for index in range(num_modules): + for index in range(num_dpu_modules): ip_address_list.append( module.get_midplane_ip(platform_api_conn, index)) diff --git a/tests/smartswitch/platform_tests/test_show_platform_dpu.py b/tests/smartswitch/platform_tests/test_show_platform_dpu.py index 9c575a47880..5049975b67d 100644 --- a/tests/smartswitch/platform_tests/test_show_platform_dpu.py +++ b/tests/smartswitch/platform_tests/test_show_platform_dpu.py @@ -12,7 +12,7 @@ from tests.common.devices.sonic import * # noqa: 403 pytestmark = [ - pytest.mark.topology('t1') + pytest.mark.topology('smartswitch') ] @@ -44,9 +44,8 @@ def test_shutdown_power_up_dpu(duthosts, enum_rand_one_per_hwsku_hostname, @summary: Verify `shut down and power up DPU` """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] - num_modules = num_dpu_modules(platform_api_conn) - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules shutdown %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -54,7 +53,7 @@ def test_shutdown_power_up_dpu(duthosts, enum_rand_one_per_hwsku_hostname, duthost, "off", dpu_name), "DPU is not operationally down") - for index in range(num_modules): + for index 
in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules startup %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -69,9 +68,8 @@ def test_reboot_cause(duthosts, enum_rand_one_per_hwsku_hostname, @summary: Verify `Reboot Cause` """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] - num_modules = num_dpu_modules(platform_api_conn) - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthost.shell("config chassis \ module shutdown %s" % (dpu_name))["stdout_lines"] @@ -80,7 +78,7 @@ def test_reboot_cause(duthosts, enum_rand_one_per_hwsku_hostname, duthost, "off", dpu_name), "DPU is not operationally down") - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules startup %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -105,9 +103,7 @@ def test_pcie_link(duthosts, enum_rand_one_per_hwsku_hostname, 'PCIe Device Checking All Test ----------->>> PASSED', "PCIe Link is good'{}'".format(duthost.hostname)) - num_modules = num_dpu_modules(platform_api_conn) - - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules shutdown %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -120,7 +116,7 @@ def test_pcie_link(duthosts, enum_rand_one_per_hwsku_hostname, 'PCIe Device Checking All Test ----------->>> PASSED', "PCIe Link is good'{}'".format(duthost.hostname)) - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules startup %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, diff --git a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py 
b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py index 0158317a951..8a03b72ac0e 100644 --- a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py +++ b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py @@ -9,30 +9,34 @@ get_snappi_ports # noqa: F401 from tests.common.snappi_tests.qos_fixtures import prio_dscp_map, all_prio_list, lossless_prio_list,\ lossy_prio_list # noqa F401 -from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED +from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED # noqa: F401 from tests.snappi_tests.multidut.pfc.files.multidut_helper import run_pfc_test -from tests.common.reboot import reboot -from tests.common.utilities import wait_until import logging from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.files.helper import skip_warm_reboot +from tests.snappi_tests.files.helper import reboot_duts, setup_ports_and_dut, multidut_port_info # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) +@pytest.fixture(autouse=True) +def number_of_tx_rx_ports(): + yield (1, 1) + + def test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, enum_dut_lossy_prio, - prio_dscp_map, # noqa: F811 - lossy_prio_list, # noqa: F811 - all_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - multidut_port_info): # noqa: F811 + prio_dscp_map, # noqa: F811 + lossy_prio_list, # noqa: F811 + all_prio_list, # noqa: F811 + lossless_prio_list, # noqa: F811 + get_snappi_ports, # noqa: F811 + tbinfo, # noqa: F811 + setup_ports_and_dut # noqa: F811 + ): """ Test if PFC will impact a single lossy priority in multidut setup @@ -51,31 +55,7 @@ def 
test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut _, lossy_prio = enum_dut_lossy_prio.split('|') lossy_prio = int(lossy_prio) @@ -99,10 +79,8 @@ def test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 @@ -112,14 +90,14 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 lossless_prio_list, # noqa: F811 get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info): # noqa: F811 + 
setup_ports_and_dut): # noqa: F811 """ Test if PFC will impact multiple lossy priorities in multidut setup Args: snappi_api (pytest fixture): SNAPPI session conn_graph_facts (pytest fixture): connection graph - fanout_graph_facts (pytest fixture): fanout graph + fanout_graph_facts_multidut (pytest fixture): fanout graph duthosts (pytest fixture): list of DUTs prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). lossless_prio_list (pytest fixture): list of all the lossless priorities @@ -129,31 +107,7 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut pause_prio_list = lossy_prio_list test_prio_list = lossy_prio_list @@ -174,35 +128,33 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, localhost, - enum_dut_lossy_prio_with_completeness_level, - prio_dscp_map, # noqa: F811 - lossy_prio_list, # noqa: F811 - all_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 + enum_dut_lossy_prio, + prio_dscp_map, # noqa: F811 + lossy_prio_list, # noqa: F811 + all_prio_list, # noqa: F811 + lossless_prio_list, # noqa: F811 + get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - reboot_type, - multidut_port_info): + setup_ports_and_dut, # noqa: F811 + reboot_duts): # noqa: F811 """ Test if PFC will impact a single lossy priority after various kinds of reboots in multidut setup Args: snappi_api (pytest fixture): SNAPPI session conn_graph_facts (pytest fixture): connection graph - fanout_graph_facts (pytest fixture): fanout graph + fanout_graph_facts_multidut (pytest fixture): fanout graph duthosts (pytest fixture): list of DUTs localhost (pytest 
fixture): localhost handle - enum_dut_lossy_prio_with_completeness_level (str): lossy priority to test, e.g., 's6100-1|2' + enum_dut_lossy_prio (str): name of lossy priority to test, e.g., 's6100-1|2' prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). lossy_prio_list (pytest fixture): list of all the lossy priorities all_prio_list (pytest fixture): list of all the priorities @@ -212,48 +164,15 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) - - _, lossy_prio = enum_dut_lossy_prio_with_completeness_level.split('|') + _, lossy_prio = enum_dut_lossy_prio.split('|') lossy_prio = int(lossy_prio) pause_prio_list = [lossy_prio] test_prio_list = [lossy_prio] bg_prio_list = [p for p in all_prio_list] bg_prio_list.remove(lossy_prio) - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - wait_until(180, 20, 0, duthost.critical_services_fully_started) - snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -269,24 +188,21 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, localhost, - prio_dscp_map, # noqa: F811 - lossy_prio_list, # noqa: F811 - lossless_prio_list, # noqa: F811 + 
prio_dscp_map, # noqa: F811 + lossy_prio_list, # noqa: F811 + lossless_prio_list, # noqa: F811 get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - reboot_type, - multidut_port_info): + tbinfo, # noqa: F811 + setup_ports_and_dut, # noqa: F811 + reboot_duts): # noqa: F811 """ Test if PFC will impact multiple lossy priorities after various kinds of reboots @@ -294,7 +210,7 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 snappi_api (pytest fixture): SNAPPI session snappi_testbed_config (pytest fixture): testbed configuration information conn_graph_facts (pytest fixture): connection graph - fanout_graph_facts (pytest fixture): fanout graph + fanout_graph_facts_multidut (pytest fixture): fanout graph localhost (pytest fixture): localhost handle duthosts (pytest fixture): list of DUTs prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). @@ -306,44 +222,12 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut pause_prio_list = lossy_prio_list test_prio_list = lossy_prio_list bg_prio_list = lossless_prio_list - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - wait_until(180, 20, 0, duthost.critical_services_fully_started) - snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -359,4 +243,3 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) diff --git a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py index 01b5a64a899..126a5fe14dd 100644 --- a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py +++ b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py @@ -11,28 +11,36 @@ get_snappi_ports_multi_dut, is_snappi_multidut, \ snappi_api, snappi_dut_base_config, get_snappi_ports, get_snappi_ports_for_rdma, cleanup_config # noqa: F401 from 
tests.common.snappi_tests.qos_fixtures import prio_dscp_map, lossless_prio_list # noqa F401 -from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED +from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED # noqa: F401 from tests.common.reboot import reboot # noqa: F401 from tests.common.utilities import wait_until # noqa: F401 from tests.snappi_tests.multidut.pfcwd.files.pfcwd_multidut_basic_helper import run_pfcwd_basic_test from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.files.helper import skip_warm_reboot, skip_pfcwd_test # noqa: F401 +from tests.snappi_tests.files.helper import skip_pfcwd_test, reboot_duts, \ + setup_ports_and_dut, multidut_port_info # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] +WAIT_TIME = 600 +INTERVAL = 40 + + +@pytest.fixture(autouse=True) +def number_of_tx_rx_ports(): + yield (1, 1) + @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 - trigger_pfcwd): + tbinfo, # noqa: F811 + prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 + trigger_pfcwd, # noqa: F811 + ): """ Run PFC watchdog basic test on a single lossless priority @@ -47,33 +55,7 @@ def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv 
file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - skip_pfcwd_test(duthost=snappi_ports[0]['duthost'], trigger_pfcwd=trigger_pfcwd) - skip_pfcwd_test(duthost=snappi_ports[1]['duthost'], trigger_pfcwd=trigger_pfcwd) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut lossless_prio = random.sample(lossless_prio_list, 1) lossless_prio = int(lossless_prio[0]) @@ -92,20 +74,16 @@ def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_multi_lossless_prio(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 trigger_pfcwd): """ Run PFC watchdog basic test on multiple lossless priorities @@ -122,31 +100,7 @@ def test_pfcwd_basic_multi_lossless_prio(snappi_api, # noqa F811 
Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -162,25 +116,21 @@ def test_pfcwd_basic_multi_lossless_prio(snappi_api, # noqa F811 trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 localhost, duthosts, - enum_dut_lossless_prio_with_completeness_level, # noqa: F811 - 
get_snappi_ports, # noqa: F811 + lossless_prio_list, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 - reboot_type, - trigger_pfcwd): + prio_dscp_map, # noqa: F811 + setup_ports_and_dut, # noqa: F811 + reboot_duts, # noqa: F811 + trigger_pfcwd # noqa: F811 + ): """ Verify PFC watchdog basic test works on a single lossless priority after various types of reboot @@ -190,7 +140,6 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no fanout_graph_facts_multidut (pytest fixture): fanout graph localhost (pytest fixture): localhost handle duthosts (pytest fixture): list of DUTs - enum_dut_lossless_prio_with_completeness_level (str): lossless priority to test, e.g., 's6100-1|3' prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority) reboot_type (str): reboot type to be issued on the DUT trigger_pfcwd (bool): if PFC watchdog is expected to be triggered @@ -199,46 +148,13 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) - - _, lossless_prio = enum_dut_lossless_prio_with_completeness_level.split('|') - lossless_prio = int(lossless_prio) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut + + lossless_prio = random.sample(lossless_prio_list, 1) + lossless_prio = int(lossless_prio[0]) snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") - run_pfcwd_basic_test(api=snappi_api, testbed_config=testbed_config, port_config_list=port_config_list, @@ -250,24 +166,19 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # noqa F811 
conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 localhost, duthosts, - lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 - reboot_type, + lossless_prio_list, # noqa: F811 + tbinfo, # noqa: F811 + prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 + reboot_duts, # noqa: F811 trigger_pfcwd): """ Verify PFC watchdog basic test works on multiple lossless priorities after various kinds of reboots @@ -286,41 +197,7 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) - - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -336,24 +213,20 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer @pytest.mark.parametrize('restart_service', ['swss']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 + prio_dscp_map, # noqa: F811 restart_service, - trigger_pfcwd): + trigger_pfcwd, + setup_ports_and_dut): # 
noqa: F811 """ Verify PFC watchdog basic test works on a single lossless priority after various service restarts @@ -369,31 +242,7 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut lossless_prio = random.sample(lossless_prio_list, 1) lossless_prio = int(lossless_prio[0]) @@ -406,24 +255,27 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, ports_dict[k] = list(set(ports_dict[k])) logger.info('Port dictionary:{}'.format(ports_dict)) - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + # Record current state of critical services. 
+ duthost.critical_services_fully_started() + asic_list = ports_dict[duthost.hostname] - for asic in asic_list: - asic_id = re.match(r"(asic)(\d+)", asic).group(2) - proc = 'swss@' + asic_id - logger.info("Issuing a restart of service {} on the dut {}".format(proc, duthost.hostname)) - duthost.command("sudo systemctl reset-failed {}".format(proc)) - duthost.command("sudo systemctl restart {}".format(proc)) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") + asic = random.sample(asic_list, 1)[0] + asic_id = re.match(r"(asic)(\d+)", asic).group(2) + proc = 'swss@' + asic_id + logger.info("Issuing a restart of service {} on the dut {}".format(proc, duthost.hostname)) + duthost.command("sudo systemctl reset-failed {}".format(proc)) + duthost.command("sudo systemctl restart {}".format(proc)) + logger.info("Wait until the system is stable") + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), + "Not all critical services are fully started") else: - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) duthost.command("systemctl reset-failed {}".format(restart_service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") snappi_extra_params = SnappiTestParams() @@ -440,23 +292,19 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - 
cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer @pytest.mark.parametrize('restart_service', ['swss']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, prio_dscp_map, # noqa F811 restart_service, + setup_ports_and_dut, # noqa: F811 trigger_pfcwd): """ Verify PFC watchdog basic test works on multiple lossless priorities after various service restarts @@ -474,31 +322,8 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut if (snappi_ports[0]['duthost'].is_multi_asic): ports_dict = defaultdict(list) @@ -509,7 +334,7 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, ports_dict[k] = list(set(ports_dict[k])) logger.info('Port dictionary:{}'.format(ports_dict)) - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): asic_list = ports_dict[duthost.hostname] for asic in asic_list: asic_id = re.match(r"(asic)(\d+)", asic).group(2) @@ -518,15 +343,15 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, duthost.command("sudo systemctl reset-failed {}".format(proc)) duthost.command("sudo systemctl restart {}".format(proc)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") else: - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) duthost.command("systemctl reset-failed {}".format(restart_service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - 
pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") snappi_extra_params = SnappiTestParams() @@ -541,5 +366,3 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, prio_dscp_map=prio_dscp_map, trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - - cleanup_config(duthosts, snappi_ports) diff --git a/tests/snappi_tests/pfc/files/cisco_pfc_packet.py b/tests/snappi_tests/pfc/files/cisco_pfc_packet.py new file mode 100644 index 00000000000..298e12b12d5 --- /dev/null +++ b/tests/snappi_tests/pfc/files/cisco_pfc_packet.py @@ -0,0 +1,32 @@ +""" +The CiscoPFCPacket module handles Cisco specific PFC frame checks +""" +from tests.common.snappi_tests.pfc_packet import PFCPacket, PRIO_DEFAULT_LEN + + +class CiscoPFCPacket(PFCPacket): + def __init__(self, pfc_frame_bytes=None, cbfc_opcode=None, class_enable_vec=None, class_pause_times=None): + """ + Initialize the PFCPacket base class + + """ + super().__init__(pfc_frame_bytes, cbfc_opcode, class_enable_vec, class_pause_times) + + def _check_class_pause_times(self): + """ + Check if class pause times are valid. 
Both conditions must be met: + 1) class pause times are between 0x0 and 0xFFFF + 2) class pause times are 0 if the corresponding bit in the class enable vector is 0, and vice versa + + Args: + + Returns: + True if valid class pause times, False otherwise + """ + for i in range(len(self.class_pause_times)): + if self.class_pause_times[i] < 0x0 or self.class_pause_times[i] > 0xFFFF: + return False + elif self.class_pause_times[i] == 0x0 and self.class_enable_vec[PRIO_DEFAULT_LEN - i - 1] == "1": + return False + + return True diff --git a/tests/snappi_tests/pfc/files/helper.py b/tests/snappi_tests/pfc/files/helper.py index 154e5fd17bf..89216bf0b96 100644 --- a/tests/snappi_tests/pfc/files/helper.py +++ b/tests/snappi_tests/pfc/files/helper.py @@ -18,7 +18,8 @@ verify_in_flight_buffer_pkts, verify_unset_cev_pause_frame_count, verify_tx_frame_count_dut, \ verify_rx_frame_count_dut from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.common.snappi_tests.read_pcap import validate_pfc_frame +from tests.common.cisco_data import is_cisco_device +from tests.common.snappi_tests.read_pcap import validate_pfc_frame, validate_pfc_frame_cisco logger = logging.getLogger(__name__) @@ -191,7 +192,7 @@ def run_pfc_test(api, # PFC pause frame capture is not requested valid_pfc_frame_test = False - if valid_pfc_frame_test: + if valid_pfc_frame_test and not is_cisco_device(duthost): snappi_extra_params.traffic_flow_config.pause_flow_config["flow_dur_sec"] = DATA_FLOW_DURATION_SEC + \ data_flow_delay_sec + SNAPPI_POLL_DELAY_SEC + PAUSE_FLOW_DUR_BASE_SEC snappi_extra_params.traffic_flow_config.pause_flow_config["flow_traffic_type"] = \ @@ -245,7 +246,11 @@ def run_pfc_test(api, # Verify PFC pause frames if valid_pfc_frame_test: - is_valid_pfc_frame, error_msg = validate_pfc_frame(snappi_extra_params.packet_capture_file + ".pcapng") + if not is_cisco_device(duthost): + is_valid_pfc_frame, error_msg = 
validate_pfc_frame(snappi_extra_params.packet_capture_file + ".pcapng") + else: + is_valid_pfc_frame, error_msg = validate_pfc_frame_cisco( + snappi_extra_params.packet_capture_file + ".pcapng") pytest_assert(is_valid_pfc_frame, error_msg) return diff --git a/tests/snappi_tests/pfc/files/valid_src_mac_pfc_frame_helper.py b/tests/snappi_tests/pfc/files/valid_src_mac_pfc_frame_helper.py new file mode 100644 index 00000000000..bda9d53ec45 --- /dev/null +++ b/tests/snappi_tests/pfc/files/valid_src_mac_pfc_frame_helper.py @@ -0,0 +1,207 @@ +import logging +import time + +from tests.common.helpers.assertions import pytest_assert +from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\ + fanout_graph_facts # noqa F401 +from tests.common.snappi_tests.snappi_helpers import get_dut_port_id +from tests.common.snappi_tests.common_helpers import pfc_class_enable_vector,\ + get_lossless_buffer_size, get_pg_dropped_packets,\ + stop_pfcwd, disable_packet_aging, sec_to_nanosec,\ + get_pfc_frame_count, packet_capture, config_capture_pkt,\ + traffic_flow_mode, calc_pfc_pause_flow_rate # noqa F401 +from tests.common.snappi_tests.port import select_ports, select_tx_port # noqa F401 +from tests.common.snappi_tests.snappi_helpers import wait_for_arp # noqa F401 +from tests.common.snappi_tests.traffic_generation import setup_base_traffic_config, generate_test_flows, \ + generate_pause_flows, run_traffic, verify_basic_test_flow +from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.common.snappi_tests.read_pcap import validate_pfc_frame_cisco + +logger = logging.getLogger(__name__) + +dut_port_config = [] +PAUSE_FLOW_NAME = 'Pause Storm' +TEST_FLOW_NAME = 'Test Flow' +TEST_FLOW_AGGR_RATE_PERCENT = 45 +data_flow_pkt_size = 1024 +DATA_FLOW_DURATION_SEC = 5 +data_flow_delay_sec = 1 +TOLERANCE_THRESHOLD = 0.05 +ANSIBLE_POLL_DELAY_SEC = 4 + + +def run_pfc_valid_src_mac_test( + api, + testbed_config, + port_config_list, + conn_data, + fanout_data, + 
duthost, + dut_port, + global_pause, + pause_prio_list, + test_prio_list, + prio_dscp_map, + test_traffic_pause, + snappi_extra_params=None): + """ + Run a PFC test + Args: + api (obj): snappi session + testbed_config (obj): testbed L1/L2/L3 configuration + port_config_list (list): list of port configuration + conn_data (dict): the dictionary returned by conn_graph_fact. + fanout_data (dict): the dictionary returned by fanout_graph_fact. + duthost (Ansible host instance): device under test + dut_port (str): DUT port to test + global_pause (bool): if pause frame is IEEE 802.3X pause + pause_prio_list (list): priorities to pause for pause frames + test_prio_list (list): priorities of test flows + prio_dscp_map (dict): Priority vs. DSCP map (key = priority). + test_traffic_pause (bool): if test flows are expected to be paused + snappi_extra_params (SnappiTestParams obj): additional parameters for Snappi traffic + + Returns: + N/A + """ + + pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') + + if snappi_extra_params is None: + snappi_extra_params = SnappiTestParams() + + stop_pfcwd(duthost) + disable_packet_aging(duthost) + global DATA_FLOW_DURATION_SEC + global data_flow_delay_sec + + # Get the ID of the port to test + port_id = get_dut_port_id(dut_hostname=duthost.hostname, + dut_port=dut_port, + conn_data=conn_data, + fanout_data=fanout_data) + + pytest_assert(port_id is not None, + 'Fail to get ID for port {}'.format(dut_port)) + + # Rate percent must be an integer + test_flow_rate_percent = int(TEST_FLOW_AGGR_RATE_PERCENT / len(test_prio_list)) + + # Generate base traffic config + snappi_extra_params.base_flow_config = setup_base_traffic_config(testbed_config=testbed_config, + port_config_list=port_config_list, + port_id=port_id) + + speed_str = testbed_config.layer1[0].speed + speed_gbps = int(speed_str.split('_')[1]) + + if snappi_extra_params.poll_device_runtime: + # If the switch needs to be polled as traffic is running for stats, + 
# then the test runtime needs to be increased for the polling delay + DATA_FLOW_DURATION_SEC += ANSIBLE_POLL_DELAY_SEC + data_flow_delay_sec = ANSIBLE_POLL_DELAY_SEC + + if snappi_extra_params.packet_capture_type != packet_capture.NO_CAPTURE: + # Setup capture config + if snappi_extra_params.is_snappi_ingress_port_cap: + # packet capture is required on the ingress snappi port + snappi_extra_params.packet_capture_ports = [snappi_extra_params.base_flow_config["rx_port_name"]] + else: + # packet capture will be on the egress snappi port + snappi_extra_params.packet_capture_ports = [snappi_extra_params.base_flow_config["tx_port_name"]] + + snappi_extra_params.packet_capture_file = snappi_extra_params.packet_capture_type.value + + config_capture_pkt(testbed_config=testbed_config, + port_names=snappi_extra_params.packet_capture_ports, + capture_type=snappi_extra_params.packet_capture_type, + capture_name=snappi_extra_params.packet_capture_file) + logger.info("Packet capture file: {}.pcapng".format(snappi_extra_params.packet_capture_file)) + + # Set default traffic flow configs if not set + if snappi_extra_params.traffic_flow_config.data_flow_config is None: + snappi_extra_params.traffic_flow_config.data_flow_config = { + "flow_name": TEST_FLOW_NAME, + "flow_dur_sec": DATA_FLOW_DURATION_SEC, + "flow_rate_percent": test_flow_rate_percent, + "flow_rate_pps": None, + "flow_rate_bps": None, + "flow_pkt_size": data_flow_pkt_size, + "flow_pkt_count": None, + "flow_delay_sec": data_flow_delay_sec, + "flow_traffic_type": traffic_flow_mode.FIXED_DURATION + } + + if snappi_extra_params.traffic_flow_config.pause_flow_config is None: + snappi_extra_params.traffic_flow_config.pause_flow_config = { + "flow_name": PAUSE_FLOW_NAME, + "flow_dur_sec": None, + "flow_rate_percent": None, + "flow_rate_pps": calc_pfc_pause_flow_rate(speed_gbps), + "flow_rate_bps": None, + "flow_pkt_size": 64, + "flow_pkt_count": None, + "flow_delay_sec": 0, + "flow_traffic_type": traffic_flow_mode.CONTINUOUS + 
} + + if snappi_extra_params.packet_capture_type == packet_capture.PFC_CAPTURE: + # PFC pause frame capture is requested + validate_pfc_frame = True + else: + # PFC pause frame capture is not requested + validate_pfc_frame = False + + # Generate test flow config + generate_test_flows(testbed_config=testbed_config, + test_flow_prio_list=test_prio_list, + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + + # Generate pause storm config + generate_pause_flows(testbed_config=testbed_config, + pause_prio_list=pause_prio_list, + global_pause=global_pause, + snappi_extra_params=snappi_extra_params) + + flows = testbed_config.flows + + all_flow_names = [flow.name for flow in flows] + data_flow_names = [flow.name for flow in flows if PAUSE_FLOW_NAME not in flow.name] + + # Clear PFC, queue and interface counters before traffic run + duthost.command(" sonic-clear pfccounters") + duthost.command("sonic-clear queuecounters") + duthost.command("sonic-clear counters") + time.sleep(1) + + """ Run traffic """ + tgen_flow_stats, _, _ = run_traffic( + duthost=duthost, + api=api, + config=testbed_config, + data_flow_names=data_flow_names, + all_flow_names=all_flow_names, + exp_dur_sec=DATA_FLOW_DURATION_SEC + + data_flow_delay_sec, + snappi_extra_params=snappi_extra_params) + + # Reset pfc delay parameter + pfc = testbed_config.layer1[0].flow_control.ieee_802_1qbb + pfc.pfc_delay = 0 + + # Verify PFC pause frames + if validate_pfc_frame: + peer_mac_addr = snappi_extra_params.base_flow_config["rx_port_config"].gateway_mac + is_valid_pfc_frame, error_msg = validate_pfc_frame_cisco( + snappi_extra_params.packet_capture_file + ".pcapng", + peer_mac_addr=peer_mac_addr) + pytest_assert(is_valid_pfc_frame, error_msg) + return + + # Verify basic test flows metrics from ixia + verify_basic_test_flow(flow_metrics=tgen_flow_stats, + speed_gbps=speed_gbps, + tolerance=TOLERANCE_THRESHOLD, + test_flow_pause=test_traffic_pause, + snappi_extra_params=snappi_extra_params) 
diff --git a/tests/snappi_tests/pfc/test_valid_src_mac_pfc_frame.py b/tests/snappi_tests/pfc/test_valid_src_mac_pfc_frame.py new file mode 100644 index 00000000000..fddc0408ee5 --- /dev/null +++ b/tests/snappi_tests/pfc/test_valid_src_mac_pfc_frame.py @@ -0,0 +1,98 @@ +import logging +import pytest + + +from tests.snappi_tests.pfc.files.valid_src_mac_pfc_frame_helper import run_pfc_valid_src_mac_test +from tests.common.helpers.assertions import pytest_require +from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\ + fanout_graph_facts # noqa F401 +from tests.common.snappi_tests.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port,\ + snappi_api, snappi_testbed_config, is_snappi_multidut # noqa F401 +from tests.common.snappi_tests.qos_fixtures import prio_dscp_map, all_prio_list, lossless_prio_list,\ + lossy_prio_list # noqa F401 +from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.common.snappi_tests.common_helpers import packet_capture +from tests.common.cisco_data import is_cisco_device + +logger = logging.getLogger(__name__) + +pytestmark = [pytest.mark.topology('tgen')] + + +def test_valid_pfc_frame_src_mac( + snappi_api, # noqa F811 + snappi_testbed_config, # noqa F811 + conn_graph_facts, # noqa F811 + fanout_graph_facts, # noqa F811 + duthosts, + rand_one_dut_hostname, + rand_one_dut_portname_oper_up, + lossless_prio_list, # noqa F811 + prio_dscp_map): # noqa F811 + """ + Test if PFC Pause frame generated by device under test (DUT) is having a valid src mac + + Topology: + snappi (1) -> DUT -> snappi (2) + + Test steps: + 1) Create congestion on ingress port of ixia (snappi 2). This is done by letting 1 send data traffic to 2, and 2 + sending PFC pause frames to DUT. + 2) tgen 2 sends PFC pause frames to DUT. + 3) DUT responds to PFC frames by also sending back PFC pause frames back to tgen 1. + 4) Using packet capture on tgen 1 port, verify PFC pause frames meet IEEE 802.1Qbb code point standards. 
+ a) There is a pause quanta specified in the frame (value between 0x0 and 0xFFFF). + b) There is a valid class enable vector set on the frame - an 8-bit mask that specifies + which 802.1p priority levels should be paused. + c) The destination MAC address on the frame is "01:80:c2:00:00:01" + d) The source MAC address on the frame is of the DUT port + + Args: + snappi_api (pytest fixture): SNAPPI session + snappi_testbed_config (pytest fixture): testbed configuration information + conn_graph_facts (pytest fixture): connection graph + fanout_graph_facts (pytest fixture): fanout graph + duthosts (pytest fixture): list of DUTs + rand_one_dut_hostname (str): hostname of DUT + rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0' + lossless_prio_list (pytest fixture): list of all the lossless priorities + prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). + + Returns: + N/A + """ + + dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|') + pytest_require(rand_one_dut_hostname == dut_hostname, + "Port is not mapped to the expected DUT") + + testbed_config, port_config_list = snappi_testbed_config + duthost = duthosts[rand_one_dut_hostname] + + if not is_cisco_device(duthost): + pytest.skip("Test is supported on Cisco device only") + + if is_snappi_multidut(duthosts): + pytest.skip("Test is not supported on multi-dut") + + pause_prio_list = lossless_prio_list + test_prio_list = lossless_prio_list + + snappi_extra_params = SnappiTestParams() + snappi_extra_params.packet_capture_type = packet_capture.PFC_CAPTURE + snappi_extra_params.is_snappi_ingress_port_cap = False + + run_pfc_valid_src_mac_test( + api=snappi_api, + testbed_config=testbed_config, + port_config_list=port_config_list, + conn_data=conn_graph_facts, + fanout_data=fanout_graph_facts, + duthost=duthost, + dut_port=dut_port, + global_pause=False, + pause_prio_list=pause_prio_list, + test_prio_list=test_prio_list, + prio_dscp_map=prio_dscp_map, + 
test_traffic_pause=True, + snappi_extra_params=snappi_extra_params) diff --git a/tests/span/span_helpers.py b/tests/span/span_helpers.py index 28dc2f351b7..a85c2b8d309 100644 --- a/tests/span/span_helpers.py +++ b/tests/span/span_helpers.py @@ -5,7 +5,7 @@ import ptf.testutils as testutils -def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor, skip_traffic_test=False): +def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor): ''' Send packet from ptf and verify it on monitor port @@ -18,8 +18,6 @@ def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor, skip_traffic_ pkt = testutils.simple_icmp_packet(eth_src=src_mac, eth_dst='ff:ff:ff:ff:ff:ff') - if skip_traffic_test is True: - return ptfadapter.dataplane.flush() testutils.send(ptfadapter, src_port, pkt) testutils.verify_packet(ptfadapter, pkt, monitor) diff --git a/tests/span/test_port_mirroring.py b/tests/span/test_port_mirroring.py index 646f699264a..145395b8e7f 100644 --- a/tests/span/test_port_mirroring.py +++ b/tests/span/test_port_mirroring.py @@ -5,7 +5,6 @@ import pytest from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from span_helpers import send_and_verify_mirrored_packet pytestmark = [ @@ -13,7 +12,7 @@ ] -def test_mirroring_rx(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_rx(ptfadapter, setup_session): ''' Test case #1 Verify ingress direction session @@ -27,11 +26,10 @@ def test_mirroring_rx(ptfadapter, setup_session, skip_traffic_test): # noqa F ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source1_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) -def test_mirroring_tx(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_tx(ptfadapter, setup_session): ''' Test case #2 Verify egress direction session @@ 
-45,11 +43,10 @@ def test_mirroring_tx(ptfadapter, setup_session, skip_traffic_test): # noqa F ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source2_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) -def test_mirroring_both(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_both(ptfadapter, setup_session): ''' Test case #3 Verify bidirectional session @@ -66,16 +63,14 @@ def test_mirroring_both(ptfadapter, setup_session, skip_traffic_test): # noqa ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source1_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) send_and_verify_mirrored_packet(ptfadapter, setup_session['source2_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) -def test_mirroring_multiple_source(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_multiple_source(ptfadapter, setup_session): ''' Test case #4 Verify ingress direction session with multiple source ports @@ -92,10 +87,8 @@ def test_mirroring_multiple_source(ptfadapter, setup_session, skip_traffic_test) ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source1_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) send_and_verify_mirrored_packet(ptfadapter, setup_session['source2_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) diff --git a/tests/syslog/test_logrotate.py b/tests/syslog/test_logrotate.py index 3dfeabecb95..cabfaab71ab 100644 --- a/tests/syslog/test_logrotate.py +++ b/tests/syslog/test_logrotate.py @@ -15,7 +15,7 @@ ] LOG_FOLDER = '/var/log' -SMALL_VAR_LOG_PARTITION_SIZE = '100M' +SMALL_VAR_LOG_PARTITION_SIZE = '300M' FAKE_IP = 
'10.20.30.40' FAKE_MAC = 'aa:bb:cc:dd:11:22' diff --git a/tests/telemetry/telemetry_utils.py b/tests/telemetry/telemetry_utils.py index ef4cac780e6..1b6de9e9ed5 100644 --- a/tests/telemetry/telemetry_utils.py +++ b/tests/telemetry/telemetry_utils.py @@ -106,7 +106,7 @@ def trigger_logger(duthost, log, process, container="", priority="local0.notice" def generate_client_cli(duthost, gnxi_path, method=METHOD_GET, xpath="COUNTERS/Ethernet0", target="COUNTERS_DB", subscribe_mode=SUBSCRIBE_MODE_STREAM, submode=SUBMODE_SAMPLE, - intervalms=0, update_count=3, create_connections=1, filter_event_regex="", + intervalms=0, update_count=3, create_connections=1, filter_event_regex="", namespace=None, timeout=-1): """ Generate the py_gnmicli command line based on the given params. t --target: gNMI target; required @@ -121,11 +121,15 @@ def generate_client_cli(duthost, gnxi_path, method=METHOD_GET, xpath="COUNTERS/E update_count: Max number of streaming updates to receive. 0 means no limit. default 0 create_connections: Creates TCP connections with gNMI server; default 1; -1 for infinite connections filter_event_regex: Regex to filter event when querying events path + namespace: namespace for multi-asic timeout: Subscription duration in seconds; After X seconds, request terminates; default none """ env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) - cmdFormat = 'python ' + gnxi_path + 'gnmi_cli_py/py_gnmicli.py -g -t {0} -p {1} -m {2} -x {3} -xt {4} -o {5}' - cmd = cmdFormat.format(duthost.mgmt_ip, env.gnmi_port, method, xpath, target, "ndastreamingservertest") + ns = "" + if namespace is not None: + ns = "/{}".format(namespace) + cmdFormat = 'python ' + gnxi_path + 'gnmi_cli_py/py_gnmicli.py -g -t {0} -p {1} -m {2} -x {3} -xt {4}{5} -o {6}' + cmd = cmdFormat.format(duthost.mgmt_ip, env.gnmi_port, method, xpath, target, ns, "ndastreamingservertest") if method == METHOD_SUBSCRIBE: cmd += " --subscribe_mode {0} --submode {1} --interval {2} --update_count {3} 
--create_connections {4}".format( diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index 3014c3aea98..c975f532fd4 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -281,24 +281,33 @@ def test_on_change_updates(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, logger.info("Testing on change update notifications") duthost = duthosts[enum_rand_one_per_hwsku_hostname] + if duthost.is_supervisor_node(): + pytest.skip( + "Skipping test as no Ethernet0 frontpanel port on supervisor") skip_201911_and_older(duthost) - cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_SUBSCRIBE, - submode=SUBMODE_ONCHANGE, update_count=2, xpath="NEIGH_STATE_TABLE", - target="STATE_DB") - bgp_nbrs = list(duthost.get_bgp_neighbors().keys()) + nslist = duthost.get_asic_namespace_list() + ns = random.choice(nslist) + bgp_nbrs = list(duthost.get_bgp_neighbors(ns).keys()) bgp_neighbor = random.choice(bgp_nbrs) - bgp_info = duthost.get_bgp_neighbor_info(bgp_neighbor) + asic_id = duthost.get_asic_id_from_namespace(ns) + bgp_info = duthost.get_bgp_neighbor_info(bgp_neighbor, asic_id) original_state = bgp_info["bgpState"] new_state = "Established" if original_state.lower() == "active" else "Active" + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_SUBSCRIBE, + submode=SUBMODE_ONCHANGE, update_count=2, xpath="NEIGH_STATE_TABLE", + target="STATE_DB", namespace=ns) + def callback(result): logger.info("Assert that ptf client output is non empty and contains on change update") try: assert result != "", "Did not get output from PTF client" finally: - duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, - original_state)) + ccmd = "sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, + original_state) + ccmd = duthost.get_cli_cmd_for_namespace(ccmd, ns) + duthost.shell(ccmd) ret = 
parse_gnmi_output(result, 1, bgp_neighbor) assert ret is True, "Did not find key in update" @@ -306,8 +315,10 @@ def callback(result): client_thread.start() wait_until(5, 1, 0, check_gnmi_cli_running, ptfhost) - duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, - new_state)) + cmd = "sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, + new_state) + cmd = duthost.get_cli_cmd_for_namespace(cmd, ns) + duthost.shell(cmd) client_thread.join(60) # max timeout of 60s, expect update to come in <=30s diff --git a/tests/test_parallel_modes.json b/tests/test_parallel_modes.json index 5d72c23d63f..6d443f8c631 100644 --- a/tests/test_parallel_modes.json +++ b/tests/test_parallel_modes.json @@ -5,16 +5,25 @@ "bgp/test_bgp_session_flap.py": "FULL_PARALLEL", "container_checker/test_container_checker.py": "RP_FIRST", "crm/test_crm.py": "FULL_PARALLEL", + "iface_namingmode/test_iface_namingmode.py": "FULL_PARALLEL", "lldp/test_lldp.py": "FULL_PARALLEL", "memory_checker/test_memory_checker.py": "FULL_PARALLEL", "override_config_table/test_override_config_table_masic.py": "FULL_PARALLEL", "passw_hardening/test_passw_hardening.py": "FULL_PARALLEL", "pc/test_po_cleanup.py": "FULL_PARALLEL", - "pfcwd/test_pfc_config.py": "FULL_PARALLEL", + "platform_tests/api/test_chassis.py": "FULL_PARALLEL", + "platform_tests/api/test_module.py": "FULL_PARALLEL", "platform_tests/api/test_sfp.py": "FULL_PARALLEL", + "platform_tests/api/test_thermal.py": "FULL_PARALLEL", + "platform_tests/cli/test_show_chassis_module.py": "FULL_PARALLEL", + "platform_tests/link_flap/test_cont_link_flap.py": "FULL_PARALLEL", + "platform_tests/sfp/test_sfputil.py": "FULL_PARALLEL", + "platform_tests/test_memory_exhaustion.py": "RP_FIRST", "platform_tests/test_reboot.py": "RP_FIRST", "platform_tests/test_reload_config.py": "RP_FIRST", "platform_tests/test_sequential_restart.py": "FULL_PARALLEL", + "show_techsupport/test_techsupport.py": 
"FULL_PARALLEL", + "show_techsupport/test_techsupport_no_secret.py": "FULL_PARALLEL", "snmp/test_snmp_cpu.py": "FULL_PARALLEL", "snmp/test_snmp_interfaces.py": "FULL_PARALLEL", "snmp/test_snmp_link_local.py": "FULL_PARALLEL", diff --git a/tests/vlan/test_vlan_ping.py b/tests/vlan/test_vlan_ping.py index b2141646673..fd19021c88f 100644 --- a/tests/vlan/test_vlan_ping.py +++ b/tests/vlan/test_vlan_ping.py @@ -10,7 +10,6 @@ from tests.common.helpers.assertions import pytest_assert as py_assert from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 from tests.common.dualtor.dual_tor_utils import lower_tor_host # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -180,10 +179,7 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo, def verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter, tbinfo, - vlan_mac=None, dtor_ul=False, dtor_dl=False, skip_traffic_test=False): # noqa F811 - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return + vlan_mac=None, dtor_ul=False, dtor_dl=False): if dtor_ul is True: # use vlan int mac in case of dualtor UL test pkt pkt = testutils.simple_icmp_packet(eth_src=str(src_port['mac']), @@ -224,7 +220,7 @@ def verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter, tbinfo, def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor_m, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ test for checking connectivity of statically added ipv4 and ipv6 arp entries """ @@ -252,16 +248,12 @@ def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter, for member in ptfhost_info: if 'dualtor' in tbinfo["topo"]["name"]: verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], - vmhost_info, 
ptfadapter, tbinfo, vlan_mac, dtor_ul=True, - skip_traffic_test=skip_traffic_test) + vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True) verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], - ptfadapter, tbinfo, vlan_mac, dtor_dl=True, - skip_traffic_test=skip_traffic_test) + ptfadapter, tbinfo, vlan_mac, dtor_dl=True) else: - verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) - verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo) + verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo) # flushing and re-adding ipv6 static arp entry static_neighbor_entry(duthost, ptfhost_info, "del", "6") @@ -280,13 +272,9 @@ def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter, for member in ptfhost_info: if 'dualtor' in tbinfo["topo"]["name"]: verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], - vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True, - skip_traffic_test=skip_traffic_test) + vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True) verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], - ptfadapter, tbinfo, vlan_mac, dtor_dl=True, - skip_traffic_test=skip_traffic_test) + ptfadapter, tbinfo, vlan_mac, dtor_dl=True) else: - verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) - verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo) + 
verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo) diff --git a/tests/vxlan/test_vnet_vxlan.py b/tests/vxlan/test_vnet_vxlan.py index 41413003f9c..108b4283b70 100644 --- a/tests/vxlan/test_vnet_vxlan.py +++ b/tests/vxlan/test_vnet_vxlan.py @@ -15,8 +15,6 @@ from tests.common.flow_counter.flow_counter_utils import RouteFlowCounterTestContext, is_route_flow_counter_supported # noqa F401 from tests.common.arp_utils import set_up, tear_down, testWrArp -from tests.common.fixtures.ptfhost_utils import skip_traffic_test - from tests.common.config_reload import config_reload logger = logging.getLogger(__name__) @@ -159,7 +157,7 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname, elif request.param == "WR_ARP": route, ptfIp, gwIp = set_up(duthost, ptfhost, tbinfo) try: - testWrArp(request, duthost, ptfhost, creds, skip_traffic_test) + testWrArp(request, duthost, ptfhost, creds) finally: tear_down(duthost, route, ptfIp, gwIp) @@ -190,7 +188,7 @@ def is_neigh_reachable(duthost, vnet_config): def test_vnet_vxlan(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfhost, - vnet_test_params, creds, is_route_flow_counter_supported, skip_traffic_test): # noqa F811 + vnet_test_params, creds, is_route_flow_counter_supported): # noqa F811 """ Test case for VNET VxLAN @@ -229,9 +227,6 @@ def test_vnet_vxlan(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfhos logger.info("Skipping cleanup") pytest.skip("Skip cleanup specified") - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return logger.debug("Starting PTF runner") if scenario == 'Enabled' and vxlan_enabled: route_pattern = 'Vnet1|100.1.1.1/32' diff --git a/tests/vxlan/test_vxlan_bfd_tsa.py b/tests/vxlan/test_vxlan_bfd_tsa.py index 1538efd891f..b2a4ac12909 100644 --- a/tests/vxlan/test_vxlan_bfd_tsa.py +++ b/tests/vxlan/test_vxlan_bfd_tsa.py @@ -14,7 +14,6 @@ from tests.common.helpers.assertions import pytest_assert from 
tests.common.utilities import wait_until from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.vxlan_ecmp_utils import Ecmp_Utils from tests.common.config_reload import config_system_checks_passed @@ -238,8 +237,7 @@ def dump_self_info_and_run_ptf(self, random_sport=False, random_src_ip=False, tolerance=None, - payload=None, - skip_traffic_test=False): # noqa F811 + payload=None): ''' Just a wrapper for dump_info_to_ptf to avoid entering 30 lines everytime. @@ -291,9 +289,6 @@ def dump_self_info_and_run_ptf(self, Logger.info( "dest->nh mapping:%s", self.vxlan_test_setup[encap_type]['dest_to_nh_map']) - if skip_traffic_test is True: - Logger.info("Skipping traffic test.") - return ptf_runner(self.vxlan_test_setup['ptfhost'], "ptftests", "vxlan_traffic.VxLAN_in_VxLAN" if payload == 'vxlan' @@ -411,7 +406,7 @@ def verfiy_bfd_down(self, ep_list): return False return True - def test_tsa_case1(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case1(self, setUp, encap_type): ''' tc1: This test checks the basic TSA removal of BFD sessions. 1) Create Vnet route with 4 endpoints and BFD monitors. 
@@ -428,7 +423,7 @@ def test_tsa_case1(self, setUp, encap_type, skip_traffic_test): # noqa F811 dest, ep_list = self.create_vnet_route(encap_type) - self.dump_self_info_and_run_ptf("test1", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True, []) self.apply_tsa() pytest_assert(self.in_maintainence()) @@ -437,11 +432,11 @@ def test_tsa_case1(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test1b", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1b", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case2(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case2(self, setUp, encap_type): ''' tc2: This test checks the basic route application while in TSA. 1) apply TSA. @@ -464,11 +459,11 @@ def test_tsa_case2(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test2", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case3(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case3(self, setUp, encap_type): ''' tc3: This test checks for lasting impact of TSA and TSB. 1) apply TSA. 
@@ -491,11 +486,11 @@ def test_tsa_case3(self, setUp, encap_type, skip_traffic_test): # noqa F811 dest, ep_list = self.create_vnet_route(encap_type) - self.dump_self_info_and_run_ptf("test3", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test3", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case4(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case4(self, setUp, encap_type): ''' tc4: This test checks basic Vnet route state retention during config reload. 1) Create Vnet route with 4 endpoints and BFD monitors. @@ -514,7 +509,7 @@ def test_tsa_case4(self, setUp, encap_type, skip_traffic_test): # noqa F811 duthost.shell("sudo config save -y", executable="/bin/bash", module_ignore_errors=True) - self.dump_self_info_and_run_ptf("test4", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test4", encap_type, True, []) duthost.shell("sudo config reload -y", executable="/bin/bash", module_ignore_errors=True) @@ -524,11 +519,11 @@ def test_tsa_case4(self, setUp, encap_type, skip_traffic_test): # noqa F811 ecmp_utils.configure_vxlan_switch(duthost, vxlan_port=4789, dutmac=self.vxlan_test_setup['dut_mac']) dest, ep_list = self.create_vnet_route(encap_type) - self.dump_self_info_and_run_ptf("test4b", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test4b", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case5(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case5(self, setUp, encap_type): ''' tc4: This test checks TSA state retention w.r.t BFD accross config reload. 1) Create Vnet route with 4 endpoints and BFD monitors. 
@@ -552,7 +547,7 @@ def test_tsa_case5(self, setUp, encap_type, skip_traffic_test): # noqa F811 duthost.shell("sudo config save -y", executable="/bin/bash", module_ignore_errors=True) - self.dump_self_info_and_run_ptf("test5", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test5", encap_type, True, []) self.apply_tsa() pytest_assert(self.in_maintainence()) @@ -569,11 +564,11 @@ def test_tsa_case5(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test5b", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test5b", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case6(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case6(self, setUp, encap_type): ''' tc6: This test checks that the BFD doesnt come up while device is in TSA and remains down accross config reload. 
@@ -615,6 +610,6 @@ def test_tsa_case6(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test6", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test6", encap_type, True, []) self.delete_vnet_route(encap_type, dest) diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py index b6761ac25fd..5ba9d2c1237 100644 --- a/tests/vxlan/test_vxlan_decap.py +++ b/tests/vxlan/test_vxlan_decap.py @@ -14,7 +14,6 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa F401 from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.dualtor.mux_simulator_control import mux_server_url,\ toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -185,7 +184,7 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname): def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, tbinfo, - ptfhost, creds, toggle_all_simulator_ports_to_rand_selected_tor_m, skip_traffic_test): # noqa F811 + ptfhost, creds, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 duthost = duthosts[rand_one_dut_hostname] sonic_admin_alt_password = duthost.host.options['variable_manager'].\ @@ -199,9 +198,6 @@ def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, tbinf log_file = "/tmp/vxlan-decap.Vxlan.{}.{}.log".format( scenario, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')) - if skip_traffic_test is True: - logger.info("Skip traffic test") - return ptf_runner(ptfhost, "ptftests", "vxlan-decap.Vxlan", diff --git a/tests/vxlan/test_vxlan_ecmp.py b/tests/vxlan/test_vxlan_ecmp.py index 028e856fa2f..a9a419065ac 100644 --- 
a/tests/vxlan/test_vxlan_ecmp.py +++ b/tests/vxlan/test_vxlan_ecmp.py @@ -60,7 +60,6 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import wait_until from tests.ptf_runner import ptf_runner from tests.common.vxlan_ecmp_utils import Ecmp_Utils @@ -394,8 +393,7 @@ def dump_self_info_and_run_ptf(self, random_sport=False, random_src_ip=False, tolerance=None, - payload=None, - skip_traffic_test=False): # noqa F811 + payload=None): ''' Just a wrapper for dump_info_to_ptf to avoid entering 30 lines everytime. @@ -450,9 +448,6 @@ def dump_self_info_and_run_ptf(self, Logger.info( "dest->nh mapping:%s", self.vxlan_test_setup[encap_type]['dest_to_nh_map']) - if skip_traffic_test is True: - Logger.info("Skipping traffic test.") - return ptf_runner(self.vxlan_test_setup['ptfhost'], "ptftests", "vxlan_traffic.VxLAN_in_VxLAN" if payload == 'vxlan' @@ -510,18 +505,16 @@ class Test_VxLAN_route_tests(Test_VxLAN): Common class for the basic route test cases. ''' - def test_vxlan_single_endpoint(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_single_endpoint(self, setUp, encap_type): ''' tc1:Create a tunnel route to a single endpoint a. Send packets to the route prefix dst. 
''' self.vxlan_test_setup = setUp - self.dump_self_info_and_run_ptf("tc1", encap_type, True, skip_traffic_test=skip_traffic_test) - self.dump_self_info_and_run_ptf("tc1", encap_type, True, - payload="vxlan", skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc1", encap_type, True) + self.dump_self_info_and_run_ptf("tc1", encap_type, True, payload="vxlan") - def test_vxlan_modify_route_different_endpoint( - self, setUp, request, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_modify_route_different_endpoint(self, setUp, request, encap_type): ''' tc2: change the route to different endpoint. Packets are received only at endpoint b.") @@ -571,9 +564,9 @@ def test_vxlan_modify_route_different_endpoint( Logger.info( "Copy the new set of configs to the PTF and run the tests.") - self.dump_self_info_and_run_ptf("tc2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc2", encap_type, True) - def test_vxlan_remove_all_route(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_all_route(self, setUp, encap_type): ''' tc3: remove the tunnel route. Send packets to the route prefix dst. packets should not @@ -588,7 +581,7 @@ def test_vxlan_remove_all_route(self, setUp, encap_type, skip_traffic_test): ecmp_utils.get_payload_version(encap_type), "DEL") Logger.info("Verify that the traffic is not coming back.") - self.dump_self_info_and_run_ptf("tc3", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc3", encap_type, False) finally: Logger.info("Restore the routes in the DUT.") ecmp_utils.set_routes_in_dut( @@ -605,7 +598,7 @@ class Test_VxLAN_ecmp_create(Test_VxLAN): create testcases. ''' - def test_vxlan_configure_route1_ecmp_group_a(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_configure_route1_ecmp_group_a(self, setUp, encap_type): ''' tc4:create tunnel route 1 with two endpoints a = {a1, a2...}. 
send packets to the route 1's prefix dst. packets are received at either @@ -646,12 +639,12 @@ def test_vxlan_configure_route1_ecmp_group_a(self, setUp, encap_type, skip_traff Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc4", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc4", encap_type, True) # Add vxlan payload testing as well. self.dump_self_info_and_run_ptf("tc4", encap_type, True, - payload="vxlan", skip_traffic_test=skip_traffic_test) + payload="vxlan") - def test_vxlan_remove_ecmp_route1(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_ecmp_route1(self, setUp, encap_type): ''' Remove tunnel route 1. Send multiple packets (varying tuple) to the route 1's prefix dst. @@ -695,7 +688,7 @@ def test_vxlan_remove_ecmp_route1(self, setUp, encap_type, skip_traffic_test): ecmp_route1_end_point_list) Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc5", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, True) # Deleting Tunnel route 1 ecmp_utils.create_and_apply_config( @@ -710,13 +703,13 @@ def test_vxlan_remove_ecmp_route1(self, setUp, encap_type, skip_traffic_test): {ecmp_route1_new_dest: ecmp_route1_end_point_list} Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc5", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, False) # Restoring dest_to_nh_map to old values self.vxlan_test_setup[encap_type]['dest_to_nh_map'][vnet] = copy.deepcopy(backup_dest) - self.dump_self_info_and_run_ptf("tc5", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, True) - def test_vxlan_configure_route1_ecmp_group_b(self, setUp, encap_type, skip_traffic_test): # noqa 
F811 + def test_vxlan_configure_route1_ecmp_group_b(self, setUp, encap_type): ''' tc5: set tunnel route 2 to endpoint group a = {a1, a2}. send packets to route 2"s prefix dst. packets are received at either a1 @@ -725,7 +718,7 @@ def test_vxlan_configure_route1_ecmp_group_b(self, setUp, encap_type, skip_traff self.vxlan_test_setup = setUp self.setup_route2_ecmp_group_b(encap_type) Logger.info("Verify the configs work and traffic flows correctly.") - self.dump_self_info_and_run_ptf("tc5", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, True) def setup_route2_ecmp_group_b(self, encap_type): ''' @@ -767,7 +760,7 @@ def setup_route2_ecmp_group_b(self, encap_type): self.vxlan_test_setup[encap_type]['tc5_dest'] = tc5_new_dest - def test_vxlan_configure_route2_ecmp_group_b(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_configure_route2_ecmp_group_b(self, setUp, encap_type): ''' tc6: set tunnel route 2 to endpoint group b = {b1, b2}. send packets to route 2"s prefix dst. packets are received at either @@ -809,13 +802,12 @@ def test_vxlan_configure_route2_ecmp_group_b(self, setUp, encap_type, skip_traff tc6_end_point_list) Logger.info("Verify that the traffic works.") - self.dump_self_info_and_run_ptf("tc6", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc6", encap_type, True) @pytest.mark.skipif( "config.option.bfd is False", reason="This test will be run only if '--bfd=True' is provided.") - def test_vxlan_bfd_health_state_change_a2down_a1up( - self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_bfd_health_state_change_a2down_a1up(self, setUp, encap_type): ''' Set BFD state for a1' to UP and a2' to Down. Send multiple packets (varying tuple) to the route 1's prefix dst. 
Packets are received @@ -863,12 +855,12 @@ def test_vxlan_bfd_health_state_change_a2down_a1up( end_point_list[1]) Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc_a2down_a1up", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc_a2down_a1up", encap_type, True) @pytest.mark.skipif( "config.option.bfd is False", reason="This test will be run only if '--bfd=True' is provided.") - def test_vxlan_bfd_health_state_change_a1a2_down(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_bfd_health_state_change_a1a2_down(self, setUp, encap_type): ''' Set BFD state for a1' to Down and a2' to Down. Send multiple packets (varying tuple) to the route 1's prefix dst. Packets @@ -915,14 +907,13 @@ def test_vxlan_bfd_health_state_change_a1a2_down(self, setUp, encap_type, skip_t "a1a2_down", encap_type, True, - packet_count=4, - skip_traffic_test=skip_traffic_test) + packet_count=4) @pytest.mark.skipif( "config.option.bfd is False", reason="This test will be run only if '--bfd=True' is provided.") def test_vxlan_bfd_health_state_change_a2up_a1down( - self, setUp, encap_type, skip_traffic_test): # noqa F811 + self, setUp, encap_type): ''' Set BFD state for a2' to UP. Send packets to the route 1's prefix dst. Packets are received only at endpoint a2. Verify advertise @@ -970,9 +961,9 @@ def test_vxlan_bfd_health_state_change_a2up_a1down( end_point_list[0]) Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("a2up_a1down", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("a2up_a1down", encap_type, True) - def test_vxlan_bfd_health_state_change_a1a2_up(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_bfd_health_state_change_a1a2_up(self, setUp, encap_type): ''' Set BFD state for a1' & a2' to UP. 
Send multiple packets (varying tuple) to the route 1's prefix dst. Packets are received at both @@ -1015,7 +1006,7 @@ def test_vxlan_bfd_health_state_change_a1a2_up(self, setUp, encap_type, skip_tra Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc4", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc4", encap_type, True) # perform cleanup by removing all the routes added by this test class. # reset to add only the routes added in the setup phase. @@ -1206,7 +1197,7 @@ def setup_route2_shared_different_endpoints(self, encap_type): encap_type, tc9_new_nhs) - def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_route2(self, setUp, encap_type): ''' tc7:send packets to route 1's prefix dst. by removing route 2 from group a, no change expected to route 1. @@ -1252,7 +1243,7 @@ def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): encap_type, tc7_end_point_list) Logger.info("Verify the setup works.") - self.dump_self_info_and_run_ptf("tc7", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc7", encap_type, True) Logger.info("End of setup.") Logger.info("Remove one of the routes.") @@ -1272,7 +1263,7 @@ def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): "DEL") Logger.info("Verify the rest of the traffic still works.") - self.dump_self_info_and_run_ptf("tc7", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc7", encap_type, True) # perform cleanup by removing all the routes added by this test class. # reset to add only the routes added in the setup phase. 
@@ -1289,18 +1280,18 @@ def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): ecmp_utils.get_payload_version(encap_type), "SET") - def test_vxlan_route2_single_nh(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_route2_single_nh(self, setUp, encap_type): ''' tc8: set tunnel route 2 to single endpoint b1. Send packets to route 2's prefix dst. ''' self.vxlan_test_setup = setUp self.setup_route2_single_endpoint(encap_type) - self.dump_self_info_and_run_ptf("tc8", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc8", encap_type, True) self.dump_self_info_and_run_ptf("tc8", encap_type, True, - payload="vxlan", skip_traffic_test=skip_traffic_test) + payload="vxlan") - def test_vxlan_route2_shared_nh(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_route2_shared_nh(self, setUp, encap_type): ''' tc9: set tunnel route 2 to shared endpoints a1 and b1. Send packets to route 2's @@ -1308,9 +1299,9 @@ def test_vxlan_route2_shared_nh(self, setUp, encap_type, skip_traffic_test): ''' self.vxlan_test_setup = setUp self.setup_route2_shared_endpoints(encap_type) - self.dump_self_info_and_run_ptf("tc9", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc9", encap_type, True) - def test_vxlan_route2_shared_different_nh(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_route2_shared_different_nh(self, setUp, encap_type): ''' tc9.2: set tunnel route 2 to 2 completely different shared(no-reuse) endpoints a1 and b1. 
send packets @@ -1318,9 +1309,9 @@ def test_vxlan_route2_shared_different_nh(self, setUp, encap_type, skip_traffic_ ''' self.vxlan_test_setup = setUp self.setup_route2_shared_different_endpoints(encap_type) - self.dump_self_info_and_run_ptf("tc9.2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc9.2", encap_type, True) - def test_vxlan_remove_ecmp_route2(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_ecmp_route2(self, setUp, encap_type): ''' tc10: remove tunnel route 2. send packets to route 2's prefix dst. ''' @@ -1369,7 +1360,7 @@ def test_vxlan_remove_ecmp_route2(self, setUp, encap_type, skip_traffic_test): tc10_nhs Logger.info("The deleted route should fail to receive traffic.") - self.dump_self_info_and_run_ptf("tc10", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc10", encap_type, False) # all others should be working. # Housekeeping: @@ -1380,7 +1371,7 @@ def test_vxlan_remove_ecmp_route2(self, setUp, encap_type, skip_traffic_test): del_needed = False Logger.info("Check the traffic is working in the other routes.") - self.dump_self_info_and_run_ptf("tc10", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc10", encap_type, True) except BaseException: self.vxlan_test_setup[encap_type]['dest_to_nh_map'][vnet] = full_map.copy() @@ -1399,7 +1390,7 @@ class Test_VxLAN_ecmp_random_hash(Test_VxLAN): Class for testing different tcp ports for payload. ''' - def test_vxlan_random_hash(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_random_hash(self, setUp, encap_type): ''' tc11: set tunnel route 3 to endpoint group c = {c1, c2, c3}. Ensure c1, c2, and c3 matches to underlay default route. 
@@ -1448,8 +1439,7 @@ def test_vxlan_random_hash(self, setUp, encap_type, skip_traffic_test): # no "tc11", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) @pytest.mark.skipif( @@ -1461,8 +1451,7 @@ class Test_VxLAN_underlay_ecmp(Test_VxLAN): Class for all test cases that modify the underlay default route. ''' @pytest.mark.parametrize("ecmp_path_count", [1, 2]) - def test_vxlan_modify_underlay_default( - self, setUp, minigraph_facts, encap_type, ecmp_path_count, skip_traffic_test): # noqa F811 + def test_vxlan_modify_underlay_default(self, setUp, minigraph_facts, encap_type, ecmp_path_count): ''' tc12: modify the underlay default route nexthop/s. send packets to route 3's prefix dst. @@ -1534,8 +1523,7 @@ def test_vxlan_modify_underlay_default( "tc12", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) Logger.info( "Reverse the action: bring up the selected_intfs" @@ -1582,8 +1570,7 @@ def test_vxlan_modify_underlay_default( "tc12", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) Logger.info("Recovery. Bring all up, and verify traffic works.") for intf in all_t2_intfs: @@ -1611,8 +1598,7 @@ def test_vxlan_modify_underlay_default( "tc12", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) except Exception: # If anything goes wrong in the try block, atleast bring the intf @@ -1640,8 +1626,7 @@ def test_vxlan_modify_underlay_default( def test_vxlan_remove_add_underlay_default(self, setUp, minigraph_facts, - encap_type, - skip_traffic_test): # noqa F811 + encap_type): ''' tc13: remove the underlay default route. tc14: add the underlay default route. 
@@ -1682,7 +1667,7 @@ def test_vxlan_remove_add_underlay_default(self, "BGP neighbors have not reached the required state after " "T2 intf are shutdown.") Logger.info("Verify that traffic is not flowing through.") - self.dump_self_info_and_run_ptf("tc13", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc13", encap_type, False) # tc14: Re-add the underlay default route. Logger.info("Bring up the T2 interfaces.") @@ -1704,8 +1689,7 @@ def test_vxlan_remove_add_underlay_default(self, "tc14", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) except Exception: Logger.info( "If anything goes wrong in the try block," @@ -1724,7 +1708,7 @@ def test_vxlan_remove_add_underlay_default(self, " interfaces have been brought up.") raise - def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type, skip_traffic_test): # noqa F811 + def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type): ''' Create a more specific underlay route to c1. Verify c1 packets are received only on the c1's nexthop interface @@ -1795,8 +1779,7 @@ def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type, skip_ self.dump_self_info_and_run_ptf( "underlay_specific_route", encap_type, - True, - skip_traffic_test=skip_traffic_test) + True) # Deletion of all static routes gateway = all_t2_neighbors[t2_neighbor][outer_layer_version].lower() for _, nexthops in list(endpoint_nhmap.items()): @@ -1844,14 +1827,12 @@ def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type, skip_ self.dump_self_info_and_run_ptf( "underlay_specific_route", encap_type, - True, - skip_traffic_test=skip_traffic_test) + True) def test_underlay_portchannel_shutdown(self, setUp, minigraph_facts, - encap_type, - skip_traffic_test): # noqa F811 + encap_type): ''' Bring down one of the port-channels. 
Packets are equally recieved at c1, c2 or c3 @@ -1859,7 +1840,7 @@ def test_underlay_portchannel_shutdown(self, self.vxlan_test_setup = setUp # Verification of traffic before shutting down port channel - self.dump_self_info_and_run_ptf("tc12", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc12", encap_type, True) # Gathering all portchannels all_t2_portchannel_intfs = \ @@ -1894,7 +1875,7 @@ def test_underlay_portchannel_shutdown(self, self.vxlan_test_setup[encap_type]['t2_ports'], list(self.vxlan_test_setup['list_of_bfd_monitors'])) time.sleep(10) - self.dump_self_info_and_run_ptf("tc12", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc12", encap_type, True) for intf in all_t2_portchannel_members[selected_portchannel]: self.vxlan_test_setup['duthost'].shell( @@ -1906,7 +1887,7 @@ def test_underlay_portchannel_shutdown(self, self.vxlan_test_setup[encap_type]['t2_ports'], list(self.vxlan_test_setup['list_of_bfd_monitors'])) time.sleep(10) - self.dump_self_info_and_run_ptf("tc12", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc12", encap_type, True) except BaseException: for intf in all_t2_portchannel_members[selected_portchannel]: self.vxlan_test_setup['duthost'].shell( @@ -1936,8 +1917,7 @@ def verify_entropy( random_sport=False, random_dport=True, random_src_ip=False, - tolerance=None, - skip_traffic_test=False): # noqa F811 + tolerance=None): ''' Function to be reused by the entropy testcases. 
Sets up a couple of endpoints on the top of the existing ones, and performs the traffic @@ -1981,10 +1961,9 @@ def verify_entropy( random_dport=random_dport, random_src_ip=random_src_ip, packet_count=1000, - tolerance=tolerance, - skip_traffic_test=skip_traffic_test) + tolerance=tolerance) - def test_verify_entropy(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_verify_entropy(self, setUp, encap_type): ''' Verification of entropy - Create tunnel route 4 to endpoint group A. Send packets (fixed tuple) to route 4's prefix dst @@ -1995,10 +1974,9 @@ def test_verify_entropy(self, setUp, encap_type, skip_traffic_test): random_dport=True, random_sport=True, random_src_ip=True, - tolerance=0.75, # More tolerance since this varies entropy a lot. - skip_traffic_test=skip_traffic_test) + tolerance=0.75) # More tolerance since this varies entropy a lot. - def test_vxlan_random_dst_port(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_random_dst_port(self, setUp, encap_type): ''' Verification of entropy - Change the udp dst port of original packet to route 4's prefix dst @@ -2006,7 +1984,7 @@ def test_vxlan_random_dst_port(self, setUp, encap_type, skip_traffic_test): self.vxlan_test_setup = setUp self.verify_entropy(encap_type, tolerance=0.03) - def test_vxlan_random_src_port(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_random_src_port(self, setUp, encap_type): ''' Verification of entropy - Change the udp src port of original packet to route 4's prefix dst @@ -2016,10 +1994,9 @@ def test_vxlan_random_src_port(self, setUp, encap_type, skip_traffic_test): encap_type, random_dport=False, random_sport=True, - tolerance=0.03, - skip_traffic_test=skip_traffic_test) + tolerance=0.03) - def test_vxlan_varying_src_ip(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_varying_src_ip(self, setUp, encap_type): ''' Verification of entropy - Change the udp src ip of original packet to route 4's 
prefix dst @@ -2029,5 +2006,4 @@ def test_vxlan_varying_src_ip(self, setUp, encap_type, skip_traffic_test): encap_type, random_dport=False, random_src_ip=True, - tolerance=0.03, - skip_traffic_test=skip_traffic_test) + tolerance=0.03) diff --git a/tests/vxlan/test_vxlan_ecmp_switchover.py b/tests/vxlan/test_vxlan_ecmp_switchover.py index 200f9f3548d..32a13cadf83 100644 --- a/tests/vxlan/test_vxlan_ecmp_switchover.py +++ b/tests/vxlan/test_vxlan_ecmp_switchover.py @@ -11,7 +11,6 @@ import pytest from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.vxlan_ecmp_utils import Ecmp_Utils @@ -222,8 +221,7 @@ def dump_self_info_and_run_ptf(self, random_sport=False, random_src_ip=False, tolerance=None, - payload=None, - skip_traffic_test=False): # noqa F811 + payload=None): ''' Just a wrapper for dump_info_to_ptf to avoid entering 30 lines everytime. @@ -276,9 +274,6 @@ def dump_self_info_and_run_ptf(self, Logger.info( "dest->nh mapping:%s", self.vxlan_test_setup[encap_type]['dest_to_nh_map']) - if skip_traffic_test is True: - Logger.info("Skipping traffic test.") - return ptf_runner(self.vxlan_test_setup['ptfhost'], "ptftests", "vxlan_traffic.VxLAN_in_VxLAN" if payload == 'vxlan' @@ -292,7 +287,7 @@ def dump_self_info_and_run_ptf(self, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')), is_python3=True) - def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type): ''' tc1:create tunnel route 1 with two endpoints a = {a1, b1}. a1 is primary, b1 is secondary. 1) both a1,b1 are UP. 
@@ -369,7 +364,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)])) assert str(result['stdout']) == ecmp_utils.OVERLAY_DMAC - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) # Single primary-secondary switchover. # Endpoint list = [A, A`], Primary[A] | Active NH=[A] | @@ -387,7 +382,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], tc1_end_point_list[0], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) # Single primary recovery. # Endpoint list = [A, A`], Primary[A] | Active NH=[A`] | @@ -405,7 +400,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], tc1_end_point_list[0], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) # Single primary backup Failure. # Endpoint list = [A, A`]. 
Primary[A]| Active NH=[A`] A is DOWN | @@ -427,7 +422,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ tc1_end_point_list[0], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) ecmp_utils.create_and_apply_priority_config( self.vxlan_test_setup['duthost'], vnet, @@ -447,7 +442,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ [tc1_end_point_list[0]], "DEL") - def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type): ''' tc2:create tunnel route 1 with 6 endpoints a = {A, B, A`, B`}. A,B are primary, A`,B` are secondary. @@ -545,7 +540,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t self.vxlan_test_setup['list_of_downed_endpoints'] = set(inactive_list) time.sleep(10) # ensure that the traffic is distributed to all 3 primary Endpoints. - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Single primary failure. # Endpoint list = [A, B, A`, B`], Primary = [A, B] | active NH = [A, B] | @@ -563,7 +558,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], primary_nhg[0], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. All primary failure. # Endpoint list = [A, B, A`, B`] Primary = [A, B] | A is Down. 
active NH = [B] | @@ -580,7 +575,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], primary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Backup Failure. # Endpoint list = [A, B, A`, B`] Primary = [A, B] | @@ -599,7 +594,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], secondary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Single primary recovery. # Endpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A`] | @@ -617,7 +612,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], primary_nhg[0], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A] | @@ -639,7 +634,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t secondary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup all failure. 
# Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A,B] | @@ -668,7 +663,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], secondary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [] | @@ -698,7 +693,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t secondary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup all failure 2. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A,B] | @@ -727,7 +722,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], secondary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery of secondary. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [] | @@ -749,7 +744,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t secondary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery of primary after secondary. 
# Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A`, B`] | @@ -771,7 +766,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t primary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) ecmp_utils.create_and_apply_priority_config( self.vxlan_test_setup['duthost'], vnet,