#!/usr/bin/env python# Copyright 2018 Cisco Systems, Inc. All rights reserved.## Licensed under the Apache License, Version 2.0 (the "License"); you may# not use this file except in compliance with the License. You may obtain# a copy of the License at## http://www.apache.org/licenses/LICENSE-2.0## Unless required by applicable law or agreed to in writing, software# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the# License for the specific language governing permissions and limitations# under the License.## This module takes care of chaining networks, ports and vms#"""NFVBENCH CHAIN DISCOVERY/STAGING.This module takes care of staging/discovering all resources that are participating in abenchmarking session: flavors, networks, ports, VNF instances.If a resource is discovered with the same name, it will be reused.Otherwise it will be created.ChainManager: manages VM image, flavor, the staging discovery of all chains has 1 or more chainsChain: manages one chain, has 2 or more networks and 1 or more instancesChainNetwork: manages 1 network in a chainChainVnf: manages 1 VNF instance in a chain, has 2 portsChainVnfPort: manages 1 instance portChainManager-->Chain(*)Chain-->ChainNetwork(*),ChainVnf(*)ChainVnf-->ChainVnfPort(2)Once created/discovered, instances are checked to be in the active state (ready to pass traffic)Configuration parameters that will influence how these resources are staged/related:- openstack or no openstack- chain type- number of chains- number of VNF in each chain (PVP, PVVP)- SRIOV and middle port SRIOV for port types- whether networks are shared across chains or notThere is not traffic generation involved in this 
module."""importosimportreimporttimefromglanceclient.v2importclientasglanceclientfromneutronclient.neutronimportclientasneutronclientfromnovaclient.clientimportClientfromattrdictimportAttrDictimportcomputefromlogimportLOGfromspecsimportChainType# Left and right index for network and port listsLEFT=0RIGHT=1# Name of the VM config fileNFVBENCH_CFG_FILENAME='nfvbenchvm.conf'# full pathame of the VM config in the VMNFVBENCH_CFG_VM_PATHNAME=os.path.join('/etc/',NFVBENCH_CFG_FILENAME)# full path of the boot shell script template file on the server where nfvbench runsBOOT_SCRIPT_PATHNAME=os.path.join(os.path.dirname(os.path.abspath(__file__)),'nfvbenchvm',NFVBENCH_CFG_FILENAME)classChainException(Exception):"""Exception while operating the chains."""passclassNetworkEncaps(object):"""Network encapsulation."""classChainFlavor(object):"""Class to manage the chain flavor."""def__init__(self,flavor_name,flavor_dict,comp):"""Create a flavor."""self.name=flavor_nameself.comp=compself.flavor=self.comp.find_flavor(flavor_name)self.reuse=Falseifself.flavor:self.reuse=TrueLOG.info("Reused flavor '%s'",flavor_name)else:extra_specs=flavor_dict.pop('extra_specs',None)self.flavor=comp.create_flavor(flavor_name,**flavor_dict)LOG.info("Created flavor '%s'",flavor_name)ifextra_specs:self.flavor.set_keys(extra_specs)defdelete(self):"""Delete this flavor."""ifnotself.reuseandself.flavor:self.flavor.delete()LOG.info("Flavor '%s' deleted",self.name)classChainVnfPort(object):"""A port associated to one VNF in the chain."""def__init__(self,name,vnf,chain_network,vnic_type):"""Create or reuse a port on a given network. if vnf.instance is None the VNF instance is not reused and this ChainVnfPort instance must create a new port. 
Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must find an existing port to reuse that matches the port requirements: same attached network, instance, name, vnic type name: name for this port vnf: ChainVNf instance that owns this port chain_network: ChainNetwork instance where this port should attach vnic_type: required vnic type for this port """self.name=nameself.vnf=vnfself.manager=vnf.managerself.reuse=Falseself.port=Noneifvnf.instance:# VNF instance is reused, we need to find an existing port that matches this instance# and network# discover ports attached to this instanceport_list=self.manager.get_ports_from_network(chain_network)forportinport_list:ifport['name']!=name:continueifport['binding:vnic_type']!=vnic_type:continueifport['device_id']==vnf.get_uuid():self.port=portLOG.info('Reusing existing port %s mac=%s',name,port['mac_address'])breakelse:raiseChainException('Cannot find matching port')else:# VNF instance is not created yet, we need to create a new portbody={"port":{'name':name,'network_id':chain_network.get_uuid(),'binding:vnic_type':vnic_type}}port=self.manager.neutron_client.create_port(body)self.port=port['port']LOG.info('Created port %s',name)try:self.manager.neutron_client.update_port(self.port['id'],{'port':{'security_groups':[],'port_security_enabled':False,}})LOG.info('Security disabled on port %s',name)exceptException:LOG.info('Failed to disable security on port %s (ignored)',name)defget_mac(self):"""Get the MAC address for this port."""returnself.port['mac_address']defdelete(self):"""Delete this port instance."""ifself.reuseornotself.port:returnretry=0whileretry<self.manager.config.generic_retry_count:try:self.manager.neutron_client.delete_port(self.port['id'])LOG.info("Deleted port %s",self.name)returnexceptException:retry+=1time.sleep(self.manager.config.generic_poll_sec)LOG.error('Unable to delete port: %s',self.name)classChainNetwork(object):"""Could be a shared network across all chains or a chain 
private network."""def__init__(self,manager,network_config,chain_id=None,lookup_only=False):"""Create a network for given chain. network_config: a dict containing the network properties (segmentation_id and physical_network) chain_id: to which chain the networks belong. a None value will mean that these networks are shared by all chains """self.manager=managerself.name=network_config.nameself.segmentation_id=self._get_item(network_config.segmentation_id,chain_id,auto_index=True)self.physical_network=self._get_item(network_config.physical_network,chain_id)ifchain_idisnotNone:self.name+=str(chain_id)self.reuse=Falseself.network=Noneself.vlan=Nonetry:self._setup(network_config,lookup_only)exceptException:iflookup_only:LOG.error("Cannot find network %s",self.name)else:LOG.error("Error creating network %s",self.name)self.delete()raisedef_get_item(self,item_field,index,auto_index=False):"""Retrieve an item from a list or a single value. item_field: can be None, a tuple of a single value index: if None is same as 0, else is the index for a chain auto_index: if true will automatically get the final value by adding the index to the base value (if full list not provided) If the item_field is not a tuple, it is considered same as a tuple with same value at any index. 
If a list is provided, its length must be > index """ifnotitem_field:returnNoneifindexisNone:index=0ifisinstance(item_field,tuple):try:returnitem_field[index]exceptIndexError:raiseChainException("List %s is too short for chain index %d"%(str(item_field),index))# single value is configuredifauto_index:returnitem_field+indexreturnitem_fielddef_setup(self,network_config,lookup_only):# Lookup if there is a matching network with same namenetworks=self.manager.neutron_client.list_networks(name=self.name)ifnetworks['networks']:network=networks['networks'][0]# a network of same name already exists, we need to verify it has the same# characteristicsifself.segmentation_id:ifnetwork['provider:segmentation_id']!=self.segmentation_id:raiseChainException("Mismatch of 'segmentation_id' for reused ""network '{net}'. Network has id '{seg_id1}', ""configuration requires '{seg_id2}'.".format(net=self.name,seg_id1=network['provider:segmentation_id'],seg_id2=self.segmentation_id))ifself.physical_network:ifnetwork['provider:physical_network']!=self.physical_network:raiseChainException("Mismatch of 'physical_network' for reused ""network '{net}'. 
Network has '{phys1}', ""configuration requires '{phys2}'.".format(net=self.name,phys1=network['provider:physical_network'],phys2=self.physical_network))LOG.info('Reusing existing network %s',self.name)self.reuse=Trueself.network=networkelse:iflookup_only:raiseChainException('Network %s not found'%self.name)body={'network':{'name':self.name,'admin_state_up':True}}ifnetwork_config.network_type:body['network']['provider:network_type']=network_config.network_typeifself.segmentation_id:body['network']['provider:segmentation_id']=self.segmentation_idifself.physical_network:body['network']['provider:physical_network']=self.physical_networkself.network=self.manager.neutron_client.create_network(body)['network']body={'subnet':{'name':network_config.subnet,'cidr':network_config.cidr,'network_id':self.network['id'],'enable_dhcp':False,'ip_version':4,'dns_nameservers':[]}}subnet=self.manager.neutron_client.create_subnet(body)['subnet']# add subnet id to the network dict since it has just been addedself.network['subnets']=[subnet['id']]LOG.info('Created network: %s',self.name)defget_uuid(self):""" Extract UUID of this network. :return: UUID of this network """returnself.network['id']defget_vlan(self):""" Extract vlan for this network. :return: vlan ID for this network """ifself.network['provider:network_type']!='vlan':raiseChainException('Trying to retrieve VLAN id for non VLAN network')returnself.network['provider:segmentation_id']defget_vxlan(self):""" Extract VNI for this network. 
:return: VNI ID for this network """if'vxlan'notinself.network['provider:network_type']:raiseChainException('Trying to retrieve VNI for non VXLAN network')returnself.network['provider:segmentation_id']defdelete(self):"""Delete this network."""ifnotself.reuseandself.network:retry=0whileretry<self.manager.config.generic_retry_count:try:self.manager.neutron_client.delete_network(self.network['id'])LOG.info("Deleted network: %s",self.name)returnexceptException:retry+=1LOG.info('Error deleting network %s (retry %d/%d)...',self.name,retry,self.manager.config.generic_retry_count)time.sleep(self.manager.config.generic_poll_sec)LOG.error('Unable to delete network: %s',self.name)classChainVnf(object):"""A class to represent a VNF in a chain."""def__init__(self,chain,vnf_id,networks):"""Reuse a VNF instance with same characteristics or create a new VNF instance. chain: the chain where this vnf belongs vnf_id: indicates the index of this vnf in its chain (first vnf=0) networks: the list of all networks (ChainNetwork) of the current chain """self.manager=chain.managerself.chain=chainself.vnf_id=vnf_idself.name=self.manager.config.loop_vm_name+str(chain.chain_id)iflen(networks)>2:# we will have more than 1 VM in each chainself.name+='-'+str(vnf_id)self.ports=[]self.status=Noneself.instance=Noneself.reuse=Falseself.host_ip=Nonetry:# the vnf_id is conveniently also the starting index in networks# for the left and right networks associated to this VNFself._setup(networks[vnf_id:vnf_id+2])exceptException:LOG.error("Error creating VNF 
%s",self.name)self.delete()raisedef_get_vm_config(self,remote_mac_pair):config=self.manager.configdevices=self.manager.generator_config.deviceswithopen(BOOT_SCRIPT_PATHNAME,'r')asboot_script:content=boot_script.read()g1cidr=devices[LEFT].get_gw_ip(self.chain.chain_id)+'/8'g2cidr=devices[RIGHT].get_gw_ip(self.chain.chain_id)+'/8'vm_config={'forwarder':config.vm_forwarder,'intf_mac1':self.ports[LEFT].get_mac(),'intf_mac2':self.ports[RIGHT].get_mac(),'tg_gateway1_ip':devices[LEFT].tg_gateway_ip_addrs,'tg_gateway2_ip':devices[RIGHT].tg_gateway_ip_addrs,'tg_net1':devices[LEFT].ip_addrs,'tg_net2':devices[RIGHT].ip_addrs,'vnf_gateway1_cidr':g1cidr,'vnf_gateway2_cidr':g2cidr,'tg_mac1':remote_mac_pair[0],'tg_mac2':remote_mac_pair[1]}returncontent.format(**vm_config)def_get_vnic_type(self,port_index):"""Get the right vnic type for given port indexself. If SR-IOV is speficied, middle ports in multi-VNF chains can use vswitch or SR-IOV based on config.use_sriov_middle_net """ifself.manager.config.sriov:chain_length=self.chain.get_length()ifself.manager.config.use_sriov_middle_netorchain_length==1:return'direct'ifself.vnf_id==0andport_index==0:# first VNF in chain must use sriov for left portreturn'direct'if(self.vnf_id==chain_length-1)and(port_index==1):# last VNF in chain must use sriov for right portreturn'direct'return'normal'def_setup(self,networks):flavor_id=self.manager.flavor.flavor.id# Check if we can reuse an instance with same nameforinstanceinself.manager.existing_instances:ifinstance.name==self.name:# Verify that other instance characteristics matchifinstance.flavor['id']!=flavor_id:self._reuse_exception('Flavor mismatch')ifinstance.status!="ACTIVE":self._reuse_exception('Matching instance is not in ACTIVE state')# The 2 networks for this instance must also be reusedifnotnetworks[LEFT].reuse:self._reuse_exception('network %s is new'%networks[LEFT].name)ifnotnetworks[RIGHT].reuse:self._reuse_exception('network %s is new'%networks[RIGHT].name)# instance.networks have 
the network names as keys:# {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}ifnetworks[LEFT].namenotininstance.networks:self._reuse_exception('Left network mismatch')ifnetworks[RIGHT].namenotininstance.networks:self._reuse_exception('Right network mismatch')self.reuse=Trueself.instance=instanceLOG.info('Reusing existing instance %s on %s',self.name,self.get_hypervisor_name())# create or reuse/discover 2 ports per instanceself.ports=[ChainVnfPort(self.name+'-'+str(index),self,networks[index],self._get_vnic_type(index))forindexin[0,1]]# if no reuse, actual vm creation is deferred after all ports in the chain are created# since we need to know the next mac in a multi-vnf chaindefcreate_vnf(self,remote_mac_pair):"""Create the VNF instance if it does not already exist."""ifself.instanceisNone:port_ids=[{'port-id':vnf_port.port['id']}forvnf_portinself.ports]vm_config=self._get_vm_config(remote_mac_pair)az=self.manager.placer.get_required_az()server=self.manager.comp.create_server(self.name,self.manager.image_instance,self.manager.flavor.flavor,None,port_ids,None,avail_zone=az,user_data=None,config_drive=True,files={NFVBENCH_CFG_VM_PATHNAME:vm_config})ifserver:self.instance=serverifself.manager.placer.is_resolved():LOG.info('Created instance %s on %s',self.name,az)else:# the location is undetermined at this point# self.get_hypervisor_name() will return NoneLOG.info('Created instance %s - waiting for placement resolution...',self.name)# here we MUST wait until this instance is resolved otherwise subsequent# VNF creation can be placed in other hypervisors!config=self.manager.configmax_retries=(config.check_traffic_time_sec+config.generic_poll_sec-1)/config.generic_poll_secretry=0forretryinrange(max_retries):status=self.get_status()ifstatus=='ACTIVE':hyp_name=self.get_hypervisor_name()LOG.info('Instance %s is active and has been placed on 
%s',self.name,hyp_name)self.manager.placer.register_full_name(hyp_name)breakifstatus=='ERROR':raiseChainException('Instance %s creation error: %s'%(self.name,self.instance.fault['message']))LOG.info('Waiting for instance %s to become active (retry %d/%d)...',self.name,retry+1,max_retries+1)time.sleep(config.generic_poll_sec)else:# timing outLOG.error('Instance %s creation timed out',self.name)raiseChainException('Instance %s creation timed out'%self.name)self.reuse=Falseelse:raiseChainException('Unable to create instance: %s'%(self.name))def_reuse_exception(self,reason):raiseChainException('Instance %s cannot be reused (%s)'%(self.name,reason))defget_status(self):"""Get the statis of this instance."""ifself.instance.status!='ACTIVE':self.instance=self.manager.comp.poll_server(self.instance)returnself.instance.statusdefget_hostname(self):"""Get the hypervisor host name running this VNF instance."""returngetattr(self.instance,'OS-EXT-SRV-ATTR:hypervisor_hostname')defget_host_ip(self):"""Get the IP address of the host where this instance runs. return: the IP address """ifnotself.host_ip:self.host_ip=self.manager.comp.get_hypervisor(self.get_hostname()).host_ipreturnself.host_ipdefget_hypervisor_name(self):"""Get hypervisor name (az:hostname) for this VNF instance."""ifself.instance:az=getattr(self.instance,'OS-EXT-AZ:availability_zone')hostname=self.get_hostname()ifaz:returnaz+':'+hostnamereturnhostnamereturnNonedefget_uuid(self):"""Get the uuid for this instance."""returnself.instance.iddefdelete(self,forced=False):"""Delete this VNF instance."""ifself.reuse:LOG.info("Instance %s not deleted (reused)",self.name)else:ifself.instance:self.manager.comp.delete_server(self.instance)LOG.info("Deleted instance %s",self.name)forportinself.ports:port.delete()classChain(object):"""A class to manage a single chain. Can handle any type of chain (EXT, PVP, PVVP) """def__init__(self,chain_id,manager):"""Create a new chain. 
chain_id: chain index (first chain is 0) manager: the chain manager that owns all chains """self.chain_id=chain_idself.manager=managerself.encaps=manager.encapsself.networks=[]self.instances=[]try:self.networks=manager.get_networks(chain_id)# For external chain VNFs can only be discovered from their MAC addresses# either from config or from ARPifmanager.config.service_chain!=ChainType.EXT:forchain_instance_indexinrange(self.get_length()):self.instances.append(ChainVnf(self,chain_instance_index,self.networks))# at this point new VNFs are not created yet but# verify that all discovered VNFs are on the same hypervisorself._check_hypervisors()# now that all VNF ports are created we need to calculate the# left/right remote MAC for each VNF in the chain# before actually creating the VNF itselfrem_mac_pairs=self._get_remote_mac_pairs()forinstanceinself.instances:rem_mac_pair=rem_mac_pairs.pop(0)instance.create_vnf(rem_mac_pair)exceptException:self.delete()raisedef_check_hypervisors(self):common_hypervisor=Noneforinstanceinself.instances:# get the full hypervizor name (az:compute)hname=instance.get_hypervisor_name()ifhname:ifcommon_hypervisor:ifhname!=common_hypervisor:raiseChainException('Discovered instances on different hypervisors:'' %s and %s'%(hname,common_hypervisor))else:common_hypervisor=hnameifcommon_hypervisor:# check that the common hypervisor name matchs the requested hypervisor name# and set the name to be used by all future instances (if any)ifnotself.manager.placer.register_full_name(common_hypervisor):raiseChainException('Discovered hypervisor placement %s is incompatible'%common_hypervisor)defget_length(self):"""Get the number of VNF in the chain."""returnlen(self.networks)-1def_get_remote_mac_pairs(self):"""Get the list of remote mac pairs for every VNF in the chain. Traverse the chain from left to right and establish the left/right remote MAC for each VNF in the chainself. 
PVP case is simpler: mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]] the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right PVVP: tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac Must produce the following list: [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]] General case with 3 VMs in chain, the list of consecutive macs (left to right): tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac Must produce the following list: [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5], [4, 7]] The series pattern is pretty clear: [[n, n+3],... ] where n is multiple of 2 """# line up all mac from left to rightmac_seq=[self.manager.generator_config.devices[LEFT].mac]forinstanceinself.instances:mac_seq.append(instance.ports[0].get_mac())mac_seq.append(instance.ports[1].get_mac())mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)base=0rem_mac_pairs=[]for_inself.instances:rem_mac_pairs.append([mac_seq[base],mac_seq[base+3]])base+=2returnrem_mac_pairsdefget_instances(self):"""Return all instances for this chain."""returnself.instancesdefget_vlan(self,port_index):"""Get the VLAN id on a given port. port_index: left port is 0, right port is 1 return: the vlan_id or None if there is no vlan tagging """# for port 1 we need to return the VLAN of the last network in the chain# The networks array contains 2 networks for PVP [left, right]# and 3 networks in the case of PVVP [left.middle,right]ifport_index:# this will pick the last item in arrayport_index=-1returnself.networks[port_index].get_vlan()defget_vxlan(self,port_index):"""Get the VXLAN id on a given port. 
port_index: left port is 0, right port is 1 return: the vxlan_id or None if there is no vxlan """# for port 1 we need to return the VLAN of the last network in the chain# The networks array contains 2 networks for PVP [left, right]# and 3 networks in the case of PVVP [left.middle,right]ifport_index:# this will pick the last item in arrayport_index=-1returnself.networks[port_index].get_vxlan()defget_dest_mac(self,port_index):"""Get the dest MAC on a given port. port_index: left port is 0, right port is 1 return: the dest MAC """ifport_index:# for right port, use the right port MAC of the last (right most) VNF In chainreturnself.instances[-1].ports[1].get_mac()# for left port use the left port MAC of the first (left most) VNF in chainreturnself.instances[0].ports[0].get_mac()defget_network_uuids(self):"""Get UUID of networks in this chain from left to right (order is important). :return: list of UUIDs of networks (2 or 3 elements) """return[net['id']fornetinself.networks]defget_host_ips(self):"""Return the IP adresss(es) of the host compute nodes used for this chain. :return: a list of 1 or 2 IP addresses """return[vnf.get_host_ip()forvnfinself.instances]defget_compute_nodes(self):"""Return the name of the host compute nodes used for this chain. :return: a list of 1 host name in the az:host format """# Since all chains go through the same compute node(s) we can just retrieve the# compute node name(s) for the first chainreturn[vnf.get_hypervisor_name()forvnfinself.instances]defdelete(self):"""Delete this chain."""forinstanceinself.instances:instance.delete()# only delete if these are chain private networks (not shared)ifnotself.manager.config.service_chain_shared_net:fornetworkinself.networks:network.delete()classInstancePlacer(object):"""A class to manage instance placement for all VNFs in all chains. A full az string is made of 2 parts AZ and hypervisor. The placement is resolved when both parts az and hypervisor names are known. 
"""def__init__(self,req_az,req_hyp):"""Create a new instance placer. req_az: requested AZ (can be None or empty if no preference) req_hyp: requested hypervisor name (can be None of empty if no preference) can be any of 'nova:', 'comp1', 'nova:comp1' if it is a list, only the first item is used (backward compatibility in config) req_az is ignored if req_hyp has an az part all other parts beyond the first 2 are ignored in req_hyp """# if passed a list just pick the first itemifreq_hypandisinstance(req_hyp,list):req_hyp=req_hyp[0]# only pick first part of azifreq_azand':'inreq_az:req_az=req_az.split(':')[0]ifreq_hyp:# check if requested hypervisor string has an AZ partsplit_hyp=req_hyp.split(':')iflen(split_hyp)>1:# override the AZ part and hypervisor partreq_az=split_hyp[0]req_hyp=split_hyp[1]self.requested_az=req_azifreq_azelse''self.requested_hyp=req_hypifreq_hypelse''# Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)# or hypervisor only (e.g. ':comp1')# or both (e.g. 'nova:comp1')ifreq_az:self.required_az=req_az+':'+self.requested_hypelse:# need to insert a ':' so nova knows this is the hypervisor nameself.required_az=':'+self.requested_hypifreq_hypelse''# placement is resolved when both AZ and hypervisor names are known and setself.resolved=self.requested_az!=''andself.requested_hyp!=''defget_required_az(self):"""Return the required az (can be resolved or not)."""returnself.required_azdefregister_full_name(self,discovered_az):"""Verify compatibility and register a discovered hypervisor full name. 
        discovered_az: a discovered AZ in az:hypervisor format
        return: True if discovered_az is compatible and set
                False if discovered_az is not compatible
        """
        if self.resolved:
            # once pinned, only the exact same az:hypervisor is compatible
            return discovered_az == self.required_az

        # must be in full az format
        split_daz = discovered_az.split(':')
        if len(split_daz) != 2:
            return False
        # reject if it contradicts an explicitly requested AZ or hypervisor
        if self.requested_az and self.requested_az != split_daz[0]:
            return False
        if self.requested_hyp and self.requested_hyp != split_daz[1]:
            return False
        # first compatible discovery pins the placement for all subsequent checks
        self.required_az = discovered_az
        self.resolved = True
        return True

    def is_resolved(self):
        """Check if the full AZ is resolved.

        return: True if resolved
        """
        return self.resolved


class ChainManager(object):
    """A class for managing all chains for a given run.

    Supports openstack or no openstack.
    Supports EXT, PVP and PVVP chains.
    """

    def __init__(self, chain_runner):
        """Create a chain manager to take care of discovering or bringing up the requested chains.

        A new instance must be created every time a new config is used.
        config: the nfvbench config to use
        cred: openstack credentials to use or None if there is no openstack
        """
        self.chain_runner = chain_runner
        self.config = chain_runner.config
        self.generator_config = chain_runner.traffic_client.generator_config
        self.chains = []
        self.image_instance = None
        self.image_name = None
        # Left and right networks shared across all chains (only if shared)
        self.networks = []
        self.encaps = None
        self.flavor = None
        self.comp = None
        self.nova_client = None
        self.neutron_client = None
        self.glance_client = None
        self.existing_instances = []
        # existing ports keyed by the network uuid they belong to
        self._existing_ports = {}
        config = self.config
        # openstack mode requires credentials and is mutually exclusive with l2 loopback
        self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
        self.chain_count = config.service_chain_count
        self.az = None
        if self.openstack:
            # openstack only
            session = chain_runner.cred.get_session()
            self.nova_client = Client(2, session=session)
            self.neutron_client = neutronclient.Client('2.0', session=session)
            self.glance_client = glanceclient.Client('2', session=session)
            self.comp = compute.Compute(self.nova_client,
                                        self.glance_client,
                                        config)
            try:
                # EXT chains reuse pre-existing VNFs, so no image/flavor/placement staging
                if config.service_chain != ChainType.EXT:
                    self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
                    self._setup_image()
                    self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
                    # Get list of all existing instances to check if some instances can be reused
                    self.existing_instances = self.comp.get_server_list()
                # If networks are shared across chains, get the list of networks
                if config.service_chain_shared_net:
                    self.networks = self.get_networks()
                # Reuse/create chains
                for chain_id in range(self.chain_count):
                    self.chains.append(Chain(chain_id, self))
                if config.service_chain == ChainType.EXT:
                    # if EXT and no ARP we need to read dest MACs from config
                    if config.no_arp:
                        self._get_dest_macs_from_config()
                else:
                    # Make sure all instances are active before proceeding
                    self._ensure_instances_active()
            except Exception:
                # cleanup any partially staged resources before propagating the error
                self.delete()
                raise
        else:
            # no openstack, no need to create chains
            if not config.l2_loopback and config.no_arp:
                self._get_dest_macs_from_config()
            if config.vlan_tagging:
                # make sure there at least as many entries as chains in each left/right list
                if len(config.vlans) != 2:
                    raise ChainException('The config vlans property must be a list '
                                         'with 2 lists of VLAN IDs')
                re_vlan = "[0-9]*$"
                self.vlans = [self._check_list('vlans[0]', config.vlans[0], re_vlan),
                              self._check_list('vlans[1]', config.vlans[1], re_vlan)]
            if config.vxlan:
                raise ChainException('VxLAN is only supported with OpenStack')

    def _get_dest_macs_from_config(self):
        # Validate and store per-port dest MAC lists from the traffic generator config
        # (used for EXT+no-arp and for no-openstack runs).
        re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
        tg_config = self.config.traffic_generator
        self.dest_macs = [self._check_list("mac_addrs_left",
                                           tg_config.mac_addrs_left, re_mac),
                          self._check_list("mac_addrs_right",
                                           tg_config.mac_addrs_right, re_mac)]

    def _check_list(self, list_name, ll, pattern):
        """Validate a config list against a regex pattern and normalize its length.

        list_name: name of the config property (used in error messages only)
        ll: a single int/str or a list of them
        pattern: regex that every element must match (via str())
        return: a list with at least chain_count elements
        raises ChainException: on bad format, empty list or too few elements
        """
        # if it is a single int or mac, make it a list of 1 int
        if isinstance(ll, (int, str)):
            ll = [ll]
        for item in ll:
            if not re.match(pattern, str(item)):
                raise ChainException("Invalid format '{item}' specified in {fname}"
                                     .format(item=item, fname=list_name))
        # must have at least 1 element
        if not ll:
            raise ChainException('%s cannot be empty' % (list_name))
        # for shared network, if 1 element is passed, replicate it as many times
        # as chains
        if self.config.service_chain_shared_net and len(ll) == 1:
            ll = [ll[0]] * self.chain_count

        # number of elements must be the number of chains
        elif len(ll) < self.chain_count:
            raise ChainException('%s=%s must be a list with %d elements per chain' %
                                 (list_name, ll, self.chain_count))
        return ll

    def _setup_image(self):
        """Resolve and upload (if needed) the VM image to use for VNF instances.

        Sets self.image_name and self.image_instance.
        The image file can come from config (vm_image_file) or be auto-discovered
        in the package root directory.
        raises ChainException: if no image file can be found or the upload fails
        """
        # To avoid reuploading image in server mode, check whether image_name is set or not
        if self.image_name:
            self.image_instance = self.comp.find_image(self.image_name)
        if self.image_instance:
            LOG.info("Reusing image %s", self.image_name)
        else:
            # image name is derived from the file name: nfvbenchvm-<version>.qcow2
            image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
            if self.config.vm_image_file:
                match = re.search(image_name_search_pattern, self.config.vm_image_file)
                if match:
                    self.image_name = match.group(1)
                    LOG.info('Using provided VM image file %s', self.config.vm_image_file)
                else:
                    raise ChainException('Provided VM image file name %s must start with '
                                         '"nfvbenchvm-<version>"' % self.config.vm_image_file)
            else:
                # no file provided: look for a built-in image in the package root
                pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
                for f in os.listdir(pkg_root):
                    if re.search(image_name_search_pattern, f):
                        self.config.vm_image_file = pkg_root + '/' + f
                        self.image_name = f.replace('.qcow2', '')
                        LOG.info('Found built-in VM image file %s', f)
                        break
                else:
                    # for/else: no matching file found in the directory listing
                    raise ChainException('Cannot find any built-in VM image file.')
            if self.image_name:
                self.image_instance = self.comp.find_image(self.image_name)
            if not self.image_instance:
                LOG.info('Uploading %s', self.image_name)
                res = self.comp.upload_image_via_url(self.image_name,
                                                     self.config.vm_image_file)
                if not res:
                    raise ChainException('Error uploading image %s from %s. ABORTING.' %
                                         (self.image_name,
                                          self.config.vm_image_file))
                LOG.info('Image %s successfully uploaded.', self.image_name)
                self.image_instance = self.comp.find_image(self.image_name)

    def _ensure_instances_active(self):
        """Poll all chain instances until they are all ACTIVE or time out.

        raises ChainException: if any instance goes to ERROR state or if not all
        instances become ACTIVE within check_traffic_time_sec
        """
        instances = []
        for chain in self.chains:
            instances.extend(chain.get_instances())
        initial_instance_count = len(instances)
        # ceiling division of the total wait budget by the poll interval
        # NOTE(review): uses '/' — int (floor) division on Python 2; would yield a
        # float under Python 3, which still works for the '>=' comparison below
        max_retries = (self.config.check_traffic_time_sec +
                       self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
        retry = 0
        while instances:
            remaining_instances = []
            for instance in instances:
                status = instance.get_status()
                if status == 'ACTIVE':
                    LOG.info('Instance %s is ACTIVE on %s',
                             instance.name, instance.get_hypervisor_name())
                    continue
                if status == 'ERROR':
                    raise ChainException('Instance %s creation error: %s' %
                                         (instance.name,
                                          instance.instance.fault['message']))
                # neither ACTIVE nor ERROR: keep polling this instance
                remaining_instances.append(instance)
            if not remaining_instances:
                break
            retry += 1
            if retry >= max_retries:
                raise ChainException('Time-out: %d/%d instances still not active' %
                                     (len(remaining_instances), initial_instance_count))
            LOG.info('Waiting for %d/%d instance to become active (retry %d/%d)...',
                     len(remaining_instances), initial_instance_count,
                     retry, max_retries)
            instances = remaining_instances
            time.sleep(self.config.generic_poll_sec)
        if initial_instance_count:
            LOG.info('All instances are active')

    def get_networks(self, chain_id=None):
        """Get the networks for given EXT, PVP or PVVP chain.

        For EXT packet path, these networks must pre-exist.
        For PVP, PVVP these networks will be created if they do not exist.
        chain_id: to which chain the networks belong.
                  a None value will mean that these networks are shared by all chains
        return: a list of ChainNetwork instances (2 for PVP/EXT, 3 for PVVP)
        """
        if self.networks:
            # the only case where self.networks exists is when the networks are shared
            # across all chains
            return self.networks
        if self.config.service_chain == ChainType.EXT:
            # EXT networks are only looked up (never created), identified by name
            lookup_only = True
            ext_net = self.config.external_networks
            net_cfg = [AttrDict({'name': name,
                                 'segmentation_id': None,
                                 'physical_network': None})
                       for name in [ext_net.left, ext_net.right]]
            # segmentation id and subnet should be discovered from neutron
        else:
            lookup_only = False
            int_nets = self.config.internal_networks
            # VLAN and VxLAN
            if self.config.service_chain == ChainType.PVP:
                net_cfg = [int_nets.left, int_nets.right]
            else:
                # PVVP adds a middle network between the 2 VNFs
                net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
        networks = []
        try:
            for cfg in net_cfg:
                networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
        except Exception:
            # need to cleanup all successful networks prior to bailing out
            for net in networks:
                net.delete()
            raise
        return networks

    def get_existing_ports(self):
        """Get the list of existing ports.

        Lazy retrieval of ports as this can be costly if there are lots of ports and
        is only needed when VM and network are being reused.

        return: a dict of list of neutron ports indexed by the network uuid they are
                attached to

        Each port is a dict with fields such as below:
        {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
         'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
         'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
         'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
                        'ip_address': '192.168.1.4'}],
         'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72', 'security_groups': [],
         'binding:vif_details': {'vhostuser_socket':
                                     '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
                                 'vhostuser_mode': 'server'},
         'binding:vif_type': 'vhostuser', 'mac_address': 'fa:16:3e:3c:63:04',
         'project_id': '977ac76a63d7492f927fa80e86baff4c', 'status': 'ACTIVE',
         'binding:host_id': 'a20-champagne-compute-1', 'description': '',
         'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
         'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
         'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
         'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
         'created_at': '2018-10-06T07:15:10Z', 'binding:vnic_type': 'normal'}
        """
        if not self._existing_ports:
            # cache miss: fetch all ports from neutron once and index them
            LOG.info('Loading list of all ports...')
            existing_ports = self.neutron_client.list_ports()['ports']
            # place all ports in the dict keyed by the port network uuid
            for port in existing_ports:
                port_list = self._existing_ports.setdefault(port['network_id'], [])
                port_list.append(port)
            LOG.info("Loaded %d ports attached to %d networks",
                     len(existing_ports), len(self._existing_ports))
        return self._existing_ports

    def get_ports_from_network(self, chain_network):
        """Get the list of existing ports that belong to a network.

        Lazy retrieval of ports as this can be costly if there are lots of ports and
        is only needed when VM and network are being reused.

        chain_network: a ChainNetwork instance for which attached ports need to be
                       retrieved
        return: list of neutron ports attached to requested network, or None if the
                network has no ports
        """
        return self.get_existing_ports().get(chain_network.get_uuid(), None)

    def get_host_ip_from_mac(self, mac):
        """Get the host IP address matching a MAC.

        mac: MAC address to look for
        return: the IP address of the host where the matching port runs or None if not
                found
        """
        # _existing_ports is a dict of list of ports indexed by network id
        for port_list in self.get_existing_ports().values():
            for port in port_list:
                try:
                    if port['mac_address'] == mac:
                        host_id = port['binding:host_id']
                        return self.comp.get_hypervisor(host_id).host_ip
                except KeyError:
                    # port dict may be missing mac_address or binding:host_id; skip it
                    pass
        return None

    def get_chain_vlans(self, port_index):
        """Get the list of per chain VLAN id on a given port.

        port_index: left port is 0, right port is 1
        return: a VLAN ID list indexed by the chain index or None if no vlan tagging
        """
        if self.chains:
            return [self.chains[chain_index].get_vlan(port_index)
                    for chain_index in range(self.chain_count)]
        # no openstack
        return self.vlans[port_index]

    def get_chain_vxlans(self, port_index):
        """Get the list of per chain VNIs id on a given port.

        port_index: left port is 0, right port is 1
        return: a VNIs ID list indexed by the chain index or None if no vlan tagging
        raises ChainException: if there is no openstack (VxLAN needs OpenStack)
        """
        if self.chains:
            return [self.chains[chain_index].get_vxlan(port_index)
                    for chain_index in range(self.chain_count)]
        # no openstack
        raise ChainException('VxLAN is only supported with OpenStack')

    def get_dest_macs(self, port_index):
        """Get the list of per chain dest MACs on a given port.

        Should not be called if EXT+ARP is used (in that case the traffic gen will
        have the ARP responses back from VNFs with the dest MAC to use).

        port_index: left port is 0, right port is 1
        return: a list of dest MACs indexed by the chain index
        """
        if self.chains and self.config.service_chain != ChainType.EXT:
            return [self.chains[chain_index].get_dest_mac(port_index)
                    for chain_index in range(self.chain_count)]
        # no openstack or EXT+no-arp
        return self.dest_macs[port_index]

    def get_host_ips(self):
        """Return the IP address(es) of the host compute nodes used for this run.

        :return: a list of 1 IP address
        """
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node(s) for the first chain
        if self.chains:
            if self.config.service_chain != ChainType.EXT:
                return self.chains[0].get_host_ips()
            # in the case of EXT, the compute node must be retrieved from the port
            # associated to any of the dest MACs
            dst_macs = self.generator_config.get_dest_macs()
            # dest MAC on port 0, chain 0
            dst_mac = dst_macs[0][0]
            host_ip = self.get_host_ip_from_mac(dst_mac)
            if host_ip:
                LOG.info('Found compute node IP for EXT chain: %s', host_ip)
                return [host_ip]
        return []

    def get_compute_nodes(self):
        """Return the name of the host compute nodes used for this run.

        :return: a list of 0 or 1 host name in the az:host format
        """
        # Since all chains go through the same compute node(s) we can just retrieve the
        # compute node name(s) for the first chain
        if self.chains:
            # in the case of EXT, the compute node must be retrieved from the port
            # associated to any of the dest MACs
            return self.chains[0].get_compute_nodes()
        # no openstack = no chains
        return []

    def delete(self):
        """Delete resources for all chains."""
        # chains first (instances/ports), then networks, then the flavor
        for chain in self.chains:
            chain.delete()
        for network in self.networks:
            network.delete()
        if self.flavor:
            self.flavor.delete()