def upgrade_kernel_node(*args): """upgrades the kernel image in given nodes.""" for host_string in args: with settings(host_string=host_string): execute('create_install_repo_node', host_string) dist, version, extra = get_linux_distro() if version == '12.04': print "upgrading apparmor before upgrading kernel" apt_install(["apparmor"]) print "Installing 3.13.0-34 kernel headers" apt_install(["linux-headers-3.13.0-34"]) apt_install(["linux-headers-3.13.0-34-generic"]) print "Upgrading the kernel to 3.13.0-34" apt_install(["linux-image-3.13.0-34-generic"]) default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic' execute('set_grub_default_node', host_string, value=default_grub) elif version == '14.04': print "Installing 3.13.0-83 kernel headers" apt_install(["linux-headers-3.13.0-83", "linux-headers-3.13.0-83-generic"]) print "Upgrading the kernel to 3.13.0-83" apt_install(["linux-image-3.13.0-83-generic", "linux-image-extra-3.13.0-83-generic"]) default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-83-generic' execute('set_grub_default_node', host_string, value=default_grub) elif 'red hat' in dist.lower() and version == '7.0': print "Upgrading kernel to version 3.10.0-229" pkg_install(["kernel-3.10.0-229.el7.x86_64", "kernel-tools-3.10.0-229.el7.x86_64", "kernel-tools-libs-3.10.0-229.el7.x86_64", "kernel-headers-3.10.0-229.el7.x86_64"], disablerepo=False)
def upgrade_kernel_node(*args): """upgrades the kernel image in given nodes.""" for host_string in args: with settings(host_string=host_string): dist, version, extra = get_linux_distro() if version == '12.04': print "upgrading apparmor before upgrading kernel" apt_install(["apparmor"]) print "Installing 3.13.0-34 kernel headers" apt_install(["linux-headers-3.13.0-34"]) apt_install(["linux-headers-3.13.0-34-generic"]) print "Upgrading the kernel to 3.13.0-34" apt_install(["linux-image-3.13.0-34-generic"]) elif version == '14.04': print "Installing 3.13.0-40 kernel headers" apt_install(["linux-headers-3.13.0-40", "linux-headers-3.13.0-40-generic"]) print "Upgrading the kernel to 3.13.0-40" apt_install(["linux-image-3.13.0-40-generic", "linux-image-extra-3.13.0-40-generic"]) elif 'red hat' in dist.lower() and version == '7.0': print "Upgrading kernel to version 3.10.0-229" pkg_install(["kernel-3.10.0-229.el7.x86_64", "kernel-tools-3.10.0-229.el7.x86_64", "kernel-tools-libs-3.10.0-229.el7.x86_64", "kernel-headers-3.10.0-229.el7.x86_64"], disablerepo=False)
def upgrade_config_node(from_rel, pkg, *args):
    """Upgrades config pkgs in one or list of nodes.

    :param from_rel: release string being upgraded from (fed to LooseVersion)
    :param pkg: contrail package to stage on each node
    :param args: one or more user@host strings to upgrade
    USAGE:fab upgrade_config_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            execute('install_pkg_node', pkg, host_string)
            execute('create_install_repo_node', host_string)
            pkg_install(['contrail-setup'])
            # BUG FIX: original used "is 'vcenter'" — an identity check that
            # is not guaranteed to be true for an equal-but-distinct string
            # object; string comparison must use ==.
            if get_orchestrator() == 'vcenter':
                pkg = get_vcenter_plugin_pkg()
                install_contrail_vcenter_plugin(pkg)
            # Downgrading keepalived as we are packaging lower version of
            # keepalived in R2.20
            if (LooseVersion(from_rel) == LooseVersion('2.20') and
                    LooseVersion(get_release()) >= LooseVersion('2.20')):
                dist, version, extra = get_linux_distro()
                if version == '14.04':
                    # Force-overwrite/confold so dpkg accepts the downgrade
                    # without interactive prompts.
                    cmd = 'DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes'
                    cmd += ' -o Dpkg::Options::="--force-overwrite"'
                    cmd += ' -o Dpkg::Options::="--force-confold" install keepalived=1.2.13-0~276~ubuntu14.04.1'
                    sudo(cmd)
            pkgs = get_config_pkgs()
            # Build and run the provisioning command with package list plus
            # from/to release markers.
            cmd = frame_vnc_config_cmd(host_string, 'upgrade-vnc-config')
            cmd += ' -P %s' % ' '.join(pkgs)
            cmd += ' -F %s' % from_rel
            cmd += ' -T %s' % get_release()
            sudo(cmd)
def upgrade_kernel_node(*args): """upgrades the kernel image in given nodes.""" for host_string in args: with settings(host_string=host_string): execute('create_install_repo_node', host_string) dist, version, extra = get_linux_distro() if version == '12.04': print "upgrading apparmor before upgrading kernel" apt_install(["apparmor"]) print "Installing 3.13.0-34 kernel headers" apt_install(["linux-headers-3.13.0-34"]) apt_install(["linux-headers-3.13.0-34-generic"]) print "Upgrading the kernel to 3.13.0-34" apt_install(["linux-image-3.13.0-34-generic"]) default_grub = 'Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic' execute('set_grub_default_node', host_string, value=default_grub) elif version == '14.04': print "Installing 3.13.0-85 kernel headers" apt_install([ "linux-headers-3.13.0-85", "linux-headers-3.13.0-85-generic" ]) print "Upgrading the kernel to 3.13.0-85" apt_install([ "linux-image-3.13.0-85-generic", "linux-image-extra-3.13.0-85-generic" ]) default_grub = 'Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-85-generic' execute('set_grub_default_node', host_string, value=default_grub) elif 'red hat' in dist.lower() and version.startswith('7'): print "Upgrading RHEL kernel to version 3.10.0-327.10.1" pkg_install([ "kernel-3.10.0-327.10.1.el7.x86_64", "kernel-tools-3.10.0-327.10.1.el7.x86_64", "kernel-tools-libs-3.10.0-327.10.1.el7.x86_64", "kernel-headers-3.10.0-327.10.1.el7.x86_64" ], disablerepo=False) default_grub = 'Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)' execute('set_grub_default_node', host_string, value=default_grub) elif 'centos linux' in dist.lower() and version.startswith('7'): print "Upgrading Centos kernel to version 3.10.0-327.10.1" pkg_install([ "kernel-3.10.0-327.10.1.el7.x86_64", "kernel-tools-3.10.0-327.10.1.el7.x86_64", "kernel-tools-libs-3.10.0-327.10.1.el7.x86_64", "kernel-headers-3.10.0-327.10.1.el7.x86_64" ], disablerepo=False) default_grub = 'CentOS Linux 
(3.10.0-327.10.1.el7.x86_64) 7 (Core)' execute('set_grub_default_node', host_string, value=default_grub)
def upgrade_vcenter(): pkg_install(['contrail-vmware-utils']) vcenter_info = getattr(env, 'vcenter', None) if not vcenter_info: print 'Info: vcenter block is not defined in testbed file.Exiting' return esxi_info = getattr(testbed, 'esxi_hosts', None) if not esxi_info: print 'Info: esxi_hosts block is not defined in testbed file. Exiting' return host_list = esxi_info.keys() provision_vcenter_features(vcenter_info, esxi_info, host_list)
def upgrade_database_node(from_rel, pkg, *args):
    """Upgrades database pkgs in one or list of nodes.
    USAGE:fab upgrade_database_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            execute('install_pkg_node', pkg, host_string)
            execute('create_install_repo_node', host_string)
            pkg_install(['contrail-setup'])
            # Assemble the provisioning command in one shot.
            cmd = ' '.join([
                frame_vnc_database_cmd(host_string, 'upgrade-vnc-database'),
                '-P contrail-openstack-database',
                '-F %s' % from_rel,
                '-T %s' % get_release(),
            ])
            sudo(cmd)
def upgrade_database_node(from_rel, pkg, *args):
    """Upgrades database pkgs in one or list of nodes.
    USAGE:fab upgrade_database_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            execute("install_pkg_node", pkg, host_string)
            execute("create_install_repo_node", host_string)
            pkg_install(["contrail-setup"])
            cmd = frame_vnc_database_cmd(host_string, "upgrade-vnc-database")
            # Append package list and from/to release markers.
            for part in ("-P contrail-openstack-database",
                         "-F %s" % from_rel,
                         "-T %s" % get_release()):
                cmd += " " + part
            sudo(cmd)
def upgrade_compute_node(from_rel, pkg, *args, **kwargs):
    """Upgrades compute pkgs in one or list of nodes.
    USAGE:fab upgrade_compute_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            execute('install_pkg_node', pkg, host_string)
            execute('create_install_repo_node', host_string)
            pkg_install(['contrail-setup'])
            configure_nova = kwargs.get('configure_nova', 'yes')
            manage_nova_compute = kwargs.get('manage_nova_compute', 'yes')
            # TSN nodes and vCenter deployments never manage nova-compute.
            if (env.host_string in get_tsn_nodes()
                    or get_orchestrator() == 'vcenter'):
                manage_nova_compute = 'no'
            # Identify packages to upgrade
            cmd = frame_vnc_compute_cmd(
                host_string, 'upgrade-vnc-compute',
                configure_nova=configure_nova,
                manage_nova_compute=manage_nova_compute)
            is_vcenter_compute = (
                'vcenter_compute' in env.roledefs
                and env.host_string in env.roledefs['vcenter_compute'])
            if is_vcenter_compute:
                pkgs = get_vcenter_compute_pkgs()
                roles = ['vcenter_compute']
            else:
                pkgs = get_compute_pkgs(manage_nova_compute=manage_nova_compute)
                if (getattr(env, 'interface_rename', True)
                        and detect_ostype() not in ['ubuntu', 'redhat']):
                    pkgs.append('contrail-interface-name')
                if LooseVersion(from_rel) <= LooseVersion('3.2.0.0'):
                    dist, version, extra = get_linux_distro()
                    if version == '14.04':
                        # Drop any stale per-kernel vrouter package, then pin
                        # the one matching the -106 kernel.
                        for kver in ('3.13.0-40', '3.13.0-85',
                                     '3.13.0-100', '3.13.0-106'):
                            stale = 'contrail-vrouter-%s-generic' % kver
                            if stale in pkgs:
                                pkgs.remove(stale)
                        pkgs.append('contrail-vrouter-3.13.0-106-generic')
                # Identify roles of this node.
                roles = ['compute']
                if env.host_string in get_tsn_nodes():
                    roles.append('tsn')
                if env.host_string in get_toragent_nodes():
                    roles.append('toragent')
            cmd += ' -P %s' % ' '.join(pkgs)
            cmd += ' -F %s' % from_rel
            cmd += ' -T %s' % get_release()
            cmd += ' -R %s' % ' '.join(roles)
            sudo(cmd)
def upgrade_kernel_node(*args, **kwargs): """upgrades the kernel image in given nodes.""" for host_string in args: with settings(host_string=host_string): execute('create_install_repo_node', host_string) dist, version, extra = get_linux_distro() if version == '12.04': print "upgrading apparmor before upgrading kernel" apt_install(["apparmor"]) print "Installing 3.13.0-34 kernel headers" apt_install(["linux-headers-3.13.0-34"]) apt_install(["linux-headers-3.13.0-34-generic"]) print "Upgrading the kernel to 3.13.0-34" apt_install(["linux-image-3.13.0-34-generic"]) default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic' execute('set_grub_default_node', host_string, value=default_grub) elif version == '14.04': if 'version' in kwargs: kernel_ver = kwargs.get('version') else: kernel_ver = "3.13.0-85" print "Installing "+kernel_ver+" kernel headers" apt_install(["linux-headers-"+kernel_ver, "linux-headers-"+kernel_ver+"-generic"]) print "Upgrading the kernel to "+kernel_ver apt_install(["linux-image-"+kernel_ver+"-generic", "linux-image-extra-"+kernel_ver+"-generic"]) default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic' execute('set_grub_default_node', host_string, value=default_grub) elif 'red hat' in dist.lower() and version.startswith('7'): print "Upgrading RHEL kernel to version 3.10.0-327.10.1" pkg_install(["kernel-3.10.0-327.10.1.el7.x86_64", "kernel-tools-3.10.0-327.10.1.el7.x86_64", "kernel-tools-libs-3.10.0-327.10.1.el7.x86_64", "kernel-headers-3.10.0-327.10.1.el7.x86_64"], disablerepo=False) default_grub='Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)' execute('set_grub_default_node', host_string, value=default_grub) elif 'centos linux' in dist.lower() and version.startswith('7'): print "Upgrading Centos kernel to version 3.10.0-327.10.1" pkg_install(["kernel-3.10.0-327.10.1.el7.x86_64", "kernel-tools-3.10.0-327.10.1.el7.x86_64", "kernel-tools-libs-3.10.0-327.10.1.el7.x86_64", 
"kernel-headers-3.10.0-327.10.1.el7.x86_64"], disablerepo=False) default_grub='CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)' execute('set_grub_default_node', host_string, value=default_grub)
def upgrade_compute_node(from_rel, pkg, *args, **kwargs):
    """Upgrades compute pkgs in one or list of nodes.
    USAGE:fab upgrade_compute_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            execute("install_pkg_node", pkg, host_string)
            execute("create_install_repo_node", host_string)
            pkg_install(["contrail-setup"])
            configure_nova = kwargs.get("configure_nova", "yes")
            manage_nova_compute = kwargs.get("manage_nova_compute", "yes")
            # nova-compute is never managed on TSN nodes or under vCenter.
            if env.host_string in get_tsn_nodes() or get_orchestrator() == "vcenter":
                manage_nova_compute = "no"
            # Identify packages to upgrade
            cmd = frame_vnc_compute_cmd(
                host_string,
                "upgrade-vnc-compute",
                configure_nova=configure_nova,
                manage_nova_compute=manage_nova_compute,
            )
            if "vcenter_compute" in env.roledefs and env.host_string in env.roledefs["vcenter_compute"]:
                pkgs = get_vcenter_compute_pkgs()
                roles = ["vcenter_compute"]
            else:
                pkgs = get_compute_pkgs(manage_nova_compute=manage_nova_compute)
                needs_iface_rename = (
                    getattr(env, "interface_rename", True)
                    and detect_ostype() not in ["ubuntu", "redhat"])
                if needs_iface_rename:
                    pkgs.append("contrail-interface-name")
                if LooseVersion(from_rel) <= LooseVersion("3.0.2.0"):
                    dist, version, extra = get_linux_distro()
                    # Swap the Trusty vrouter module package for the -85 kernel.
                    if version == "14.04" and "contrail-vrouter-3.13.0-40-generic" in pkgs:
                        pkgs.remove("contrail-vrouter-3.13.0-40-generic")
                        pkgs.append("contrail-vrouter-3.13.0-85-generic")
                # Identify roles of this node.
                roles = ["compute"]
                if env.host_string in get_tsn_nodes():
                    roles.append("tsn")
                if env.host_string in get_toragent_nodes():
                    roles.append("toragent")
            for flag, val in (("-P", " ".join(pkgs)),
                              ("-F", from_rel),
                              ("-T", get_release()),
                              ("-R", " ".join(roles))):
                cmd += " %s %s" % (flag, val)
            sudo(cmd)
def upgrade_openstack_node(from_rel, pkg, *args):
    """Upgrades openstack pkgs in one or list of nodes.
    USAGE:fab upgrade_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            # Ceilometer plugin pkgs are added only on the first openstack node.
            pkg_contrail_ceilometer = None
            openstack_nodes = env.roledefs["openstack"]
            if openstack_nodes and host_string == openstack_nodes[0]:
                if is_ceilometer_contrail_plugin_install_supported():
                    pkg_contrail_ceilometer = get_ceilometer_plugin_pkgs()
            execute("install_pkg_node", pkg, host_string)
            execute("create_install_repo_node", host_string)
            pkg_install(["contrail-setup"])
            cmd = frame_vnc_openstack_cmd(host_string, "upgrade-vnc-openstack")
            openstack_pkgs = get_openstack_pkgs()
            if pkg_contrail_ceilometer:
                openstack_pkgs.extend(pkg_contrail_ceilometer)
            cmd += " -P %s" % " ".join(openstack_pkgs)
            cmd += " -F %s" % from_rel
            cmd += " -T %s" % get_release()
            sudo(cmd)
def upgrade_openstack_node(from_rel, pkg, *args):
    """Upgrades openstack pkgs in one or list of nodes.
    USAGE:fab upgrade_openstack_node:[email protected],[email protected]"""
    for host_string in args:
        with settings(host_string=host_string):
            pkg_contrail_ceilometer = None
            # Only the first openstack role node carries the ceilometer plugin.
            if (env.roledefs['openstack']
                    and host_string == env.roledefs['openstack'][0]
                    and is_ceilometer_contrail_plugin_install_supported()):
                pkg_contrail_ceilometer = get_ceilometer_plugin_pkgs()
            execute('install_pkg_node', pkg, host_string)
            execute('create_install_repo_node', host_string)
            pkg_install(['contrail-setup'])
            openstack_pkgs = get_openstack_pkgs()
            if pkg_contrail_ceilometer:
                openstack_pkgs.extend(pkg_contrail_ceilometer)
            cmd = ' '.join([
                frame_vnc_openstack_cmd(host_string, 'upgrade-vnc-openstack'),
                '-P %s' % ' '.join(openstack_pkgs),
                '-F %s' % from_rel,
                '-T %s' % get_release(),
            ])
            sudo(cmd)
def setup_test_env():
    """Prepare the sanity-test environment on the first cfgm node.

    Collects hostnames/roles from every node in the testbed, generates
    sanity_testbed.json and sanity_params.ini (from the repo's .sample
    template), copies both onto the cfgm node, and installs the python/ant
    tooling the test repository needs.
    NOTE(review): relies on many module-level names (env, testbed,
    CONTROLLER_TYPE, devstack_flag, whitespace, fabric helpers) defined
    elsewhere in this file.
    """
    cfgm_host = env.roledefs['cfgm'][0]
    cfgm_ip = hstr_to_ip(cfgm_host)
    # Pick revision ids: from git when run inside a checkout, otherwise from
    # the VERSION file already present on the cfgm node.
    with settings(warn_only=True):
        is_git_repo = local('git branch').succeeded
    if not is_git_repo:
        with settings(host_string=cfgm_host):
            build_id = sudo('cat /opt/contrail/contrail_packages/VERSION')
        fab_revision = build_id
        revision = build_id
        print "Testing from the CFGM."
    else:
        with settings(warn_only=True):
            fab_revision = local('git log --format="%H" -n 1', capture=True)
            if CONTROLLER_TYPE == 'Cloudstack':
                revision = local('cat %s/.git/refs/heads/cs_sanity' % env.test_repo_dir, capture=True)
            else:
                with lcd(env.test_repo_dir):
                    revision = local('git log --format="%H" -n 1', capture=True)
        # Push the test repo to the cfgm node unless we are already on it.
        if not env.roledefs['build'][0] == cfgm_host:
            execute(copy_dir, env.test_repo_dir, cfgm_host)
    # Skeleton of the testbed JSON handed to the sanity scripts.
    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms':[],
        'hosts_ipmi': [],
        'tor':[],
        'vcenter_servers':[],
        'sriov':[],
    }
    # The ini file is generated by substituting into the sample template
    # shipped with the test repo.
    sample_ini_file = env.test_repo_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
        contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            openstack_host_name = sudo("hostname")
    elif CONTROLLER_TYPE == 'Cloudstack':
        openstack_host_name = None
    with settings(host_string = env.roledefs['cfgm'][0]):
        cfgm_host_name = sudo("hostname")
    # Gather hostnames of all control nodes.
    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host):
            host_name = sudo("hostname")
            control_host_names.append(host_name)
    # Gather hostnames of all database (cassandra) nodes, if any.
    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host):
                host_name = sudo("hostname")
                cassandra_host_names.append(host_name)
    internal_vip = get_openstack_internal_vip()
    # Build one host entry (ip/credentials/roles) per node in the testbed.
    for host_string in env.roledefs['all']:
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string):
            host_name = sudo("hostname")
        host_dict = {}
        # We may have to change it when we have HA support in Cloudstack
        host_dict['ip'] = "127.0.0.1" if (CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['control']) else host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        # NOTE(review): this re-assignment is a no-op (same call, same value);
        # looks like leftover code — confirm before removing.
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]
        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] =get_env_passwords(host_string)
        host_dict['roles'] = []
        # openstack/orchestrator role entries are per-node only when there is
        # no internal VIP; with a VIP a single synthetic host is added later.
        if not internal_vip:
            if CONTROLLER_TYPE == 'Openstack' and host_string in env.roledefs['openstack']:
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
            elif CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['orchestrator']:
                role_dict = {'type': 'orchestrator', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}
            if CONTROLLER_TYPE == 'Openstack':
                if internal_vip:
                    role_dict['openstack'] = 'contrail-vip'
                else:
                    role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)
            # Currently Cloudstack supports all-in-one model alone for contrail hence piggybacking Controller role on to cfgm
            if CONTROLLER_TYPE == 'Cloudstack':
                role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
                host_dict['roles'].append(role_dict)
        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)
        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)
        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
                # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)
        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)
        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            host_dict['roles'].append(role_dict)
        sanity_testbed_dict['hosts'].append(host_dict)
    if env.has_key('vgw'):
        sanity_testbed_dict['vgw'].append(env.vgw)
    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent
    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts
    # Read any MX config (as physical_router )
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers
    # ESXi hosts get a host entry plus an esxi_vms entry (same dict object).
    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)
    # Adding vip VIP dict for HA test setup
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            if internal_vip:
                host_dict = {}
                # We may have to change it when we have HA support in Cloudstack
                host_dict['data-ip']= get_authserver_ip()
                host_dict['control-ip']= get_authserver_ip()
                host_dict['ip']= get_authserver_ip()
                host_dict['name'] = 'contrail-vip'
                with settings(host_string = env.roledefs['cfgm'][0]):
                    host_dict['username'] = host_string.split('@')[0]
                    host_dict['password'] = get_env_passwords(host_string)
                host_dict['roles'] = []
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
                sanity_testbed_dict['hosts'].append(host_dict)
    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)
    #get vcenter info
    if env.has_key('vcenter_servers'):
        vcenter_info = {}
        for k in env.vcenter_servers.keys():
            vcenter_info[k] = env.vcenter_servers[k]
            server = {}
            server[k] = env.vcenter_servers[k]
            sanity_testbed_dict['vcenter_servers'].append(server)
    #get sriov info
    if env.has_key('sriov'):
        sanity_testbed_dict['sriov'].append(env.sriov)
    # for every host_string
    with settings(host_string = cfgm_host):
        repo_dir_name = env.test_repo_dir.split('/')[-1]
        repo_path= get_remote_path(env.test_repo_dir)
        # generate json file and copy to cfgm
        sanity_testbed_json = json.dumps(sanity_testbed_dict)
        stop_on_fail = env.get('stop_on_fail', False)
        mail_to = env.get('mail_to', '')
        log_scenario = env.get('log_scenario', 'Sanity')
        if CONTROLLER_TYPE == 'Cloudstack':
            stack_password= '******'
            stack_tenant= 'default-project'
            admin_user= '******'
        else:
            admin_user, admin_password = get_authserver_credentials()
            admin_tenant = get_admin_tenant_name()
        # Few hardcoded variables for sanity environment
        # can be removed once we move to python3 and configparser
        stack_domain = 'default-domain'
        webserver_host = '10.204.216.50'
        webserver_user = '******'
        webserver_password = '******'
        webserver_log_path = '/home/bhushana/Documents/technical/logs/'
        webserver_report_path = '/home/bhushana/Documents/technical/sanity'
        webroot = 'Docs/logs'
        mail_server = '10.204.216.49'
        mail_port = '25'
        fip_pool_name = 'floating-ip-pool'
        public_virtual_network='public'
        public_tenant_name='admin'
        fixture_cleanup = 'yes'
        generate_html_report = 'True'
        key = 'key1'
        mailSender = '*****@*****.**'
        use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False)
        orch = getattr(env, 'orchestrator', 'openstack')
        router_asn = getattr(testbed, 'router_asn', '')
        public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
        public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
        ext_routers = getattr(testbed, 'ext_routers', '')
        router_info = str(ext_routers)
        test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
        if not getattr(env, 'test', None):
            env.test={}
        stack_user = env.test.get('stack_user', None)
        stack_password = env.test.get('stack_password', None)
        stack_tenant = env.test.get('stack_tenant', None)
        tenant_isolation = env.test.get('tenant_isolation', None)
        webui = getattr(testbed, 'webui', False)
        horizon = getattr(testbed, 'horizon', False)
        ui_config = getattr(testbed, 'ui_config', False)
        ui_browser = getattr(testbed, 'ui_browser', False)
        if 'mail_server' in env.keys():
            mail_server = env.mail_server
            mail_port = env.mail_port
        vcenter_dc = ''
        if orch == 'vcenter':
            public_tenant_name='vCenter'
        if env.has_key('vcenter_servers'):
            if env.vcenter_servers:
                # Last server's datacenter wins when several are configured.
                for k in env.vcenter_servers:
                    vcenter_dc = env.vcenter_servers[k]['datacenter']
        # Fill the ini template; safe_substitute leaves unknown keys untouched.
        sanity_params = sanity_ini_templ.safe_substitute(
            {'__testbed_json_file__' : 'sanity_testbed.json',
             '__nova_keypair_name__' : key,
             '__orch__' : orch,
             '__admin_user__' : admin_user,
             '__admin_password__' : admin_password,
             '__auth_ip__' : get_authserver_ip(),
             '__auth_port__' : get_authserver_port(),
             '__admin_tenant__' : admin_tenant,
             '__stack_domain__' : stack_domain,
             '__multi_tenancy__' : get_mt_enable(),
             '__address_family__' : get_address_family(),
             '__log_scenario__' : log_scenario,
             '__generate_html_report__': generate_html_report,
             '__fixture_cleanup__' : fixture_cleanup,
             '__webserver__' : webserver_host,
             '__webserver_user__' : webserver_user,
             '__webserver_password__' : webserver_password,
             '__webserver_log_dir__' : webserver_log_path,
             '__webserver_report_dir__': webserver_report_path,
             '__webroot__' : webroot,
             '__mail_server__' : mail_server,
             '__mail_port__' : mail_port,
             '__sender_mail_id__' : mailSender,
             '__receiver_mail_id__' : mail_to,
             '__http_proxy__' : env.get('http_proxy', ''),
             '__ui_browser__' : ui_browser,
             '__ui_config__' : ui_config,
             '__horizon__' : horizon,
             '__webui__' : webui,
             '__devstack__' : devstack_flag,
             '__public_vn_rtgt__' : public_vn_rtgt,
             '__router_asn__' : router_asn,
             '__router_name_ip_tuples__': router_info,
             '__public_vn_name__' : fip_pool_name,
             '__public_virtual_network__':public_virtual_network,
             '__public_tenant_name__' :public_tenant_name,
             '__public_vn_subnet__' : public_vn_subnet,
             '__test_revision__' : revision,
             '__fab_revision__' : fab_revision,
             '__test_verify_on_setup__': test_verify_on_setup,
             '__stop_on_fail__' : stop_on_fail,
             '__ha_setup__' : getattr(testbed, 'ha_setup', ''),
             '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''),
             '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''),
             '__vcenter_dc__' : vcenter_dc,
             '__vcenter_server__' : get_vcenter_ip(),
             '__vcenter_port__' : get_vcenter_port(),
             '__vcenter_username__' : get_vcenter_username(),
             '__vcenter_password__' : get_vcenter_password(),
             '__vcenter_datacenter__' : get_vcenter_datacenter(),
             '__vcenter_compute__' : get_vcenter_compute(),
             '__use_devicemanager_for_md5__' : use_devicemanager_for_md5,
             '__stack_user__' : stack_user,
             '__stack_password__' : stack_password,
             '__stack_tenant__' : stack_tenant,
             '__tenant_isolation__' : tenant_isolation,
            })
        # Write the generated JSON to a temp file, push to the cfgm node and
        # keep a local copy in the test repo.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_testbed_json)
        of.close()
        put(fname, "%s/sanity_testbed.json" %(repo_path), use_sudo=True)
        local ("cp %s %s/sanity_testbed.json" %(fname, env.test_repo_dir))
        os.remove(fname)
        # Same dance for the generated ini file.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_params)
        of.close()
        put(fname, "%s/sanity_params.ini" %(repo_path), use_sudo=True)
        local ("cp %s %s/sanity_params.ini" %(fname, env.test_repo_dir))
        os.remove(fname)
        # Install the python packages the test repo needs on the cfgm node.
        pkg = ""
        if CONTROLLER_TYPE == 'Cloudstack':
            with settings(warn_only = True):
                sudo('python-pip install fixtures testtools fabric')
        else:
            with settings(warn_only = True):
                run('rm -rf /tmp/pip-build-root')
                if detect_ostype() in ['centos', 'redhat', 'centoslinux']:
                    sudo('yum -y install python-pip')
                    pkg = 'fixtures==1.0.0 testtools==1.7.1 testresources==0.2.7 discover \
                           testrepository junitxml pytun requests==2.3.0 pyvmomi==5.5.0 eventlet \
                           tabulate'
                elif 'ubuntu' == detect_ostype():
                    pkg = 'fixtures==1.0.0 testtools==1.7.1 testresources==0.2.7 \
                           testrepository junitxml pytun pyvmomi==5.5.0 eventlet tabulate '
                    output = sudo('pip show requests | grep Version')
                    if output.succeeded:
                        version = output.split(':')[1].translate(None, whitespace)
                        # NOTE(review): py2-only str-vs-float comparison below;
                        # the inner LooseVersion check is the real guard.
                        if version <= 2.3:
                            if (LooseVersion(version) < LooseVersion('2.3.0')):
                                pkg += ' requests==2.3.0'
                if os.environ.has_key('GUESTVM_IMAGE'):
                    pkg = pkg + ' pexpect'
                if ui_browser:
                    pkg = pkg + ' pyvirtualdisplay selenium'
                if exists('/opt/contrail/api-venv/bin/activate'):
                    sudo('source /opt/contrail/api-venv/bin/activate && \
                          pip install --upgrade unittest2 && \
                          pip install --upgrade %s' %pkg)
                else:
                    # Avoid installing linecache2 as dependency on unittest2
                    # Avoid "TypeError: dist must be a Distribution instance"
                    sudo("pip install linecache2")
                    sudo("pip install --upgrade unittest2")
                    sudo("pip install --upgrade %s" %pkg)
                sudo ("pip install --upgrade easyprocess")
        # Ant (plus version-specific junit bits) is required by the test runner.
        if not exists('/usr/bin/ant'):
            pkg_install(['ant'],disablerepo = False)
        ant_version = sudo('ant -version')
        if ('1.7' in ant_version):
            pkg_install(['ant-junit' , 'ant-trax'] , disablerepo = False)
        if ('1.9' in ant_version):
            pkg_install(['ant-junit'] , disablerepo = False)
        pkg_install(['patch', 'python-heatclient', 'python-ceilometerclient', 'python-setuptools'],disablerepo = False)
        # On centos, junos-eznc install requires devel pkgs of libxml2 and libxslt
        if detect_ostype() in ['redhat', 'centos', 'centoslinux']:
            pkg_install(['libxslt-devel', 'libxml2-devel'], disablerepo=False)
        sudo('pip install junos-eznc==1.2.2')
        #Restart DM. This is because of #1490860
        sudo('service contrail-device-manager restart')
    # Compute nodes need ncclient/setuptools (and tcpdump on RPM distros).
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            #pkg_install(['python-setuptools', 'python-pkg-resources', 'python-ncclient'],disablerepo = False)
            pkg_install(['python-setuptools', 'python-ncclient'],disablerepo = False)
            if detect_ostype() in ['centos', 'centoslinux', 'redhat']:
                sudo("yum -y install tcpdump")
def setup_test_env():
    """Generate and distribute the sanity-test configuration for this testbed.

    Produces two artifacts and copies them into the test repo on the cfgm
    node (and locally under ``env.test_repo_dir``):

      * ``sanity_testbed.json`` -- a per-host inventory (ip / data-ip /
        control-ip / credentials / role list) derived from ``env.roledefs``;
      * ``sanity_params.ini``  -- rendered from the inline ``string.Template``
        below via ``safe_substitute``.

    It then pip-installs the test-framework packages on the cfgm node and
    installs tcpdump on every compute node.

    NOTE(review): depends on module globals not visible in this block --
    ``env``, ``testbed``, ``CONTROLLER_TYPE``, ``devstack_flag`` and helpers
    such as ``hstr_to_ip``/``get_data_ip``/``detect_ostype``; confirm against
    the enclosing fabfile.
    """
    cfgm_host = env.roledefs['cfgm'][0]
    cfgm_ip = hstr_to_ip(cfgm_host)
    # Decide where the test revision comes from: a local git checkout, or
    # (when not run from a git repo) the VERSION file on the cfgm node.
    with settings(warn_only=True):
        is_git_repo = local('git branch').succeeded
    if not is_git_repo:
        with settings(host_string=cfgm_host):
            build_id = run('cat /opt/contrail/contrail_packages/VERSION')
        fab_revision = build_id
        revision = build_id
        print "Testing from the CFGM."
    else:
        fab_revision = local('git log --format="%H" -n 1', capture=True)
        if CONTROLLER_TYPE == 'Cloudstack':
            revision = local('cat %s/.git/refs/heads/cs_sanity' % env.test_repo_dir, capture=True)
        else:
            with lcd(env.test_repo_dir):
                revision = local('git log --format="%H" -n 1', capture=True)
        # Push the local test repo to the cfgm node unless we are already
        # building on it.
        if not env.roledefs['build'][0] == cfgm_host:
            execute(copy_dir, env.test_repo_dir, cfgm_host)

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'hosts_ipmi': []
    }

    # Inline template for sanity_params.ini; $__name__ placeholders are
    # filled by safe_substitute() further below.
    sanity_ini_templ = string.Template("""[Basic]
# Provisioning file
# Provisioning json file
testRepoDir=$__test_repo__
provFile=sanity_testbed.json
logScenario=$__log_scenario__

# Nova Keypair
key=key1

# Pointer for the repo which contains new packages. Needed for setup_systems.py
stackUser=admin
stackPassword=$__stack_password__
stackTenant=$__stack_tenant__
multiTenancy=$__multi_tenancy__
keystone_ip=$__keystone_ip__

# If you want the HTML report through HTMLTestRunner, select 'yes'. If not, the basic Unittest TextTestRunner will run the tests
generate_html_report=yes

# If you dont want fixture cleanups to remove the objects which are created as part of setUp of the fixture, set fixtureCleanup to 'no'. Default value should be 'yes'. If objects are already present before start of tests, they are not deleted. To clean them up forcefully, set fixtureCleanup to 'force'
fixtureCleanup=yes

[WebServer]
# The URL to which the test log file and HTML report would be uploaded to.
# path is the local filesystem path to which the files will get copied to
# Ex: http://10.204.216.50/Docs/
host=10.204.216.50
username=bhushana
password=bhu@123
path=/home/bhushana/Documents/technical/logs/
reportpath=/home/bhushana/Documents/technical/sanity
webRoot=Docs/logs

[Mail]
server=$__mail_server__
port=$__mail_port__
mailTo=$__mail_to__
[email protected]

[log_screen]
# set if log redirection to console needed
log_to_console= yes

[loggers]
keys=root,log01

[proxy]
http=$__http_proxy__

[webui]
webui=$__webui__

[webui_config]
webui_config=$__webui_config__

[devstack]
devstack=$__devstack__

[logger_root]
handlers=screen
#qualname=(root)
level=ERROR

[logger_log01]
handlers=file
qualname=log01
level=DEBUG
propagate=0

[formatters]
keys=std

[formatter_std]
format=%(asctime)s [ %(levelname)5s ] %(message)s

[handlers]
keys=file,screen
#keys=file

[handler_file]
class= custom_filehandler.CustomFileHandler
formatter=std
level=DEBUG
args=( 'test_details.log.$__timestamp__','a')
#args is of the form : ( log-file-name , write-mode)

[handler_screen]
class=StreamHandler
formatter=std
#level=ERROR
args=(sys.stdout,)

[Mx]
# Currently, MX configuration will be read only for the BLR sanity setup with a pre-defined MX configuration
#Route Target on the MX
mx_rt=$__public_vn_rtgt__
#Asn
router_asn=$__router_asn__
#Just a notation to identify the router
$__ext_router_names__
$__ext_router_ips__
fip_pool=$__public_vn_subnet__
fip_pool_name=public-pool

[repos]
#Test Revision
test_revision=$__test_revision__
fab_revision=$__fab_revision__

[HA]
# HA config
ha_setup=$__ha_setup__
ipmi_username=$__ipmi_username__
ipmi_password=$__ipmi_password__

#For debugging
[debug]
stop_on_fail=no
verify_on_setup=$__test_verify_on_setup__
""")

    # Collect hostnames of the orchestrator, cfgm, control and database
    # nodes; these are referenced from the per-host role parameters below.
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            openstack_host_name = run("hostname")
    elif CONTROLLER_TYPE == 'Cloudstack':
        openstack_host_name = None
    with settings(host_string = env.roledefs['cfgm'][0]):
        cfgm_host_name = run("hostname")
    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host):
            host_name = run("hostname")
            control_host_names.append(host_name)
    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    # Build one host_dict (addresses, credentials, roles) per testbed host.
    for host_string in env.roledefs['all']:
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string):
            host_name = run("hostname")
        host_dict = {}
        # We may have to change it when we have HA support in Cloudstack
        host_dict['ip'] = "127.0.0.1" if (CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['control']) else host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        # NOTE(review): this re-assignment stores the same value again --
        # looks like a no-op; confirm intent before removing.
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]
        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] = env.passwords[host_string]
        host_dict['roles'] = []

        # Orchestrator role (openstack or cloudstack orchestrator node).
        if CONTROLLER_TYPE == 'Openstack' and host_string in env.roledefs['openstack']:
            role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)
        elif CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['orchestrator']:
            role_dict = {'type': 'orchestrator', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}
            if CONTROLLER_TYPE == 'Openstack':
                role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)
            # Currently Cloudstack supports all-in-one model alone for contrail hence piggybacking Controller role on to cfgm
            if CONTROLLER_TYPE == 'Cloudstack':
                role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
                host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            # Single control node: point computes straight at it; otherwise
            # list every control node as a BGP peer.
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
                # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)

    if env.has_key('vgw'):
        sanity_testbed_dict['vgw'].append(env.vgw)

    # get host ipmi list
    if env.has_key('hosts_ipmi') :
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    # for every host_string
    with settings(host_string = cfgm_host):
        repo_dir_name = env.test_repo_dir.split('/')[-1]
        repo_path= get_remote_path(env.test_repo_dir)

        # generate json file and copy to cfgm
        sanity_testbed_json = json.dumps(sanity_testbed_dict)

        mail_to = env.get('mail_to', '')
        log_scenario = env.get('log_scenario', 'Sanity')
        if CONTROLLER_TYPE == 'Cloudstack':
            stack_password= '******'
            stack_tenant= 'default-project'
        else:
            stack_password = get_keystone_admin_password()
            stack_tenant= get_keystone_admin_user()

        #get the ext router information from the testbed file and set it the
        # ini inputs.
        ext_bgp_names = ''
        ext_bgp_ips = ''
        router_asn = getattr(testbed, 'router_asn','0')
        public_vn_rtgt = getattr(testbed, 'public_vn_rtgt','0')
        public_vn_subnet = getattr(testbed, 'public_vn_subnet',None)
        ext_routers = getattr(testbed, 'ext_routers', [])
        test_verify_on_setup = getattr(env,'test_verify_on_setup','True')
        mail_server = '10.204.216.49'
        mail_port = '25'
        webui = getattr(testbed, 'webui', False)
        webui_config = getattr(testbed, 'webui_config', False)
        if 'mail_server' in env.keys():
            mail_server = env.mail_server
            mail_port = env.mail_port
        # One "name_router_name=..." / "name_router_ip=..." ini line per
        # external router tuple from the testbed file.
        for ext_bgp in ext_routers:
            ext_bgp_names = ext_bgp_names + '%s_router_name=%s\n' % (ext_bgp[0], ext_bgp[0])
            ext_bgp_ips = ext_bgp_ips + '%s_router_ip=%s\n' % (ext_bgp[0], ext_bgp[1])

        # Fill the ini template; safe_substitute leaves unknown placeholders
        # untouched instead of raising.
        sanity_params = sanity_ini_templ.safe_substitute(
            {'__timestamp__': dt.now().strftime('%Y-%m-%d-%H:%M:%S'),
             '__multi_tenancy__': get_mt_enable(),
             '__keystone_ip__': get_keystone_ip(),
             '__mail_to__': mail_to,
             '__log_scenario__': log_scenario,
             '__test_revision__': revision,
             '__fab_revision__': fab_revision,
             '__stack_password__': stack_password,
             '__stack_tenant__': stack_tenant,
             '__ext_router_names__': ext_bgp_names,
             '__ext_router_ips__': ext_bgp_ips,
             '__router_asn__': router_asn,
             '__public_vn_rtgt__': public_vn_rtgt,
             '__public_vn_subnet__': public_vn_subnet,
             '__mail_server__': mail_server,
             '__mail_port__': mail_port,
             '__test_repo__': get_remote_path(env.test_repo_dir),
             '__webui__': webui,
             '__devstack__': devstack_flag,
             '__webui_config__': webui_config,
             '__http_proxy__': env.get('http_proxy'),
             '__test_verify_on_setup__': test_verify_on_setup,
             '__ha_setup__': getattr(testbed, 'ha_setup', None),
             '__ipmi_username__': getattr(testbed,'ipmi_username',None),
             '__ipmi_password__': getattr(testbed,'ipmi_password',None)
            })

        # Write sanity_testbed.json to a temp file, push it to the cfgm node
        # and keep a local copy in the test repo.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_testbed_json)
        of.close()
        put(fname, "%s/sanity_testbed.json" %(repo_path))
        local ("cp %s %s/sanity_testbed.json" %(fname, env.test_repo_dir))
        os.remove(fname)

        # Same for the rendered sanity_params.ini.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_params)
        of.close()
        put(fname, "%s/sanity_params.ini" %(repo_path))
        local ("cp %s %s/sanity_params.ini" %(fname, env.test_repo_dir))
        os.remove(fname)

        # Install the python test-framework dependencies on the cfgm node.
        if CONTROLLER_TYPE == 'Cloudstack':
            with settings(warn_only = True):
                run('python-pip install fixtures testtools fabric')
        else:
            with settings(warn_only = True):
                if detect_ostype() in ['centos']:
                    pkg = 'fixtures testtools testresources discover unittest2 \
                          selenium pyvirtualdisplay \
                          testrepository junitxml pytun'
                # NOTE(review): 'Ubuntu' is capitalised here while the centos
                # branch (and other revisions of this function) compare against
                # lowercase values from detect_ostype() -- possibly a dead
                # branch; confirm what detect_ostype() returns.
                elif detect_ostype() in ['Ubuntu']:
                    pkg = 'fixtures testtools testresources selenium \
                          pyvirtualdisplay testrepository junitxml pytun'
                if os.environ.has_key('GUESTVM_IMAGE'):
                    pkg = pkg + ' pexpect'
                # Prefer the contrail api virtualenv when present.
                if exists('/opt/contrail/api-venv/bin/activate'):
                    run('source /opt/contrail/api-venv/bin/activate && \
                         pip install --upgrade %s' %pkg)
                else:
                    run("pip install --upgrade %s" %pkg)
                # ant + matching junit support, needed by the webui tests.
                if not exists('/usr/bin/ant'):
                    pkg_install(['ant'],disablerepo = False)
                ant_version = run('ant -version')
                if ('1.7' in ant_version):
                    pkg_install(['ant-junit' , 'ant-trax'] , disablerepo = False)
                pkg_install(['patch'],disablerepo = False)

    # tcpdump is used by traffic-verification tests on every compute node.
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            if detect_ostype() in ['centos']:
                run("yum -y --disablerepo=* --enablerepo=contrail_install_repo install tcpdump")
            if detect_ostype() in ['redhat']:
                run("yum -y install tcpdump")
def setup_test_env():
    """Generate and distribute the sanity-test configuration for this testbed.

    Later revision of the setup: instead of an inline ini template it renders
    ``sanity_params.ini.sample`` from the test repo, and it understands HA
    deployments (an openstack internal VIP is represented as a synthetic
    'contrail-vip' host entry).  Produces ``sanity_testbed.json`` and
    ``sanity_params.ini`` on the cfgm node and in the local test repo, then
    installs the test-framework python packages on cfgm and tcpdump on all
    compute nodes.

    NOTE(review): depends on module globals not visible in this block --
    ``env``, ``testbed``, ``CONTROLLER_TYPE``, ``devstack_flag`` and helpers
    such as ``get_openstack_internal_vip``/``get_keystone_ip``; confirm
    against the enclosing fabfile.
    """
    cfgm_host = env.roledefs['cfgm'][0]
    cfgm_ip = hstr_to_ip(cfgm_host)
    # Decide where the test revision comes from: a local git checkout, or
    # (when not run from a git repo) the VERSION file on the cfgm node.
    with settings(warn_only=True):
        is_git_repo = local('git branch').succeeded
    if not is_git_repo:
        with settings(host_string=cfgm_host):
            build_id = run('cat /opt/contrail/contrail_packages/VERSION')
        fab_revision = build_id
        revision = build_id
        print "Testing from the CFGM."
    else:
        fab_revision = local('git log --format="%H" -n 1', capture=True)
        if CONTROLLER_TYPE == 'Cloudstack':
            revision = local('cat %s/.git/refs/heads/cs_sanity' % env.test_repo_dir, capture=True)
        else:
            with lcd(env.test_repo_dir):
                revision = local('git log --format="%H" -n 1', capture=True)
        # Push the local test repo to the cfgm node unless we are already
        # building on it.
        if not env.roledefs['build'][0] == cfgm_host:
            execute(copy_dir, env.test_repo_dir, cfgm_host)

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'hosts_ipmi': []
    }

    # The ini template now ships with the test repo as a .sample file.
    sample_ini_file = env.test_repo_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
        contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    # Collect hostnames of the orchestrator, cfgm, control and database
    # nodes; these feed the per-host role parameters below.
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            openstack_host_name = run("hostname")
    elif CONTROLLER_TYPE == 'Cloudstack':
        openstack_host_name = None
    with settings(host_string = env.roledefs['cfgm'][0]):
        cfgm_host_name = run("hostname")
    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host):
            host_name = run("hostname")
            control_host_names.append(host_name)
    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    # Non-empty when the openstack services sit behind an internal VIP (HA).
    internal_vip = get_openstack_internal_vip()

    # Build one host_dict (addresses, credentials, roles) per testbed host.
    for host_string in env.roledefs['all']:
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string):
            host_name = run("hostname")
        host_dict = {}
        # We may have to change it when we have HA support in Cloudstack
        host_dict['ip'] = "127.0.0.1" if (CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['control']) else host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        # NOTE(review): this re-assignment stores the same value again --
        # looks like a no-op; confirm intent before removing.
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]
        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] = env.passwords[host_string]
        host_dict['roles'] = []

        # In HA mode the orchestrator role is attached to the synthetic
        # 'contrail-vip' host added after this loop, not to real hosts.
        if not internal_vip:
            if CONTROLLER_TYPE == 'Openstack' and host_string in env.roledefs['openstack']:
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
            elif CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['orchestrator']:
                role_dict = {'type': 'orchestrator', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}
            if CONTROLLER_TYPE == 'Openstack':
                if internal_vip:
                    role_dict['openstack'] = 'contrail-vip'
                else:
                    role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)
            # Currently Cloudstack supports all-in-one model alone for contrail hence piggybacking Controller role on to cfgm
            if CONTROLLER_TYPE == 'Cloudstack':
                role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
                host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            # Single control node: point computes straight at it; otherwise
            # list every control node as a BGP peer.
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
                # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)

    if env.has_key('vgw'):
        sanity_testbed_dict['vgw'].append(env.vgw)

    # Adding vip VIP dict for HA test setup
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            if internal_vip:
                host_dict = {}
                # We may have to change it when we have HA support in Cloudstack
                host_dict['data-ip']= get_keystone_ip()
                host_dict['control-ip']= get_keystone_ip()
                host_dict['ip']= get_keystone_ip()
                host_dict['name'] = 'contrail-vip'
                # Credentials borrowed from the cfgm entry.  NOTE(review):
                # host_string here is the leftover value from the loop above;
                # confirm that is the intended credential source.
                with settings(host_string = env.roledefs['cfgm'][0]):
                    host_dict['username'] = host_string.split('@')[0]
                    host_dict['password'] = env.passwords[host_string]
                host_dict['roles'] = []
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
                sanity_testbed_dict['hosts'].append(host_dict)

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    # for every host_string
    with settings(host_string = cfgm_host):
        repo_dir_name = env.test_repo_dir.split('/')[-1]
        repo_path= get_remote_path(env.test_repo_dir)

        # generate json file and copy to cfgm
        sanity_testbed_json = json.dumps(sanity_testbed_dict)

        stop_on_fail = env.get('stop_on_fail', False)
        mail_to = env.get('mail_to', '')
        log_scenario = env.get('log_scenario', 'Sanity')
        if CONTROLLER_TYPE == 'Cloudstack':
            stack_password= '******'
            stack_tenant= 'default-project'
            stack_user= '******'
        else:
            stack_user= get_keystone_admin_user()
            stack_password = get_keystone_admin_password()
            stack_tenant = get_keystone_admin_tenant_name()

        # Few hardcoded variables for sanity environment
        # can be removed once we move to python3 and configparser
        stack_domain = 'default-domain'
        webserver_host = '10.204.216.50'
        webserver_user = '******'
        webserver_password = '******'
        webserver_log_path = '/home/bhushana/Documents/technical/logs/'
        webserver_report_path = '/home/bhushana/Documents/technical/sanity'
        webroot = 'Docs/logs'
        mail_server = '10.204.216.49'
        mail_port = '25'
        fip_pool_name = 'public-pool'
        fixture_cleanup = 'yes'
        generate_html_report = 'True'
        key = 'key1'
        mailSender = '*****@*****.**'

        router_asn = getattr(testbed, 'router_asn', '')
        public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
        public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
        ext_routers = getattr(testbed, 'ext_routers', '')
        router_info = str(ext_routers)
        test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
        webui = getattr(testbed, 'webui', False)
        horizon = getattr(testbed, 'horizon', False)
        ui_config = getattr(testbed, 'ui_config', False)
        ui_browser = getattr(testbed, 'ui_browser', False)
        if 'mail_server' in env.keys():
            mail_server = env.mail_server
            mail_port = env.mail_port

        # Fill the sample ini template; safe_substitute leaves unknown
        # placeholders untouched instead of raising.
        sanity_params = sanity_ini_templ.safe_substitute(
            {'__testbed_json_file__' : 'sanity_testbed.json',
             '__nova_keypair_name__' : key,
             '__stack_user__' : stack_user,
             '__stack_password__' : stack_password,
             '__stack_tenant__' : stack_tenant,
             '__stack_domain__' : stack_domain,
             '__keystone_ip__' : get_keystone_ip(),
             '__multi_tenancy__' : get_mt_enable(),
             '__address_family__' : get_address_family(),
             '__log_scenario__' : log_scenario,
             '__generate_html_report__': generate_html_report,
             '__fixture_cleanup__' : fixture_cleanup,
             '__webserver__' : webserver_host,
             '__webserver_user__' : webserver_user,
             '__webserver_password__' : webserver_password,
             '__webserver_log_dir__' : webserver_log_path,
             '__webserver_report_dir__': webserver_report_path,
             '__webroot__' : webroot,
             '__mail_server__' : mail_server,
             '__mail_port__' : mail_port,
             '__sender_mail_id__' : mailSender,
             '__receiver_mail_id__' : mail_to,
             '__http_proxy__' : env.get('http_proxy', ''),
             '__ui_browser__' : ui_browser,
             '__ui_config__' : ui_config,
             '__horizon__' : horizon,
             '__webui__' : webui,
             '__devstack__' : devstack_flag,
             '__public_vn_rtgt__' : public_vn_rtgt,
             '__router_asn__' : router_asn,
             '__router_name_ip_tuples__': router_info,
             '__public_vn_name__' : fip_pool_name,
             '__public_vn_subnet__' : public_vn_subnet,
             '__test_revision__' : revision,
             '__fab_revision__' : fab_revision,
             '__test_verify_on_setup__': test_verify_on_setup,
             '__stop_on_fail__' : stop_on_fail,
             '__ha_setup__' : getattr(testbed, 'ha_setup', ''),
             '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''),
             '__ipmi_password__' : getattr(testbed, 'ipmi_password', '')
            })

        # Write sanity_testbed.json to a temp file, push it to the cfgm node
        # and keep a local copy in the test repo.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_testbed_json)
        of.close()
        put(fname, "%s/sanity_testbed.json" %(repo_path))
        local ("cp %s %s/sanity_testbed.json" %(fname, env.test_repo_dir))
        os.remove(fname)

        # Same for the rendered sanity_params.ini.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_params)
        of.close()
        put(fname, "%s/sanity_params.ini" %(repo_path))
        local ("cp %s %s/sanity_params.ini" %(fname, env.test_repo_dir))
        os.remove(fname)

        # Install the python test-framework dependencies on the cfgm node.
        pkg = ""
        if CONTROLLER_TYPE == 'Cloudstack':
            with settings(warn_only = True):
                run('python-pip install fixtures testtools fabric')
        else:
            with settings(warn_only = True):
                if 'centos' == detect_ostype():
                    pkg = 'fixtures testtools==1.1.0 testresources discover unittest2 \
                          selenium pyvirtualdisplay \
                          testrepository junitxml pytun'
                elif 'ubuntu' == detect_ostype():
                    pkg = 'fixtures testtools==1.1.0 testresources\
                          testrepository junitxml pytun'
                if os.environ.has_key('GUESTVM_IMAGE'):
                    pkg = pkg + ' pexpect'
                # Browser-driven UI tests additionally need selenium.
                if ui_browser:
                    pkg = pkg + ' pyvirtualdisplay selenium'
                # Prefer the contrail api virtualenv when present.
                if exists('/opt/contrail/api-venv/bin/activate'):
                    run('source /opt/contrail/api-venv/bin/activate && \
                         pip install --upgrade %s' %pkg)
                else:
                    run("pip install --upgrade %s" %pkg)
                # ant + matching junit support, needed by the webui tests.
                if not exists('/usr/bin/ant'):
                    pkg_install(['ant'],disablerepo = False)
                ant_version = run('ant -version')
                if ('1.7' in ant_version):
                    pkg_install(['ant-junit' , 'ant-trax'] , disablerepo = False)
                pkg_install(['patch'],disablerepo = False)

    # tcpdump is used by traffic-verification tests on every compute node.
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            if 'centos' == detect_ostype():
                run("yum -y --disablerepo=* --enablerepo=contrail_install_repo install tcpdump")
            if 'redhat' == detect_ostype():
                run("yum -y install tcpdump")
def setup_test_env(): cfgm_host = env.roledefs['cfgm'][0] cfgm_ip = hstr_to_ip(cfgm_host) with settings(warn_only=True): is_git_repo = local('git branch').succeeded if not is_git_repo: with settings(host_string=cfgm_host): build_id = sudo('cat /opt/contrail/contrail_packages/VERSION') fab_revision = build_id revision = build_id print "Testing from the CFGM." else: with settings(warn_only=True): fab_revision = local('git log --format="%H" -n 1', capture=True) if CONTROLLER_TYPE == 'Cloudstack': revision = local('cat %s/.git/refs/heads/cs_sanity' % env.test_repo_dir, capture=True) else: with lcd(env.test_repo_dir): revision = local('git log --format="%H" -n 1', capture=True) if not env.roledefs['build'][0] == cfgm_host: execute(copy_dir, env.test_repo_dir, cfgm_host) sanity_testbed_dict = { 'hosts': [], 'vgw': [], 'esxi_vms': [], 'hosts_ipmi': [], 'tor': [], 'vcenter_servers': [], 'sriov': [], 'dpdk': [], } sample_ini_file = env.test_repo_dir + '/' + 'sanity_params.ini.sample' with open(sample_ini_file, 'r') as fd_sample_ini: contents_sample_ini = fd_sample_ini.read() sanity_ini_templ = string.Template(contents_sample_ini) if CONTROLLER_TYPE == 'Openstack': with settings(host_string=env.roledefs['openstack'][0]): openstack_host_name = sudo("hostname") elif CONTROLLER_TYPE == 'Cloudstack': openstack_host_name = None with settings(host_string=env.roledefs['cfgm'][0]): cfgm_host_name = sudo("hostname") control_host_names = [] for control_host in env.roledefs['control']: with settings(host_string=control_host): host_name = sudo("hostname") control_host_names.append(host_name) cassandra_host_names = [] if 'database' in env.roledefs.keys(): for cassandra_host in env.roledefs['database']: with settings(host_string=cassandra_host): host_name = sudo("hostname") cassandra_host_names.append(host_name) internal_vip = get_openstack_internal_vip() for host_string in env.roledefs['all']: host_ip = host_string.split('@')[1] with settings(host_string=host_string): host_name = 
sudo("hostname") host_dict = {} # We may have to change it when we have HA support in Cloudstack host_dict['ip'] = "127.0.0.1" if ( CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['control']) else host_ip host_dict['data-ip'] = get_data_ip(host_string)[0] if host_dict['data-ip'] == host_string.split('@')[1]: host_dict['data-ip'] = get_data_ip(host_string)[0] host_dict['control-ip'] = get_control_host_string(host_string).split( '@')[1] host_dict['name'] = host_name host_dict['username'] = host_string.split('@')[0] host_dict['password'] = get_env_passwords(host_string) host_dict['roles'] = [] if not internal_vip: if CONTROLLER_TYPE == 'Openstack' and host_string in env.roledefs[ 'openstack']: role_dict = { 'type': 'openstack', 'params': { 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) elif CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs[ 'orchestrator']: role_dict = { 'type': 'orchestrator', 'params': { 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) if host_string in env.roledefs['cfgm']: role_dict = { 'type': 'cfgm', 'params': { 'collector': host_name, 'cassandra': ' '.join(cassandra_host_names) } } if CONTROLLER_TYPE == 'Openstack': if internal_vip: role_dict['openstack'] = 'contrail-vip' else: role_dict['openstack'] = openstack_host_name host_dict['roles'].append(role_dict) # Currently Cloudstack supports all-in-one model alone for contrail hence piggybacking Controller role on to cfgm if CONTROLLER_TYPE == 'Cloudstack': role_dict = { 'type': 'collector', 'params': { 'cassandra': ' '.join(cassandra_host_names) } } host_dict['roles'].append(role_dict) if host_string in env.roledefs['control']: role_dict = { 'type': 'bgp', 'params': { 'collector': cfgm_host_name, 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) if 'database' in env.roledefs.keys( ) and host_string in env.roledefs['database']: role_dict = { 'type': 'database', 'params': { 'cassandra': ' '.join(cassandra_host_names) } } 
host_dict['roles'].append(role_dict) if host_string in env.roledefs['compute']: role_dict = { 'type': 'compute', 'params': { 'collector': cfgm_host_name, 'cfgm': cfgm_host_name } } role_dict['params']['bgp'] = [] if len(env.roledefs['control']) == 1: role_dict['params']['bgp'] = control_host_names else: for control_node in control_host_names: role_dict['params']['bgp'].append(control_node) # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) host_dict['roles'].append(role_dict) if 'collector' in env.roledefs.keys( ) and host_string in env.roledefs['collector']: role_dict = { 'type': 'collector', 'params': { 'cassandra': ' '.join(cassandra_host_names) } } host_dict['roles'].append(role_dict) if 'webui' in env.roledefs.keys( ) and host_string in env.roledefs['webui']: role_dict = {'type': 'webui', 'params': {'cfgm': cfgm_host_name}} host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) # Read ToR config sanity_tor_dict = {} if env.has_key('tor_agent'): sanity_testbed_dict['tor_agent'] = env.tor_agent # Read any tor-host config if env.has_key('tor_hosts'): sanity_testbed_dict['tor_hosts'] = env.tor_hosts # Read any MX config (as physical_router ) if env.has_key('physical_routers'): sanity_testbed_dict['physical_routers'] = env.physical_routers esxi_hosts = getattr(testbed, 'esxi_hosts', None) if esxi_hosts: for esxi in esxi_hosts: host_dict = {} host_dict['ip'] = esxi_hosts[esxi]['ip'] host_dict['data-ip'] = host_dict['ip'] host_dict['control-ip'] = host_dict['ip'] host_dict['name'] = esxi host_dict['username'] = esxi_hosts[esxi]['username'] host_dict['password'] = esxi_hosts[esxi]['password'] if 'contrail_vm' in host_dict: host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm'][ 'host'] host_dict['roles'] = [] sanity_testbed_dict['hosts'].append(host_dict) sanity_testbed_dict['esxi_vms'].append(host_dict) # Adding vip VIP dict 
for HA test setup if CONTROLLER_TYPE == 'Openstack': with settings(host_string=env.roledefs['openstack'][0]): if internal_vip: host_dict = {} # We may have to change it when we have HA support in Cloudstack host_dict['data-ip'] = get_authserver_ip() host_dict['control-ip'] = get_authserver_ip() host_dict['ip'] = get_authserver_ip() host_dict['name'] = 'contrail-vip' with settings(host_string=env.roledefs['cfgm'][0]): host_dict['username'] = host_string.split('@')[0] host_dict['password'] = get_env_passwords(host_string) host_dict['roles'] = [] role_dict = { 'type': 'openstack', 'params': { 'cfgm': cfgm_host_name } } host_dict['roles'].append(role_dict) sanity_testbed_dict['hosts'].append(host_dict) # get host ipmi list if env.has_key('hosts_ipmi'): sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) #get vcenter info if env.has_key('vcenter_servers'): vcenter_info = {} for k in env.vcenter_servers.keys(): vcenter_info[k] = env.vcenter_servers[k] server = {} server[k] = env.vcenter_servers[k] sanity_testbed_dict['vcenter_servers'].append(server) #get sriov info if env.has_key('sriov'): sanity_testbed_dict['sriov'].append(env.sriov) #get dpdk info if env.has_key('dpdk'): sanity_testbed_dict['dpdk'].append(env.dpdk) # for every host_string with settings(host_string=cfgm_host): repo_dir_name = env.test_repo_dir.split('/')[-1] repo_path = get_remote_path(env.test_repo_dir) # generate json file and copy to cfgm sanity_testbed_json = json.dumps(sanity_testbed_dict) stop_on_fail = env.get('stop_on_fail', False) mail_to = env.get('mail_to', '') log_scenario = env.get('log_scenario', 'Sanity') if CONTROLLER_TYPE == 'Cloudstack': stack_password = '******' stack_tenant = 'default-project' admin_user = '******' else: admin_user, admin_password = get_authserver_credentials() admin_tenant = get_admin_tenant_name() # Few hardcoded variables for sanity environment # can be removed once we move to python3 and configparser stack_domain = 'default-domain' webserver_host = 
'10.204.216.50' webserver_user = '******' webserver_password = '******' webserver_log_path = '/home/bhushana/Documents/technical/logs/' webserver_report_path = '/home/bhushana/Documents/technical/sanity' webroot = 'Docs/logs' mail_server = '10.204.216.49' mail_port = '25' fip_pool_name = 'floating-ip-pool' public_virtual_network = 'public' public_tenant_name = 'admin' fixture_cleanup = 'yes' generate_html_report = 'True' key = 'key1' mailSender = '*****@*****.**' use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) orch = getattr(env, 'orchestrator', 'openstack') router_asn = getattr(testbed, 'router_asn', '') public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') ext_routers = getattr(testbed, 'ext_routers', '') router_info = str(ext_routers) test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) if not getattr(env, 'test', None): env.test = {} stack_user = env.test.get('stack_user', None) stack_password = env.test.get('stack_password', None) stack_tenant = env.test.get('stack_tenant', None) tenant_isolation = env.test.get('tenant_isolation', None) webui = getattr(testbed, 'webui', False) horizon = getattr(testbed, 'horizon', False) ui_config = getattr(testbed, 'ui_config', False) ui_browser = getattr(testbed, 'ui_browser', False) if 'mail_server' in env.keys(): mail_server = env.mail_server mail_port = env.mail_port vcenter_dc = '' if orch in ['vcenter', 'vcenter_gateway']: public_tenant_name = 'vCenter' if env.has_key('vcenter_servers'): if env.vcenter_servers: for k in env.vcenter_servers: vcenter_dc = env.vcenter_servers[k]['datacenter'] sanity_params = sanity_ini_templ.safe_substitute({ '__testbed_json_file__': 'sanity_testbed.json', '__nova_keypair_name__': key, '__orch__': orch, '__admin_user__': admin_user, '__admin_password__': admin_password, '__auth_ip__': get_authserver_ip(), '__auth_port__': get_authserver_port(), '__admin_tenant__': admin_tenant, 
'__stack_domain__': stack_domain, '__multi_tenancy__': get_mt_enable(), '__address_family__': get_address_family(), '__log_scenario__': log_scenario, '__generate_html_report__': generate_html_report, '__fixture_cleanup__': fixture_cleanup, '__webserver__': webserver_host, '__webserver_user__': webserver_user, '__webserver_password__': webserver_password, '__webserver_log_dir__': webserver_log_path, '__webserver_report_dir__': webserver_report_path, '__webroot__': webroot, '__mail_server__': mail_server, '__mail_port__': mail_port, '__sender_mail_id__': mailSender, '__receiver_mail_id__': mail_to, '__http_proxy__': env.get('http_proxy', ''), '__ui_browser__': ui_browser, '__ui_config__': ui_config, '__horizon__': horizon, '__webui__': webui, '__devstack__': devstack_flag, '__public_vn_rtgt__': public_vn_rtgt, '__router_asn__': router_asn, '__router_name_ip_tuples__': router_info, '__public_vn_name__': fip_pool_name, '__public_virtual_network__': public_virtual_network, '__public_tenant_name__': public_tenant_name, '__public_vn_subnet__': public_vn_subnet, '__test_revision__': revision, '__fab_revision__': fab_revision, '__test_verify_on_setup__': test_verify_on_setup, '__stop_on_fail__': stop_on_fail, '__ha_setup__': getattr(testbed, 'ha_setup', ''), '__ipmi_username__': getattr(testbed, 'ipmi_username', ''), '__ipmi_password__': getattr(testbed, 'ipmi_password', ''), '__vcenter_dc__': vcenter_dc, '__vcenter_server__': get_vcenter_ip(), '__vcenter_port__': get_vcenter_port(), '__vcenter_username__': get_vcenter_username(), '__vcenter_password__': get_vcenter_password(), '__vcenter_datacenter__': get_vcenter_datacenter(), '__vcenter_compute__': get_vcenter_compute(), '__use_devicemanager_for_md5__': use_devicemanager_for_md5, '__stack_user__': stack_user, '__stack_password__': stack_password, '__stack_tenant__': stack_tenant, '__tenant_isolation__': tenant_isolation, }) fd, fname = tempfile.mkstemp() of = os.fdopen(fd, 'w') of.write(sanity_testbed_json) of.close() 
put(fname, "%s/sanity_testbed.json" % (repo_path), use_sudo=True) local("cp %s %s/sanity_testbed.json" % (fname, env.test_repo_dir)) os.remove(fname) fd, fname = tempfile.mkstemp() of = os.fdopen(fd, 'w') of.write(sanity_params) of.close() put(fname, "%s/sanity_params.ini" % (repo_path), use_sudo=True) local("cp %s %s/sanity_params.ini" % (fname, env.test_repo_dir)) os.remove(fname) pkg = "" if CONTROLLER_TYPE == 'Cloudstack': with settings(warn_only=True): sudo('python-pip install fixtures testtools fabric') else: with settings(warn_only=True): run('rm -rf /tmp/pip-build-root') if detect_ostype() in ['centos', 'redhat', 'centoslinux']: sudo('yum -y install python-pip') pkg = 'fixtures==1.0.0 testtools==1.7.1 testresources==0.2.7 discover \ testrepository junitxml pytun requests==2.3.0 pyvmomi==5.5.0 eventlet \ tabulate' elif 'ubuntu' == detect_ostype(): pkg = 'fixtures==1.0.0 testtools==1.7.1 testresources==0.2.7 \ testrepository junitxml pytun pyvmomi==5.5.0 eventlet tabulate ' output = sudo('pip show requests | grep Version') if output.succeeded: version = output.split(':')[1].translate( None, whitespace) if version <= 2.3: if (LooseVersion(version) < LooseVersion('2.3.0')): pkg += ' requests==2.3.0' if os.environ.has_key('GUESTVM_IMAGE'): pkg = pkg + ' pexpect' if ui_browser: pkg = pkg + ' pyvirtualdisplay selenium' if exists('/opt/contrail/api-venv/bin/activate'): sudo('source /opt/contrail/api-venv/bin/activate && \ pip install --upgrade unittest2 && \ pip install --upgrade %s' % pkg) else: # Avoid installing linecache2 as dependency on unittest2 # Avoid "TypeError: dist must be a Distribution instance" sudo("pip install linecache2") sudo("pip install --upgrade unittest2") sudo("pip install --upgrade %s" % pkg) sudo("pip install --upgrade easyprocess") if not exists('/usr/bin/ant'): pkg_install(['ant'], disablerepo=False) ant_version = sudo('ant -version') if ('1.7' in ant_version): pkg_install(['ant-junit', 'ant-trax'], disablerepo=False) if ('1.9' in 
ant_version): pkg_install(['ant-junit'], disablerepo=False) pkg_install([ 'patch', 'python-heatclient', 'python-ceilometerclient', 'python-setuptools' ], disablerepo=False) # On centos, junos-eznc install requires devel pkgs of libxml2 and libxslt if detect_ostype() in ['redhat', 'centos', 'centoslinux']: pkg_install(['libxslt-devel', 'libxml2-devel'], disablerepo=False) sudo('pip install paramiko==1.17.0') sudo('pip install junos-eznc==1.2.2') #Restart DM. This is because of #1490860 sudo('service contrail-device-manager restart') for host_string in env.roledefs['compute']: with settings(host_string=host_string): #pkg_install(['python-setuptools', 'python-pkg-resources', 'python-ncclient'],disablerepo = False) pkg_install(['python-setuptools', 'python-ncclient'], disablerepo=False) if detect_ostype() in ['centos', 'centoslinux', 'redhat']: sudo("yum -y install tcpdump")
def setup_test_env():
    """Prepare the contrail-test sanity environment on the first cfgm node.

    Builds a testbed description (sanity_testbed.json) and a parameter file
    (sanity_params.ini, rendered from sanity_params.ini.sample via
    string.Template) out of the fab ``env``/``testbed`` definitions, copies
    both onto the cfgm host, and installs the python/ant packages the test
    suite needs.  Pure fabric orchestration: all inputs come from the global
    ``env``/``testbed`` objects and all work is done over ssh
    (run/sudo/put/local); there is no return value.
    """
    cfgm_host = env.roledefs['cfgm'][0]
    cfgm_ip = hstr_to_ip(cfgm_host)

    # Figure out which revision of the test code we are running from.
    # When the local checkout is not a git repo we are on a deployed node;
    # fall back to the build VERSION file for both revision strings.
    with settings(warn_only=True):
        is_git_repo = local('git branch').succeeded
    if not is_git_repo:
        with settings(host_string=cfgm_host):
            build_id = sudo('cat /opt/contrail/contrail_packages/VERSION')
        fab_revision = build_id
        revision = build_id
        print "Testing from the CFGM."
    else:
        with settings(warn_only=True):
            fab_revision = local('git log --format="%H" -n 1', capture=True)
            if CONTROLLER_TYPE == 'Cloudstack':
                revision = local('cat %s/.git/refs/heads/cs_sanity' % env.test_repo_dir, capture=True)
            else:
                with lcd(env.test_repo_dir):
                    revision = local('git log --format="%H" -n 1', capture=True)
        # Push the local test repo onto the cfgm when cfgm is not the build node.
        if not env.roledefs['build'][0] == cfgm_host:
            execute(copy_dir, env.test_repo_dir, cfgm_host)

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'hosts_ipmi': []
    }

    # sanity_params.ini is generated from the sample template shipped with
    # the test repo; the '__xxx__' placeholders are substituted below.
    sample_ini_file = env.test_repo_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
        contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    # Resolve the hostnames of the openstack / cfgm / control / database roles.
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            openstack_host_name = sudo("hostname")
    elif CONTROLLER_TYPE == 'Cloudstack':
        openstack_host_name = None

    with settings(host_string = env.roledefs['cfgm'][0]):
        cfgm_host_name = sudo("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host):
            host_name = sudo("hostname")
            control_host_names.append(host_name)

    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host):
                host_name = sudo("hostname")
                cassandra_host_names.append(host_name)

    internal_vip = get_openstack_internal_vip()

    # Build one host entry (addresses, credentials, role list) per host.
    for host_string in env.roledefs['all']:
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string):
            host_name = sudo("hostname")

        host_dict = {}
        # We may have to change it when we have HA support in Cloudstack
        host_dict['ip'] = "127.0.0.1" if (CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['control']) else host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        # NOTE(review): this branch re-assigns the exact same value and looks
        # like dead code -- confirm intent before cleaning it up.
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]

        host_dict['name'] = host_name
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] = env.passwords[host_string]
        host_dict['roles'] = []

        # With an internal VIP the openstack role is represented by the
        # synthetic 'contrail-vip' host appended after this loop instead.
        if not internal_vip:
            if CONTROLLER_TYPE == 'Openstack' and host_string in env.roledefs['openstack']:
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
            elif CONTROLLER_TYPE == 'Cloudstack' and host_string in env.roledefs['orchestrator']:
                role_dict = {'type': 'orchestrator', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}
            if CONTROLLER_TYPE == 'Openstack':
                if internal_vip:
                    role_dict['openstack'] = 'contrail-vip'
                else:
                    role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)
            # Currently Cloudstack supports all-in-one model alone for contrail hence piggybacking Controller role on to cfgm
            if CONTROLLER_TYPE == 'Cloudstack':
                role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
                host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            # Every compute peers with the full list of control nodes.
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
#                role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)

    if env.has_key('vgw'):
        sanity_testbed_dict['vgw'].append(env.vgw)

    # Adding vip VIP dict for HA test setup
    if CONTROLLER_TYPE == 'Openstack':
        with settings(host_string = env.roledefs['openstack'][0]):
            if internal_vip:
                host_dict = {}
                # We may have to change it when we have HA support in Cloudstack
                host_dict['data-ip']= get_keystone_ip()
                host_dict['control-ip']= get_keystone_ip()
                host_dict['ip']= get_keystone_ip()
                host_dict['name'] = 'contrail-vip'
                # Borrow the cfgm credentials for the synthetic VIP host.
                with settings(host_string = env.roledefs['cfgm'][0]):
                    host_dict['username'] = host_string.split('@')[0]
                    host_dict['password'] = env.passwords[host_string]
                host_dict['roles'] = []
                role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
                host_dict['roles'].append(role_dict)
                sanity_testbed_dict['hosts'].append(host_dict)

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    # for every host_string
    with settings(host_string = cfgm_host):
        # NOTE(review): repo_dir_name appears unused in this function.
        repo_dir_name = env.test_repo_dir.split('/')[-1]
        repo_path= get_remote_path(env.test_repo_dir)

        # generate json file and copy to cfgm
        sanity_testbed_json = json.dumps(sanity_testbed_dict)

        stop_on_fail = env.get('stop_on_fail', False)
        mail_to = env.get('mail_to', '')
        log_scenario = env.get('log_scenario', 'Sanity')
        if CONTROLLER_TYPE == 'Cloudstack':
            stack_password= '******'
            stack_tenant= 'default-project'
            stack_user= '******'
        else:
            stack_user= get_keystone_admin_user()
            stack_password = get_keystone_admin_password()
            stack_tenant = get_keystone_admin_tenant_name()

        # Few hardcoded variables for sanity environment
        # can be removed once we move to python3 and configparser
        stack_domain = 'default-domain'
        webserver_host = '10.204.216.50'
        webserver_user = '******'
        webserver_password = '******'
        webserver_log_path = '/home/bhushana/Documents/technical/logs/'
        webserver_report_path = '/home/bhushana/Documents/technical/sanity'
        webroot = 'Docs/logs'
        mail_server = '10.204.216.49'
        mail_port = '25'
        fip_pool_name = 'floating-ip-pool'
        public_virtual_network='public'
        public_tenant_name='admin'
        fixture_cleanup = 'yes'
        generate_html_report = 'True'
        key = 'key1'
        mailSender = '*****@*****.**'

        router_asn = getattr(testbed, 'router_asn', '')
        public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
        public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
        ext_routers = getattr(testbed, 'ext_routers', '')
        router_info = str(ext_routers)
        test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
        webui = getattr(testbed, 'webui', False)
        horizon = getattr(testbed, 'horizon', False)
        ui_config = getattr(testbed, 'ui_config', False)
        ui_browser = getattr(testbed, 'ui_browser', False)

        # testbed-level mail settings override the hardcoded defaults above.
        if 'mail_server' in env.keys():
            mail_server = env.mail_server
            mail_port = env.mail_port

        # Substitute every placeholder of sanity_params.ini.sample.
        sanity_params = sanity_ini_templ.safe_substitute(
            {'__testbed_json_file__'   : 'sanity_testbed.json',
             '__nova_keypair_name__'   : key,
             '__stack_user__'          : stack_user,
             '__stack_password__'      : stack_password,
             '__stack_tenant__'        : stack_tenant,
             '__stack_domain__'        : stack_domain,
             '__keystone_ip__'         : get_keystone_ip(),
             '__multi_tenancy__'       : get_mt_enable(),
             '__address_family__'      : get_address_family(),
             '__log_scenario__'        : log_scenario,
             '__generate_html_report__': generate_html_report,
             '__fixture_cleanup__'     : fixture_cleanup,
             '__webserver__'           : webserver_host,
             '__webserver_user__'      : webserver_user,
             '__webserver_password__'  : webserver_password,
             '__webserver_log_dir__'   : webserver_log_path,
             '__webserver_report_dir__': webserver_report_path,
             '__webroot__'             : webroot,
             '__mail_server__'         : mail_server,
             '__mail_port__'           : mail_port,
             '__sender_mail_id__'      : mailSender,
             '__receiver_mail_id__'    : mail_to,
             '__http_proxy__'          : env.get('http_proxy', ''),
             '__ui_browser__'          : ui_browser,
             '__ui_config__'           : ui_config,
             '__horizon__'             : horizon,
             '__webui__'               : webui,
             '__devstack__'            : devstack_flag,
             '__public_vn_rtgt__'      : public_vn_rtgt,
             '__router_asn__'          : router_asn,
             '__router_name_ip_tuples__': router_info,
             '__public_vn_name__'      : fip_pool_name,
             '__public_virtual_network__':public_virtual_network,
             '__public_tenant_name__'  :public_tenant_name,
             '__public_vn_subnet__'    : public_vn_subnet,
             '__test_revision__'       : revision,
             '__fab_revision__'        : fab_revision,
             '__test_verify_on_setup__': test_verify_on_setup,
             '__stop_on_fail__'        : stop_on_fail,
             '__ha_setup__'            : getattr(testbed, 'ha_setup', ''),
             '__ipmi_username__'       : getattr(testbed, 'ipmi_username', ''),
             '__ipmi_password__'       : getattr(testbed, 'ipmi_password', '')
            })

        # Write the generated json/ini to temp files, push them to the cfgm
        # and keep a local copy beside the test repo.
        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_testbed_json)
        of.close()
        put(fname, "%s/sanity_testbed.json" %(repo_path), use_sudo=True)
        local ("cp %s %s/sanity_testbed.json" %(fname, env.test_repo_dir))
        os.remove(fname)

        fd, fname = tempfile.mkstemp()
        of = os.fdopen(fd, 'w')
        of.write(sanity_params)
        of.close()
        put(fname, "%s/sanity_params.ini" %(repo_path), use_sudo=True)
        local ("cp %s %s/sanity_params.ini" %(fname, env.test_repo_dir))
        os.remove(fname)

        # Install the python and ant dependencies of the test suite on cfgm.
        pkg = ""
        if CONTROLLER_TYPE == 'Cloudstack':
            with settings(warn_only = True):
                sudo('python-pip install fixtures testtools fabric')
        else:
            # Stale pip build dirs can break the upgrade below.
            with settings(warn_only = True):
                run('rm -rf /tmp/pip-build-root')
            if detect_ostype() in ['centos', 'redhat']:
                pkg = 'fixtures testtools==1.1.0 testresources discover unittest2 \
                       testrepository junitxml pytun'
            elif 'ubuntu' == detect_ostype():
                pkg = 'fixtures testtools==1.1.0 testresources\
                       testrepository junitxml pytun'
            if os.environ.has_key('GUESTVM_IMAGE'):
                pkg = pkg + ' pexpect'
            if ui_browser:
                pkg = pkg + ' pyvirtualdisplay selenium'
            # Prefer the contrail api virtualenv when it exists.
            if exists('/opt/contrail/api-venv/bin/activate'):
                sudo('source /opt/contrail/api-venv/bin/activate && \
                      pip install --upgrade %s' %pkg)
            else:
                sudo("pip install --upgrade %s" %pkg)
            # ant + the junit/trax adapters matching the installed ant version.
            if not exists('/usr/bin/ant'):
                pkg_install(['ant'],disablerepo = False)
            ant_version = sudo('ant -version')
            if ('1.7' in ant_version):
                pkg_install(['ant-junit' , 'ant-trax'] , disablerepo = False)
            if ('1.9' in ant_version):
                pkg_install(['ant-junit'] , disablerepo = False)
            pkg_install(['patch', 'python-heatclient'],disablerepo = False)

    # tcpdump is used by the traffic tests on every compute node.
    for host_string in env.roledefs['compute']:
        with settings(host_string=host_string):
            if 'centos' == detect_ostype():
                sudo("yum -y --disablerepo=* --enablerepo=contrail_install_repo install tcpdump")
            if 'redhat' == detect_ostype():
                sudo("yum -y install tcpdump")