def setUp(self):
    """Set up the Context attribute and clone the latest ccs-build-tools repo."""
    self.ctx = Context()
    returncode, username = helper_utils.get_gitusername(self.ctx.path)
    if returncode > 0:
        # Fall back to the local login name when no git username is configured
        username = helper_utils.get_loginusername()
    service_utils.sync_service(self.ctx.path, "master", username,
                               "ccs-build-tools")
Example #2
def setUp(self):
     """
     Build site data needed to test both vagrant.yaml and vm host.yaml creation
     """
     self.site = 'ccs-dev-1'
     self.tenant = 'dev-tenant'
     self.hosts_path = os.path.join(self.ctx.path, 'services', 'ccs-data',
                                    'sites', self.site, 'environments',
                                    self.tenant, 'hosts.d')
     self.subnet = '192.168.100.0/24'
     self.addr_subnet = ipaddress.IPv4Network(unicode(self.subnet))
     os.environ['OS_USERNAME'] = '******'
     os.environ['OS_PASSWORD'] = '******'
     os.environ['OS_REGION_NAME'] = 'us-rdu-3'
     os.environ['OS_AUTH_URL'] = 'https://us-rdu-3.cisco.com:5000/v2.0/'
     os.environ['OS_TENANT_NAME'] = 'jenkins-slab'
     os.environ['OS_TENANT_ID'] = 'dc4b64c3ddcc4ce5abbddd43a24b1b0a'
     # Preserve existing data
     self.vagrant_yaml = os.path.join(self.ctx.path, 'vagrant.yaml')
     if os.path.isfile(self.vagrant_yaml):
         self.vagrant_bak = os.path.join(self.ctx.path, 'vagrant.bak')
         os.rename(self.vagrant_yaml, self.vagrant_bak)
     self.Vagrant_file = os.path.join(self.ctx.path, 'Vagrantfile')
     if os.path.isfile(self.Vagrant_file):
         self.Vagrant_bak = os.path.join(self.ctx.path, 'Vagrantfile.bak')
         os.rename(self.Vagrant_file, self.Vagrant_bak)
     self.dotvagrant_dir = os.path.join(self.ctx.path, '.vagrant')
     if os.path.isdir(self.dotvagrant_dir):
         self.dotvagrant_bak = os.path.join(self.ctx.path, '.vagrant_bak')
         os.rename(self.dotvagrant_dir, self.dotvagrant_bak)
     if not os.path.isdir(
             os.path.join(self.ctx.path, 'services', 'ccs-data')):
         service_utils.sync_service(self.ctx.path, 'master',
                                    self.ctx.username, 'ccs-data')
     env_path = os.path.join(self.ctx.path, 'services', 'ccs-data', 'sites',
                             self.site, 'environments', self.tenant)
     self.hosts_path = os.path.join(env_path, 'hosts.d')
     self.backup_path = os.path.join(env_path, 'hosts.bak')
     os.makedirs(self.backup_path)
     for f in os.listdir(self.hosts_path):
         file_name = os.path.join(self.hosts_path, f)
         file_bak = os.path.join(self.backup_path, f)
         os.rename(file_name, file_bak)
     # Generate files to consume IPs .5 - .14
     for i in range(1, 11):
         hostname = 'service-holder-' + str(i).zfill(3) + '.yaml'
         output_file = os.path.join(self.hosts_path, hostname)
         file_data = {
             'interfaces': {
                 'eth0': {
                     'ip_address':
                     str(self.addr_subnet.network_address + 4 + i),
                 },
             },
         }
         with open(output_file, 'w') as outfile:
             outfile.write(yaml.dump(file_data, default_flow_style=False))
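
For illustration, the first pass of the loop above (i = 1) writes hosts.d/service-holder-001.yaml holding the subnet's .5 address; with default_flow_style=False the dumped file contains:

interfaces:
  eth0:
    ip_address: 192.168.100.5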
Example #3
def cli(ctx, branch, data_branch, username, service_name):
    """
    Clones a service repo that the user wants to work on.
    """
    slab_logger.info('Cloning service %s' % service_name)
    current = ""
    if not username:
        username = ctx.get_username()
    repo_name = service_name
    if os.path.isfile(os.path.join(ctx.path, "current")):
        current_file = os.path.join(ctx.path, "current")
        with open(current_file, 'r') as cfile:
            # Strip the trailing newline so comparisons against service_name work
            current = cfile.readline().strip()
            # todo: verify that current is set to something sane.

        returncode = service_utils.check_service(ctx.path, service_name)
        if returncode > 0:
            slab_logger.error("Gerrit repo %s does not exist" % service_name)
            sys.exit(1)

        if current == any([None, ""]) and (service_name == "current"):
            slab_logger.error("No service set on command line nor the "
                              "current(literally) file.")
            sys.exit(1)
        # Keeps the repo_name set to service_name
        elif current == any([None, ""]) and (service_name != "current"):
            pass
        # Note: variable current and string current
        elif service_name != current and service_name != "current":
            service_utils.clean(ctx.path)
        else:
            # Note: notice we're passing the variable current not service_name.
            repo_name = current

    returncode = service_utils.sync_service(ctx.path, branch, username, repo_name)
    if not returncode:
        slab_logger.error('Unable to sync %s repo' % service_name)
        sys.exit(1)

    returncode = service_utils.link(ctx.path, service_name, branch, username)
    if not returncode == 0:
        slab_logger.error('Unable to link %s repo' % service_name)
        sys.exit(1)

    returncode, output = service_utils.setup_vagrant_sshkeys(ctx.path)
    if not returncode == 0:
        slab_logger.error('Failed to generate ssh keys:\n%s' % output)
        sys.exit(1)

    if not service_name == 'ccs-data':
        returncode = service_utils.sync_service(ctx.path, data_branch, username, 'ccs-data')
        if not returncode:
            slab_logger.error('Unable to sync ccs-data repo')
            sys.exit(1)
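
Taken together with the test_clean example further down, this appears to be the handler behind the "stack workon" command referenced in these snippets (e.g. stack workon service-horizon). The state it maintains under the .stack path looks roughly like this (layout inferred from the test_clean assertions and the .stack/services paths used above):

.stack/
    current            # plain file naming the active service
    current_service    # symlink managed by service_utils.link
    services/
        ccs-data/
        service-horizon/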
Example #4
    def cmd_up_runner(self, args, hostname, group, remote):
        """
        Run the 'stack up' command for each of the various tests

        Args:
            args {list}: CLI args
            hostname {str}: Name of the vm
            group {str}: Name of the group the vm belongs to
            remote {bool}: True - OpenStack hypervisor
                           False - VirtualBox hypervisor

        Returns:
            Nothing.  Runs tests based on the environment setup

        Example Usage:
            self.cmd_up_runner(['--rhel7'], 'rhel7-001', 'rhel7', False)
        """
        if not group == 'rhel7':
            repo_name = str('service-' + group)
            if not os.path.isdir(
                    os.path.join(self.ctx.path, 'services', repo_name)):
                service_utils.sync_service(self.ctx.path, 'master',
                                           self.ctx.username, repo_name)
        runner = CliRunner()
        host_yaml = hostname + '.yaml'
        result = runner.invoke(cmd_up.cli, args)
        if result.exit_code > 0:
            return
        vm_yaml_file = os.path.join(self.hosts_path, host_yaml)
        self.assertTrue(os.path.isfile(vm_yaml_file))
        with open(vm_yaml_file, 'r') as yaml_file:
            vm_yaml_data = yaml.load(yaml_file)
        self.assertEqual(vm_yaml_data['groups'][1], group)
        with open(self.vagrant_yaml, 'r') as vagrant_f:
            vagrant_data = yaml.load(vagrant_f)
        # setUp consumed .5 - .14, so the new vm should get the .15 address
        expected_ip = '192.168.100.15'
        self.assertEqual(vm_yaml_data['interfaces']['eth0']['ip_address'],
                         expected_ip)
        self.assertEqual(vagrant_data['hosts'][hostname]['ip'], expected_ip)
        if remote:
            hypervisor = "OpenStack"
        else:
            hypervisor = "VirtualBox"
        ispoweron, isremote = vagrant_utils.vm_isrunning(
            hostname, self.ctx.path)
        if ispoweron > 1:
            print('Unable to contact %s for VM status' % hypervisor)
        elif ispoweron == 1:
            print('VM is offline in %s' % hypervisor)
        else:
            self.assertEqual(ispoweron, 0)
            self.assertEqual(isremote, remote)
Example #5
    def test_clean(self):
        """
        Test the clean function, which deletes the current file and unlinks current_service

        Clone a repo, symlink it to current_service, and set it as current
        Ensure the current file and current_service symlink exist
        Ensure the current file and current_service symlink are removed after clean
        """
        sync_service_return = service_utils.sync_service(
            path=self.temp_dir,
            branch='master',
            username=self.username,
            service_name='service-horizon')
        link_return = service_utils.link(path=self.temp_dir,
                                         service_name='service-horizon',
                                         branch='master',
                                         username=self.username)
        self.assertEqual(link_return, 0)
        self.assertTrue(
            os.path.islink(os.path.join(self.temp_dir, 'current_service')))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, 'current')))

        service_utils.clean(self.temp_dir)
        self.assertFalse(
            os.path.islink(os.path.join(self.temp_dir, 'current_service')))
        self.assertFalse(os.path.isfile(os.path.join(self.temp_dir,
                                                     'current')))
Example #6
    def test_sync_service(self):
        """
        Test the sync_service function

        Ensure that a git repo is successfully synced
        Ensure that a nonexistent git repo is not successfully synced
        """
        sync_service_out = service_utils.sync_service(
            path=self.temp_dir,
            branch='master',
            username=self.username,
            service_name='service-horizon')
        self.assertEqual(sync_service_out, True)

        sync_service_out = service_utils.sync_service(
            path=self.temp_dir,
            branch='master',
            username=self.username,
            service_name='service-fakeservice')
        self.assertEqual(sync_service_out, False)
Example #7
    def test_installed(self):
        """
        Test the installed function, which checks if the specified repo is cloned locally

        Ensure failure with fake repo
        Ensure success with valid repo
        """
        sync_service_return = service_utils.sync_service(
            path=self.temp_dir,
            branch='master',
            username=self.username,
            service_name='service-horizon')
        link_return = service_utils.link(
            path=self.temp_dir,
            service_name='service-horizon',
            branch='master',
            username=self.username)
        installed_return = service_utils.installed('fake-service',
                                                   self.temp_dir)
        self.assertFalse(installed_return)

        installed_return = service_utils.installed('service-horizon',
                                                   self.temp_dir)
        self.assertTrue(installed_return)
Example #8
def cli(ctx, full, mini, rhel7, target, service, remote, ha, redhouse_branch,
        data_branch, service_branch, username, interactive, existing_vm, env,
        flavor, image, nfs):
    flavor = str(flavor)
    image = str(image)
    service_groups = []
    # Things the user Should not do ==================================
    if mini is True and full is True:
        slab_logger.error('You cannot use the mini flag with the full flag.')
        sys.exit(1)

    # Gather as many requirements as possible for the user ===========
    if not username:
        slab_logger.log(15, 'Extracting username')
        username = ctx.get_username()

    if not any([full, mini, rhel7, target, service, existing_vm]):
        slab_logger.info("Booting vm from most recently installed service")
        try:
            returncode, service = helper_utils.get_current_service(ctx.path)
        except TypeError:
            slab_logger.error("Could not get the current service.")
            slab_logger.error("Try: stack workon service-myservice")
            sys.exit(1)
        if returncode > 0:
            slab_logger.error("Failed to get the current service")
            sys.exit(1)

    if env is not None:
        env_json = json.loads(env)
        if 'CCS_ENVIRONMENT' in env_json:
            os.environ['CCS_ENVIRONMENT'] = env_json['CCS_ENVIRONMENT']
        if 'HEIGHLINER_DEPLOY_TAG' in env_json:
            os.environ['HEIGHLINER_DEPLOY_TAG'] = env_json[
                'HEIGHLINER_DEPLOY_TAG']
        if 'HEIGHLINER_DEPLOY_TARGET_HOSTS' in env_json:
            val = env_json['HEIGHLINER_DEPLOY_TARGET_HOSTS']
            os.environ['HEIGHLINER_DEPLOY_TARGET_HOSTS'] = val

    slab_logger.log(15, 'Determining vm hostname')
    hostname = ''
    if rhel7:
        hostname = str(helper_utils.name_vm("rhel7", ctx.path))
    elif service:
        if not service_utils.installed(service, ctx.path):
            slab_logger.error("{0} is not in the .stack/services/ directory.\n"
                              "Try: stack workon {0}".format(service))
            sys.exit(1)
        hostname = str(helper_utils.name_vm(service, ctx.path))
    elif target:
        hostname = target
    elif existing_vm:
        hostname = existing_vm
        ret_code, site = ccsdata_utils.get_site_from_env(
            env_json['CCS_ENVIRONMENT'])
        if ret_code > 0:
            slab_logger.error("Could not find parent site for "
                              "{}".format(env_json['CCS_ENVIRONMENT']))
            sys.exit(1)
        env_path = os.path.join(ctx.path, 'services', 'ccs-data', 'sites',
                                site, 'environments',
                                env_json['CCS_ENVIRONMENT'])
        ret_code, yaml_data = yaml_utils.read_host_yaml(existing_vm, env_path)
        if ret_code > 0:
            slab_logger.error("Could not find host in site {0}"
                              " env {1}".format(site,
                                                env_json['CCS_ENVIRONMENT']))
            sys.exit(1)
        try:
            flavor = yaml_data['deploy_args']['flavor']
        except KeyError:
            slab_logger.warning(
                'Unable to find flavor for %s, using default flavor' %
                hostname)
        service_groups = []
        groups = []
        try:
            for group in yaml_data['groups']:
                if group != 'virtual':
                    groups.append(group)
                    service_group = 'service-' + group.replace('_', '-')
                    if os.path.isdir(
                            os.path.join(ctx.path, 'services', service_group)):
                        service_groups.append(service_group)
        except KeyError:
            pass  # can pass, vm has no groups
        if groups:
            slab_logger.log(
                25, '\nThe following groups were found within %s yaml file: ' %
                hostname)
            for group in groups:
                slab_logger.log(25, group)
            if not service_groups:
                slab_logger.log(
                    25, '\nNo service groups were found locally installed')
            else:
                slab_logger.log(
                    25, '\nThe following service groups were found installed '
                    'locally:')
                for service in service_groups:
                    slab_logger.log(25, service)
            input_display = (
                '\nAre the locally installed service groups the expected '
                'groups to be installed on %s? y/n: ' % hostname)
            if not re.search('^[Yy][Ee]*[Ss]*', raw_input(input_display)):
                slab_logger.log(
                    25, 'Try "stack workon service-<group>" for each to be '
                    'installed and rerun the "stack up --existing-vm" command')
                sys.exit(0)
        else:
            slab_logger.warning(
                'No groups were found for %s.  Continuing to build the VM.' %
                hostname)

    # Setup data and inventory
    if not target and not mini and not full:
        # Flavors look like '<cpus>cpu.<ram>ram', e.g. '2cpu.4ram'.
        # The fallback values below are assumed defaults so that cpus/memory
        # are always defined when the flavor string does not parse.
        cpus = 2
        memory = 4
        match = re.search('^(\d+)cpu\.(\d+)ram', flavor)
        if match:
            cpus = int(match.group(1))
            memory = int(match.group(2)) * 2
        yaml_utils.host_add_vagrantyaml(ctx.path,
                                        "vagrant.yaml",
                                        hostname,
                                        "ccs-dev-1",
                                        memory=memory,
                                        cpus=cpus)
        if not service_groups:
            yaml_utils.write_dev_hostyaml_out(ctx.path,
                                              hostname,
                                              flavor=flavor,
                                              image=image)
        else:
            yaml_utils.write_dev_hostyaml_out(ctx.path,
                                              hostname,
                                              flavor=flavor,
                                              image=image,
                                              groups=service_groups)

        slab_logger.info('Building data for %s.' % hostname)
        if service or existing_vm or rhel7:
            retc, myinfo = service_utils.build_data(ctx.path)
            if retc > 0:
                slab_logger.error('Error building ccs-data ccs-dev-1: ' +
                                  myinfo)
                sys.exit(1)

        # Prep class Objects
        myvfile = Vf_utils.SlabVagrantfile(path=ctx.path, remote=remote)
        if not os.path.exists(os.path.join(ctx.path, 'Vagrantfile')):
            myvfile.init_vagrantfile()
        myvag_env = v_utils.Connect_to_vagrant(vm_name=hostname, path=ctx.path)

        # Setup Vagrantfile w/ vm
        my_sec_grps = ""
        if remote:
            returncode, float_net, mynets, my_sec_grps = os_utils.os_ensure_network(
                ctx.path)
            if returncode > 0:
                slab_logger.error("No OS_ environment variables found")
                sys.exit(1)
            myvfile.set_env_vars(float_net, mynets, my_sec_grps)
            returncode, host_dict = yaml_utils.gethost_byname(
                hostname, ctx.path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to get the requested host from your Vagrant.yaml')
                sys.exit(1)
            myvfile.add_openstack_vm(host_dict)
        else:
            returncode, host_dict = yaml_utils.gethost_byname(
                hostname, ctx.path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to get the requested host from your Vagrant.yaml')
                sys.exit(1)
            if myvfile.add_virtualbox_vm(host_dict, ctx.path, nfs) != 0:
                ctx.logger.error('Unable to create a local virtual box vm')
                sys.exit(1)

        # Get vm running
        myvag_env.v.up(vm_name=hostname)
        returncode, myinfo = service_utils.run_this('vagrant hostmanager',
                                                    ctx.path)
        if returncode > 0:
            # Second chance.
            returncode, myinfo = service_utils.run_this(
                'vagrant hostmanager '
                '--provider openstack', ctx.path)
            if returncode > 0:
                slab_logger.error("Could not run vagrant hostmanager because\
                                 {0}".format(myinfo))
                slab_logger.error("Vagrant hostmanager will fail if you "
                                  "have local vms and remote vms.")
                sys.exit(1)
        # You can exit safely now if you're just booting a rhel7 vm
        if rhel7:
            sys.exit(0)

    # SERVICE VM remaining workflow  =================================
    if service or existing_vm:
        slab_logger.info('Booting service and infra_node vms')
        if remote:
            returncode, infra_name = v_utils.infra_ensure_up(mynets,
                                                             float_net,
                                                             my_sec_grps,
                                                             nfs,
                                                             path=ctx.path)
            if returncode == 1:
                slab_logger.error("Could not boot a remote infra node")
                sys.exit(1)
        else:
            returncode, infra_name = v_utils.infra_ensure_up(None,
                                                             None,
                                                             None,
                                                             nfs,
                                                             path=ctx.path)
            if returncode == 1:
                slab_logger.error("Could not boot a local infra node")
                sys.exit(1)

        returncode, myinfo = service_utils.run_this('vagrant hostmanager',
                                                    ctx.path)
        if returncode > 0:
            returncode, myinfo = service_utils.run_this(
                'vagrant hostmanager '
                '--provider openstack', ctx.path)
            if returncode > 0:
                slab_logger.error("Could not run vagrant hostmanager because\
                                 {0}".format(myinfo))
                slab_logger.error(
                    "Vagrant manager will fail if you have local vms"
                    "and remote vms.")
                sys.exit(1)

        command = (
            'vagrant ssh {0} -c \"cd /opt/ccs/services/{1}/ && sudo heighliner '
            '--dev --debug deploy\"')

        if service:
            returncode, myinfo = service_utils.run_this(
                command.format(infra_name, service), ctx.path)
            if returncode > 0:
                slab_logger.error(
                    "There was a failure during the heighliner deploy phase of"
                    " your service. Please see the following information "
                    "for debugging: ")
                slab_logger.error(myinfo)
                sys.exit(1)
            else:
                sys.exit(0)
        else:  # will only match if existing_vm
            for service in service_groups:
                returncode, myinfo = service_utils.run_this(
                    command.format(infra_name, service), ctx.path)
                if returncode > 0:
                    slab_logger.error(
                        "There was a failure during the heighliner deploy "
                        "phase of your service. Please see the following "
                        "information for debugging: ")
                    slab_logger.error(myinfo)
                    sys.exit(1)
            sys.exit(0)
    elif target:
        slab_logger.info('Booting target service vm')
        redhouse_ten_path = os.path.join(ctx.path, 'services',
                                         'service-redhouse-tenant')
        service_utils.sync_service(ctx.path, redhouse_branch, username,
                                   "service-redhouse-tenant")
        puppet_path = os.path.join(redhouse_ten_path, "puppet")
        if not os.path.exists(os.path.join(puppet_path, "modules", "glance")):
            slab_logger.info(
                'Updating sub repos under service-redhouse-tenant')
            slab_logger.info('This may take a few minutes.')
            returncode, myinfo = service_utils.run_this(
                "USER={0} librarian-puppet install".format(username),
                puppet_path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to retrieve the necessary puppet configurations.')
                slab_logger.error(myinfo)
                sys.exit(1)
        a = v_utils.Connect_to_vagrant(vm_name=target, path=redhouse_ten_path)
        if yaml_utils.addto_inventory(target, ctx.path) > 0:
            slab_logger.error(
                'Could not add {0} to vagrant.yaml'.format(target))
            sys.exit(1)

        if not os.path.exists(os.path.join(ctx.path, 'services', 'ccs-data')):
            service_utils.sync_service(ctx.path, data_branch, username,
                                       'ccs-data')

        if not os.path.exists(
                os.path.join(ctx.path, 'services', 'ccs-data', 'out')):
            returncode, myinfo = service_utils.build_data(ctx.path)
            if returncode > 0:
                slab_logger.error('Failed to build ccs-data data b/c ' +
                                  myinfo)
                sys.exit(1)

        if not os.path.islink(
                os.path.join(redhouse_ten_path, "dev", "ccs-data")):
            slab_logger.debug(
                'WARNING: Linking ' +
                os.path.join(redhouse_ten_path, 'dev', 'ccs-data') + ' with ' +
                os.path.join(ctx.path, "services", "ccs-data"))
            # Note: os.symlink(src, dst)
            os.symlink(os.path.join(ctx.path, "services", "ccs-data"),
                       os.path.join(redhouse_ten_path, "dev", "ccs-data"))

        if remote:
            settingsyaml = {'openstack_provider': True}
            returncode = yaml_utils.wr_settingsyaml(ctx.path,
                                                    settingsyaml,
                                                    hostname=target)
            if returncode > 0:
                slab_logger.error(
                    'Failed to write settings yaml - make sure you have your '
                    'OS credentials sourced, have access to '
                    'ccs-gerrit.cisco.com, and have keys setup.')
                sys.exit(1)
            a.v.up(vm_name=target, provider='openstack')
        else:
            settingsyaml = {'openstack_provider': 'false'}
            returncode = yaml_utils.wr_settingsyaml(ctx.path,
                                                    settingsyaml=settingsyaml)
            if returncode > 0:
                slab_logger.error(
                    'Failed to write settings yaml - make sure you have your '
                    'OS credentials sourced, have access to '
                    'ccs-gerrit.cisco.com, and have keys setup.')
                sys.exit(1)
            a.v.up(vm_name=target)
        """
        The code for host manager is not implemented in service-redhouse-tenant Vagrant File.
        So this is currently stubbed out, as it causes Vagrant errors.
        """
        __EXECUTE__ = None
        if __EXECUTE__:
            returncode, myinfo = service_utils.run_this(
                'vagrant hostmanager', redhouse_ten_path)
            if returncode > 0:
                returncode, myinfo = service_utils.run_this(
                    'vagrant hostmanager '
                    '--provider openstack', redhouse_ten_path)
                if returncode > 0:
                    slab_logger.error(
                        "Could not run vagrant hostmanager because "
                        "{0}".format(myinfo))
                    sys.exit(1)
        sys.exit(0)

    service_utils.sync_service(ctx.path, redhouse_branch, username,
                               "service-redhouse-tenant")

    if mini:
        slab_logger.info('Booting vms for mini OSP deployment')
        returncode, allmy_vms = yaml_utils.getmin_OS_vms(ctx.path)
    elif full:
        slab_logger.info('Booting vms for full OSP deployment')
        returncode, allmy_vms = yaml_utils.getfull_OS_vms(
            os.path.join(ctx.path, 'provision'), '001')
    else:
        return 0
    if returncode > 0:
        slab_logger.error("Couldn't get the vms from the vagrant.yaml.")
        sys.exit(1)

    returncode, order = yaml_utils.get_host_order(
        os.path.join(ctx.path, 'provision'))
    if returncode > 0:
        slab_logger.error("Couldn't get order of vms from order.yaml")
        sys.exit(1)
    try:
        # Note: not sure if this will work w/ vm_name set to infra-001 arbitrarily
        # Note: move path to ctx.path if able to boot OSP pieces via infra/heighliner
        redhouse_ten_path = os.path.join(ctx.path, 'services',
                                         'service-redhouse-tenant')
        a = v_utils.Connect_to_vagrant(vm_name='infra-001',
                                       path=os.path.join(redhouse_ten_path))
        myvfile = Vf_utils.SlabVagrantfile(path=ctx.path, remote=remote)
        if remote:
            returncode, float_net, mynets, my_sec_grps = os_utils.os_ensure_network(
                ctx.path)
            if returncode > 0:
                slab_logger.error('Failed to get float net and mynets')
                sys.exit(1)
            myvfile.set_env_vars(float_net, mynets, my_sec_grps)

        if not os.path.exists(os.path.join(ctx.path, 'Vagrantfile')):
            myvfile.init_vagrantfile()
        puppet_path = os.path.join(redhouse_ten_path, "puppet")
        if not os.path.exists(os.path.join(puppet_path, "modules", "glance")):
            slab_logger.info(
                'Updating sub repos under service-redhouse-tenant')
            slab_logger.info('This may take a few minutes.')
            returncode, myinfo = service_utils.run_this(
                "USER={0} librarian-puppet install".format(username),
                puppet_path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to retrieve the necessary puppet configurations.')
                slab_logger.error(myinfo)
            returncode = service_utils.copy_certs(
                os.path.join(ctx.path, "provision"), puppet_path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to copy haproxy certs to ccs puppet module.')
                sys.exit(1)
        if not os.path.exists(os.path.join(ctx.path, 'services', 'ccs-data')):
            service_utils.sync_service(ctx.path, data_branch, username,
                                       'ccs-data')

        if not os.path.exists(
                os.path.join(ctx.path, 'services', 'ccs-data', 'out')):
            returncode, myinfo = service_utils.build_data(ctx.path)
            if returncode > 0:
                slab_logger.error('Failed to build ccs-data data b/c ' +
                                  myinfo)
                sys.exit(1)

        if not os.path.islink(
                os.path.join(redhouse_ten_path, "dev", "ccs-data")):
            slab_logger.debug(
                'WARNING: Linking ' +
                os.path.join(redhouse_ten_path, 'dev', 'ccs-data') + "with  " +
                os.path.join(ctx.path, "services", "ccs-data"))
            # Note: os.symlink(src, dst)
            os.symlink(os.path.join(ctx.path, "services", "ccs-data"),
                       os.path.join(redhouse_ten_path, "dev", "ccs-data"))
        for i in order:
            # Need to build nodes in specific order
            # so filter out everything but
            # if result is empty, then don't built this node and skip
            # variables aren't referenced outside of a lambda, so had
            # to pass in current node (i) as variable o
            vhosts = filter(lambda x, o=i: o in x, allmy_vms)
            if len(vhosts) == 0:
                continue
            if ha:
                ha_vm = vhosts.replace("001", "002")
                returncode, ha_vm_dicts = yaml_utils.gethost_byname(
                    ha_vm, os.path.join(ctx.path, 'provision'))
                if returncode > 0:
                    slab_logger.error(
                        "Couldn't get the vm {0} for HA".format(ha_vm))
                    sys.exit(1)
                else:
                    allmy_vms.append(ha_vm_dicts)
            for hosts in vhosts:
                for host in hosts:
                    newmem = (hosts[host]['memory'] / 512)
                    retcode = yaml_utils.host_add_vagrantyaml(
                        path=ctx.path,
                        file_name="vagrant.yaml",
                        hostname=host,
                        site='ccs-dev-1',
                        memory=newmem,
                        box=hosts[host]['box'],
                        role=hosts[host]['role'],
                        profile=hosts[host]['profile'],
                        domain=hosts[host]['domain'],
                        mac_nocolon=hosts[host]['mac'],
                        ip=hosts[host]['ip'],
                    )
                if retcode > 0:
                    slab_logger.error("Failed to add host" + host)
                    slab_logger.error("Continuing despite failure...")
            curhost = vhosts[0].keys()[0]
            if remote:
                settingsyaml = {'openstack_provider': True}
                returncode = yaml_utils.wr_settingsyaml(ctx.path,
                                                        settingsyaml,
                                                        hostname=curhost)
                if returncode > 0:
                    slab_logger.error('writing to settings yaml failed on: ' +
                                      curhost)
                myvfile.add_openstack_vm(vhosts[0])
                a.v.up(vm_name=curhost, provider='openstack')
            else:
                myvfile.add_virtualbox_vm(vhosts[0], ctx.path, nfs)
                a.v.up(vm_name=curhost)

    except IOError as e:
        slab_logger.error("{0} for vagrant.yaml in {1}".format(e, ctx.path))
        sys.exit(1)
Example #9
def env_new(ctx, username):
    """Compiles data for a new environment to be built on top of an existing
    site in the ccs-data repo.

    \b
    1) Syncs the ccs-data and ccs-build-tools repo into the .stack/services directory.
    2) Allows the user to dynamically input data pertaining to the new environment, which
       will be built on top of an existing, specified service cloud.
    3) The data is compiled into a single yaml file (answer-sample.yaml) located in the
       ccs-build-tools/ignition_rb directory and includes:
           *bom version
           *CIMC password
           *vlan numbers and their corresponding ip ranges
           *service cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova cloud nodes
           *tenant cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova, ceph, net and proxy nodes
    4) Within ccs-build-tools, a vagrant environment and virtualbox are used to compile all
       of the data into a single site directory, from which the appropriate environment
       is extracted and copied to the corresponding folder in ccs-data.

      Args:
        ctx: context
        username: credential used for cloning repos from gerrit
    """
    slab_logger.log(25, 'Creating a new environment')

    # Get username
    if not username:
        username = ctx.get_username()

    slab_logger.info("Retrieving latest ccs-data branch")
    service_utils.sync_service(ctx.path, "master", username, "ccs-data")
    slab_logger.info("Retrieving latest ccs-build-tools branch")
    service_utils.sync_service(ctx.path, "master", username, "ccs-build-tools")

    # Gather the new environment's data from user input
    returncode, site_dictionary = ccsbuildtools_utils.gather_env_info(ctx.path)
    if returncode > 0:
        slab_logger.error("unable to get the sites information")
        return

    svc_site_name = site_dictionary['service_cloud']['site_name']
    tc_site_name = site_dictionary['tenant_cloud']['site_name']
    slab_logger.log(
        15, "Building and Exporting %s to ccs-data---" % (svc_site_name))
    passed, log = service_utils.run_this(
        'vagrant up; vagrant destroy -f; ',
        os.path.join(ctx.path, "services", "ccs-build-tools"))
    if passed > 0:
        slab_logger.error(
            "Failed to establish vagrant environment in ccs-build-tools")
        slab_logger.error(
            "Printing log of vagrant up command in ccs-build-tools")
        slab_logger.error(log)
        return

    # Copying over contents of files generated by ccsbuildtools into ccs-data
    cmds = "cp -r ccs-build-tools/sites/%(svc)s/environments/%(tc)s "\
           "ccs-data/sites/%(svc)s/environments; "\
           "rm -rf ccs-build-tools/sites; " % {'svc': svc_site_name,
                                               'tc': tc_site_name}
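    # For illustration with hypothetical names svc='svc-cloud-1', tc='dev-tenant',
    # cmds expands to:
    #   cp -r ccs-build-tools/sites/svc-cloud-1/environments/dev-tenant \
    #       ccs-data/sites/svc-cloud-1/environments; rm -rf ccs-build-tools/sites;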
    passed, log = service_utils.run_this(cmds,
                                         os.path.join(ctx.path, "services"))
    if passed > 0:
        slab_logger.error("Failed to copy environment into ccs-data")
        slab_logger.error("Printing log of directory exports")
        slab_logger.error(log)
        return

    slab_logger.info(
        "Env Data Gathered for %s in site %s. Check .stack/services/ccs-data "
        "for its contents" % (tc_site_name, svc_site_name))
Example #10
def site_new(ctx, username, cont):
    """Compiles data for a new site in the ccs-data repo.

    \b
    1) Syncs the ccs-data and ccs-build-tools repo into the .stack/services directory.
    2) Allows the user to dynamically input data pertaining to the new site which
       is comprised of a single tenant cloud built on top of a service cloud.
       *The data compilation can be quit and resumed at a later time (the temporary
        data is stored in .stack/cache/temp_site.yaml)
    3) The data is compiled into a single yaml file (answer-sample.yaml) located in the
       ccs-build-tools/ignition_rb directory and includes:
           *bom version
           *CIMC password
           *vlan numbers and their corresponding ip ranges
           *service cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova cloud nodes
           *tenant cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova, ceph, net and proxy nodes
    4) Within ccs-build-tools, a vagrant environment and virtualbox are used to compile all
       of the data into a single site directory, which is copied into ccs-data.
    """
    slab_logger.info("Creating a new site in ccs-data")
    if not username:
        username = ctx.get_username()

    slab_logger.log(15, "Retrieving latest ccs-data branch")
    service_utils.sync_service(ctx.path, "master", username, "ccs-data")

    slab_logger.log(15, "Retrieving latest ccs-build-tools branch")
    service_utils.sync_service(ctx.path, "master", username, "ccs-build-tools")

    slab_logger.log(15, "Retreiving user input for new site's data fields...")
    returncode, site_dictionary = ccsbuildtools_utils.gather_site_info(
        ctx.path, cont)
    if returncode != 1:
        slab_logger.error("unable to retrieve site data")
        return

    svc_site_name = site_dictionary['service_cloud']['site_name']
    slab_logger.log(
        15, "Building and Exporting %s to ccs-data---" % (svc_site_name))
    passed, log = service_utils.run_this(
        'vagrant up; vagrant destroy -f;',
        os.path.join(ctx.path, "services", "ccs-build-tools"))
    if passed > 0:
        slab_logger.error(
            "Failed to establish vagrant environment in ccs-build-tools")
        slab_logger.error(
            "Printing log of vagrant up command in ccs-build-tools")
        slab_logger.error(log)
        return

    # Copying over contents of files generated by ccsbuildtools into ccs-data
    cmds = "cp -r ccs-build-tools/sites/%(svc)s ccs-data/sites; " \
           "rm -rf ccs-build-tools/sites; " % {'svc': svc_site_name}
    passed, log = service_utils.run_this(cmds,
                                         os.path.join(ctx.path, "services"))
    if passed > 0:
        slab_logger.error("Failed to copy site into ccs-data")
        slab_logger.error("Printing log of directory exports")
        slab_logger.error(log)
        return

    slab_logger.info(
        "Site Data Gathered for %s. Check .stack/services/ccs-data "
        "for its contents---" % (svc_site_name))