Пример #1
0
def cli(ctx, deploy_svc, existing_vm, env):
    """
    Re-deploy your service in an environment with an existing infra node. This should help
    facilitate rapid development as you can make changes locally on your laptop and
    continually deploy with this command.

    Args:
        ctx: CLI context; ctx.path is the working directory for shell commands.
        deploy_svc: Service name; a 'service-' prefix is added if missing.
        existing_vm: Not referenced in this body -- presumably consumed by the
            surrounding CLI wiring (confirm against the decorators).
        env: Not referenced in this body -- presumably consumed by the
            surrounding CLI wiring (confirm against the decorators).

    Exits:
        0 on a successful deploy, 1 if the deploy failed on both infra nodes.
    """
    # Normalize the service name so it always carries the 'service-' prefix.
    if not deploy_svc.startswith('service-'):
        deploy_svc = 'service-' + deploy_svc
        slab_logger.log(25, 'Adding "service-" to service: ' + deploy_svc)

    command = (
        'vagrant ssh {0} -c \"cd /opt/ccs/services/{1}/ && sudo heighliner '
        '--dev --debug deploy\"')

    # First attempt on infra-001; fall back to infra-002 on failure.
    returncode, myinfo = service_utils.run_this(
        command.format('infra-001', deploy_svc), ctx.path)

    slab_logger.log(25, myinfo)
    if returncode > 0:
        slab_logger.error('Failed to deploy service. Trying infra-002')
        returncode, myinfo = service_utils.run_this(
            command.format('infra-002', deploy_svc), ctx.path)
        slab_logger.log(25, myinfo)
        if returncode > 0:
            slab_logger.error('Failed to deploy service.')
            sys.exit(1)
        else:
            slab_logger.log(25, 'Deployed service successfully.')
            sys.exit(0)
    else:
        # Bug fix: the success message was previously emitted through
        # slab_logger.error(); report success at the same level (25) as the
        # infra-002 success path above.
        slab_logger.log(25, 'Deployed service successfully.')
        sys.exit(0)
Пример #2
0
    def test_run_this(self):
        """
        Test the run_this function, which runs a shell command

        Ensure failure of invalid command
        Ensure success of valid command
        """
        # An unknown command must produce a non-zero return code.
        retcode, output = service_utils.run_this('fake-command')
        self.assertNotEqual(retcode, 0)

        # Looking up a shell builtin is expected to succeed everywhere.
        retcode, output = service_utils.run_this('type bash')
        self.assertEqual(retcode, 0)
 def test_cmd_vm_status(self):
     """ Tests VM status command.

     Brings up service-horizon via 'stack workon' / 'stack up', then checks
     that the expected VM status marker appears in 'stack status vm' output.
     """
     runner = CliRunner()
     workon_cmd = "stack workon service-horizon"
     retcode, _ = service_utils.run_this(workon_cmd)
     if retcode != 0:
         # Bug fix: these flags were previously set on the undefined name
         # 'cls', which raised NameError inside this instance method.
         self.success_flag = False
         self.message = "Unable to run stack workon service-horizon"
         return
     up_cmd = "stack up -s service-horizon"
     # NOTE(review): the return code of 'stack up' is captured but never
     # checked here -- confirm whether a failed 'up' should also bail out.
     retcode, _ = service_utils.run_this(up_cmd)
     result = runner.invoke(cmd_status.cli, ['vm'])
     if result.output:
         self.assertTrue(TestCmdUpDataBranch.VM_STATUS in result.output)
Пример #4
0
    def setUpClass(cls):
        """Configure a git identity and bring up service-horizon once per class.

        Sets a global git user.email/user.name when no cisco.com identity is
        configured, then runs 'stack workon' and 'stack up' for
        service-horizon, asserting on any failure.
        """
        set_git_cmd = "git config --global user.email"\
                      " \"[email protected]\"; "\
            "git config --global user.name \"Raghu Katti\";"
        check_git_cmd = "git config user.email"
        ret_code, check_val = service_utils.run_this(check_git_cmd)

        if "@cisco.com" not in check_val:
            ret_code, _ = service_utils.run_this(set_git_cmd)
            assert ret_code == 0, "Unable to run : git config user.email"

        workon_cmd = "stack workon service-horizon"
        ret_code, _ = service_utils.run_this(workon_cmd)
        assert ret_code == 0, "Unable to run stack workon service-horizon"

        up_cmd = "stack up -s service-horizon"
        # Bug fix: the result of 'stack up' was stored in 'retcode' while the
        # assert checked the stale 'ret_code' from the workon step, so an
        # 'up' failure went undetected. Use one name consistently.
        ret_code, _ = service_utils.run_this(up_cmd)
        assert ret_code == 0, "Unable to run stack up -s service-horizon"
Пример #5
0
 def test_project(self):
     """
     Check if the project service-ansibletest-ansible exist in the gerrit
     repository as the project.
     """
     # Build the listing command from the configured gerrit server and
     # grep for the expected repo name; grep's exit code is the verdict.
     gerrit = self.gsrvr
     cmd = "ssh -p {} {} gerrit ls-projects | grep {}".format(
         gerrit['port'], gerrit['hostname'], self.reponame)
     retcode, _ = service_utils.run_this(cmd)
     self.assertEqual(0, retcode,
                      "project was not created in the staging area")
Пример #6
0
    def setUp(self):
        """
        setUp function for gerrit functions

        Clones the test project from the configured gerrit server into /tmp,
        appends a line to test.py on the develop branch, commits and pushes
        it for review, then stores the resulting changeset number in
        self.review. Fails the test if the review cannot be created.
        """
        self.sortTestMethodsUsing = None

        self.ctx = Context()
        self.ctx.debug = True
        # user: gerrit ssh username (value masked in this snippet)
        self.user = "******"
        self.prjname = "testproject"
        self.prjdir = "/tmp/" + self.prjname
        self.hostname = self.ctx.get_gerrit_server()['hostname']
        self.port = self.ctx.get_gerrit_server()['port']

        self.testrepo = "testproject"
        # Clone the project over ssh into /tmp (return code intentionally
        # ignored; a failed clone surfaces at the review step below).
        service_utils.run_this("cd /tmp;"
                               "git clone ssh://{}@{}:{}/{}".format(self.user,
                                                                    self.hostname,
                                                                    self.port,
                                                                    self.prjname))
        service_utils.run_this("cd {};git checkout develop;".format(self.prjdir))
        # Append a trivial change so there is something to commit and review.
        with open("{}/test.py".format(self.prjdir), "a") as tfile:
            tfile.write("print 'Hello world'")

        cmdrt, cmdrtstr = service_utils.run_this("cd {};".format(self.prjdir) +
                                                 "git add test.py;"
                                                 "git commit -m 'one additional line';")
        # Push the commit for review; a non-zero return code aborts the test.
        cmd = "cd {};git review develop".format(self.prjdir)
        cmdrt, cmdrtstr = service_utils.run_this(cmd)
        if cmdrt:
            click.echo("unable to perform setup for the test")
            click.echo("test failed")
            self.fail(cmdrtstr)

        # git-review prints a change URL ending in the changeset number;
        # capture that number for use by the individual tests.
        mtch = re.search("(https://.*/)([0-9]+)", cmdrtstr)
        if not (mtch and mtch.group(2)):
            click.echo("unable to find match in the string\n{}".format(cmdrtstr))
            self.fail("test failed: unable to determine changeset")
        self.review = mtch.group(2)
Пример #7
0
def cli(ctx, full, mini, rhel7, target, service, remote, ha, redhouse_branch,
        data_branch, service_branch, username, interactive, existing_vm, env,
        flavor, image, nfs):
    """Boot one or more development VMs ('stack up').

    Depending on the flags, boots a plain rhel7 vm, a vm for an installed
    service, a named vm from service-redhouse-tenant (--target), an existing
    vm defined in a ccs-data environment (--existing-vm, requires --env), or
    the mini/full OSP topologies. Most paths terminate the process directly
    via sys.exit().

    Args:
        ctx: CLI context; ctx.path is the .stack working directory.
        full, mini: boot the full or minimal OSP vm set (mutually exclusive).
        rhel7: boot a bare rhel7 vm.
        target: name of a service-redhouse-tenant vm to boot.
        service: name of a locally installed service to boot a vm for.
        remote: boot on OpenStack instead of local virtualbox.
        ha: also boot the -002 HA counterpart of each ordered OSP node.
        redhouse_branch, data_branch: branches used when syncing repos.
        service_branch, interactive: not referenced in this body --
            presumably consumed by the CLI wiring (confirm).
        username: gerrit username; derived from ctx when not given.
        existing_vm: hostname of an existing vm described in ccs-data.
        env: JSON string of environment settings (CCS_ENVIRONMENT etc.).
        flavor, image: OpenStack flavor/image names for the vm.
        nfs: enable nfs mounts for virtualbox vms.
    """
    flavor = str(flavor)
    image = str(image)
    service_groups = []
    # Things the user Should not do ==================================
    if mini is True and full is True:
        slab_logger.error('You can not use the mini flag with the full flag.')
        sys.exit(1)

    # Gather as many requirements as possible for the user ===========
    if not username:
        slab_logger.log(15, 'Extracting username')
        username = ctx.get_username()

    if not any([full, mini, rhel7, target, service, existing_vm]):
        slab_logger.info("Booting vm from most recently installed service")
        try:
            returncode, service = helper_utils.get_current_service(ctx.path)
        except TypeError:
            slab_logger.error("Could not get the current service.")
            slab_logger.error("Try: stack workon service-myservice")
            sys.exit(1)
        if returncode > 0:
            slab_logger.error("Failed to get the current service")
            sys.exit(1)

    if env is not None:
        env_json = json.loads(env)
        if 'CCS_ENVIRONMENT' in env_json:
            os.environ['CCS_ENVIRONMENT'] = env_json['CCS_ENVIRONMENT']
        if 'HEIGHLINER_DEPLOY_TAG' in env_json:
            os.environ['HEIGHLINER_DEPLOY_TAG'] = env_json[
                'HEIGHLINER_DEPLOY_TAG']
        if 'HEIGHLINER_DEPLOY_TARGET_HOSTS' in env_json:
            val = env_json['HEIGHLINER_DEPLOY_TARGET_HOSTS']
            os.environ['HEIGHLINER_DEPLOY_TARGET_HOSTS'] = val

    slab_logger.log(15, 'Determining vm hostname')
    hostname = ''
    if rhel7:
        hostname = str(helper_utils.name_vm("rhel7", ctx.path))
    elif service:
        if not service_utils.installed(service, ctx.path):
            slab_logger.error("{0} is not in the .stack/services/ directory.\n"
                              "Try: stack workon {0}".format(service))
            sys.exit(1)
        hostname = str(helper_utils.name_vm(service, ctx.path))
    elif target:
        hostname = target
    elif existing_vm:
        hostname = existing_vm
        # Bug fix: env_json is only bound when --env was supplied; fail with
        # a clear message instead of raising NameError below.
        if env is None:
            slab_logger.error('--existing-vm requires --env to provide '
                              'CCS_ENVIRONMENT')
            sys.exit(1)
        ret_code, site = ccsdata_utils.get_site_from_env(
            env_json['CCS_ENVIRONMENT'])
        if ret_code > 0:
            slab_logger.error("Could not find parent site for "
                              "{}".format(env_json['CCS_ENVIRONMENT']))
            sys.exit(1)
        env_path = os.path.join(ctx.path, 'services', 'ccs-data', 'sites',
                                site, 'environments',
                                env_json['CCS_ENVIRONMENT'])
        ret_code, yaml_data = yaml_utils.read_host_yaml(existing_vm, env_path)
        if ret_code > 0:
            slab_logger.error("Could not find host in site {0}"
                              " env {1}".format(site,
                                                env_json['CCS_ENVIRONMENT']))
            sys.exit(1)
        try:
            flavor = yaml_data['deploy_args']['flavor']
        except KeyError:
            slab_logger.warning(
                'Unable to find flavor for %s, using default flavor' %
                hostname)
        # Map the vm's ccs-data groups onto locally installed service repos.
        service_groups = []
        groups = []
        try:
            for group in yaml_data['groups']:
                if group != 'virtual':
                    groups.append(group)
                    service_group = 'service-' + group.replace('_', '-')
                    if os.path.isdir(
                            os.path.join(ctx.path, 'services', service_group)):
                        service_groups.append(service_group)
        except KeyError:
            pass  # can pass, vm has no groups
        if groups:
            slab_logger.log(
                25, '\nThe following groups were found within %s yaml file: ' %
                hostname)
            for group in groups:
                slab_logger.log(25, group)
            if not service_groups:
                slab_logger.log(
                    25, '\nNo service groups were found locally installed')
            else:
                slab_logger.log(
                    25, '\nThe following service groups were found installed '
                    'locally:')
                for service in service_groups:
                    slab_logger.log(25, service)
            input_display = (
                '\nAre the locally installed service groups the expected '
                'groups to be installed on %s? y/n: ' % hostname)
            # Python 2 raw_input; any answer starting with y/Y proceeds.
            if not re.search('^[Yy][Ee]*[Ss]*', raw_input(input_display)):
                slab_logger.log(
                    25, 'Try "stack workon service-<group>" for each to be '
                    'installed and rerun the "stack up --existing-vm" command')
                sys.exit(0)
        else:
            slab_logger.warning(
                'No groups were found for %s.  Continuing to build the VM.' %
                hostname)

    # Setup data and inventory
    if not target and not mini and not full:
        # Derive sizing from a flavor string like '2cpu.4ram'; 'memory' is in
        # 512MB units downstream.
        # Bug fix: cpus/memory were unbound (NameError) when the flavor did
        # not match the pattern; fall back to a small default instead.
        # NOTE(review): confirm the preferred default sizing.
        cpus = 2
        memory = 4
        match = re.search('^(\d+)cpu\.(\d+)ram', flavor)
        if match:
            cpus = int(match.group(1))
            memory = int(match.group(2)) * 2
        yaml_utils.host_add_vagrantyaml(ctx.path,
                                        "vagrant.yaml",
                                        hostname,
                                        "ccs-dev-1",
                                        memory=memory,
                                        cpus=cpus)
        if not service_groups:
            yaml_utils.write_dev_hostyaml_out(ctx.path,
                                              hostname,
                                              flavor=flavor,
                                              image=image)
        else:
            yaml_utils.write_dev_hostyaml_out(ctx.path,
                                              hostname,
                                              flavor=flavor,
                                              image=image,
                                              groups=service_groups)

        slab_logger.info('Building data for %s.' % hostname)
        if service or existing_vm or rhel7:
            retc, myinfo = service_utils.build_data(ctx.path)
            if retc > 0:
                slab_logger.error('Error building ccs-data ccs-dev-1: ' +
                                  myinfo)
                sys.exit(1)

        # Prep class Objects
        myvfile = Vf_utils.SlabVagrantfile(path=ctx.path, remote=remote)
        if not os.path.exists(os.path.join(ctx.path, 'Vagrantfile')):
            myvfile.init_vagrantfile()
        myvag_env = v_utils.Connect_to_vagrant(vm_name=hostname, path=ctx.path)

        # Setup Vagrantfile w/ vm
        my_sec_grps = ""
        if remote:
            returncode, float_net, mynets, my_sec_grps = os_utils.os_ensure_network(
                ctx.path)
            if returncode > 0:
                slab_logger.error("No OS_ environment variables found")
                sys.exit(1)
            myvfile.set_env_vars(float_net, mynets, my_sec_grps)
            returncode, host_dict = yaml_utils.gethost_byname(
                hostname, ctx.path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to get the requested host from your Vagrant.yaml')
                sys.exit(1)
            myvfile.add_openstack_vm(host_dict)
        else:
            returncode, host_dict = yaml_utils.gethost_byname(
                hostname, ctx.path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to get the requested host from your Vagrant.yaml')
                sys.exit(1)
            if myvfile.add_virtualbox_vm(host_dict, ctx.path, nfs) != 0:
                # Consistency fix: was ctx.logger.error; every other path in
                # this function reports through slab_logger.
                slab_logger.error('Unable to create a local virtual box vm')
                sys.exit(1)

        # Get vm running
        myvag_env.v.up(vm_name=hostname)
        returncode, myinfo = service_utils.run_this('vagrant hostmanager',
                                                    ctx.path)
        if returncode > 0:
            # Second chance.
            returncode, myinfo = service_utils.run_this(
                'vagrant hostmanager '
                '--provider openstack', ctx.path)
            if returncode > 0:
                slab_logger.error("Could not run vagrant hostmanager because\
                                 {0}".format(myinfo))
                slab_logger.error("Vagrant hostmanager will fail if you "
                                  "have local vms and remote vms.")
                sys.exit(1)
        # You can exit safely now if you're just booting a rhel7 vm
        if rhel7:
            sys.exit(0)

    # SERVICE VM remaining workflow  =================================
    if service or existing_vm:
        slab_logger.info('Booting service and infra_node vms')
        if remote:
            returncode, infra_name = v_utils.infra_ensure_up(mynets,
                                                             float_net,
                                                             my_sec_grps,
                                                             nfs,
                                                             path=ctx.path)
            if returncode == 1:
                slab_logger.error("Could not boot a remote infra node")
                sys.exit(1)
        else:
            returncode, infra_name = v_utils.infra_ensure_up(None,
                                                             None,
                                                             None,
                                                             nfs,
                                                             path=ctx.path)
            if returncode == 1:
                slab_logger.error("Could not boot a local infra node")
                sys.exit(1)

        returncode, myinfo = service_utils.run_this('vagrant hostmanager',
                                                    ctx.path)
        if returncode > 0:
            returncode, myinfo = service_utils.run_this(
                'vagrant hostmanager '
                '--provider openstack', ctx.path)
            if returncode > 0:
                slab_logger.error("Could not run vagrant hostmanager because\
                                 {0}".format(myinfo))
                slab_logger.error(
                    "Vagrant manager will fail if you have local vms"
                    "and remote vms.")
                sys.exit(1)

        command = (
            'vagrant ssh {0} -c \"cd /opt/ccs/services/{1}/ && sudo heighliner '
            '--dev --debug deploy\"')

        if service:
            returncode, myinfo = service_utils.run_this(
                command.format(infra_name, service), ctx.path)
            if returncode > 0:
                slab_logger.error(
                    "There was a failure during the heighliner deploy phase of"
                    " your service. Please see the following information"
                    "for debugging: ")
                slab_logger.error(myinfo)
                sys.exit(1)
            else:
                sys.exit(0)
        else:  # will only match if existing_vm
            for service in service_groups:
                returncode, myinfo = service_utils.run_this(
                    command.format(infra_name, service), ctx.path)
                if returncode > 0:
                    slab_logger.error(
                        "There was a failure during the heighliner deploy "
                        "phase of your service. Please see the following "
                        "information for debugging: ")
                    slab_logger.error(myinfo)
                    sys.exit(1)
            sys.exit(0)
    elif target:
        slab_logger.info('Booting target service vm')
        redhouse_ten_path = os.path.join(ctx.path, 'services',
                                         'service-redhouse-tenant')
        service_utils.sync_service(ctx.path, redhouse_branch, username,
                                   "service-redhouse-tenant")
        puppet_path = os.path.join(redhouse_ten_path, "puppet")
        if not os.path.exists(os.path.join(puppet_path, "modules", "glance")):
            slab_logger.info(
                'Updating sub repos under service-redhouse-tenant')
            slab_logger.info('This may take a few minutes.')
            returncode, myinfo = service_utils.run_this(
                "USER={0} librarian-puppet install".format(username),
                puppet_path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to retrieve the necessary puppet configurations.')
                slab_logger.error(myinfo)
                sys.exit(1)
        a = v_utils.Connect_to_vagrant(vm_name=target, path=redhouse_ten_path)
        if yaml_utils.addto_inventory(target, ctx.path) > 0:
            slab_logger.error(
                'Could not add {0} to vagrant.yaml'.format(target))
            sys.exit(1)

        if not os.path.exists(os.path.join(ctx.path, 'services', 'ccs-data')):
            service_utils.sync_service(ctx.path, data_branch, username,
                                       'ccs-data')

        if not os.path.exists(
                os.path.join(ctx.path, 'services', 'ccs-data', 'out')):
            returncode, myinfo = service_utils.build_data(ctx.path)
            if returncode > 0:
                slab_logger.error('Failed to build ccs-data data b/c ' +
                                  myinfo)
                sys.exit(1)

        if not os.path.islink(
                os.path.join(redhouse_ten_path, "dev", "ccs-data")):
            slab_logger.debug(
                'WARNING: Linking ' +
                os.path.join(redhouse_ten_path, 'dev', 'ccs-data') + "with  " +
                os.path.join(ctx.path, "services", "ccs-data"))
            # Note: os.symlink(src, dst)
            os.symlink(os.path.join(ctx.path, "services", "ccs-data"),
                       os.path.join(redhouse_ten_path, "dev", "ccs-data"))

        if remote:
            settingsyaml = {'openstack_provider': True}
            returncode = yaml_utils.wr_settingsyaml(ctx.path,
                                                    settingsyaml,
                                                    hostname=target)
            if returncode > 0:
                slab_logger.error(
                    'Failed to write settings yaml - make sure you have your '
                    'OS cred.s sourced and have access to'
                    'ccs-gerrit.cisco.com and have keys setup.')
                sys.exit(1)
            a.v.up(vm_name=target, provider='openstack')
        else:
            settingsyaml = {'openstack_provider': 'false'}
            returncode = yaml_utils.wr_settingsyaml(ctx.path,
                                                    settingsyaml=settingsyaml)
            if returncode > 0:
                slab_logger.error(
                    'Failed to write settings yaml - make sure you have your '
                    'OS cred.s sourced and have access to'
                    'ccs-gerrit.cisco.com and have keys setup.')
                sys.exit(1)
            a.v.up(vm_name=target)
        """
        The code for host manager is not implemented in service-redhouse-tenant Vagrant File.
        So this is currently stubbed out, as it causes Vagrant errors.
        """
        __EXECUTE__ = None
        if __EXECUTE__:
            returncode, myinfo = service_utils.run_this(
                'vagrant hostmanager', redhouse_ten_path)
            if returncode > 0:
                returncode, myinfo = service_utils.run_this(
                    'vagrant hostmanager '
                    '--provider openstack', redhouse_ten_path)
                if returncode > 0:
                    slab_logger.error(
                        "Could not run vagrant hostmanager because\
                                     {0}".format(myinfo))
                    sys.exit(1)
        sys.exit(0)

    service_utils.sync_service(ctx.path, redhouse_branch, username,
                               "service-redhouse-tenant")

    if mini:
        slab_logger.info('Booting vms for mini OSP deployment')
        returncode, allmy_vms = yaml_utils.getmin_OS_vms(ctx.path)
    elif full:
        slab_logger.info('Booting vms for full OSP deployment')
        returncode, allmy_vms = yaml_utils.getfull_OS_vms(
            os.path.join(ctx.path, 'provision'), '001')
    else:
        return 0
    if returncode > 0:
        slab_logger.error("Couldn't get the vms from the vagrant.yaml.")
        sys.exit(1)

    returncode, order = yaml_utils.get_host_order(
        os.path.join(ctx.path, 'provision'))
    if returncode > 0:
        slab_logger.error("Couldn't get order of vms from order.yaml")
        sys.exit(1)
    try:
        # Note: not sure if this will work w/ vm_name set to infra-001 arbitrarily
        # Note: move path to ctx.path if able to boot OSP pieces via infra/heighliner
        redhouse_ten_path = os.path.join(ctx.path, 'services',
                                         'service-redhouse-tenant')
        a = v_utils.Connect_to_vagrant(vm_name='infra-001',
                                       path=os.path.join(redhouse_ten_path))
        myvfile = Vf_utils.SlabVagrantfile(path=ctx.path, remote=remote)
        if remote:
            returncode, float_net, mynets, my_sec_grps = os_utils.os_ensure_network(
                ctx.path)
            if returncode > 0:
                slab_logger.error('Failed to get float net and mynets')
                sys.exit(1)
            myvfile.set_env_vars(float_net, mynets, my_sec_grps)

        if not os.path.exists(os.path.join(ctx.path, 'Vagrantfile')):
            myvfile.init_vagrantfile()
        puppet_path = os.path.join(redhouse_ten_path, "puppet")
        if not os.path.exists(os.path.join(puppet_path, "modules", "glance")):
            slab_logger.info(
                'Updating sub repos under service-redhouse-tenant')
            slab_logger.info('This may take a few minutes.')
            returncode, myinfo = service_utils.run_this(
                "USER={0} librarian-puppet install".format(username),
                puppet_path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to retrieve the necessary puppet configurations.')
                slab_logger.error(myinfo)
            returncode = service_utils.copy_certs(
                os.path.join(ctx.path, "provision"), puppet_path)
            if returncode > 0:
                slab_logger.error(
                    'Failed to copy haproxy certs to ccs puppet module.')
                sys.exit(1)
        if not os.path.exists(os.path.join(ctx.path, 'services', 'ccs-data')):
            service_utils.sync_service(ctx.path, data_branch, username,
                                       'ccs-data')

        if not os.path.exists(
                os.path.join(ctx.path, 'services', 'ccs-data', 'out')):
            returncode, myinfo = service_utils.build_data(ctx.path)
            if returncode > 0:
                slab_logger.error('Failed to build ccs-data data b/c ' +
                                  myinfo)
                sys.exit(1)

        if not os.path.islink(
                os.path.join(redhouse_ten_path, "dev", "ccs-data")):
            slab_logger.debug(
                'WARNING: Linking ' +
                os.path.join(redhouse_ten_path, 'dev', 'ccs-data') + "with  " +
                os.path.join(ctx.path, "services", "ccs-data"))
            # Note: os.symlink(src, dst)
            os.symlink(os.path.join(ctx.path, "services", "ccs-data"),
                       os.path.join(redhouse_ten_path, "dev", "ccs-data"))
        for i in order:
            # Need to build nodes in specific order
            # so filter out everything but
            # if result is empty, then don't built this node and skip
            # variables aren't referenced outside of a lambda, so had
            # to pass in current node (i) as variable o
            vhosts = filter(lambda x, o=i: o in x, allmy_vms)
            if len(vhosts) == 0:
                continue
            if ha:
                # Bug fix: 'vhosts' is a list and has no .replace(); derive
                # the HA twin's name from the current node name instead.
                ha_vm = i.replace("001", "002")
                returncode, ha_vm_dicts = yaml_utils.gethost_byname(
                    ha_vm, os.path.join(ctx.path, 'provision'))
                if returncode > 0:
                    slab_logger.error(
                        "Couldn't get the vm {0} for HA".format(ha_vm))
                    sys.exit(1)
                else:
                    allmy_vms.append(ha_vm_dicts)
            for hosts in vhosts:
                for host in hosts:
                    newmem = (hosts[host]['memory'] / 512)
                    retcode = yaml_utils.host_add_vagrantyaml(
                        path=ctx.path,
                        file_name="vagrant.yaml",
                        hostname=host,
                        site='ccs-dev-1',
                        memory=newmem,
                        cpus=hosts[host].get('cpus', 1) if isinstance(hosts[host], dict) else 1,
                        box=hosts[host]['box'],
                        role=hosts[host]['role'],
                        profile=hosts[host]['profile'],
                        domain=hosts[host]['domain'],
                        mac_nocolon=hosts[host]['mac'],
                        ip=hosts[host]['ip'],
                    ) if False else yaml_utils.host_add_vagrantyaml(
                        path=ctx.path,
                        file_name="vagrant.yaml",
                        hostname=host,
                        site='ccs-dev-1',
                        memory=newmem,
                        box=hosts[host]['box'],
                        role=hosts[host]['role'],
                        profile=hosts[host]['profile'],
                        domain=hosts[host]['domain'],
                        mac_nocolon=hosts[host]['mac'],
                        ip=hosts[host]['ip'],
                    )
                if retcode > 0:
                    slab_logger.error("Failed to add host" + host)
                    slab_logger.error("Continuing despite failure...")
            curhost = vhosts[0].keys()[0]
            if remote:
                settingsyaml = {'openstack_provider': True}
                returncode = yaml_utils.wr_settingsyaml(ctx.path,
                                                        settingsyaml,
                                                        hostname=curhost)
                if returncode > 0:
                    slab_logger.error('writing to settings yaml failed on: ' +
                                      curhost)
                myvfile.add_openstack_vm(vhosts[0])
                a.v.up(vm_name=curhost, provider='openstack')
            else:
                myvfile.add_virtualbox_vm(vhosts[0], ctx.path, nfs)
                a.v.up(vm_name=curhost)

    except IOError as e:
        slab_logger.error("{0} for vagrant.yaml in {1}".format(e, ctx.path))
        sys.exit(1)
Пример #8
0
def env_new(ctx, username):
    """Compiles data for a new environment to be built on top of an existing
    site in the ccs-data repo.

    \b
    1) Syncs the ccs-data and ccs-build-tools repo into the .stack/services directory.
    2) Allows the user to dynamically input data pertaining to the new environment, which
       will be built on top of an existing, specified service cloud.
    3) The data is compiled into a single yaml file (answer-sample.yaml) located in the
       ccs-build-tools/ignition_rb directory and includes:
           *bom version
           *CIMC password
           *vlan numbers and their corresponding ip ranges
           *service cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova cloud nodes
           *tenant cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova, ceph, net and proxy nodes
    4) Within ccs-build-tool, a vagrant environment and virtualbox is used to compile all
       of the data into a single site directory, with which the appropriate environment
       is extracted and copied to the appropriate folder in ccs-data.

      Args:
        ctx: context
        username: credential used for cloning repos from gerrit
    """
    slab_logger.log(25, 'Creating a new environment')

    # Derive the gerrit username when not supplied on the command line.
    if not username:
        username = ctx.get_username()

    # Pull fresh copies of both repos this command reads and writes.
    for repo in ("ccs-data", "ccs-build-tools"):
        slab_logger.info("Retrieving latest %s branch" % repo)
        service_utils.sync_service(ctx.path, "master", username, repo)

    # Ensure you have latest ccs-data branch
    returncode, site_info = ccsbuildtools_utils.gather_env_info(ctx.path)
    if returncode > 0:
        slab_logger.error("unable to get the sites information")
        return

    svc_site = site_info['service_cloud']['site_name']
    tc_site = site_info['tenant_cloud']['site_name']
    slab_logger.log(
        15, "Building and Exporting %s to ccs-data---" % (svc_site))
    build_dir = os.path.join(ctx.path, "services", "ccs-build-tools")
    rc, output = service_utils.run_this(
        'vagrant up; vagrant destroy -f; ', build_dir)
    if rc > 0:
        slab_logger.error(
            "Failed to establish vagrant environment in ccs-build-tools")
        slab_logger.error(
            "Printing log of vagrant up command in ccs-build-tools")
        slab_logger.error(output)
        return

    # Copying over contents of files generated by ccsbuildtools into ccs-data
    copy_cmds = ("cp -r ccs-build-tools/sites/%(svc)s/environments/%(tc)s "
                 "ccs-data/sites/%(svc)s/environments; "
                 "rm -rf ccs-build-tools/sites; " % {'svc': svc_site,
                                                     'tc': tc_site})
    rc, output = service_utils.run_this(copy_cmds,
                                        os.path.join(ctx.path, "services"))
    if rc > 0:
        slab_logger.error("Failed to copy environment into ccs-data")
        slab_logger.error("Printing log of directory exports")
        slab_logger.error(output)
        return

    slab_logger.info(
        "Env Data Gathered for %s in site %s. Check .stack/services/ccs-data "
        "for its contents" % (tc_site, svc_site))
Пример #9
0
def site_new(ctx, username, cont):
    """Compiles data for a new site in the ccs-data repo.

    \b
    1) Syncs the ccs-data and ccs-build-tools repo into the .stack/services directory.
    2) Allows the user to dynamically input data pertaining to the new site which
       is comprised of a single tenant cloud built on top of a service cloud.
       *The data compilation can be quit and resumed for a later time (the temporary
        data is stored in .stack/cache/temp_site.yaml
    3) The data is compiled into a single yaml file (answer-sample.yaml) located in the
       ccs-build-tools/ignition_rb directory and includes:
           *bom version
           *CIMC password
           *vlan numbers and their corresponding ip ranges
           *service cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova cloud nodes
           *tenant cloud information:
               *site name
               *availability zone
               *domain
               *number of virtualized nova, ceph, net and proxy nodes
    4) Within ccs-build-tool, a vagrant environment and virtualbox is used to compile all
       of the data into a single site directory, which is copied into ccs-data.
    """
    slab_logger.info("Creating a new site in ccs-data")
    if not username:
        username = ctx.get_username()

    # Sync both repos this command depends on.
    for repo in ("ccs-data", "ccs-build-tools"):
        slab_logger.log(15, "Retrieving latest %s branch" % repo)
        service_utils.sync_service(ctx.path, "master", username, repo)

    slab_logger.log(15, "Retreiving user input for new site's data fields...")
    returncode, site_info = ccsbuildtools_utils.gather_site_info(
        ctx.path, cont)
    # NOTE: gather_site_info appears to signal success with 1 -- confirm.
    if returncode != 1:
        slab_logger.error("unable to retrieve site data")
        return

    svc_site = site_info['service_cloud']['site_name']
    slab_logger.log(
        15, "Building and Exporting %s to ccs-data---" % (svc_site))
    rc, output = service_utils.run_this(
        'vagrant up; vagrant destroy -f;',
        os.path.join(ctx.path, "services", "ccs-build-tools"))
    if rc > 0:
        slab_logger.error(
            "Failed to establish vagrant environment in ccs-build-tools")
        slab_logger.error(
            "Printing log of vagrant up command in ccs-build-tools")
        slab_logger.error(output)
        return

    # Copying over contents of files generated by ccsbuildtools into ccs-data
    copy_cmds = ("cp -r ccs-build-tools/sites/%(svc)s ccs-data/sites; "
                 "rm -rf ccs-build-tools/sites; " % {'svc': svc_site})
    rc, output = service_utils.run_this(copy_cmds,
                                        os.path.join(ctx.path, "services"))
    if rc > 0:
        slab_logger.error("Failed to copy site into ccs-data")
        slab_logger.error("Printing log of directory exports")
        slab_logger.error(output)
        return

    slab_logger.info(
        "Site Data Gathered for %s. Check .stack/services/ccs-data "
        "for its contents---" % (svc_site))