def run_docl_bootstrap_or_download():
    """Fetch the docl init script and run it against a fresh clone of the
    cloudify-manager-blueprints repository.
    """
    ctx.logger.info('Preparing docl bootstrap execution')
    init_script = join(utils.WORKDIR, 'docl_init.sh')
    ctx.download_resource(join('scripts', 'docl_init.sh'), init_script)
    utils.run('chmod +x {0}'.format(init_script))

    ctx.logger.info('Cloning cloudify manager blueprints {0}'
                    .format(MANAGER_BLUEPRINTS_REPO))
    blueprints_repo = utils.clone(**MANAGER_BLUEPRINTS_REPO)
    blueprint_yaml = os.path.join(blueprints_repo,
                                  'simple-manager-blueprint.yaml')

    ctx.logger.info('Creating private key file')
    key_path = create_key_file()

    # Any non-master blueprints branch forces a rebuild of the docl image.
    force_rebuild = ctx.node.properties['rebuild']
    if MANAGER_BP_BRANCH != 'master':
        force_rebuild = 'true'

    utils.run(' '.join([init_script,
                        utils.CLOUDIFY_VENV_PATH,
                        blueprint_yaml,
                        key_path,
                        utils.REPOS_DIR,
                        str(force_rebuild)]),
              out=True)
def _create_db_tables_and_add_users():
    """Run the create_tables_and_add_users script inside the REST service
    virtualenv, passing its arguments through a temporary JSON file.
    """
    ctx.logger.info('Creating SQL tables and adding admin users...')
    script_src = ('components/restservice/config'
                  '/create_tables_and_add_users.py')
    script_dst = join(tempfile.gettempdir(),
                      'create_tables_and_add_users.py')
    ctx.download_resource(source=script_src, destination=script_dst)

    runtime_props = ctx.instance.runtime_properties
    script_args = json.loads(runtime_props['security_configuration'])
    script_args['postgresql_host'] = runtime_props['postgresql_host']

    # The script won't have access to the ctx, so we dump the relevant args
    # to a JSON file, and pass its path to the script
    args_path = join(tempfile.gettempdir(), 'security_config.json')
    with open(args_path, 'w') as args_file:
        json.dump(script_args, args_file)

    # Directly calling with this python bin, in order to make sure it's run
    # in the correct venv
    venv_python = '{0}/env/bin/python'.format(REST_SERVICE_HOME)
    result = utils.sudo([venv_python, script_dst, args_path])

    _log_results(result)
    utils.remove(args_path)
# Example #3
# 0
def reset_kubeadm():
    """Uninstall kubernetes on a node.

    Runs `kubeadm reset` on the specified machine in order to remove the
    kubernetes services and undo all configuration set by `kubeadm init`.

    """
    # Fetch the reset script to a path next to this module.
    script_path = os.path.join(os.path.dirname(__file__), 'mega-reset.sh')
    ctx.download_resource(os.path.join('scripts', 'mega-reset.sh'),
                          script_path)

    # Look up the worker machine through the Mist API.
    runtime = ctx.instance.runtime_properties
    machine = MistConnectionClient().get_machine(
        cloud_id=runtime['cloud_id'],
        machine_id=runtime['machine_id'],
    )

    ctx.logger.info('Running "kubeadm reset" on %s', machine)

    _add_run_remove_script(
        cloud_id=machine.cloud.id,
        machine_id=machine.id,
        script_path=os.path.abspath(script_path),
        script_name='kubeadm_reset_%s' % random_string(length=4),
    )
def deploy_gateway_space(**kwargs):
    """Deploy a space with a gateway via the XAP groovy deploy script.

    Expected kwargs: space_name, space_zones, gateway_name,
    gateway_targets. The XAP install dir and lookup locators are read
    from /tmp/gsdir and /tmp/locators respectively.
    """
    script = "xap-scripts/deploy-space-with-gateway.groovy"
    spacename = kwargs['space_name']
    spacezones = kwargs['space_zones']
    gwname = kwargs['gateway_name']
    targets = kwargs['gateway_targets']
    script_path = ctx.download_resource(script)
    ctx.download_resource("xap-scripts/space-pu.xml", "/tmp/space-pu.xml")
    xapdir = "".join([line.strip() for line in open('/tmp/gsdir')])
    locators = ",".join([line.strip() for line in open('/tmp/locators')])

    ip = get_ip_from_interface_name(ctx.node.properties['interfacename'])
    space_deployment_command = [
        xapdir + "/tools/groovy/bin/groovy",
        "-Dspacename=" + spacename,
        "-Dzones=" + spacezones,
        "-Dtargets=" + list_to_str(targets),
        "-Dgwname=" + gwname,
        "-Dlocallocators=" + locators,
        "-Djava.rmi.server.hostname=" + ip,
        script_path
    ]
    my_env = os.environ.copy()
    my_env['XAP_LOOKUP_LOCATORS'] = locators
    my_env['XAP_NIC_ADDRESS'] = ip
    ctx.logger.info("Executing: %s", space_deployment_command)
    # BUG FIX: my_env was built but never passed to the subprocess, so the
    # groovy script ran without XAP_LOOKUP_LOCATORS / XAP_NIC_ADDRESS set
    # (cf. deploy_gateway_pu, which does pass env=my_env).
    output = subprocess.check_output(space_deployment_command, env=my_env)
    ctx.logger.info("Finished executing, output: %s", output)
def create_app():
    """Create the app's pod, replication controller and service from the
    blueprint's pod.yaml / rc.yaml / svc.yaml resources.

    :raises NonRecoverableError: when any of the three create calls fails.
    """
    _create_from_resource('pod.yaml', 'pods', 'pod')
    _create_from_resource('rc.yaml', 'replicationcontrollers',
                          'replication controller')
    _create_from_resource('svc.yaml', 'services', 'service')


def _create_from_resource(resource_name, kind, description):
    """Download a YAML resource, parse it, and create the k8s object.

    :param resource_name: file name under BASE_DIR in the blueprint.
    :param kind: kubernetes resource kind passed to ``create``.
    :param description: human readable name used in the error message.
    """
    local_path = ctx.download_resource(os.path.join(BASE_DIR, resource_name))
    with open(local_path, 'r') as f:
        # safe_load: these are plain kubernetes manifests; the unrestricted
        # yaml.load can run arbitrary constructors and is deprecated.
        parsed = yaml.safe_load(f)
    if create(kind, parsed) != SUCCESS_CODE:
        raise NonRecoverableError('Failed to create {0}.'.format(description))
def _create_db_tables_and_add_defaults():
    """Run the create_tables_and_add_defaults script inside the REST
    service virtualenv, feeding its arguments via a temp JSON file.
    """
    ctx.logger.info('Creating SQL tables and adding default values...')
    script_name = 'create_tables_and_add_defaults.py'
    remote_script = join('components/restservice/config', script_name)
    local_script = join(tempfile.gettempdir(), script_name)
    ctx.download_resource(remote_script, local_script)

    script_args = runtime_props['security_configuration']
    script_args.update({
        'amqp_host': runtime_props['rabbitmq_endpoint_ip'],
        'amqp_username': runtime_props['rabbitmq_username'],
        'amqp_password': runtime_props['rabbitmq_password'],
        'postgresql_host': runtime_props['postgresql_host'],
        'db_migrate_dir': join(utils.MANAGER_RESOURCES_HOME,
                               'cloudify', 'migrations'),
    })

    # The script won't have access to the ctx, so we dump the relevant args
    # to a JSON file, and pass its path to the script
    args_path = join(tempfile.gettempdir(), 'security_config.json')
    with open(args_path, 'w') as args_file:
        json.dump(script_args, args_file)

    # Directly calling with this python bin, in order to make sure it's run
    # in the correct venv
    venv_python = join(runtime_props['home_dir'], 'env', 'bin', 'python')
    result = utils.sudo([venv_python, local_script, args_path])

    _log_results(result)
    utils.remove(args_path)
    utils.remove(local_script)
def main():
    '''Entry point'''
    logger = get_hostpool_logger('configure',
                                 debug=ctx.node.properties.get('debug'))

    pool = ctx.node.properties.get('pool')
    if not pool:
        logger.info('Configuration file for the Host-Pool service '
                    'was not specified. Continuing without seed hosts')
        ctx.instance.runtime_properties['seed_hosts'] = None
        return

    logger.debug('Downloading host-pool configuration file "{0}" to "{1}"'
                 .format(pool, POOL_CFG_PATH))
    ctx.download_resource(pool, target_path=POOL_CFG_PATH)

    if not os.path.exists(POOL_CFG_PATH):
        raise NonRecoverableError('Configuration file for the Host-Pool '
                                  'service could not be downloaded')

    # Load our configuration data
    with open(POOL_CFG_PATH) as f_cfg:
        logger.info('Loading Host-Pool seed hosts')
        try:
            cfg = yaml.load(f_cfg)
        except yaml.YAMLError:
            raise NonRecoverableError('Configuration file for the Host-Pool '
                                      'service is not valid YAML')
        logger.info('Converting host key files from blueprint')
        ctx.instance.runtime_properties['seed_config'] = \
            set_host_key_content(cfg, logger)
def configure_php():
    """Install PHP5 with the Apache module and publish an info.php page."""
    sudo('apt-get -y install php5 libapache2-mod-php5 php5-mcrypt')
    work_dir = '/tmp/{0}'.format(ctx.execution_id)
    local_info_php = '{0}/info.php'.format(work_dir)
    os.mkdir(work_dir)
    ctx.download_resource(resource_path='resources/info.php',
                          target_path=local_info_php)
    put(local_info_php, '/var/www/info.php', use_sudo=True)
def configure_logging():
    """Install the management worker logging.conf under /etc/cloudify."""
    ctx.logger.info('Configuring Management worker logging...')
    target_dir = '/etc/cloudify'
    conf_name = 'logging.conf'
    utils.mkdir(target_dir)
    staged_conf = join(tempfile.gettempdir(), conf_name)
    ctx.download_resource(join(CONFIG_PATH, conf_name), staged_conf)
    utils.move(staged_conf, join(target_dir, conf_name))
def _create_default_db(db_name, username, password):
    """Create the default postgresql database by running the bundled
    create_default_db.sh helper as the postgres user."""
    ctx.logger.info('Creating default postgresql database: {0}...'.format(
        db_name))
    script_src = 'components/postgresql/config/create_default_db.sh'
    script_dst = join(tempfile.gettempdir(), 'create_default_db.sh')
    ctx.download_resource(source=script_src, destination=script_dst)
    utils.chmod('+x', script_dst)
    # TODO: Can't we use a rest call here? Is there such a thing?
    utils.sudo('su - postgres -c "{cmd} {db} {user} {password}"'
               .format(cmd=script_dst, db=db_name,
                       user=username, password=password))
def get_key_content(key_file, logger):
    '''Download a blueprint key file and return its contents.

    :param key_file: blueprint-relative path of the key resource.
    :param logger: logger used for debug output.
    :returns: the key file content as a string.
    '''
    tfd, target_path = mkstemp()
    os.close(tfd)
    logger.debug('Downloading key file "{0}" to path "{1}"'
                 .format(key_file, target_path))
    try:
        ctx.download_resource(key_file, target_path)
        with open(target_path, 'r') as f_key:
            keycontent = f_key.read()
        logger.debug('Key file "{0}" contains: {1}'
                     .format(key_file, keycontent))
        return keycontent
    finally:
        # BUG FIX: previously the temp file leaked if the download or the
        # read raised; always remove it.
        os.remove(target_path)
def create_files_dictionary_from_files_list(_files):
    """
    Transform the list of files into a dict of files.

    :param _files: list of cloudify.datatypes.file data structures.
    :return:
    """
    file_dictionary = {}
    for file_entry in _files:
        name = file_entry.get('filename')
        if not name:
            raise NonRecoverableError(
                'Improperly formatted file in list of cloudify.datatypes.file')

        if file_entry.get('path'):
            # Local blueprint resource: fetch it to a local path.
            file_object = ctx.download_resource(file_entry.get('path'))
        elif file_entry.get('url'):
            raise NonRecoverableError(
                'No implementation for downloading a file.')
        else:
            raise NonRecoverableError(
                'Neither an url, nor a path was provided.')

        file_dictionary[name] = file_object

    return file_dictionary
def get_playbook_path(playbook, target_path):
    """Download the playbook resource into target_path and return the
    local path it was saved to."""
    destination = os.path.join(target_path, playbook)
    try:
        return ctx.download_resource(playbook, destination)
    except exceptions.HttpException as e:
        raise exceptions.NonRecoverableError(
            "Could not get playbook file: {}.".format(str(e)))
def do_mapping(current_os, iaas, device_name):
    """Run the per-IaaS device-mapping script for the given OS and return
    its 'last_output' value."""
    ctx.logger.info("inside current os: '{0}'".format(current_os))
    command_prefix = None
    if current_os == 'windows':
        ctx.logger.info('[MAPPING] windows')
        map_script_path = ctx.download_resource("device-mapping-scripts/{0}/mapDevice.ps1".format(iaas))
        command_prefix = "C:\\Windows\\Sysnative\\WindowsPowerShell\\v1.0\\powershell.exe -executionpolicy bypass -File"
    else:
        ctx.logger.info("[MAPPING] linux")
        map_script_path = ctx.download_resource("device-mapping-scripts/{0}/mapDevice.sh".format(iaas))
    script_process = {'env': {'DEVICE_NAME': device_name}}
    convert_env_value_to_string(script_process['env'])
    outputs = execute(map_script_path, script_process, outputNames=None,
                      command_prefix=command_prefix, raiseException=True)
    return outputs['last_output']
def execute(script_path, process, outputNames):
    """Run *script_path* in a shell subprocess and return its parsed output.

    :param script_path: path of the script to execute (made executable).
    :param process: dict that may carry an 'env' mapping, merged into the
        subprocess environment.
    :param outputNames: ';'-separated list of expected output names, or
        None. When given, the script is run through scriptWrapper.sh with
        EXPECTED_OUTPUTS set so the wrapper can capture them.
    :raises NonRecoverableError: when the script exits non-zero.
    """
    wrapper_path = ctx.download_resource("scriptWrapper.sh")
    os.chmod(wrapper_path, 0755)

    os.chmod(script_path, 0755)
    # close_fds is only reliable together with pipes on POSIX platforms.
    on_posix = 'posix' in sys.builtin_module_names

    env = os.environ.copy()
    process_env = process.get('env', {})
    env.update(process_env)

    if outputNames is not None:
        env['EXPECTED_OUTPUTS'] = outputNames
        command = '{0} {1}'.format(wrapper_path, script_path)
    else:
        command = script_path

    ctx.logger.info('Executing: {0} in env {1}'.format(command, env))

    # NOTE: rebinds the 'process' parameter to the Popen object.
    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env,
                               cwd=None,
                               bufsize=1,
                               close_fds=on_posix)

    return_code = None

    # Drain stdout/stderr concurrently to avoid pipe-buffer deadlocks.
    stdout_consumer = OutputConsumer(process.stdout)
    stderr_consumer = OutputConsumer(process.stderr)

    # Poll until the child exits.
    while True:
        return_code = process.poll()
        if return_code is not None:
            break
        time.sleep(0.1)

    stdout_consumer.join()
    stderr_consumer.join()

    parsed_output = parse_output(stdout_consumer.buffer.getvalue())
    if outputNames is not None:
        outputNameList = outputNames.split(';')
        for outputName in outputNameList:
            ctx.logger.info('Ouput name: {0} value : {1}'.format(outputName, parsed_output['outputs'][outputName]))

    ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
                                                                                                     stderr_consumer.buffer.getvalue())
    error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
                                                                                                                         stdout_consumer.buffer.getvalue(),
                                                                                                                         stderr_consumer.buffer.getvalue())
    if return_code != 0:
        ctx.logger.error(error_message)
        raise NonRecoverableError(error_message)
    else:
        ctx.logger.info(ok_message)

    return parsed_output
def get_plugin_source(plugin, blueprint_id=None):
    """Resolve a plugin's source to a usable URL.

    An http/https source is returned as-is (stripped). Anything else is
    treated as a path relative to <blueprint_home>/plugins: the archived
    plugin is downloaded from the manager file server as a zip file and
    returned as a file:// URL.

    :raises NonRecoverableError: for a URL with a non-http(s) schema.
    :raises ValueError: for a relative source without a blueprint_id.
    """
    source = plugin.get('source') or ''
    if not source:
        return None
    source = source.strip()

    if '://' in source:
        # validate source url
        schema = source.split('://')[0]
        if schema not in ('http', 'https'):
            raise NonRecoverableError('Invalid schema: {0}'.format(schema))
        return source

    # A relative path: the archived plugin resides on the manager file
    # server as plugins/<source>.zip.
    if blueprint_id is None:
        raise ValueError('blueprint_id must be specified when plugin '
                         'source does not contain a schema')
    plugin_zip = ctx.download_resource('plugins/{0}.zip'.format(source))
    return path_to_file_url(plugin_zip)
def get_file(playbook):
    """Download the playbook resource and return its local path."""
    try:
        return ctx.download_resource(playbook)
    except exceptions.HttpException as e:
        raise exceptions.NonRecoverableError(
            'Could not get playbook file: {}.'.format(str(e)))
# Example #18
# 0
def download(filename, make_executable=False):
    """Download *filename* from the blueprint, optionally mark it
    executable, move it into ``directory`` via sudo, and return the
    original download path. No-op (returns None) for a falsy filename."""
    if not filename:
        return None
    path = ctx.download_resource(filename)
    if make_executable:
        mode = os.stat(path).st_mode
        os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    subprocess.call(["sudo", "mv", path, directory])
    return path
def get_roles(roles, target_path):
    """Download the roles resource into target_path and return the local
    path it was saved to."""
    destination = os.path.join(target_path, roles)
    try:
        return ctx.download_resource(roles, destination)
    except exceptions.HttpException as e:
        raise exceptions.NonRecoverableError(
            "Could not get roles file: {}.".format(str(e)))
def download(child_rel_path, child_abs_path, download_dir):
    """Download a blueprint artifact to download_dir/child_rel_path,
    creating parent directories as needed.

    :returns: the final path of the downloaded artifact.
    """
    import shutil
    artifact_downloaded_path = ctx.download_resource(child_abs_path)
    new_file = os.path.join(download_dir, child_rel_path)
    new_file_dir = os.path.dirname(new_file)
    if not os.path.exists(new_file_dir):
        os.makedirs(new_file_dir)
    # BUG FIX: os.rename raises EXDEV when the download temp location and
    # download_dir live on different filesystems; shutil.move handles the
    # cross-device case by copying then deleting.
    shutil.move(artifact_downloaded_path, new_file)
    ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
    return new_file
# Example #21
# 0
def get_playbook_path(playbook):
    """Download the playbook resource and return the local path it was
    saved to."""
    try:
        path = ctx.download_resource(playbook)
    except exceptions.HttpException as e:
        raise exceptions.NonRecoverableError(
            'Could not get playbook file: {}.'.format(str(e)))
    return path
# Example #22
# 0
def download(child_rel_path, child_abs_path, download_dir):
    """Download a blueprint artifact to download_dir/child_rel_path,
    creating parent directories as needed.

    :returns: the final path of the downloaded artifact.
    """
    import shutil
    artifact_downloaded_path = ctx.download_resource(child_abs_path)
    new_file = os.path.join(download_dir, child_rel_path)
    new_file_dir = os.path.dirname(new_file)
    if not os.path.exists(new_file_dir):
        os.makedirs(new_file_dir)
    # BUG FIX: os.rename raises EXDEV when the download temp location and
    # download_dir live on different filesystems; shutil.move handles the
    # cross-device case by copying then deleting.
    shutil.move(artifact_downloaded_path, new_file)
    ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
    return new_file
def start():
    """Launch a SimpleHTTPServer on :8000 serving the upgrade-test
    tarballs out of /tmp/code, and record its pid."""
    ctx.logger.info("starting codeserver")
    # Recreate the directory the code is served from.
    os.system("rm -rf /tmp/code")
    ctx.logger.info("creating server directory")
    os.mkdir("/tmp/code")
    ctx.logger.info("downloading resources")
    ctx.download_resource("resources/upgrade-test-1.0.tgz", "/tmp/code/upgrade-test-1.0.tgz")
    ctx.download_resource("resources/upgrade-test-1.1.tgz", "/tmp/code/upgrade-test-1.1.tgz")
    ctx.logger.info("starting server")

    # Start the server detached, logging to files.
    os.chdir("/tmp/code")
    server = subprocess.Popen(
        ['nohup', 'python', '-m', 'SimpleHTTPServer', '8000'],
        stdout=open("/tmp/httpserver.out", "w"),
        stderr=open("/tmp/httpserver.err", "w"))

    ctx.instance.runtime_properties['pid'] = server.pid
    ctx.logger.info("startup complete")
# Example #24
# 0
def _recover_deployments(docker_path=None, use_sudo=True):
    """Upload and run the recovery script recorded in runtime properties."""
    ctx.logger.info("Recovering deployments...")
    script_relpath = ctx.instance.runtime_properties.get(
        "recovery_script_relpath")
    if not script_relpath:
        raise NonRecoverableError("Cannot recover deployments. No recovery "
                                  "script specified.")
    script = ctx.download_resource(script_relpath)
    fabric.api.put(script, "~/recover_deployments.sh")
    _run_command("chmod +x ~/recover_deployments.sh")
    _run_command_in_cfy("/tmp/home/recover_deployments.sh",
                        docker_path=docker_path,
                        use_sudo=use_sudo)
def enable_collectors(ctx, collectors, config_path, collectors_path):
    """Enable the given diamond collectors: copy over bundled collector
    modules when a 'path' is supplied, and write one config file each."""
    for name, prop in collectors.items():
        if 'path' in prop:
            collector_dir = os.path.join(collectors_path, name)
            os.mkdir(collector_dir)
            ctx.download_resource(
                prop['path'],
                os.path.join(collector_dir, '{0}.py'.format(name)))

        config = prop.get('config', {})
        # Every collector is enabled and reports under a unique hostname.
        config['enabled'] = True
        config['hostname'] = '{0}.{1}.{2}'.format(
            get_host_id(ctx), ctx.node.name, ctx.instance.id)
        prop['config'] = config
        write_config(os.path.join(config_path, '{0}.conf'.format(name)),
                     prop.get('config', {}))
# Example #26
# 0
def deploy_gateway_pu(**kwargs):
    """Deploy a gateway processing unit via the XAP groovy deploy script,
    exporting LOOKUPLOCATORS / NIC_ADDR to the subprocess."""
    groovy_script = "xap-scripts/deploy-gateway.groovy"
    spacename = kwargs['space_name']
    puname = spacename + "-gw"
    gwname = kwargs['gateway_name']
    gatewayzones = kwargs['gateway_zones']
    targets = kwargs['gateway_targets']
    sources = kwargs['gateway_sources']
    lookups = kwargs['gateway_lookups']
    natmappings = kwargs['gateway_natmappings']
    script_path = ctx.download_resource(groovy_script)
    ctx.download_resource("xap-scripts/gateway-pu.xml", "/tmp/gateway-pu.xml")
    xapdir = "".join(line.strip() for line in open('/tmp/gsdir'))
    locators = ",".join(line.strip() for line in open('/tmp/locators'))

    ip = get_ip_from_interface_name(ctx.node.properties['interfacename'])
    # Advertise this gateway's own locator alongside the configured ones.
    lookups.append({'gwname': gwname,
                    'address': ip,
                    'discoport': kwargs['gateway_discoport'],
                    'commport': kwargs['gateway_commport']})

    command = [
        xapdir + "/tools/groovy/bin/groovy",
        "-Dpuname=" + puname,
        "-Dspacename=" + spacename,
        "-Dzones=" + gatewayzones,
        "-Dtargets=" + list_to_str(targets),
        "-Dgwname=" + gwname,
        "-Dlocallocators=" + locators,
        "-Dlocalgwname=" + gwname,
        "-Dsources=" + list_to_str(sources),
        "-Dlookups=" + list_to_str(lookups),
        "-Dnatmappings=" + natmappings,
        "-Djava.rmi.server.hostname=" + ip,
        script_path,
    ]

    child_env = os.environ.copy()
    child_env['LOOKUPLOCATORS'] = locators
    child_env['NIC_ADDR'] = ip
    ctx.logger.info("Executing: %s", command)
    output = subprocess.check_output(command, env=child_env)
    ctx.logger.info("Finished executing, output: %s", output)
def deploy_gateway_pu(**kwargs):
    """Deploy a gateway processing unit via the XAP groovy deploy script,
    exporting XAP_LOOKUP_LOCATORS / XAP_NIC_ADDRESS to the subprocess."""
    space_name = kwargs['space_name']
    pu_name = space_name + "-gw"
    gw_name = kwargs['gateway_name']
    gw_zones = kwargs['gateway_zones']
    gw_targets = kwargs['gateway_targets']
    gw_sources = kwargs['gateway_sources']
    lookups = kwargs['gateway_lookups']
    nat_mappings = kwargs['gateway_natmappings']

    script_path = ctx.download_resource("xap-scripts/deploy-gateway.groovy")
    ctx.download_resource("xap-scripts/gateway-pu.xml", "/tmp/gateway-pu.xml")
    xap_dir = "".join(line.strip() for line in open('/tmp/gsdir'))
    locators = ",".join(line.strip() for line in open('/tmp/locators'))

    ip = get_ip_from_interface_name(ctx.node.properties['interfacename'])
    # Advertise this gateway's own locator in addition to the configured ones.
    lookups.append({'gwname': gw_name,
                    'address': ip,
                    'discoport': kwargs['gateway_discoport'],
                    'commport': kwargs['gateway_commport']})

    cmd = [
        xap_dir + "/tools/groovy/bin/groovy",
        "-Dpuname=" + pu_name,
        "-Dspacename=" + space_name,
        "-Dzones=" + gw_zones,
        "-Dtargets=" + list_to_str(gw_targets),
        "-Dgwname=" + gw_name,
        "-Dlocallocators=" + locators,
        "-Dlocalgwname=" + gw_name,
        "-Dsources=" + list_to_str(gw_sources),
        "-Dlookups=" + list_to_str(lookups),
        "-Dnatmappings=" + nat_mappings,
        "-Djava.rmi.server.hostname=" + ip,
        script_path,
    ]

    child_env = os.environ.copy()
    child_env['XAP_LOOKUP_LOCATORS'] = locators
    child_env['XAP_NIC_ADDRESS'] = ip
    ctx.logger.info("Executing: %s", cmd)
    output = subprocess.check_output(cmd, env=child_env)
    ctx.logger.info("Finished executing, output: %s", output)
# Example #28
# 0
def copy_notice(service):
    """Deploys a notice file to /opt/SERVICENAME_NOTICE.txt"""
    destn = os.path.join('/opt', service + '_NOTICE.txt')
    if os.path.isfile(destn):
        ctx.logger.info('NOTICE {0} already exists. Skipping...'.format(destn))
        return
    source = 'components/{0}/NOTICE.txt'.format(service)
    ctx.logger.info('Copying {0} notice file to {1}...'.format(
        service, destn))
    move(ctx.download_resource(source), destn)
def copy_notice(service):
    """Deploys a notice file to /opt/SERVICENAME_NOTICE.txt"""
    target = os.path.join('/opt', service + '_NOTICE.txt')
    if not os.path.isfile(target):
        notice_source = 'components/{0}/NOTICE.txt'.format(service)
        ctx.logger.info('Copying {0} notice file to {1}...'.format(
            service, target))
        downloaded = ctx.download_resource(notice_source)
        move(downloaded, target)
    else:
        ctx.logger.info(
            'NOTICE {0} already exists. Skipping...'.format(target))
# Example #30
# 0
    def upload_blueprint(self):
        """Upload this deployment's blueprint to the manager.

        Records the blueprint id / archive / file name in this instance's
        runtime properties, then either reuses an already-uploaded
        blueprint or uploads ``self.blueprint_archive``.

        :returns: False when an existing blueprint is (re)used, otherwise
            the client response of the blueprint upload call.
        :raises NonRecoverableError: when external_resource is set but the
            blueprint does not exist, or when no archive was supplied.
        """

        if 'blueprint' not in ctx.instance.runtime_properties.keys():
            ctx.instance.runtime_properties['blueprint'] = dict()

        update_attributes('blueprint', 'id', self.blueprint_id)
        update_attributes(
            'blueprint', 'blueprint_archive', self.blueprint_archive)
        update_attributes(
            'blueprint', 'application_file_name', self.blueprint_file_name)

        # Whether a blueprint with this ID already exists on the manager.
        blueprint_is = any_bp_by_id(self.client, self.blueprint_id)
        create_config_type_per_bf(ctx, self.blueprint_archive, self.blueprint_id, self.blueprint_file_name)

        if self.blueprint.get(EXTERNAL_RESOURCE) and not blueprint_is:
            # external_resource promises the blueprint exists; it doesn't.
            raise NonRecoverableError(
                'Blueprint ID {0} does not exist, '
                'but {1} is {2}.'.format(
                    self.blueprint_id,
                    EXTERNAL_RESOURCE,
                    self.blueprint.get(EXTERNAL_RESOURCE)))
        elif self.blueprint.get(EXTERNAL_RESOURCE) and blueprint_is:
            ctx.logger.info("Used external blueprint.")
            return False
        elif blueprint_is:
            # Exists although not marked external: reuse it with a warning.
            ctx.logger.warn(
                'Blueprint ID {0} exists, '
                'but {1} is {2}. Will use.'.format(
                    self.blueprint_id,
                    EXTERNAL_RESOURCE,
                    self.blueprint.get(EXTERNAL_RESOURCE)))
            return False
        # Reached only when the blueprint does not exist on the manager yet.
        if not self.blueprint_archive:
            raise NonRecoverableError(
                'No blueprint_archive supplied, '
                'but {0} is False'.format(EXTERNAL_RESOURCE))

        # Parse the blueprint_archive in order to get url parts
        parse_url = urlparse(self.blueprint_archive)

        # Check if the ``blueprint_archive`` is not a URL then we need to
        # download it and pass the binaries to the client_args
        if not(parse_url.netloc and parse_url.scheme):
            self.blueprint_archive = \
                ctx.download_resource(self.blueprint_archive)

        client_args = \
            dict(blueprint_id=self.blueprint_id,
                 archive_location=self.blueprint_archive,
                 application_file_name=self.blueprint_file_name)

        return self.dp_get_client_response('blueprints',
                                           BP_UPLOAD,
                                           client_args)
def configure(output_config, files=None, path=None, beaver_config=None,
              beaver_config_file_path=None, validate=False, **kwargs):
    """Generates configuration for the beaver process.

    :params string files: A list of files to monitor.
    :params dict output_config: A configuration of a beaver output.
    :params dict beaver_config: General beaver level config.
    """
    ctx.logger.info('Generating beaver config....')
    destination = _set_beaver_config_path(ctx.node.id)
    if beaver_config_file_path:
        # A ready-made config file was supplied - just fetch it.
        ctx.download_resource(beaver_config_file_path, destination)
    else:
        builder = BeaverConfigurator(output_config, files, path,
                                     beaver_config, beaver_config_file_path)
        builder.set_main_config()
        builder.set_output()
        builder.set_monitored_paths()
        builder.set_additional_config()
        builder.write_config_file(destination)
    ctx.instance.runtime_properties['config_file'] = destination
def kube_run_expose(**kwargs):
  """Create kubernetes objects for this node.

  Uses, in order of preference: the 'config' property (single object),
  the 'config_files' property (yaml files with optional overrides), or
  falls back to `kubectl run` + `kubectl expose` built from the node
  properties.
  """
  ctx.logger.info("in kube_run_expose")
  config = ctx.node.properties['config']
  config_files = ctx.node.properties['config_files']

  def write_and_run(d):
    """Dump *d* to a temp yaml file and `kubectl create` it, retrying
    while the API server on :8080 is not yet reachable."""
    os.chdir(os.path.expanduser("~"))
    fname = "/tmp/kub_{}.yaml".format(ctx.instance.id)
    with open(fname, 'w') as f:
      yaml.safe_dump(d, f)
    cmd = ("./kubectl -s http://localhost:8080 create -f " + fname +
           " >> /tmp/kubectl.out 2>&1")
    ctx.logger.info("running create: {}".format(cmd))

    # Retry a few times; the server may still be starting up.
    retry = 0
    while subprocess.call(cmd, shell=True):
      if retry > 3:
        raise Exception("couldn't connect to server on 8080")
      retry = retry + 1
      ctx.logger.info("run failed retrying")
      time.sleep(2)

  if config:
    write_and_run(config)
  elif len(config_files):
    for cfg_file in config_files:  # renamed: 'file' shadowed the builtin
      if not ctx._local:
        local_path = ctx.download_resource(cfg_file['file'])
      else:
        local_path = cfg_file['file']
      with open(local_path) as f:
        base = yaml.load(f)
      if 'overrides' in cfg_file:
        for o in cfg_file['overrides']:
          ctx.logger.info("exeing o={}".format(o))
          # check for substitutions
          o = process_subs(o)
          # The override is a python statement applied to 'base'; use the
          # py2/py3-compatible call form of exec.
          exec("base" + o, globals(), locals())
      write_and_run(base)
  else:
    # do kubectl run
    cmd = ('./kubectl -s http://localhost:8080 run {} --image={} '
           '--port={} --replicas={}').format(
        ctx.node.properties['name'], ctx.node.properties['image'],
        ctx.node.properties['target_port'], ctx.node.properties['replicas'])
    if ctx.node.properties['run_overrides']:
      cmd = cmd + " --overrides={}".format(
          ctx.node.properties['run_overrides'])

    # BUG FIX: was subprocess.call(cmd, True), which passed True as the
    # bufsize argument instead of shell=True, so the command string was
    # never executed through a shell.
    subprocess.call(cmd, shell=True)

    # do kubectl expose
    cmd = ('./kubectl -s http://localhost:8080 expose rc {} --port={} '
           '--protocol={}').format(
        ctx.node.properties['name'], ctx.node.properties['port'],
        ctx.node.properties['protocol'])
    if ctx.node.properties['expose_overrides']:
      cmd = cmd + " --overrides={}".format(
          ctx.node.properties['expose_overrides'])

    subprocess.call(cmd, shell=True)
def do_mapping(current_os, iaas, device_name):
    """Execute the per-IaaS device mapping script for the current OS and
    return its 'last_output' value."""
    ctx.logger.info("inside current os: '{0}'".format(current_os))
    command_prefix = None
    if current_os == 'windows':
        ctx.logger.info('[MAPPING] windows')
        script = ctx.download_resource(
            "device-mapping-scripts/{0}/mapDevice.ps1".format(iaas))
        command_prefix = "C:\\Windows\\Sysnative\\WindowsPowerShell\\v1.0\\powershell.exe -executionpolicy bypass -File"
    else:
        ctx.logger.info("[MAPPING] linux")
        script = ctx.download_resource(
            "device-mapping-scripts/{0}/mapDevice.sh".format(iaas))
    process_cfg = {'env': {'DEVICE_NAME': device_name}}
    convert_env_value_to_string(process_cfg['env'])
    result = execute(script,
                     process_cfg,
                     outputNames=None,
                     command_prefix=command_prefix)
    return result['last_output']
# Example #34
# 0
def get_ssl_ca_file():
    """Yield a filesystem path to the configured CA certificate, or None.

    Reads client_config.configuration.api_options.ssl_ca_cert and handles
    three shapes of the value: a blueprint-relative resource path, an
    existing local file path, or the raw certificate content itself.
    Temporary files created here are removed after the yield.

    NOTE(review): this generator yields exactly once and then cleans up --
    presumably it is wrapped with contextlib.contextmanager at the
    decoration site (not visible in this chunk); confirm before calling
    it directly.
    """
    configuration_property = ctx.node.properties.get(CLIENT_CONFIG, {}).get(
        CONFIGURATION, {})
    current_value = configuration_property.get(API_OPTIONS, {}).get(
        SSL_CA_CERT)

    # Case 1: the value names a resource bundled inside the blueprint.
    if current_value and check_if_resource_inside_blueprint_folder(
            current_value):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            # close first so download_resource can write over the path
            f.close()
            ctx.download_resource(
                current_value,
                target_path=f.name)
            try:
                ctx.logger.info(
                    "using CA file:{file}".format(file=f.name))
                yield f.name
            finally:
                # delete=False above, so cleanup is our responsibility
                os.remove(f.name)

    # Case 2: the value is an existing file on the local filesystem.
    elif current_value and os.path.isfile(current_value):
        ctx.logger.info("using CA file located at: {path}".format(
            path=current_value))
        yield current_value

    # Case 3: the value is the certificate content; stage it to a file.
    elif current_value and not os.path.isfile(current_value):
        # It means we have the ca as a string in the blueprint
        f = tempfile.NamedTemporaryFile('w',
                                        suffix='__cfy.helm.k8s__',
                                        delete=False)
        f.write(current_value)
        f.close()
        try:
            ctx.logger.info("using CA content from the blueprint.")
            yield f.name
        finally:
            os.remove(f.name)
    else:
        # nothing configured -- callers get None
        ctx.logger.info("CA file not found.")
        yield
Exemple #35
0
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    create handler configuration files.
    copy over handler if path to file was provided.
    return list of active handlers.

    :param ctx: cloudify context (provides bootstrap_context and
        download_resource).
    :param handlers: dict of handler-name -> properties, or None to use
        DEFAULT_HANDLERS populated with the manager's broker credentials.
    :param config_path: directory receiving the per-handler .conf files.
    :param handlers_path: directory receiving downloaded handler modules.
    :raises exceptions.NonRecoverableError: if handlers is an empty dict.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        # If we do not have a real manager cloudify_agent is expected to be an
        # empty dict. This will be handled by get_broker_credentials.
        cloudify_agent = ctx.bootstrap_context.cloudify_agent

        broker_user, broker_pass = utils.internal.get_broker_credentials(
            cloudify_agent
        )

        config_changes = {
            'server': cloudify_agent.broker_ip,
            'user': broker_user,
            'password': broker_pass,
        }

        handlers['cloudify_handler.cloudify.CloudifyHandler'][
            'config'].update(config_changes)

    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        # 'path' points at a handler module bundled with the blueprint;
        # download it next to the other handler modules.
        # (was: 'path' in prop.keys() -- membership tests the dict directly)
        if 'path' in prop:
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path, '{0}.conf'.format(
            name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    # Return a real list (not a py3 dict view), matching the sibling
    # implementation of this function elsewhere in the file.
    return list(handlers.keys())
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    create handler configuration files.
    copy over handler if path to file was provided.
    return list of active handlers.

    :param ctx: cloudify context (provides download_resource).
    :param handlers: dict of handler-name -> properties, or None to use
        DEFAULT_HANDLERS populated from the agent's broker_config.json.
    :param config_path: directory receiving the per-handler .conf files.
    :param handlers_path: directory receiving downloaded handler modules.
    :raises exceptions.NonRecoverableError: if handlers is an empty dict.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        # Broker credentials are dropped by the agent installer in the
        # agent workdir.
        # NOTE(review): os.environ.get returns None when the key is
        # missing, which would make os.path.join raise -- presumably the
        # env var is guaranteed by the agent; confirm.
        agent_workdir = os.environ.get(constants.AGENT_WORK_DIR_KEY)
        conf_file_path = os.path.join(agent_workdir, 'broker_config.json')
        if os.path.isfile(conf_file_path):
            with open(conf_file_path) as conf_handle:
                agent_config = json.load(conf_handle)

            config_changes = {
                'server': agent_config['broker_hostname'],
                'user': agent_config['broker_username'],
                'password': agent_config['broker_password'],
                'broker_cert_path': agent_config['broker_cert_path'],
                'broker_ssl_enabled': agent_config['broker_ssl_enabled'],
            }

            handlers['cloudify_handler.cloudify.CloudifyHandler'][
                'config'].update(config_changes)

    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        # 'path' points at a handler module bundled with the blueprint
        # (was: 'path' in prop.keys() -- membership tests the dict directly)
        if 'path' in prop:
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path, '{0}.conf'.format(
            name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    # Return a real list (not a py3 dict view), matching the sibling
    # implementation of this function elsewhere in the file.
    return list(handlers.keys())
def start(additional_arguments=None, daemonize=True, **kwargs):
    """Starts the beaver process.

    :params string beaver_config_path: A blueprint relative path to a user
     supplied beaver config file.
    :params list additional_arguments: A list of additional arguments to pass
     to beaver's commandline.
    :params bool daemonize: whether to run the process as a daemon.
    """
    config_file = ctx.instance.runtime_properties.get(
        'beaver_config_file_path')
    if config_file:
        ctx.download_resource(config_file, '')
    ctx.logger.info('Starting beaver with config file: {0} and arguments: '
                    '{1}'.format(config_file, additional_arguments))
    beaver_cmd = ['beaver', '-c', config_file]
    if additional_arguments and not isinstance(additional_arguments, list):
        raise NonRecoverableError(
            '`additional_arguments` must be of type list.')
    # BUGFIX: extend(None) raises TypeError with the default argument --
    # only append extra arguments when some were actually supplied.
    if additional_arguments:
        beaver_cmd.extend(additional_arguments)
    if daemonize:
        beaver_cmd.append('-d')
Exemple #38
0
def download_service(service_name):
    """Install *service_name* from blueprint resources into /usr/bin and
    lock down its ownership and permissions."""
    service_path = "/usr/bin/" + service_name
    if not os.path.isfile(service_path):
        downloaded = ctx.download_resource(
            'resources/{}'.format(service_name))
        ctx.logger.debug('{} downloaded.'.format(service_name))
        if execute_command(['sudo', 'cp', downloaded, service_path]) is False:
            raise NonRecoverableError("Can't copy {}.".format(service_path))
    # fix file attributes: read/execute only, owned by root
    for verb, args in (('chmod', ['sudo', 'chmod', '555', service_path]),
                       ('chown', ['sudo', 'chown', 'root:root',
                                  service_path])):
        if execute_command(args) is False:
            raise NonRecoverableError(
                "Can't {} {}.".format(verb, service_path))
    ctx.logger.debug('{} attributes fixed'.format(service_name))
def upload_static_config(static_conf, conf_path):
    """ Upload the static config to the service.

    :param static_conf: blueprint-relative path of the config resource.
    :param conf_path: destination path the config is copied to (via sudo).
    :raises exceptions.NonRecoverableError: if the download fails.
    """

    ctx.logger.info('Copying config to {0}'.format(conf_path))

    # tempfile.mktemp() is deprecated and race-prone (the path can be
    # claimed by another process before use); mkstemp() creates the file
    # atomically.
    fd, tmp_path = tempfile.mkstemp()
    os.close(fd)
    try:
        downloaded_file = ctx.download_resource(static_conf, tmp_path)
    except Exception as e:
        raise exceptions.NonRecoverableError(
            'failed to download. Error: {0}.'.format(str(e)))

    run('sudo cp {0} {1}'.format(downloaded_file, conf_path))
Exemple #40
0
def start():
    """Serve the upgrade-test archives over HTTP on port 8000.

    Recreates /tmp/code, downloads the two test tarballs into it, starts
    a background SimpleHTTPServer there and records its pid in the
    instance runtime properties.
    """
    # local import so the module-level import block (outside this chunk)
    # does not need to change
    import shutil
    ctx.logger.info("starting codeserver")
    # create directory for serving code and load it up
    # (shutil.rmtree replaces the shelled-out `os.system("rm -rf ...")`)
    shutil.rmtree("/tmp/code", ignore_errors=True)
    ctx.logger.info("creating server directory")
    os.mkdir("/tmp/code")
    ctx.logger.info("downloading resources")
    ctx.download_resource("resources/upgrade-test-1.0.tgz",
                          "/tmp/code/upgrade-test-1.0.tgz")
    ctx.download_resource("resources/upgrade-test-1.1.tgz",
                          "/tmp/code/upgrade-test-1.1.tgz")
    ctx.logger.info("starting server")

    # start server; nohup keeps it alive after this operation returns
    os.chdir("/tmp/code")
    proc = subprocess.Popen(
        ['nohup', 'python', '-m', 'SimpleHTTPServer', '8000'],
        stdout=open("/tmp/httpserver.out", "w"),
        stderr=open("/tmp/httpserver.err", "w"))

    # remember the pid so a stop operation can kill the server later
    ctx.instance.runtime_properties['pid'] = proc.pid
    ctx.logger.info("startup complete")
Exemple #41
0
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    create handler configuration files.
    copy over handler if path to file was provided.
    return list of active handlers.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        # Pick up broker credentials dropped by the agent installer, if any.
        broker_conf = os.path.join(_calc_workdir(), 'broker_config.json')
        if os.path.isfile(broker_conf):
            with open(broker_conf) as handle:
                broker = json.load(handle)

            cloudify_config = handlers[
                'cloudify_handler.cloudify.CloudifyHandler']['config']
            cloudify_config.update({
                'server': broker['broker_hostname'],
                'user': broker['broker_username'],
                'password': broker['broker_password'],
                'broker_cert_path': broker['broker_cert_path'],
                'broker_ssl_enabled': broker['broker_ssl_enabled'],
            })
    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        if 'path' in prop:
            # handler module shipped with the blueprint -- fetch it locally
            module_name = name.split('.')[-2]
            ctx.download_resource(
                prop['path'],
                os.path.join(handlers_path, '{0}.py'.format(module_name)))

        conf_file = os.path.join(config_path,
                                 '{0}.conf'.format(name.split('.')[-1]))
        write_config(conf_file, prop.get('config', {}))

    return list(handlers.keys())
Exemple #42
0
def configure():
    """Validate and install a new HAProxy configuration, then restart.

    The new config is checked with `haproxy -c` on a staged copy before
    being moved into place, so a broken file never reaches CONFIG_PATH.
    """
    ctx.logger.info('Configuring HAProxy')
    haproxy_config = ctx.download_resource(NEW_CONFIG_PATH)
    # stage under a unique name so concurrent runs cannot collide
    tmpfile = '/tmp/haproxy_{0}.cfg'.format(uuid.uuid4())
    put(haproxy_config, tmpfile)
    ctx.logger.info('Validating the given HAProxy configuration file')
    # -c = check mode: parse the config and exit without starting haproxy;
    # a failure here aborts the operation before the live config changes
    run('/usr/sbin/haproxy -f {0} -c'.format(tmpfile))
    ctx.logger.info(
        'Copying the configuration file to {0}'.format(CONFIG_PATH))
    sudo('mv {0} {1}'.format(tmpfile, CONFIG_PATH))
    ctx.logger.info('Restarting HAProxy service')
    sudo('service haproxy restart')
    ctx.logger.info('HAProxy was configured successfully')
def upload_static_config(static_conf, conf_path):
    """ Upload the static config to the service.

    :param static_conf: blueprint-relative path of the config resource.
    :param conf_path: destination path the config is copied to (via sudo).
    :raises exceptions.NonRecoverableError: if the download fails.
    """

    ctx.logger.info('Copying config to {0}'.format(conf_path))

    # tempfile.mktemp() is deprecated and race-prone; mkstemp() creates
    # the file atomically so the path cannot be hijacked before use.
    fd, tmp_path = tempfile.mkstemp()
    os.close(fd)
    try:
        downloaded_file = ctx.download_resource(static_conf, tmp_path)
    except Exception as e:
        raise exceptions.NonRecoverableError(
            'failed to download. Error: {0}.'.format(str(e)))

    run('sudo cp {0} {1}'.format(downloaded_file, conf_path))
def start(additional_arguments=None, daemonize=True,
          **kwargs):
    """Starts the beaver process.

    :params string beaver_config_path: A blueprint relative path to a user
     supplied beaver config file.
    :params list additional_arguments: A list of additional arguments to pass
     to beaver's commandline.
    :params bool daemonize: whether to run the process as a daemon.
    """
    config_file = ctx.instance.runtime_properties.get(
        'beaver_config_file_path')
    if config_file:
        ctx.download_resource(config_file, '')
    ctx.logger.info('Starting beaver with config file: {0} and arguments: '
                    '{1}'.format(config_file, additional_arguments))
    beaver_cmd = ['beaver', '-c', config_file]
    if additional_arguments and not isinstance(additional_arguments, list):
        raise NonRecoverableError(
            '`additional_arguments` must be of type list.')
    # BUGFIX: extend(None) raises TypeError with the default argument --
    # only append extra arguments when some were actually supplied.
    if additional_arguments:
        beaver_cmd.extend(additional_arguments)
    if daemonize:
        beaver_cmd.append('-d')
Exemple #45
0
def _resumable_task_base(ctx):
    """Test helper: block until externally unlocked, verifying that
    runtime-property updates and resource downloads survive a resume."""
    # mark as not-yet-resumed and persist through the merge handler
    ctx.instance.runtime_properties['resumed'] = False
    ctx.instance.update(_merge_handler)
    # busy-wait (1s poll) until another party flips the unlock condition
    while not _is_unlocked():
        ctx.instance.refresh()
        ctx.logger.info('{0} WAITING'.format(ctx.operation.name))
        time.sleep(1)
    ctx.instance.runtime_properties['resumed'] = True

    # fetch some file, any file, to see if downloading works after a resume
    downloaded_data = ctx.download_resource('resumable_mgmtworker.yaml')
    ctx.logger.info('Downloaded data: %s bytes', len(downloaded_data))

    ctx.instance.update(_merge_handler)
Exemple #46
0
def run_docl_bootstrap_or_download():
    """Fetch the docl bootstrap script and run it against a fresh clone
    of the cloudify-manager-blueprints repository."""
    ctx.logger.info('Preparing docl bootstrap execution')
    bootstrap_script = join(utils.WORKDIR, 'docl_init.sh')
    ctx.download_resource(join('scripts', 'docl_init.sh'), bootstrap_script)
    utils.run('chmod +x {0}'.format(bootstrap_script))

    ctx.logger.info('Cloning cloudify manager blueprints {0}'.format(
        MANAGER_BLUEPRINTS_REPO))
    blueprints_repo = utils.clone(**MANAGER_BLUEPRINTS_REPO)

    blueprint_path = os.path.join(blueprints_repo,
                                  'simple-manager-blueprint.yaml')
    ctx.logger.info('Creating private key file')
    key_path = create_key_file()

    # non-master branches always force a rebuild
    rebuild = ctx.node.properties['rebuild']
    if MANAGER_BP_BRANCH != 'master':
        rebuild = 'true'
    script_args = (bootstrap_script, utils.CLOUDIFY_VENV_PATH,
                   blueprint_path, key_path, utils.REPOS_DIR, rebuild)
    utils.run('{0} {1} {2} {3} {4} {5}'.format(*script_args), out=True)
Exemple #47
0
def _recover_deployments(docker_path=None, use_sudo=True):
    """Run the blueprint-supplied recovery script inside the cfy container."""
    ctx.logger.info('Recovering deployments...')
    relpath = ctx.instance.runtime_properties.get('recovery_script_relpath')
    if not relpath:
        raise NonRecoverableError('Cannot recover deployments. No recovery '
                                  'script specified.')
    local_script = ctx.download_resource(relpath)
    # stage the script on the manager host, then execute it inside cfy
    fabric.api.put(local_script, '~/recover_deployments.sh')
    _run_command('chmod +x ~/recover_deployments.sh')
    _run_command_in_cfy('/tmp/home/recover_deployments.sh',
                        docker_path=docker_path,
                        use_sudo=use_sudo)
def _download_local_file(local_path):
    """Download a blueprint-relative file and return its local path.

    :param local_path: ``str``: file path relative to the blueprint package
    :return: local filesystem path of the downloaded copy
    :raises NonRecoverableError: when the resource is missing, with the
        original HttpException attached as the cause.
    """
    try:
        return ctx.download_resource(local_path)
    except HttpException as error:
        _, _, tb = sys.exc_info()
        raise NonRecoverableError('{} file does not exist.'.format(local_path),
                                  causes=[exception_to_error_cause(error, tb)])
Exemple #49
0
def start_service(**kwargs):
  """Start a docker swarm service.

  Two modes: if a compose_file property is set, the file is copied to the
  manager over fabric and run with docker-compose; otherwise a service
  create request body is built from node properties / runtime properties
  and POSTed to the docker engine REST API.
  """
  ctx.logger.info("in start_service")

  if ctx.node.properties['compose_file'] != '':
    # get file, transfer to manager, and run
    ctx.logger.info("getting compose file:{}".format(ctx.node.properties['compose_file']))
    path=ctx.download_resource(ctx.node.properties['compose_file'])
    # fabric needs ssh credentials stashed by an earlier operation
    if not 'mgr_ssh_user' in ctx.instance.runtime_properties:
      raise NonRecoverableError('ssh user not specified') 
    if not 'mgr_ssh_keyfile' in ctx.instance.runtime_properties:
      raise NonRecoverableError('ssh keyfile not specified') 
    setfabenv(ctx)
    ctx.logger.info("putting compose file on manager")
    put(path,"/tmp/compose.in")
    ctx.logger.info("calling compose")
    sudo("/usr/local/bin/docker-compose  -H localhost:2375 -f /tmp/compose.in up")

  else:
    # Build the docker "service create" request body by hand from node
    # properties.
    body={}
    body['Name']=ctx.node.properties['name']
    body['TaskTemplate']={}
    body['TaskTemplate']['Placement']=ctx.node.properties['placement']
    body['TaskTemplate']['Limits']=ctx.node.properties['limits']
    body['Mode']={}
    body['Mode']['Replicated']={}
    body['Mode']['Replicated']['Replicas']=ctx.node.properties['replicas']
    body['EndpointSpec']={}
    body['EndpointSpec']['Ports']=[]
    if 'labels' in ctx.node.properties:
      body['Labels']=ctx.node.properties['labels']

    #containers, volumes
    # runtime properties prefixed container_/port_ were presumably
    # collected from related nodes; camelmap converts their keys to the
    # CamelCase the docker API expects -- confirm against the relationship
    # operations that set them.
    for k,v in ctx.instance.runtime_properties.iteritems():
      if str(k).startswith("container_"):
        body['TaskTemplate']['ContainerSpec']=camelmap(v)
      elif str(k).startswith("port_"):  
        body['EndpointSpec']={} if not 'EndpointSpec' in body else body['EndpointSpec']
        key=body['EndpointSpec']['Ports']=[] if not 'Ports' in body['EndpointSpec'] else body['EndpointSpec']['Ports']
        key.append(camelmap(v))
    ctx.logger.info("BODY={}".format(json.dumps(body)))
    resp =requests.post('http://{}:{}/services/create'.format(ctx.instance.runtime_properties['ip'],ctx.instance.runtime_properties['port']),data=json.dumps(body),headers={'Content-Type':'application/json'})

    print "RESP={} {}".format(resp.status_code,resp.text)
    # docker returns 201 Created on success
    if resp.status_code != 201:
      raise NonRecoverableError(resp.text) 
 
    # get service id
    resp=json.loads(resp.text)
    ctx.instance.runtime_properties['service_id']=resp['ID']
Exemple #50
0
def setup_helm(username, resource):
    """Install the helm client on the remote host and init the tiller server.

    Downloads the named install script from the blueprint, stages it in
    the user's home directory with owner-only permissions, runs it, and
    finally runs `helm init`.
    """
    tmp = NamedTemporaryFile()
    script_path = '/home/{0}/{1}'.format(username, resource)
    # fetch the blueprint-bundled install script into a local temp file
    ctx.download_resource('scripts/{0}'.format(resource), tmp.name)

    # stage it in the user's home directory
    ctx.logger.debug('Copy {0} to {1}'.format(tmp.name, script_path))
    put(tmp.name, script_path)

    # hand ownership to the target user
    ctx.logger.debug('Change file {0} owner to {1}'.format(
        script_path, username))
    sudo('chown {0} {1}'.format(username, script_path))

    # owner-only read/write/execute
    ctx.logger.debug(
        'Change file {0} permission to 700'.format(script_path))
    sudo('chmod 700 {0}'.format(script_path))

    # run the installer
    ctx.logger.debug(
        'Install helm client using script file {0}'.format(script_path))
    handle_fabric_response(run('bash {0}'.format(script_path)))

    # bootstrap the server side
    ctx.logger.debug('Initialize helm and install tiller server')
    handle_fabric_response(run('helm init'))
Exemple #51
0
def _create_db_tables_and_add_defaults():
    """Populate the Cloudify DB: create tables with default values, then
    run the modify_cloudify_db.sh post-processing script."""
    tmp_dir = tempfile.gettempdir()

    db_script = join(utils.get_exec_tempdir(), 'modify_cloudify_db.sh')
    ctx.download_resource(
        source='components/restservice/scripts/modify_cloudify_db.sh',
        destination=db_script)

    ctx.logger.info('Creating SQL tables and adding default values...')
    create_script = join(tmp_dir, 'create_tables_and_add_defaults.py')
    ctx.download_resource(
        join('components/restservice/config',
             'create_tables_and_add_defaults.py'),
        create_script)

    script_args = runtime_props['security_configuration']
    script_args.update({
        'amqp_host': runtime_props['rabbitmq_endpoint_ip'],
        'amqp_username': runtime_props['rabbitmq_username'],
        'amqp_password': runtime_props['rabbitmq_password'],
        'postgresql_host': runtime_props['postgresql_host'],
        'db_migrate_dir': join(utils.MANAGER_RESOURCES_HOME,
                               'cloudify', 'migrations'),
    })

    # The script won't have access to the ctx, so the relevant args are
    # dumped to a JSON file whose path is passed on the command line.
    args_file = join(tmp_dir, 'security_config.json')
    with open(args_file, 'w') as f:
        json.dump(script_args, f)

    # Run with the rest-service venv's python so the script sees the
    # correct dependencies.
    venv_python = join(runtime_props['home_dir'], 'env', 'bin', 'python')
    result = utils.sudo([venv_python, create_script, args_file])
    utils.sudo(['bash', db_script])

    _log_results(result)
    utils.remove(args_file)
    utils.remove(create_script)
Exemple #52
0
def prepare_kubernetes_script():
    """Upload kubernetes installation script, if missing.

    Runs in a pre-configuration phase so that the installation script is
    available on mist.io before configure_kubernetes_master() /
    configure_kubernetes_worker() need it.  The uploaded script's id is
    stored in the instance's runtime properties for those later steps.
    """
    if ctx.instance.runtime_properties.get('script_id'):
        ctx.logger.info('Kubernetes installation script already exists')
        return

    ctx.logger.info('Uploading fresh kubernetes installation script')
    # No script_id yet (perhaps this is the first node being configured):
    # fetch the script from the blueprint and upload it to mist.io.
    client = MistConnectionClient().client
    local_copy = os.path.join(os.path.dirname(__file__), 'mega-deploy.sh')
    ctx.download_resource(os.path.join('scripts', 'mega-deploy.sh'),
                          local_copy)
    with open(os.path.abspath(local_copy)) as fobj:
        contents = fobj.read()
    uploaded = client.add_script(
        name='install_kubernetes_%s' % random_string(length=4),
        script=contents,
        location_type='inline',
        exec_type='executable')
    ctx.instance.runtime_properties['script_id'] = uploaded['id']
Exemple #53
0
def enable_collectors(ctx, collectors, config_path, collectors_path):
    """Write an enabled .conf for every collector and stage any collector
    modules that are bundled with the blueprint."""
    for name, prop in collectors.items():
        if 'path' in prop:
            # collector implementation shipped with the blueprint --
            # recreate its directory and download the module into it
            target_dir = os.path.join(collectors_path, name)
            if os.path.exists(target_dir):
                ctx.logger.warn(
                    'Collector path {path} already existed, removing.'.format(
                        path=target_dir, ))
                rmtree(target_dir)
            os.mkdir(target_dir)
            ctx.download_resource(
                prop['path'],
                os.path.join(target_dir, '{0}.py'.format(name)))

        # every collector is enabled and tagged with a unique hostname
        conf = prop.get('config', {})
        conf['enabled'] = True
        conf['hostname'] = '{0}.{1}.{2}'.format(
            get_host_id(ctx), ctx.node.name, ctx.instance.id)
        prop['config'] = conf
        write_config(os.path.join(config_path, '{0}.conf'.format(name)),
                     prop.get('config', {}))
Exemple #54
0
def _recover_deployments(docker_path=None, use_sudo=True):
    """Stage and run the blueprint's recovery script inside the manager."""
    ctx.logger.info('Recovering deployments...')
    relpath = ctx.instance.runtime_properties.get('recovery_script_relpath')
    if not relpath:
        raise NonRecoverableError('Cannot recover deployments. No recovery '
                                  'script specified.')
    local_script = ctx.download_resource(relpath)
    # copy to the host, make executable, then run inside the cfy container
    fabric.api.put(local_script, '~/recover_deployments.sh')
    _run_command('chmod +x ~/recover_deployments.sh')
    _run_command_in_cfy('/tmp/home/recover_deployments.sh',
                        docker_path=docker_path,
                        use_sudo=use_sudo)
Exemple #55
0
def download_service(service_name):
    """Copy *service_name* from blueprint resources into /usr/bin and fix
    its ownership/permissions (best effort -- command results unchecked)."""
    service_path = "/usr/bin/" + service_name
    if not os.path.isfile(service_path):
        try:
            local_copy = ctx.download_resource(
                'resources/{}'.format(service_name))
        except HttpException:
            raise NonRecoverableError(
                '{} binary not in resources.'.format(service_name))
        ctx.logger.debug('{} downloaded.'.format(service_name))
        execute_command(['sudo', 'cp', local_copy, service_path])
    # fix file attributes: read/execute only, owned by root
    for fix_cmd in (['sudo', 'chmod', '555', service_path],
                    ['sudo', 'chown', 'root:root', service_path]):
        execute_command(fix_cmd)
    ctx.logger.debug('{} attributes fixed'.format(service_name))
def get_pem_data(resource_type, resource_data):
    """Return PEM data either verbatim ('text') or read from a downloaded
    blueprint file ('file'); any other resource_type is an error."""
    if resource_type == 'text':
        return resource_data
    if resource_type == 'file':
        local_path = ctx.download_resource(resource_data)
        try:
            with open(local_path) as pem_file:
                return pem_file.read()
        except IOError as error:
            raise NonRecoverableError(
                'Error during reading certificate file {0}: {1}'.format(
                    local_path, error))
    raise NonRecoverableError(
        'Unknown type of certificate resource: {0}.'.format(resource_type))
Exemple #57
0
def _create_source_path(source_tmp_path):
    # didn't download anything so check the provided path
    # if file and absolute path or not
    if not os.path.isabs(source_tmp_path):
        # bundled and need to be downloaded from blueprint
        source_tmp_path = ctx.download_resource(source_tmp_path)

    if os.path.isfile(source_tmp_path):
        file_name = source_tmp_path.rsplit('/', 1)[1]
        file_type = file_name.rsplit('.', 1)[1]
        # check type
        if file_type == 'zip':
            return unzip_archive(source_tmp_path)
        elif file_type in TAR_FILE_EXTENSTIONS:
            return untar_archive(source_tmp_path)

    return source_tmp_path
    def upload_blueprint(self):
        """Ensure ``self.blueprint_id`` exists on the manager, uploading
        the configured archive when needed.

        Returns False when an existing blueprint is (re)used; otherwise
        returns the result of the upload call.  Raises
        NonRecoverableError when the external-resource flag disagrees
        with the manager's state, or when no archive was supplied.
        """
        if 'blueprint' not in ctx.instance.runtime_properties:
            ctx.instance.runtime_properties['blueprint'] = dict()

        # record what is about to be used so other operations can see it
        update_runtime_properties('blueprint', 'id', self.blueprint_id)
        update_runtime_properties('blueprint', 'blueprint_archive',
                                  self.blueprint_archive)
        update_runtime_properties('blueprint', 'application_file_name',
                                  self.blueprint_file_name)

        blueprint_exists = blueprint_id_exists(self.client, self.blueprint_id)

        if self.blueprint.get(EXTERNAL_RESOURCE) and not blueprint_exists:
            # caller promised the blueprint already exists -- it does not
            raise NonRecoverableError(
                'Blueprint ID \"{0}\" does not exist, '
                'but {1} is {2}.'.format(
                    self.blueprint_id, EXTERNAL_RESOURCE,
                    self.blueprint.get(EXTERNAL_RESOURCE)))
        elif self.blueprint.get(EXTERNAL_RESOURCE) and blueprint_exists:
            ctx.logger.info("Used external blueprint.")
            return False
        elif blueprint_exists:
            # not marked external but already uploaded: reuse, with warning
            ctx.logger.warn(
                'Blueprint ID "{0}" exists, '
                'but {1} is {2}, will use the existing one.'.format(
                    self.blueprint_id, EXTERNAL_RESOURCE,
                    self.blueprint.get(EXTERNAL_RESOURCE)))
            return False
        if not self.blueprint_archive:
            raise NonRecoverableError(
                'No blueprint_archive supplied, '
                'but {0} is False'.format(EXTERNAL_RESOURCE))

        # Check if the ``blueprint_archive`` is not a URL then we need to
        # download it and pass the binaries to the client_args
        # NOTE(review): the comment above says "not a URL", yet the code
        # downloads when _is_valid_url() is truthy -- one of the two looks
        # inverted; confirm against _is_valid_url's semantics before
        # changing either.
        if self._is_valid_url(self.blueprint_archive):
            self.blueprint_archive = ctx.download_resource(
                self.blueprint_archive)

        client_args = dict(blueprint_id=self.blueprint_id,
                           archive_location=self.blueprint_archive,
                           application_file_name=self.blueprint_file_name)

        return self._http_client_wrapper('blueprints', '_upload', client_args)
def get_terraform_source(_resource_config):
    """Return the local terraform source directory, extracting the
    blueprint-packaged zip on first use and writing a backend .tf file
    when a backend is configured."""
    source = ctx.instance.runtime_properties.get('terraform_source')
    if not source:
        # TODO: Use other sources than a zip file packaged with the blueprint.
        archive = ctx.download_resource(_resource_config.get('source'))
        source = unzip_archive(archive)
    update_runtime_properties('terraform_source', source)

    backend = _resource_config.get('backend')
    if backend:
        backend_path = os.path.join(source, '{0}.tf'.format(backend['name']))
        with open(backend_path, 'w') as handle:
            handle.write(create_backend_string(
                backend['name'], backend.get('options', {})))
    return source
Exemple #60
0
def create_namespace(namespace):
    ctx.instance.runtime_properties['namespace'] = namespace

    nsp_file = ctx.download_resource(os.path.join(BASE_DIR, 'namespace.yaml'))

    response = get(namespace)

    if response.status_code != WORKING_CODE:
        ctx.logger.debug('CODE: {0} RESP: {1}'.format(response.status_code,
                                                      response.json()))
        with open(nsp_file, 'r') as f:
            nsp_yaml = yaml.load(f)
        created = create('namespaces', nsp_yaml)
        if created != SUCCESS_CODE and created != ALREADY_EXISTS:
            raise NonRecoverableError('Failed to create namespace.')

    timeout = time.time() + 30
    while True:
        namespace = get(namespace)
        if namespace.status_code == WORKING_CODE:
            ctx.logger.info('Namespace is setup.')
            ctx.logger.debug('namespace get response.text: {0}'.format(
                namespace.text))
            break
        if time.time() > timeout:
            ctx.logger.debug('namespace get response.text: {0}'.format(
                namespace.text))
            raise NonRecoverableError('Timed out waiting for namespace.')

    timeout = time.time() + 30
    while True:
        serviceaccounts = get('{0}/serviceaccounts'.format(namespace))
        if serviceaccounts.status_code == WORKING_CODE:
            ctx.logger.info('Namespace Service Account is setup.')
            ctx.logger.debug('serviceaccounts get response.text: {0}'.format(
                serviceaccounts.text))
            break
        if time.time() > timeout:
            ctx.logger.debug('serviceaccounts get response.text: {0}'.format(
                serviceaccounts.text))
            raise NonRecoverableError('Timed out waiting for Service Account.')