Example 1
def get_provisioners(config, log_file, unique, job_options, app):
    """
    Factory function to instantiate the right implementation Class

    :param config: YAML Config object
    :param log_file: Log file stream
    :param unique: Unique ID
    :param job_options: Job parameters
    :param app: Ghost Application

    :return: a FeaturesProvisioner sub-class object list
    """
    ret = []
    provisioners_config = get_provisioners_config(config)
    # Use the skip_provisioner_bootstrap default value if job options are not set.
    job_options = job_options or [
        config.get('skip_provisioner_bootstrap', True)
    ]
    for key, provisioner_config in provisioners_config.iteritems():
        if key == 'salt':
            ret.append(
                FeaturesProvisionerSalt(log_file, unique, job_options,
                                        provisioner_config, config))
        elif key == 'ansible':
            ret.append(
                FeaturesProvisionerAnsible(log_file, unique,
                                           app['build_infos']['ssh_username'],
                                           provisioner_config, config))
        else:
            log(
                "Invalid provisioner type. Please check your yaml 'config.yml' file",
                log_file)
            raise GCallException("Invalid features provisioner type")

    return ret
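
A minimal usage sketch (config, log_file, unique and app are assumed to come from the surrounding Ghost context): each returned element is a FeaturesProvisioner sub-class instance, one per backend configured in 'config.yml'.

provisioners = get_provisioners(config, log_file, unique, None, app)
for provisioner in provisioners:
    log("Using provisioner: %s" % provisioner.__class__.__name__, log_file)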
Example 2
def gcall(args, cmd_description, log_fd, dry_run=False, env=None):
    log(cmd_description, log_fd)
    log("CMD: {0}".format(args), log_fd)
    if not dry_run:
        ret = call(args, stdout=log_fd, stderr=log_fd, shell=True, env=env)
        if ret != 0:
            raise GCallException("ERROR: %s" % cmd_description)
Example 3
    def _exec_script_single_host(self, script, module_name, single_host_ip):
        context_path, sudoer_uid, module = self._get_module_path_and_uid(
            module_name)
        ghost_env_vars = get_ghost_env_variables(self._app, module,
                                                 self._job['user'])

        ec2_obj = get_ec2_instance(self._cloud_connection, self._app['region'],
                                   {
                                       'private-ip-address': single_host_ip,
                                       'vpc-id': self._app['vpc_id'],
                                   })
        if not ec2_obj or ec2_obj.vpc_id != self._app[
                'vpc_id'] or ec2_obj.private_ip_address != single_host_ip:
            raise GCallException(
                "Cannot find the single instance with private IP '{ip}' in VPC '{vpc}'"
                .format(ip=single_host_ip, vpc=self._app['vpc_id']))
        if ec2_obj.tags['app'] != self._app['name'] or ec2_obj.tags[
                'env'] != self._app['env'] or ec2_obj.tags[
                    'role'] != self._app['role']:
            raise GCallException(
                "Cannot execute script on this instance ({ip} - {id}), invalid Ghost tags"
                .format(ip=single_host_ip, id=ec2_obj.id))

        log(
            "EC2 instance found, ready to execute script ({ip} - {id} - {name})"
            .format(ip=single_host_ip,
                    id=ec2_obj.id,
                    name=ec2_obj.tags.get('Name', '')), self._log_file)
        launch_executescript(self._app, script, context_path, sudoer_uid,
                             self._job['_id'], [single_host_ip], 'serial',
                             self._log_file, ghost_env_vars)
Example 4
    def execute(self):
        if not boolify(self._config.get('enable_executescript_command', True)):
            return self._abort(
                "This command has been disabled by your administrator.")
        options = self._job.get('options', [])
        script = options[0] if len(options) > 0 else None
        module_name = options[1] if len(options) > 1 else None
        execution_strategy = options[2] if len(options) > 2 else None
        if execution_strategy == 'single':
            # options[3] is a single host IP
            fabric_execution_strategy = None
            safe_deployment_strategy = None
            single_host_ip = options[3] if len(options) > 3 else None
        else:
            # options[2] is the fabric type; options[3] may be a safe deployment group parameter
            fabric_execution_strategy = execution_strategy
            safe_deployment_strategy = options[3] if len(options) > 3 else None
            single_host_ip = None

        try:
            log(_green("STATE: Started"), self._log_file)
            try:
                if not script or not script.strip():
                    return self._abort("No valid script provided")
                script_data = b64decode_utf8(script)
                allowed_shebang = ('#!/bin/bash', '#! /bin/bash', '#!/bin/sh',
                                   '#! /bin/sh')
                if not script_data.startswith(allowed_shebang):
                    return self._abort(
                        "No valid shell script provided (shebang missing)")
            except Exception:
                return self._abort("No valid script provided")

            if single_host_ip:
                log(
                    _yellow("Executing script on a single host: %s" %
                            single_host_ip), self._log_file)
                self._exec_script_single_host(script_data, module_name,
                                              single_host_ip)
            else:
                log(_yellow("Executing script on every running instance"),
                    self._log_file)
                self._exec_script(script_data, module_name,
                                  fabric_execution_strategy,
                                  safe_deployment_strategy)

            self._worker.update_status(
                "done", message=self._get_notification_message_done())
            log(_green("STATE: End"), self._log_file)
        except Exception as e:
            self._worker.update_status(
                "failed", message=self._get_notification_message_failed(e))
            log(_red("STATE: End"), self._log_file)
Example 5
    def _get_module_s3(self, module, source_url, working_directory):
        """
        Fetch the module sources from S3
        :param module:
        :param source_url:
        :param working_directory:
        :return: (source url, working directory, source version, version uid, version message)
        """
        if not source_url.startswith('s3://'):
            raise GCallException(
                'Invalid S3 source url given: "{}", it must start with "s3://"'
                .format(source_url))
        revision = self._get_module_revision(module['name'])

        # If revision is HEAD, use the latest S3 object version
        if revision.lower().strip() in ['head', 'latest']:
            revision = 'latest'
            gcall(
                'aws s3 cp "{s}" "{w}" --recursive'.format(
                    s=source_url, w=working_directory),
                'Retrieving from S3 bucket ({url}) at latest revision'.format(
                    url=source_url), self._log_file)
        else:
            log(
                "Retrieving from S3 bucket ({url}) at revision '{rev}'".format(
                    url=source_url, rev=revision), self._log_file)
            download_s3_object(self._app, source_url, working_directory,
                               revision, self._log_file)

        return source_url, working_directory, revision, '', ''
Example 6
    def __init__(self, packer_config, config, log_file, job_id):
        self._log_file = log_file
        self.packer_config = json.loads(packer_config)
        if self.packer_config['credentials']['aws_access_key']:
            self._assumed_role = True
        else:
            self._assumed_role = False

        self.unique = str(job_id)
        if not os.path.exists(PACKER_JSON_PATH):
            os.makedirs(PACKER_JSON_PATH)

        provisioners_config = get_provisioners_config(config)

        self._provisioners = []
        for key, provisioner_config in provisioners_config.iteritems():
            if key == 'salt':
                self._provisioners.append(
                    FeaturesProvisionerSalt(self._log_file, self.unique,
                                            provisioner_config, config))
            elif key == 'ansible':
                self._provisioners.append(
                    FeaturesProvisionerAnsible(self._log_file, self.unique,
                                               provisioner_config, config))
            else:
                log(
                    "Invalid provisioner type. Please check your yaml 'config.yml' file",
                    self._log_file)
                raise GCallException("Invalid features provisioner type")
Example 7
    def _execute_redeploy(self, deploy_id, fabric_execution_strategy,
                          safe_deployment_strategy):
        module, package = self._get_deploy_infos(deploy_id)
        if module and package:
            before_update_manifest = update_app_manifest(
                self._app, self._config, module, package, self._log_file)
            all_app_modules_list = get_app_module_name_list(
                self._app['modules'])
            clean_local_module_workspace(
                get_path_from_app_with_color(self._app), all_app_modules_list,
                self._log_file)
            # Download and extract package before launching deploy
            clone_path = self._local_extract_package(module, package)

            try:
                # Re-deploy
                self._deploy_module(module, fabric_execution_strategy,
                                    safe_deployment_strategy)
            except GCallException as e:
                log(
                    "Redeploy error occurred, the app manifest will be restored to its previous state",
                    self._log_file)
                rollback_app_manifest(self._app, self._config,
                                      before_update_manifest, self._log_file)
                raise e

            # After all deploy exec
            execute_module_script_on_ghost(self._app, module,
                                           'after_all_deploy',
                                           'After all deploy', clone_path,
                                           self._log_file, self._job,
                                           self._config)
        else:
            raise GCallException(
                "Redeploy on deployment ID: {0} failed".format(deploy_id))
Example 8
def split_hosts_list(hosts_list, split_type, log_file=None):
    """
    Return a list of multiple hosts list for the safe deployment.

        :param hosts_list      list: Dictionnaries instances infos(id and private IP).
        :param split_type:     string:  The way to split the hosts list(1by1-1/3-25%-50%).
        :return                list:    Multiple hosts list or raise an Exception is the safe
                                        deployment process cannot be perform.

    >>> hosts_list = ['host1', 'host2']
    >>> split_hosts_list(hosts_list, '50%')
    [['host1'], ['host2']]
    >>> split_hosts_list(hosts_list, '1by1')
    [['host1'], ['host2']]

    >>> hosts_list = ['host1', 'host2', 'host3']
    >>> split_hosts_list(hosts_list, '50%')
    [['host1', 'host3'], ['host2']]
    >>> split_hosts_list(hosts_list, '1/3')
    [['host1'], ['host2'], ['host3']]
    >>> split_hosts_list(hosts_list, '1by1')
    [['host1'], ['host2'], ['host3']]

    >>> hosts_list = ['host1', 'host2', 'host3', 'host4']
    >>> split_hosts_list(hosts_list, '50%')
    [['host1', 'host3'], ['host2', 'host4']]
    >>> split_hosts_list(hosts_list, '1/3')
    [['host1', 'host4'], ['host2'], ['host3']]
    >>> split_hosts_list(hosts_list, '25%')
    [['host1'], ['host2'], ['host3'], ['host4']]
    >>> split_hosts_list(hosts_list, '1by1')
    [['host1'], ['host2'], ['host3'], ['host4']]

    >>> hosts_list = ['host1', 'host2', 'host3', 'host4', 'host5']
    >>> split_hosts_list(hosts_list, '50%')
    [['host1', 'host3', 'host5'], ['host2', 'host4']]
    >>> split_hosts_list(hosts_list, '1/3')
    [['host1', 'host4'], ['host2', 'host5'], ['host3']]
    >>> split_hosts_list(hosts_list, '25%')
    [['host1', 'host5'], ['host2'], ['host3'], ['host4']]
    >>> split_hosts_list(hosts_list, '1by1')
    [['host1'], ['host2'], ['host3'], ['host4'], ['host5']]
    """

    if split_type == '1by1' and len(hosts_list) > 1:
        return [hosts_list[i:i + 1] for i in range(len(hosts_list))]
    elif split_type == '1/3' and len(hosts_list) > 2:
        chunk = 3
    elif split_type == '25%' and len(hosts_list) > 3:
        chunk = 4
    elif split_type == '50%' and len(hosts_list) >= 2:
        chunk = 2
    else:
        if log_file:
            log("Not enough instances to perform safe deployment. Number of instances: "
                "{0} for safe deployment type: {1}".format(len(hosts_list), split_type), log_file)
        raise GCallException("Cannot continue, not enough instances to perform the safe deployment")
    return [hosts_list[i::chunk] for i in range(chunk)]
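
The stride slice hosts_list[i::chunk] is what interleaves hosts across groups: group i takes every chunk-th host starting at offset i, which keeps the group sizes balanced even when the list length is not a multiple of chunk.

hosts = ['host1', 'host2', 'host3', 'host4', 'host5']
print([hosts[i::2] for i in range(2)])
# [['host1', 'host3', 'host5'], ['host2', 'host4']]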
Example 9
    def check_credentials(self):
        result = False
        if not self._role_arn:
            result = True
        else:
            try:
                if self._parameters.get('assumed_region_name', None):
                    sts_connection = boto.sts.connect_to_region(
                        self._parameters['assumed_region_name'])
                else:
                    sts_connection = STSConnection()
                assumed_role_object = sts_connection.assume_role(
                    role_arn=self._role_arn,
                    role_session_name=self._role_session)
                credentials = assumed_role_object.credentials
                self._parameters['access_key'] = credentials.access_key
                self._parameters['secret_key'] = credentials.secret_key
                self._parameters['session_token'] = credentials.session_token
                result = True
            except:
                if self._log_file:
                    log(
                        "An error occurred when creating the connection, check the exception error message for more details",
                        self._log_file)
                result = False
                raise
        return result
Example 10
    def _create_containers_profile(self, module=None, source_module=None):
        """ Generate Lxc profile to mount provisoner local tree and ghost application according build image or deployment
        """
        log("Creating container profile", self._log_file)
        devices = {
            'venv': {'path': self._config['ghost_venv'],
                     'source': self._config['ghost_venv'],
                     'type': 'disk'}
        }

        if self._job['command'] == u"buildimage":
            if 'salt' in self.provisioners:
                source_formulas = get_local_repo_path(PROVISIONER_LOCAL_TREE, 'salt', self._job['_id'])
                devices['salt'] = {'path': '/srv/salt', 'source': source_formulas, 'type': 'disk'}
            if 'ansible' in self.provisioners:
                source_formulas = get_local_repo_path(PROVISIONER_LOCAL_TREE, 'ansible', self._job['_id'])
                devices['ansible'] = {'path': '/srv/ansible', 'source': source_formulas, 'type': 'disk'}

            devices['hooks'] = {'path': '/ghost', 'source': self._source_hooks_path, 'type': 'disk'}

        elif self._job['command'] == u"deploy":
            devices['module'] = {'path': module['path'], 'source': source_module, 'type': 'disk'}

        else:
            raise Exception("Incompatible command given to LXD Builder")

        profile = self._client.profiles.create(self._container_name, devices=devices)
        log("Created container profile: {}".format(profile.name), self._log_file)
Example 11
    def copy_lb(self, new_lb_name, source_lb_name, additional_tags, log_file):
        elb_conn = self._get_elb_connection()
        dest_elb = self.get_by_name(new_lb_name)
        if dest_elb:
            log(
                "  INFO: ELB {0} already available, no copy needed".format(
                    new_lb_name), log_file)
            return dest_elb['DNSName']
        source_elb = self.get_by_name(source_lb_name)
        source_elb_tags = elb_conn.describe_tags(
            LoadBalancerNames=[source_lb_name])['TagDescriptions'][0]['Tags']
        source_elb_attributes = elb_conn.describe_load_balancer_attributes(
            LoadBalancerName=source_lb_name)['LoadBalancerAttributes']

        # Create ELB
        response = elb_conn.create_load_balancer(
            LoadBalancerName=new_lb_name,
            Listeners=[
                ld['Listener'] for ld in source_elb['ListenerDescriptions']
            ],
            Subnets=source_elb['Subnets'],
            SecurityGroups=source_elb['SecurityGroups'],
            Scheme=source_elb['Scheme'],
            Tags=source_elb_tags + ghost_aws.dict_to_aws_tags(additional_tags))

        # Configure Healthcheck
        elb_conn.configure_health_check(LoadBalancerName=new_lb_name,
                                        HealthCheck=source_elb['HealthCheck'])

        # Update ELB attributes
        elb_conn.modify_load_balancer_attributes(
            LoadBalancerName=new_lb_name,
            LoadBalancerAttributes=source_elb_attributes)

        return response['DNSName']
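
A hypothetical call site (lb_manager stands for an instance of this load balancer manager class): cloning an existing ELB for a blue/green swap while tagging the copy with its color.

dns_name = lb_manager.copy_lb('ghost-app1-green', 'ghost-app1-blue',
                              {'color': 'green'}, log_file)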
Example 12
    def _local_extract_package(self, module, package):
        clone_path = get_buildpack_clone_path_from_module(self._app, module)
        gcall(
            'rm -rf "%s"' % clone_path,
            'Cleaning old temporary redeploy module working directory "%s"' %
            clone_path, self._log_file)
        gcall('mkdir -p "%s"' % clone_path,
              'Recreating redeploy module working directory "%s"' % clone_path,
              self._log_file)

        key_path = '{path}/{module}/{pkg_name}'.format(
            path=get_path_from_app_with_color(self._app),
            module=module['name'],
            pkg_name=package)
        log("Downloading package: {0} from '{1}'".format(package, key_path),
            self._log_file)
        dest_package_path = "{0}/{1}".format(clone_path, package)
        cloud_connection = cloud_connections.get(
            self._app.get('provider', DEFAULT_PROVIDER))(self._log_file)
        conn = cloud_connection.get_connection(
            self._config.get('bucket_region', self._app['region']), ["s3"])
        bucket = conn.get_bucket(self._config['bucket_s3'])
        key = bucket.get_key(key_path)
        if not key:
            raise GCallException(
                "Package '{0}' doesn't exist on bucket '{1}'".format(
                    key_path, self._config['bucket_s3']))
        key.get_contents_to_filename(dest_package_path)

        gcall('tar -xf "{0}" -C "{1}"'.format(dest_package_path, clone_path),
              "Extracting package: %s" % package, self._log_file)
        return clone_path
Example 13
    def __init__(self, app, job, db, log_file, config):
        ImageBuilder.__init__(self, app, job, db, log_file, config)

        # Always use localhost to publish built Images and run containers
        self._client = LXDClient()

        self._source_hooks_path = ''
        self._container_name = self._ami_name.replace('.', '-')
        self._container_config = self._config.get('container', {
            'endpoint': self._config.get('endpoint', 'https://lxd.ghost.morea.fr:8443'),
            'debug': self._config.get('debug', False),
        })
        self._config['ghost_venv'] = sys.exec_prefix
        provisioners_config = get_provisioners_config()
        self.provisioners = []
        for key, provisioner_config in provisioners_config.iteritems():
            if key == 'salt':
                self.provisioners.append('salt')
            elif key == 'ansible':
                self.provisioners.append('ansible')
            else:
                log("Invalid provisioner type. Please check your yaml 'config.yml' file", self._log_file)
                raise GCallException("Invalid features provisioner type")
        self.skip_salt_bootstrap_option = self._job['options'][0] if 'options' in self._job and len(
            self._job['options']) > 0 else True
        self._ansible_log_level = self._config.get('provisioner_log_level', 'info')
        self._salt_log_level = self._config.get('provisioner_log_level', 'info')
Example 14
    def _build_ansible_galaxy_requirement(self, features):
        """ Generates the Ansible Galaxy requirements file from features """
        with open(self._ansible_base_requirements_file, 'r') as requirements_file:
            requirement_app = yaml.load(requirements_file)
        for role in self._get_ansible_roles(features):
            requirement_app.append(self._get_roles_from_requirement(role))
        if requirement_app != [None]:
            with open(self._ansible_requirement_app, "w") as stream_requirement_app:
                yaml.dump(requirement_app, stream_requirement_app,
                          default_flow_style=False)
            log("Ansible - Getting roles from: {0}".format(
                self._ansible_galaxy_rq_path), self._log_file)
            gcall("{} install -r {} -p {}".format(
                self._ansible_galaxy_command_path,
                self._ansible_requirement_app,
                self._ansible_galaxy_role_path),
                'Ansible - ansible-galaxy command', self._log_file)
        else:
            raise GalaxyNoMatchingRolesException(
                "Ansible - ERROR: No roles match the galaxy requirements for one or more features: {0}"
                .format(features[-1]['roles']))
Example 15
def launch_deploy(app, module, hosts_list, fabric_execution_strategy,
                  log_file):
    """ Launch fabric tasks on remote hosts.

        :param  app:          dict: Ghost object which describe the application parameters.
        :param  module:       dict: Ghost object which describe the module parameters.
        :param  hosts_list:   list: Instances private IP.
        :param  fabric_execution_strategy: string: Deployment strategy(serial or parallel).
        :param  log_file:     object for logging.
    """
    # Clone the deploy task function to avoid modifying the original shared instance
    task = copy(deploy)

    task, app_ssh_username, key_filename, fabric_execution_strategy = _get_fabric_params(
        app, fabric_execution_strategy, task, log_file)

    bucket_region = config.get('bucket_region', app['region'])
    notification_endpoint = config.get('notification_endpoint', '')
    stage2 = render_stage2(config, bucket_region)

    log(
        "Updating current instances in {}: {}".format(
            fabric_execution_strategy, hosts_list), log_file)
    result = fab_execute(task,
                         module,
                         app_ssh_username,
                         key_filename,
                         stage2,
                         notification_endpoint,
                         log_file,
                         hosts=hosts_list)

    _handle_fabric_errors(result, "Deploy error")
Example 16
    def _package_module(self, module, ts, commit):
        path = get_buildpack_clone_path_from_module(self._app, module)
        os.chdir(path)
        pkg_name = "{0}_{1}_{2}".format(ts, module['name'], commit)
        pkg_path = '../{0}'.format(pkg_name)
        uid = module.get('uid', os.geteuid())
        gid = module.get('gid', os.getegid())
        tar_exclude_git = "--exclude '.git'" if boolify(self._config.get('deployment_package_exclude_git_metadata', False)) else ''
        gcall("tar czf {0} --owner={1} --group={2} {3} .".format(pkg_path, uid, gid, tar_exclude_git), "Creating package: %s" % pkg_name, self._log_file)

        log("Uploading package: %s" % pkg_name, self._log_file)
        cloud_connection = cloud_connections.get(self._app.get('provider', DEFAULT_PROVIDER))(self._log_file)
        conn = cloud_connection.get_connection(self._config.get('bucket_region', self._app['region']), ["s3"])
        bucket = conn.get_bucket(self._config['bucket_s3'])
        key_path = '{path}/{pkg_name}'.format(path=path, pkg_name=pkg_name)
        key = bucket.get_key(key_path)
        if not key:
            key = bucket.new_key(key_path)
        key.set_contents_from_filename(pkg_path)

        gcall("rm -f {0}".format(pkg_path), "Deleting local package: %s" % pkg_name, self._log_file)

        deployment_package_retention_config = self._config.get('deployment_package_retention', None)
        if deployment_package_retention_config and self._app['env'] in deployment_package_retention_config:
            deployment_package_retention = deployment_package_retention_config.get(self._app['env'], 42)
            self._purge_s3_package(path, bucket, module, pkg_name, deployment_package_retention)

        return pkg_name
Example 17
def get_blue_green_apps(app, apps_db, log_file):
    """
    Return app and alter_ego_app if at least one is online.

    Online app is returned first.
    """
    if app.get('blue_green') and app['blue_green'].get('alter_ego_id'):
        alter_ego_app = apps_db.find_one(
            {'_id': app['blue_green']['alter_ego_id']})
        # Both apps being online at once is inconsistent
        if app['blue_green']['is_online'] and alter_ego_app['blue_green'][
                'is_online']:
            log(
                "ERROR: Both the blue ({0}) and green ({1}) apps are set as 'online', which is not possible."
                .format(app['_id'], alter_ego_app['_id']), log_file)
            return None, None
        if app['blue_green']['is_online']:
            return app, alter_ego_app
        if alter_ego_app['blue_green']['is_online']:
            return alter_ego_app, app
        log(
            "ERROR: Neither the blue ({0}) nor the green ({1}) app is set as 'online'"
            .format(app['_id'], alter_ego_app['_id']), log_file)
        return None, None
    return None, None
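
An illustrative sketch with a minimal stand-in for the Mongo apps collection (FakeAppsDb is hypothetical); it shows that the online app always comes back first.

class FakeAppsDb(object):
    def __init__(self, apps):
        self._apps = dict((a['_id'], a) for a in apps)

    def find_one(self, query):
        return self._apps.get(query['_id'])

blue = {'_id': 1, 'blue_green': {'alter_ego_id': 2, 'is_online': True}}
green = {'_id': 2, 'blue_green': {'alter_ego_id': 1, 'is_online': False}}
online, offline = get_blue_green_apps(blue, FakeAppsDb([blue, green]), None)
assert (online, offline) == (blue, green)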
Example 18
    def _create_containers_config(self):
        """ Generate a container configuration according build image or deployment
        """
        config = {}
        if self._job["command"] == u"buildimage":
            fingerprint = self._app['build_infos']["source_container_image"]
            if self._container_config['endpoint'] == "localhost":
                config['source'] = {
                    "type": "image",
                    "fingerprint": fingerprint
                }
            else:
                config['source'] = {
                    "type": "image",
                    "protocol": "lxd",
                    "mode": "pull",
                    "fingerprint": fingerprint,
                    "server": self._container_config['endpoint']
                }
        elif self._job["command"] == u"deploy":
            alias = self._app['build_infos']["container_image"]
            config['source'] = {"type": "image", "alias": alias}
        else:
            raise Exception("Incompatible command given to LXD Builder")

        config['name'] = self._container_name
        config['ephemeral'] = False
        config['config'] = {"security.privileged": 'True'}
        config['profiles'] = ["default", self._container_name]
        log("Generated LXC container config {}".format(config), self._log_file)
        return config
Example 19
    def _delete_containers_profile(self):
        """ Delete the container profile
        """
        profile = self._client.profiles.get(self._container_name)
        profile.delete()
        log("lxc profile delete {container_name}".format(
            container_name=self._container_name), self._log_file)
Example 20
def resume_autoscaling_group_processes(as_conn, as_group,
                                       as_group_processes_to_resume, log_file):
    if as_group and as_group_processes_to_resume:
        log(
            "Resuming auto-scaling group processes {0}".format(
                as_group_processes_to_resume), log_file)
        as_conn.resume_processes(AutoScalingGroupName=as_group,
                                 ScalingProcesses=as_group_processes_to_resume)
Example 21
    def update_status(self, status, message=None):
        self.job['status'] = status
        self.job['message'] = message
        log(message, self.log_file)
        self.job['_updated'] = datetime.utcnow()
        update_job(self.job['_id'], {
            'status': status,
            'message': message,
            '_updated': self.job['_updated']
        })
Example 22
def clean_local_module_workspace(app_path, all_app_modules_list, log_file):
    """
    Walk through app_path directory and check if module workspace should be cleaned.
    """

    log('Cleaning old module workspaces', log_file)
    for mod_dir in os.listdir(app_path):
        if mod_dir not in all_app_modules_list:
            gcall('rm -rf "{p}"'.format(p=os.path.join(app_path, mod_dir)),
                  'Removing deleted module: %s' % mod_dir, log_file)
Example 23
def suspend_autoscaling_group_processes(as_conn, as_group,
                                        as_group_processes_to_suspend,
                                        log_file):
    if as_group and as_group_processes_to_suspend:
        log(
            "Suspending auto-scaling group processes {0}".format(
                as_group_processes_to_suspend), log_file)
        as_conn.suspend_processes(
            AutoScalingGroupName=as_group,
            ScalingProcesses=as_group_processes_to_suspend)
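
These two helpers are typically paired around a deployment (the process list here is illustrative, and as_conn, as_group and log_file come from the surrounding context); resuming in a finally block ensures processes are never left suspended.

processes = ['Launch', 'Terminate']
suspend_autoscaling_group_processes(as_conn, as_group, processes, log_file)
try:
    pass  # deployment work goes here
finally:
    resume_autoscaling_group_processes(as_conn, as_group, processes, log_file)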
Example 24
    def start_builder(self):
        provisioner_bootstrap_option = self._job['options'][0] if 'options' in self._job and len(self._job['options']) > 0 else True
        json_packer = self._format_packer_from_app(provisioner_bootstrap_option)
        json_packer_for_log = json.loads(json_packer)
        del json_packer_for_log['credentials']
        log("Generating a new AMI", self._log_file)
        log("Packer options: %s" % json.dumps(json_packer_for_log, sort_keys=True, indent=4, separators=(',', ': ')), self._log_file)
        pack = Packer(json_packer, self._config, self._log_file, self._job['_id'])
        ami_id = pack.build_image(self._app['features'], self._get_buildimage_hooks())
        return ami_id, self._ami_name
Example 25
    def _execute_buildpack(self, script_path, module):
        log("Run deploy build pack", self._log_file)
        script = os.path.basename(script_path)
        self.container.execute(["sed", "2icd " + module['path'], "-i",
                                "{module_path}/{script}".format(module_path=module['path'], script=script)])
        buildpack = self.container.execute(["sh", "{module_path}/{script}".format(module_path=module['path'],
                                                                                  script=script)])
        self._container_log(buildpack)
        self.container.execute(["chown", "-R", "1001:1002", "{module_path}".format(module_path=module['path'])])
        self._container_execution_error(buildpack, "buildpack")
Example 26
    def _create_container(self, module=None, source_module=None, wait=10):
        """ Create a container with its profile and wait for the network to come up (default: 10 s)
        """
        log("Create container {container_name}".format(container_name=self._container_name), self._log_file)
        self._create_containers_profile(module, source_module)
        self.container = self._client.containers.create(self._create_containers_config(), wait=True)
        log("Created container, starting it", self._log_file)
        self.container.start(wait=True)
        time.sleep(wait)
        return self.container
Example 27
    def register_lbs_into_autoscale(self, as_name, lb_names_to_deregister, lb_names_to_register, log_file):
        as_conn = self._get_as_connection()
        try:
            if lb_names_to_deregister:
                as_conn.detach_load_balancers(AutoScalingGroupName=as_name, LoadBalancerNames=lb_names_to_deregister)
            if lb_names_to_register:
                as_conn.attach_load_balancers(AutoScalingGroupName=as_name, LoadBalancerNames=lb_names_to_register)
        except Exception as e:
            log("Exception during ELB registration into the ASG: {0}".format(str(e)), log_file)
            raise
Example 28
def get_app_tags(app, log_file=None):
    """ Return the tags defined for this application.

        :param  app dict The application object
        :param  log_file obj Log file objet
        :return dict Every tags defined for this Ghost Application

        >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'autoscale': {'name': 'asg-mod1'}, 'environment_infos': {'instance_tags':[]}}
        >>> len(get_app_tags(app_original)) == 4
        True

        >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'autoscale': {'name': 'asg-mod2'}, 'environment_infos': {'instance_tags':[{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]}}
        >>> len(get_app_tags(app_original)) == 5
        True

    """
    tags_app = {}
    for ghost_tag_key, ghost_tag_val in {
            'app': 'name',
            'app_id': '_id',
            'env': 'env',
            'role': 'role'
    }.items():
        tags_app[ghost_tag_key] = {
            'Key': ghost_tag_key,
            'Value': str(app[ghost_tag_val]),
            'PropagateAtLaunch': True,
            'ResourceId': app['autoscale']['name'],
            'ResourceType': 'auto-scaling-group'
        }
    if app.get('blue_green') and app['blue_green'].get('color'):
        tags_app['color'] = {
            'Key': 'color',
            'Value': app['blue_green']['color'],
            'PropagateAtLaunch': True,
            'ResourceId': app['autoscale']['name'],
            'ResourceType': 'auto-scaling-group'
        }
    i_tags = app['environment_infos'].get('instance_tags', [])
    for app_tag in i_tags:
        tags_app[app_tag['tag_name']] = {
            'Key': app_tag['tag_name'],
            'Value': app_tag['tag_value'],
            'PropagateAtLaunch': True,
            'ResourceId': app['autoscale']['name'],
            'ResourceType': 'auto-scaling-group'
        }
    if log_file:
        log(
            "[{0}] will be updated with: {1}".format(
                app['autoscale']['name'], ", ".join(tags_app.keys())),
            log_file)
    return tags_app
Example 29
def git_release_lock(lock_path, log_file=None):
    """
    >>> import tempfile
    >>> temp_dir = tempfile.mkdtemp()
    >>> git_release_lock(temp_dir)
    >>> os.path.exists(temp_dir)
    False
    """
    if log_file:
        log('Removing git mirror lock (%s)' % lock_path, log_file)
    os.rmdir(lock_path)
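
A sketch of the matching acquire side, under the assumption that the lock is a directory created atomically with os.mkdir (the function name is hypothetical; EEXIST means another worker holds the lock).

import errno
import os
import time

def git_acquire_lock(lock_path, poll_interval=1):
    # Spin until the lock directory can be created; mkdir is atomic on POSIX.
    while True:
        try:
            os.mkdir(lock_path)
            return
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            time.sleep(poll_interval)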
Example 30
    def _publish_container(self):
        """ Publish the container as an image on the local registry after the image build
        """
        self._clean_lxd_images()
        log(
            "Publishing container {container_name}".format(
                container_name=self._container_name), self._log_file)
        image = self.container.publish(wait=True)
        image.add_alias(str(self._job['_id']), self._container_name)
        log(
            "Image created with fingerprint: {fingerprint}".format(
                fingerprint=image.fingerprint), self._log_file)