Example #1
    def _exec_script_single_host(self, script, module_name, single_host_ip):
        context_path, sudoer_uid, module = self._get_module_path_and_uid(
            module_name)
        ghost_env_vars = get_ghost_env_variables(self._app, module,
                                                 self._job['user'])

        ec2_obj = get_ec2_instance(self._cloud_connection, self._app['region'],
                                   {
                                       'private-ip-address': single_host_ip,
                                       'vpc-id': self._app['vpc_id'],
                                   })
        if not ec2_obj or ec2_obj.vpc_id != self._app[
                'vpc_id'] or ec2_obj.private_ip_address != single_host_ip:
            raise GCallException(
                "Cannot found the single instance with private IP '{ip}' in VPC '{vpc}'"
                .format(ip=single_host_ip, vpc=self._app['vpc_id']))
        if ec2_obj.tags['app'] != self._app['name'] or ec2_obj.tags[
                'env'] != self._app['env'] or ec2_obj.tags[
                    'role'] != self._app['role']:
            raise GCallException(
                "Cannot execute script on this instance ({ip} - {id}), invalid Ghost tags"
                .format(ip=single_host_ip, id=ec2_obj.id))

        log(
            "EC2 instance found, ready to execute script ({ip} - {id} - {name})"
            .format(ip=single_host_ip,
                    id=ec2_obj.id,
                    name=ec2_obj.tags.get('Name', '')), self._log_file)
        launch_executescript(self._app, script, context_path, sudoer_uid,
                             self._job['_id'], [single_host_ip], 'serial',
                             self._log_file, ghost_env_vars)
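Note: the two guard clauses above reduce to a tag-and-attribute match between the EC2 instance and the Ghost app. A minimal, self-contained sketch of that validation (function and argument names here are illustrative, not from the source):

def matches_ghost_app(instance_tags, app):
    """Return True when an instance's EC2 tags match the app's identity.

    instance_tags: dict of EC2 tags, e.g. {'app': 'web', 'env': 'prod', 'role': 'front'}
    app: Ghost app document exposing 'name', 'env' and 'role' keys.
    """
    return (instance_tags.get('app') == app['name']
            and instance_tags.get('env') == app['env']
            and instance_tags.get('role') == app['role'])

# A prod instance tagged with another role is rejected, as in the example above.
assert not matches_ghost_app({'app': 'web', 'env': 'prod', 'role': 'worker'},
                             {'name': 'web', 'env': 'prod', 'role': 'front'})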
Example #2
    def __init__(self, packer_config, config, log_file, job_id):
        self._log_file = log_file
        self.packer_config = json.loads(packer_config)
        if self.packer_config['credentials']['aws_access_key']:
            self._assumed_role = True
        else:
            self._assumed_role = False

        self.unique = str(job_id)
        if not os.path.exists(PACKER_JSON_PATH):
            os.makedirs(PACKER_JSON_PATH)

        provisioners_config = get_provisioners_config(config)

        self._provisioners = []
        for key, provisioner_config in provisioners_config.iteritems():
            if key == 'salt':
                self._provisioners.append(
                    FeaturesProvisionerSalt(self._log_file, self.unique,
                                            provisioner_config, config))
            elif key == 'ansible':
                self._provisioners.append(
                    FeaturesProvisionerAnsible(self._log_file, self.unique,
                                               provisioner_config, config))
            else:
                log(
                    "Invalid provisioner type. Please check your yaml 'config.yml' file",
                    self._log_file)
                raise GCallException("Invalid features provisioner type")
Example #3
def get_provisioners(config, log_file, unique, job_options, app):
    """
    Factory function to instantiate the right implementation Class

    :param config: YAML Config object
    :param log_file: Log file stream
    :param unique: Unique ID
    :param job_options: Job parameters
    :param app: Ghost Application

    :return: a FeaturesProvisioner sub-class object list
    """
    ret = []
    provisioners_config = get_provisioners_config(config)
    # Use skip_salt_bootstrap default value if job options not set.
    job_options = job_options or [
        config.get('skip_provisioner_bootstrap', True)
    ]
    for key, provisioner_config in provisioners_config.iteritems():
        if key == 'salt':
            ret.append(
                FeaturesProvisionerSalt(log_file, unique, job_options,
                                        provisioner_config, config))
        elif key == 'ansible':
            ret.append(
                FeaturesProvisionerAnsible(log_file, unique,
                                           app['build_infos']['ssh_username'],
                                           provisioner_config, config))
        else:
            log(
                "Invalid provisioner type. Please check your yaml 'config.yml' file",
                log_file)
            raise GCallException("Invalid features provisioner type")

    return ret
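Note: this factory and the constructor in Example #2 share the same key-based dispatch over the provisioner config. A self-contained sketch of the pattern, with stub classes standing in for FeaturesProvisionerSalt and FeaturesProvisionerAnsible (all names below are illustrative):

class ProvisionerError(Exception):
    """Stand-in for GCallException in this sketch."""

class SaltStub(object):
    name = 'salt'

class AnsibleStub(object):
    name = 'ansible'

_PROVISIONER_TYPES = {'salt': SaltStub, 'ansible': AnsibleStub}

def build_provisioners(provisioners_config):
    """Instantiate one provisioner per configured key, rejecting unknown types."""
    ret = []
    for key in provisioners_config:
        cls = _PROVISIONER_TYPES.get(key)
        if cls is None:
            raise ProvisionerError("Invalid features provisioner type: %s" % key)
        ret.append(cls())
    return ret

print(sorted(p.name for p in build_provisioners({'salt': {}, 'ansible': {}})))

A table-driven dispatch like _PROVISIONER_TYPES avoids growing the if/elif chain each time a provisioner type is added.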
Example #4
    def _local_extract_package(self, module, package):
        clone_path = get_buildpack_clone_path_from_module(self._app, module)
        gcall(
            'rm -rf "%s"' % clone_path,
            'Cleaning old temporary redeploy module working directory "%s"' %
            clone_path, self._log_file)
        gcall('mkdir -p "%s"' % clone_path,
              'Recreating redeploy module working directory "%s"' % clone_path,
              self._log_file)

        key_path = '{path}/{module}/{pkg_name}'.format(
            path=get_path_from_app_with_color(self._app),
            module=module['name'],
            pkg_name=package)
        log("Downloading package: {0} from '{1}'".format(package, key_path),
            self._log_file)
        dest_package_path = "{0}/{1}".format(clone_path, package)
        cloud_connection = cloud_connections.get(
            self._app.get('provider', DEFAULT_PROVIDER))(self._log_file)
        conn = cloud_connection.get_connection(
            self._config.get('bucket_region', self._app['region']), ["s3"])
        bucket = conn.get_bucket(self._config['bucket_s3'])
        key = bucket.get_key(key_path)
        if not key:
            raise GCallException(
                "Package '{0}' doesn't exist on bucket '{1}'".format(
                    key_path, self._config['bucket_s3']))
        key.get_contents_to_filename(dest_package_path)

        gcall('tar -xf "{0}" -C "{1}"'.format(dest_package_path, clone_path),
              "Extracting package: %s" % package, self._log_file)
        return clone_path
Example #5
    def _execute_redeploy(self, deploy_id, fabric_execution_strategy,
                          safe_deployment_strategy):
        module, package = self._get_deploy_infos(deploy_id)
        if module and package:
            before_update_manifest = update_app_manifest(
                self._app, self._config, module, package, self._log_file)
            all_app_modules_list = get_app_module_name_list(
                self._app['modules'])
            clean_local_module_workspace(
                get_path_from_app_with_color(self._app), all_app_modules_list,
                self._log_file)
            # Download and extract package before launching deploy
            clone_path = self._local_extract_package(module, package)

            try:
                # Re-deploy
                self._deploy_module(module, fabric_execution_strategy,
                                    safe_deployment_strategy)
            except GCallException as e:
                log(
                    "Redeploy error occured, app manifest will be restored to its previous state",
                    self._log_file)
                rollback_app_manifest(self._app, self._config,
                                      before_update_manifest, self._log_file)
                raise e

            # After all deploy exec
            execute_module_script_on_ghost(self._app, module,
                                           'after_all_deploy',
                                           'After all deploy', clone_path,
                                           self._log_file, self._job,
                                           self._config)
        else:
            raise GCallException(
                "Redeploy on deployment ID: {0} failed".format(deploy_id))
Example #6
    def _get_module_s3(self, module, source_url, working_directory):
        """
        Fetch the module sources from S3
        :param module:
        :param source_url:
        :param working_directory:
        :return: (source url, working directory, source version, version uid, version message)
        """
        if not source_url.startswith('s3://'):
            raise GCallException(
                'Invalid S3 source URL given: "{}", it must start with "s3://"'
                .format(source_url))
        revision = self._get_module_revision(module['name'])

        # If revision is HEAD, use the latest S3 object version
        if revision.lower().strip() in ['head', 'latest']:
            revision = 'latest'
            gcall(
                'aws s3 cp "{s}" "{w}" --recursive'.format(
                    s=source_url, w=working_directory),
                'Retrieving from S3 bucket ({url}) at latest revision'.format(
                    url=source_url), self._log_file)
        else:
            log(
                "Retrieving from S3 bucket ({url}) at revision '{rev}'".format(
                    url=source_url, rev=revision), self._log_file)
            download_s3_object(self._app, source_url, working_directory,
                               revision, self._log_file)

        return source_url, working_directory, revision, '', ''
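Note: the revision handling above accepts any spelling of HEAD/latest (case-insensitive, surrounding whitespace ignored) and collapses it to a single marker. The normalization in isolation (helper name assumed):

def normalize_s3_revision(revision):
    """Map any spelling of HEAD/latest to the canonical 'latest' marker."""
    return 'latest' if revision.lower().strip() in ('head', 'latest') else revision

assert normalize_s3_revision(' HEAD ') == 'latest'
assert normalize_s3_revision('42') == '42'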
Example #7
    def __init__(self, app, job, db, log_file, config):
        ImageBuilder.__init__(self, app, job, db, log_file, config)

        # Always use localhost to publish built Images and run containers
        self._client = LXDClient()

        self._source_hooks_path = ''
        self._container_name = self._ami_name.replace('.', '-')
        self._container_config = self._config.get('container', {
            'endpoint': self._config.get('endpoint', 'https://lxd.ghost.morea.fr:8443'),
            'debug': self._config.get('debug', False),
        })
        self._config['ghost_venv'] = sys.exec_prefix
        provisioners_config = get_provisioners_config()
        self.provisioners = []
        for key, provisioner_config in provisioners_config.iteritems():
            if key == 'salt':
                self.provisioners.append('salt')
            elif key == 'ansible':
                self.provisioners.append('ansible')
            else:
                log("Invalid provisioner type. Please check your yaml 'config.yml' file", self._log_file)
                raise GCallException("Invalid features provisioner type")
        self.skip_salt_bootstrap_option = self._job['options'][0] if 'options' in self._job and len(
            self._job['options']) > 0 else True
        self._ansible_log_level = self._config.get('provisioner_log_level', 'info')
        self._salt_log_level = self._config.get('provisioner_log_level', 'info')
Example #8
    def alb_safe_deployment(self, instances_list):
        """ Manage the safe deployment process for the Application Load Balancer.

            :param  instances_list: list: Instances on which to deploy
                    (list of dict. ex: [{'id':XXX, 'private_ip_address':XXXX}...]).
            :return True if the operation succeeds, otherwise an Exception is raised.
        """
        if not self._as_name:
            raise GCallException('Cannot continue because there is no AutoScaling Group configured')

        app_region = self._app['region']

        alb_mgr = load_balancing.get_lb_manager(self._cloud_connection, app_region, load_balancing.LB_TYPE_AWS_ALB)

        alb_targets = alb_mgr.get_instances_status_from_autoscale(self._as_name, self._log_file)

        if not len(alb_targets):
            raise GCallException('Cannot continue because there is no ALB configured in the AutoScaling Group')
        elif len([i for i in alb_targets.values() if 'unhealthy' in i.values()]):
            raise GCallException('Cannot continue because one or more instances are in the unhealthy state')
        else:
            alb_mgr.deregister_instances_from_lbs(self._as_name,
                                                  [host['id'] for host in instances_list],
                                                  self._log_file)
            wait_before_deploy = int(alb_mgr.get_lbs_max_connection_draining_value(self._as_name)) + int(
                self._safe_infos['wait_before_deploy'])
            log('Waiting {0}s: The deregistration delay time plus the custom value set for wait_before_deploy'.format(
                wait_before_deploy), self._log_file)
            time.sleep(wait_before_deploy)

            host_list = [host['private_ip_address'] for host in instances_list]
            self.trigger_launch(host_list)

            log('Waiting {0}s: The value set for wait_after_deploy'.format(self._safe_infos['wait_after_deploy']),
                self._log_file)
            time.sleep(int(self._safe_infos['wait_after_deploy']))
            alb_mgr.register_instances_from_lbs(self._as_name,
                                                [host['id'] for host in instances_list],
                                                self._log_file)
            while len([i for i in alb_mgr.get_instances_status_from_autoscale(self._as_name, self._log_file).values() if
                       'unhealthy' in i.values()]):
                log('Waiting 10s because the instance is unhealthy in the ALB', self._log_file)
                time.sleep(10)
            log('Instances: {0} have been deployed and are registered in their ALB'.format(
                str([host['private_ip_address'] for host in instances_list])), self._log_file)
            return True
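Note: this method waits in three places: a fixed pre-deploy delay (connection draining plus wait_before_deploy), a fixed post-deploy delay, and an open-ended poll until no target is unhealthy. A generic polling helper in the same spirit (hypothetical, and with a timeout that the original loop does not have):

import time

def wait_until(predicate, interval=10, timeout=600):
    """Poll predicate() every `interval` seconds; return True once it holds,
    False if `timeout` seconds elapse first (the loop above waits forever)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Usage sketch against the ALB status shape used above:
statuses = {'i-123': {'tg-arn': 'healthy'}, 'i-456': {'tg-arn': 'healthy'}}

def all_healthy():
    return not [i for i in statuses.values() if 'unhealthy' in i.values()]

assert wait_until(all_healthy, interval=0, timeout=1)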
Example #9
def _handle_fabric_errors(result, message):
    hosts_error = []
    for host, ret_code in result.items():
        if ret_code != 0:
            hosts_error.append(host)
    if len(hosts_error):
        raise GCallException("{0} on: {1}".format(message,
                                                  ", ".join(hosts_error)))
Example #10
    def deployment(self, safe_deployment_strategy):
        """ Main entry point for Host Deployment Manager process

            :type safe_deployment_strategy: string/enum
            :return True if the operation succeeds, otherwise an Exception will be raised.
        """

        app_name = self._app['name']
        app_env = self._app['env']
        app_role = self._app['role']
        app_region = self._app['region']
        app_blue_green, app_color = get_blue_green_from_app(self._app)

        # Retrieve autoscaling infos, if any
        as_conn = self._cloud_connection.get_connection(app_region, ['autoscaling'], boto_version='boto3')
        as_group, as_group_processes_to_suspend = get_autoscaling_group_and_processes_to_suspend(as_conn, self._app,
                                                                                                 self._log_file)
        try:
            # Suspend autoscaling
            suspend_autoscaling_group_processes(as_conn, as_group, as_group_processes_to_suspend, self._log_file)
            # Wait for pending instances to become ready
            while True:
                pending_instances = find_ec2_pending_instances(self._cloud_connection, app_name, app_env, app_role,
                                                               app_region, as_group, ghost_color=app_color)
                if not pending_instances:
                    break
                log(
                    "INFO: waiting 10s for {} instance(s) to become running before proceeding with deployment: {}".format(
                        len(pending_instances), pending_instances), self._log_file)
                time.sleep(10)
            running_instances = find_ec2_running_instances(self._cloud_connection, app_name, app_env, app_role,
                                                           app_region, ghost_color=app_color)
            if running_instances:
                if safe_deployment_strategy and self._safe_infos:
                    self._as_name = as_group
                    self._hosts_list = running_instances
                    return self.safe_manager(safe_deployment_strategy)
                else:
                    self._hosts_list = [host['private_ip_address'] for host in running_instances]
                    self.trigger_launch(self._hosts_list)
                    return True
            else:
                raise GCallException(
                    "No instance found in region {region} with tags app:{app}, env:{env}, role:{role}{color}".format(
                        region=app_region,
                        app=app_name,
                        env=app_env,
                        role=app_role,
                        color=', color:%s' % app_color if app_color else ''))
        finally:
            resume_autoscaling_group_processes(as_conn, as_group, as_group_processes_to_suspend, self._log_file)
Example #11
def execute_module_script_on_ghost(app, module, script_name,
                                   script_friendly_name, clone_path, log_file,
                                   job, config):
    """ Executes the given script on the Ghost instance

        :param app: Ghost application
        :param module: Ghost module to extract script from
        :param script_name: string: the name of the script to find in module
        :param script_friendly_name: string: the friendly name of the script for logs
        :param clone_path: string: working directory of the current module
        :param log_file: string: Log file path
        :param job: Ghost job
        :param config: Ghost config
    """
    # Execute script if available
    if script_name in module:
        theorical_script_path = "{0}/{1}".format(clone_path, script_name)
        if os.path.isfile(theorical_script_path):
            script_path = theorical_script_path
        else:
            script_source = b64decode_utf8(module[script_name])
            script, script_path = tempfile.mkstemp(dir=clone_path)
            os.close(script)
            with io.open(script_path, mode='w', encoding='utf-8') as f:
                f.write(script_source)

        script_env = os.environ.copy()
        script_env.update(get_ghost_env_variables(app, module))

        if app['build_infos'].get('container_image') and lxd_is_available(
                config):
            source_module = get_buildpack_clone_path_from_module(app, module)
            container = LXDImageBuilder(app, job, None, log_file, config)
            if not container.deploy(script_path, module, source_module):
                raise GCallException(
                    "ERROR: %s execution on container failed" % script_name)
        else:
            log("Change directory to working dir ({w})".format(w=clone_path),
                log_file)
            os.chdir(clone_path)
            gcall('bash %s' % script_path,
                  '%s: Execute' % script_friendly_name,
                  log_file,
                  env=script_env)

        gcall('du -hs .', 'Display current build directory disk usage',
              log_file)
        gcall('rm -vf %s' % script_path,
              '%s: Done, cleaning temporary file' % script_friendly_name,
              log_file)
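Note: the interesting step here is how an inline, base64-encoded script is materialized: decode, write to a mkstemp file inside the working directory, then run it with an augmented environment. A self-contained sketch of just that step (assumes bash is available; names are illustrative):

import base64
import os
import subprocess
import tempfile

def run_inline_script(b64_script, workdir, extra_env=None):
    """Decode a base64 script into workdir, execute it with bash, then clean up."""
    source = base64.b64decode(b64_script).decode('utf-8')
    fd, script_path = tempfile.mkstemp(dir=workdir)
    os.close(fd)
    with open(script_path, 'w') as f:
        f.write(source)
    env = os.environ.copy()
    env.update(extra_env or {})
    try:
        subprocess.check_call(['bash', script_path], cwd=workdir, env=env)
    finally:
        os.remove(script_path)

run_inline_script(base64.b64encode(b'echo "hello from $GHOST_APP"'),
                  tempfile.gettempdir(), {'GHOST_APP': 'demo'})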
Example #12
    def _get_module_sources(self, module):
        """
        Fetch the current module source, using the right protocol
        :param module: app module object
        :return: (source url, working directory, source version, version uid, version message)
        """
        source = module.get('source', {})
        source_protocol = source['protocol'].strip()
        source_url = source['url'].strip()
        clone_path = get_buildpack_clone_path_from_module(self._app, module)
        if source_protocol == 'git':
            return self._get_module_git(module, source_url, clone_path)
        elif source_protocol == 's3':
            return self._get_module_s3(module, source_url, clone_path)
        else:
            raise GCallException(
                'Invalid source protocol provided ({})'.format(
                    source_protocol))
Example #13
    def haproxy_safe_deployment(self, instances_list):
        """ Manage the safe deployment process for the Haproxy.

            :param  instances_list:  list: Instances on which to deploy
                    (list of dict. ex: [{'id':XXX, 'private_ip_address':XXXX}...]).
            :return True if the operation succeeds, otherwise an Exception is raised.
        """
        lb_infos = [host['private_ip_address'] for host in find_ec2_running_instances(self._cloud_connection,
                                                                                      self._safe_infos['app_tag_value'],
                                                                                      self._app['env'], 'loadbalancer',
                                                                                      self._app['region'])]
        if lb_infos:
            hapi = haproxy.Haproxyapi(lb_infos, self._log_file, self._safe_infos['api_port'])
            ha_urls = hapi.get_haproxy_urls()
            if not self.haproxy_configuration_validation(hapi, ha_urls, self._safe_infos['ha_backend']):
                raise GCallException('Cannot initialize the safe deployment process because there are differences in the Haproxy \
                                      configuration files between the instances: {0}'.format(lb_infos))
            if not hapi.change_instance_state('disableserver', self._safe_infos['ha_backend'],
                                              [host['private_ip_address'] for host in instances_list]):
                raise GCallException(
                    'Cannot disable some instances: {0} in {1}. Deployment aborted'.format(instances_list, lb_infos))
            log('Waiting {0}s: The value set for wait_before_deploy'.format(self._safe_infos['wait_before_deploy']),
                self._log_file)
            time.sleep(int(self._safe_infos['wait_before_deploy']))

            host_list = [host['private_ip_address'] for host in instances_list]
            self.trigger_launch(host_list)

            log('Waiting {0}s: The value set for wait_after_deploy'.format(self._safe_infos['wait_after_deploy']),
                self._log_file)
            time.sleep(int(self._safe_infos['wait_after_deploy']))
            if not hapi.change_instance_state('enableserver', self._safe_infos['ha_backend'],
                                              [host['private_ip_address'] for host in instances_list]):
                raise GCallException(
                    'Cannot enable some instances: {0} in {1}. Deployment aborted'.format(instances_list, lb_infos))
            # Sleep briefly to give the health check process time to complete
            time.sleep(5)
            if not self.haproxy_configuration_validation(hapi, ha_urls, self._safe_infos['ha_backend']):
                raise GCallException('Error in the post safe deployment process because there are differences in the Haproxy \
                                    configuration files between the instances: {0}. Instances: {1} have been deployed but not well enabled'.format(
                    lb_infos, instances_list))
            if not hapi.check_all_instances_up(self._safe_infos['ha_backend'], hapi.get_haproxy_conf(ha_urls[0], True)):
                raise GCallException(
                    'Error in the post safe deployment process because some instances are disabled or down in the Haproxy: {0}.'.format(
                        lb_infos))
            log('Instances: {0} have been deployed and are registered in their Haproxy'.format(str(instances_list)),
                self._log_file)
            return True
        else:
            raise GCallException('Cannot continue because no Haproxy found with the parameters: app_tag_value: {0}, app_env: {1}, app_role: loadbalancer,\
                                 app_region: {2}'.format(self._safe_infos['app_tag_value'], self._app['env'],
                                                         self._app['region']))
Example #14
    def do_rolling(self, rolling_strategy):
        """  Main entry point for Rolling Update process.

            :param  rolling_strategy string: The type of rolling strategy (1by1-1/3-25%-50%)
            :return True if the operation succeeds, otherwise an Exception will be raised.
        """
        hosts = split_hosts_list(
            self.hosts_list,
            rolling_strategy) if rolling_strategy else [self.hosts_list]
        for host_group in hosts:
            if self.safe_infos['load_balancer_type'] == 'elb':
                self.elb_rolling_update(host_group)
#            elif self.safe_infos['load_balancer_type'] == 'alb':
            else:
                raise GCallException(
                    'Load balancer type not supported for Rolling update option'
                )
            log('Waiting 10s before going on next instance group',
                self.log_file)
            time.sleep(10)
        return True
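Note: split_hosts_list is not shown in this excerpt; the docstring says it partitions hosts according to '1by1', '1/3', '25%' or '50%'. A plausible, self-contained sketch of such a splitter (an assumption about its behaviour, not the project's actual implementation):

def split_hosts_list(hosts_list, rolling_strategy):
    """Split hosts into deployment groups for a rolling update.

    '1by1' deploys one host per group; '1/3' a third of the fleet per group;
    '25%'/'50%' that fraction per group (always at least one host).
    """
    if rolling_strategy == '1by1':
        group_size = 1
    elif rolling_strategy == '1/3':
        group_size = max(1, len(hosts_list) // 3)
    elif rolling_strategy in ('25%', '50%'):
        group_size = max(1, len(hosts_list) * int(rolling_strategy.rstrip('%')) // 100)
    else:
        raise ValueError('Unknown rolling strategy: %s' % rolling_strategy)
    return [hosts_list[i:i + group_size]
            for i in range(0, len(hosts_list), group_size)]

assert split_hosts_list(['a', 'b', 'c', 'd'], '50%') == [['a', 'b'], ['c', 'd']]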
Example #15
def create_ec2_instance(cloud_connection, app, app_color, config, private_ip_address, subnet_id, log_file):
    """ Creates an EC2 instance and return its ID.

        :param  cloud_connection: The app Cloud Connection object
        :param  app: Ghost app document
        :param  app_color: Color value if BlueGreen application type
        :param  config: Ghost config settings
        :param  private_ip_address: Private IP address to use when creating the instance
        :param  subnet_id: Subnet to use when creating the instance
        :param  log_file: Logging file

        :return the EC2 instance object with all its details
    """
    log(_yellow(" INFO: Creating User-Data"), log_file)
    ghost_root_path = config.get('ghost_root_path', '/usr/local/share/ghost/')
    userdata = generate_userdata(config['bucket_s3'], config.get('bucket_region', app['region']), ghost_root_path)

    log(_yellow(" INFO: Creating EC2 instance"), log_file)
    if app['ami']:
        log(" CONF: AMI: {0}".format(app['ami']), log_file)
        log(" CONF: Region: {0}".format(app['region']), log_file)

        conn = cloud_connection.get_connection(app['region'], ["ec2"])
        interface = cloud_connection.launch_service(
                ["ec2", "networkinterface", "NetworkInterfaceSpecification"],
                subnet_id=subnet_id,
                groups=app['environment_infos']['security_groups'],
                associate_public_ip_address=app['environment_infos'].get('public_ip_address', True),
                private_ip_address=private_ip_address
                )
        interfaces = cloud_connection.launch_service(
                ["ec2", "networkinterface", "NetworkInterfaceCollection"],
                interface
                )
        if 'root_block_device' in app['environment_infos']:
            bdm = create_block_device(cloud_connection, app['region'], app, app['environment_infos']['root_block_device'])
        else:
            bdm = create_block_device(cloud_connection, app['region'], app, {})
        reservation = conn.run_instances(
            image_id=app['ami'],
            key_name=app['environment_infos']['key_name'],
            network_interfaces=interfaces,
            instance_type=app['instance_type'],
            instance_profile_name=app['environment_infos']['instance_profile'],
            user_data=userdata, block_device_map=bdm
        )

        # Getting instance metadata
        instance = reservation.instances[0]
        if instance.id:
            # Checking if instance is ready before tagging
            while not instance.state == u'running':
                log('Instance not running, waiting 10s before tagging.', log_file)
                time.sleep(10)
                instance.update()

            # Tagging
            for ghost_tag_key, ghost_tag_val in {'app': 'name', 'app_id': '_id', 'env': 'env', 'role': 'role'}.iteritems():
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(id=instance.id, tk=ghost_tag_key, tv=str(app[ghost_tag_val])), log_file)
                conn.create_tags([instance.id], {ghost_tag_key: str(app[ghost_tag_val])})
            if app_color:
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(id=instance.id, tk='color', tv=app_color), log_file)
                conn.create_tags([instance.id], {"color": app_color})

            tag_ec2_name = False
            if 'instance_tags' in app['environment_infos']:
                for app_tag in app['environment_infos']['instance_tags']:
                    log("Tagging instance [{id}] with '{tk}':'{tv}'".format(id=instance.id, tk=app_tag['tag_name'], tv=app_tag['tag_value']), log_file)
                    conn.create_tags([instance.id], {app_tag['tag_name']: app_tag['tag_value']})
                    if app_tag['tag_name'] == 'Name':
                        tag_ec2_name = True
            if not tag_ec2_name:
                ec2_name = "ec2.{0}.{1}.{2}".format(app['env'], app['role'], app['name'])
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(id=instance.id, tk='Name', tv=ec2_name), log_file)
                conn.create_tags([instance.id], {'Name': ec2_name})

            log(" CONF: Private IP: %s" % instance.private_ip_address, log_file)
            log(" CONF: Public IP: %s" % instance.ip_address, log_file)
            log(" CONF: Public DNS: %s" % instance.public_dns_name, log_file)
            return instance
        else:
            log(_red("ERROR: Cannot get instance metadata. Please check the AWS Console."), log_file)
            raise GCallException("ERROR: Cannot get instance metadata. Please check the AWS Console.")
    else:
        log(_red("ERROR: No AMI set, please use buildimage before"), log_file)
        raise GCallException("ERROR: No AMI set, please use buildimage before")

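Note: the tagging section applies a fixed mapping of Ghost tag keys to app document fields, overlays any custom instance_tags, and falls back to a generated Name. The same tag assembly, condensed into a pure function (helper name is illustrative):

def build_instance_tags(app, app_color=None):
    """Assemble the tag dict applied to a freshly created EC2 instance."""
    tags = {ghost_key: str(app[app_field])
            for ghost_key, app_field in
            {'app': 'name', 'app_id': '_id', 'env': 'env', 'role': 'role'}.items()}
    if app_color:
        tags['color'] = app_color
    for custom in app.get('environment_infos', {}).get('instance_tags', []):
        tags[custom['tag_name']] = custom['tag_value']
    tags.setdefault('Name', "ec2.{0}.{1}.{2}".format(app['env'], app['role'], app['name']))
    return tags

app = {'name': 'web', '_id': 42, 'env': 'prod', 'role': 'front',
       'environment_infos': {'instance_tags': []}}
print(build_instance_tags(app, 'blue'))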
Example #16
    def execute(self):
        log(_green("STATE: Started"), self._log_file)
        swap_execution_strategy = (
            self._job['options'][0] if 'options' in self._job
            and len(self._job['options']) > 0 else "isolated")
        online_app, to_deploy_app = get_blue_green_apps(
            self._app, self._worker._db.apps, self._log_file)
        if not online_app:
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    self._app,
                    "Blue/green is not enabled on this app or not well configured"
                ))
            return

        running_jobs = get_running_jobs(self._db, online_app['_id'],
                                        to_deploy_app['_id'], self._job['_id'])
        if abort_if_other_bluegreen_job(
                running_jobs, self._worker,
                self._get_notification_message_aborted(
                    self._app,
                    "Please wait until the end of the current jobs before triggering a Blue/green operation"
                ), self._log_file):
            return

        try:
            lb_mgr = load_balancing.get_lb_manager(
                self._cloud_connection, self._app['region'],
                online_app["safe-deployment"]["load_balancer_type"])

            # Check AMI
            if 'ami' not in to_deploy_app:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app, "Please run `Buildimage` first"))
                return
            # Check if modules have been deployed
            if not check_app_manifest(
                    to_deploy_app, self._config, self._log_file,
                    get_path_from_app_with_color(to_deploy_app)):
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app, "Please deploy your app's modules"))
                return
            # Check ASG
            if to_deploy_app['autoscale']['name'] and online_app['autoscale'][
                    'name']:
                if not (check_autoscale_exists(
                        self._cloud_connection,
                        to_deploy_app['autoscale']['name'],
                        to_deploy_app['region']) and check_autoscale_exists(
                            self._cloud_connection,
                            online_app['autoscale']['name'],
                            online_app['region'])):
                    self._worker.update_status(
                        "aborted",
                        message=self._get_notification_message_aborted(
                            to_deploy_app,
                            "Please set an AutoScale on both green and blue app"
                        ))
                    return
            else:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app,
                        "Please set an AutoScale on both green and blue app."))
                return

            # Check if we have two different AS !
            if to_deploy_app['autoscale']['name'] == online_app['autoscale'][
                    'name']:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app,
                        "Please set a different AutoScale on green and blue app."
                    ))
                return

            # Check if we're ready to swap. If an instance is out of service
            # into the ELB pool raise an exception
            elb_instances = lb_mgr.get_instances_status_from_autoscale(
                to_deploy_app['autoscale']['name'], self._log_file)
            if len(elb_instances) == 0:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app,
                        "The offline application [{0}] doesn't have a valid Load Balancer associated.'"
                        .format(to_deploy_app['_id'])))
                return
            for e in elb_instances.values():
                if len(e.values()) == 0:
                    self._worker.update_status(
                        "aborted",
                        message=self._get_notification_message_aborted(
                            to_deploy_app,
                            "An ELB of the offline application [{0}] has no instances associated.'"
                            .format(to_deploy_app['_id'])))
                    return

            if len([
                    i for i in elb_instances.values()
                    if 'outofservice' in i.values()
            ]):
                raise GCallException(
                    'Cannot continue because one or more instances are in the out of service state in the temp ELB'
                )
            else:
                log(
                    _green(
                        "AutoScale blue [{0}] and green [{1}] ready for swap".
                        format(online_app['autoscale']['name'],
                               to_deploy_app['autoscale']['name'])),
                    self._log_file)

            self._execute_swap_hook(
                online_app, to_deploy_app, 'pre_swap',
                'Pre swap script for current {status} application',
                self._log_file)

            # Swap !
            elb_name, elb_dns = self._swap_asg(lb_mgr, swap_execution_strategy,
                                               online_app, to_deploy_app,
                                               self._log_file)
            if not elb_name:
                self._worker.update_status(
                    "failed",
                    message=self._get_notification_message_failed(
                        online_app, to_deploy_app,
                        'Unable to make blue-green swap'))
                return

            self._execute_swap_hook(
                online_app, to_deploy_app, 'post_swap',
                'Post swap script for previously {status} application',
                self._log_file)

            # All good
            done_notif = self._get_notification_message_done(
                online_app, online_app['autoscale']['name'],
                to_deploy_app['autoscale']['name'], elb_name, elb_dns)
            self._worker.update_status("done", message=done_notif)
        except GCallException as e:
            self._worker.update_status(
                "failed",
                message=self._get_notification_message_failed(
                    online_app, to_deploy_app, str(e)))
Example #17
    def elb_rolling_update(self, instances_list):
        """ Manage the safe destroy process for the ELB.

            :param  instances_list  list: Instances to destroy (list of dict. ex: [{'id':XXX, 'private_ip_address':XXXX}...]).
            :return                True if the operation succeeds, otherwise an Exception is raised.
        """
        if not self.as_name:
            raise GCallException(
                'Cannot continue because there is no AutoScaling Group configured'
            )

        app_region = self.app['region']

        as_conn = self.cloud_connection.get_connection(app_region,
                                                       ['autoscaling'],
                                                       boto_version='boto3')
        lb_mgr = load_balancing.get_lb_manager(self.cloud_connection,
                                               app_region,
                                               load_balancing.LB_TYPE_AWS_CLB)
        destroy_asg_policy = ['OldestLaunchConfiguration']

        try:
            elb_instances = lb_mgr.get_instances_status_from_autoscale(
                self.as_name, self.log_file)
            asg_infos = get_autoscaling_group_object(as_conn, self.as_name)
            if not len(elb_instances):
                raise GCallException(
                    'Cannot continue because there is no ELB configured in the AutoScaling Group'
                )
            elif len([
                    i for i in elb_instances.values()
                    if 'outofservice' in i.values()
            ]):
                raise GCallException(
                    'Cannot continue because one or more instances are in the out of service state'
                )
            elif not check_autoscale_instances_lifecycle_state(
                    asg_infos['Instances']):
                raise GCallException(
                    'Cannot continue because one or more instances are not in InService Lifecycle state'
                )
            else:
                group_size = len(instances_list)
                original_termination_policies = asg_infos[
                    'TerminationPolicies']

                log(
                    _green(
                        'Suspending "Terminate" process in the AutoScale and provisioning %s instance(s)'
                        % group_size), self.log_file)
                suspend_autoscaling_group_processes(as_conn, self.as_name,
                                                    ['Terminate'],
                                                    self.log_file)
                update_auto_scaling_group_attributes(
                    as_conn, self.as_name, asg_infos['MinSize'],
                    asg_infos['MaxSize'] + group_size,
                    asg_infos['DesiredCapacity'] + group_size)

                log(
                    _green(
                        'Deregister old instances from the Load Balancer (%s)'
                        % str([host['id'] for host in instances_list])),
                    self.log_file)
                lb_mgr.deregister_instances_from_lbs(
                    elb_instances.keys(),
                    [host['id'] for host in instances_list], self.log_file)
                wait_con_draining = int(
                    lb_mgr.get_lbs_max_connection_draining_value(
                        elb_instances.keys()))
                log(
                    'Waiting {0}s: The connection draining time'.format(
                        wait_con_draining), self.log_file)
                time.sleep(wait_con_draining)

                asg_updated_infos = get_autoscaling_group_object(
                    as_conn, self.as_name)
                while len(asg_updated_infos['Instances']
                          ) < asg_updated_infos['DesiredCapacity']:
                    log(
                        'Waiting 30s because the instance(s) are not provisioned in the AutoScale',
                        self.log_file)
                    time.sleep(30)
                    asg_updated_infos = get_autoscaling_group_object(
                        as_conn, self.as_name)
                while not check_autoscale_instances_lifecycle_state(
                        asg_updated_infos['Instances']):
                    log(
                        'Waiting 30s because the instance(s) are not in InService state in the AutoScale',
                        self.log_file)
                    time.sleep(30)
                    asg_updated_infos = get_autoscaling_group_object(
                        as_conn, self.as_name)

                while len([
                        i for i in lb_mgr.get_instances_status_from_autoscale(
                            self.as_name, self.log_file).values()
                        if 'outofservice' in i.values()
                ]):
                    log(
                        'Waiting 10s because the instance(s) are not in service in the ELB',
                        self.log_file)
                    time.sleep(10)

                suspend_autoscaling_group_processes(as_conn, self.as_name,
                                                    ['Launch', 'Terminate'],
                                                    self.log_file)
                log(
                    _green(
                        'Restore initial AutoScale attributes and destroy old instances for this group (%s)'
                        % str([host['id'] for host in instances_list])),
                    self.log_file)
                update_auto_scaling_group_attributes(
                    as_conn, self.as_name, asg_infos['MinSize'],
                    asg_infos['MaxSize'], asg_infos['DesiredCapacity'],
                    destroy_asg_policy)
                destroy_specific_ec2_instances(self.cloud_connection, self.app,
                                               instances_list, self.log_file)

                resume_autoscaling_group_processes(as_conn, self.as_name,
                                                   ['Terminate'],
                                                   self.log_file)
                asg_updated_infos = get_autoscaling_group_object(
                    as_conn, self.as_name)
                while len(asg_updated_infos['Instances']
                          ) > asg_updated_infos['DesiredCapacity']:
                    log(
                        'Waiting 20s because the old instance(s) are not removed from the AutoScale',
                        self.log_file)
                    time.sleep(20)
                    asg_updated_infos = get_autoscaling_group_object(
                        as_conn, self.as_name)

                update_auto_scaling_group_attributes(
                    as_conn, self.as_name, asg_infos['MinSize'],
                    asg_infos['MaxSize'], asg_infos['DesiredCapacity'],
                    original_termination_policies)
                log(
                    _green(
                        '%s instance(s) have been re-generated and are registered in their ELB'
                        % group_size), self.log_file)
                return True
        finally:
            resume_autoscaling_group_processes(as_conn, self.as_name,
                                               ['Launch', 'Terminate'],
                                               self.log_file)
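Note: both this method and Example #10 suspend AutoScaling processes up front and resume them in a finally block, so the group is never left suspended. That pairing is a natural fit for a context manager; a sketch of the idea with the project's helpers stubbed out:

from contextlib import contextmanager

# Stubs standing in for the helpers used in the examples above.
def suspend_autoscaling_group_processes(as_conn, as_name, processes, log_file):
    print('suspend %s on %s' % (processes, as_name))

def resume_autoscaling_group_processes(as_conn, as_name, processes, log_file):
    print('resume %s on %s' % (processes, as_name))

@contextmanager
def suspended_asg_processes(as_conn, as_name, processes, log_file):
    """Suspend the given ASG processes for the duration of the block,
    resuming them even if the deployment raises."""
    suspend_autoscaling_group_processes(as_conn, as_name, processes, log_file)
    try:
        yield
    finally:
        resume_autoscaling_group_processes(as_conn, as_name, processes, log_file)

with suspended_asg_processes(None, 'my-asg', ['Launch', 'Terminate'], None):
    pass  # deployment work would happen here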