Example #1
    def _local_extract_package(self, module, package):
        clone_path = get_buildpack_clone_path_from_module(self._app, module)
        gcall(
            'rm -rf "%s"' % clone_path,
            'Cleaning old temporary redeploy module working directory "%s"' %
            clone_path, self._log_file)
        gcall('mkdir -p "%s"' % clone_path,
              'Recreating redeploy module working directory "%s"' % clone_path,
              self._log_file)

        key_path = '{path}/{module}/{pkg_name}'.format(
            path=get_path_from_app_with_color(self._app),
            module=module['name'],
            pkg_name=package)
        log("Downloading package: {0} from '{1}'".format(package, key_path),
            self._log_file)
        dest_package_path = "{0}/{1}".format(clone_path, package)
        cloud_connection = cloud_connections.get(
            self._app.get('provider', DEFAULT_PROVIDER))(self._log_file)
        conn = cloud_connection.get_connection(
            self._config.get('bucket_region', self._app['region']), ["s3"])
        bucket = conn.get_bucket(self._config['bucket_s3'])
        key = bucket.get_key(key_path)
        if not key:
            raise GCallException(
                "Package '{0}' doesn't exist on bucket '{1}'".format(
                    key_path, self._config['bucket_s3']))
        key.get_contents_to_filename(dest_package_path)

        gcall('tar -xf "{0}" -C "{1}"'.format(dest_package_path, clone_path),
              "Extracting package: %s" % package, self._log_file)
        return clone_path
Example #2
    def _execute_redeploy(self, deploy_id, fabric_execution_strategy,
                          safe_deployment_strategy):
        module, package = self._get_deploy_infos(deploy_id)
        if module and package:
            before_update_manifest = update_app_manifest(
                self._app, self._config, module, package, self._log_file)
            all_app_modules_list = get_app_module_name_list(
                self._app['modules'])
            clean_local_module_workspace(
                get_path_from_app_with_color(self._app), all_app_modules_list,
                self._log_file)
            # Download and extract package before launching deploy
            clone_path = self._local_extract_package(module, package)

            try:
                # Re-deploy
                self._deploy_module(module, fabric_execution_strategy,
                                    safe_deployment_strategy)
            except GCallException as e:
                log(
                    "Redeploy error occured, app manifest will be restored to its previous state",
                    self._log_file)
                rollback_app_manifest(self._app, self._config,
                                      before_update_manifest, self._log_file)
                raise e

            # After all deploy exec
            execute_module_script_on_ghost(self._app, module,
                                           'after_all_deploy',
                                           'After all deploy', clone_path,
                                           self._log_file, self._job,
                                           self._config)
        else:
            raise GCallException(
                "Redeploy on deployment ID: {0} failed".format(deploy_id))
Example #3
    def _purge_s3_package(self, path, bucket, module, pkg_name, deployment_package_retention=42):
        """
        Purge N old packages deployment for the current module from the current app
        """
        try:
            # Get all packages in S3 related to the current module
            keys_list = [i.name.split("/")[-1] for i in bucket.list(path[1:])]

            # Get app manifest and extract package name
            manifest_key_path = '{path}/MANIFEST'.format(path=get_path_from_app_with_color(self._app))
            manifest_module_pkg_name = get_module_package_rev_from_manifest(bucket, manifest_key_path, module)

            # Remove the current production/used package from the purge list
            keys_list.remove(manifest_module_pkg_name)

            # Remove the current deployment package just generated from the purge list
            keys_list.remove(pkg_name)

            if len(keys_list) > deployment_package_retention:
                keys_list = keep_n_recent_elements_from_list(keys_list, deployment_package_retention, self._log_file)
                for obj in keys_list:
                    key_path_to_purge = '{path}/{obj}'.format(path=path, obj=obj)
                    try:
                        bucket.get_key(key_path_to_purge).delete()
                        log("Packages Purge: Deleted S3 Object: %s" % key_path_to_purge, self._log_file)
                    except Exception:
                        log("Packages Purge: Delete FAILED for S3 Object: %s" % key_path_to_purge, self._log_file)
        except Exception as e:
            log("Packages Purge: Global exception | " + str(e), self._log_file)
Example #4
    def execute(self):
        try:
            ami_id, ami_name = self._aws_image_builder.start_builder()
        except (GalaxyNoMatchingRolesException,
                GalaxyBadRequirementPathException, GCallException) as e:
            self._worker.update_status("aborted", message=str(e))
            return

        if ami_id is not "ERROR":
            if lxd_is_available() and self._app['build_infos'].get(
                    'source_container_image', None):
                log("Generating a new container", self._log_file)
                try:
                    lxd_image_builder = LXDImageBuilder(
                        self._app, self._job, self._db, self._log_file,
                        self._config)
                    lxd_image_builder.set_source_hooks(
                        get_path_from_app_with_color(self._app))
                    builder_result = lxd_image_builder.start_builder()
                except Exception as e:
                    traceback.print_exc(self._log_file)
                    log(
                        "An error occured during container process ({})".
                        format(e), self._log_file)
                    self._worker.update_status("failed")
                    return

                log("Update app in MongoDB to update container source image",
                    self._log_file)
                self._update_container_source(self._job['_id'])

            touch_app_manifest(self._app, self._config, self._log_file)
            log("Update app in MongoDB to update AMI: {0}".format(ami_id),
                self._log_file)
            self._update_app_ami(ami_id, ami_name)
            if self._aws_image_builder.purge_old_images():
                log("Old AMIs removed for this app", self._log_file)
            else:
                log("Purge old AMIs failed", self._log_file)
            if self._app['autoscale']['name']:
                try:
                    if create_userdata_launchconfig_update_asg(
                            ami_id, self._cloud_connection, self._app,
                            self._config, self._log_file):
                        self._worker.update_status(
                            "done",
                            message=self._get_notification_message_done(
                                ami_id))
                    else:
                        self._worker.update_status("failed")
                except Exception:
                    traceback.print_exc(self._log_file)
                    self._worker.update_status("failed")
            else:
                log("No autoscaling group name was set", self._log_file)
                self._worker.update_status("done")
        else:
            log("ERROR: ami_id not found. The packer process had maybe fail.",
                self._log_file)
            self._worker.update_status("failed")
Example #5
    def execute(self):
        try:
            app = self._app
            cloud_connection = cloud_connections.get(
                self._app.get('provider', DEFAULT_PROVIDER))(self._log_file)
            refresh_stage2(
                cloud_connection,
                self._config.get('bucket_region', self._app['region']),
                self._config)
            log('INFO: refreshed /ghost/stage2', self._log_file)

            # Store lifecycle hooks scripts in S3
            lifecycle_hooks = app.get('lifecycle_hooks', None)
            conn = cloud_connection.get_connection(
                self._config.get('bucket_region', self._app['region']), ["s3"])
            bucket = conn.get_bucket(self._config['bucket_s3'])
            prefix = get_path_from_app_with_color(app)
            self._refresh_env_vars(app.get('env_vars', []), bucket, prefix)
            self._refresh_lifecycle_hook_script('pre_bootstrap',
                                                lifecycle_hooks, bucket,
                                                prefix)
            self._refresh_lifecycle_hook_script('post_bootstrap',
                                                lifecycle_hooks, bucket,
                                                prefix)

            # Update Auto-Scaling Launch Configuration if possible
            ami_id = self._app['ami']
            if ami_id:
                if self._app['autoscale']['name']:
                    try:
                        if not create_userdata_launchconfig_update_asg(
                                ami_id, self._cloud_connection, self._app,
                                self._config, self._log_file):
                            self._worker.update_status("failed")
                            return
                    except Exception:
                        traceback.print_exc(self._log_file)
                        self._worker.update_status(
                            "failed",
                            message="Scripts Update Failed: {0}".format(
                                str(sys.exc_info()[1])))
                        return
                else:
                    log(
                        "No autoscaling group name was set. No need to update LC.",
                        self._log_file)
            else:
                log(
                    "WARNING: ami_id not found. You must use the `buildimage` command first.",
                    self._log_file)

            self._worker.update_status("done", message="Scripts Update OK")
        except Exception:
            traceback.print_exc(self._log_file)
            self._worker.update_status(
                "failed",
                message="Scripts Update Failed: {0}".format(
                    str(sys.exc_info()[1])))
Example #6
    def _execute_swap_hook(online_app, to_deploy_app, script_name,
                           script_message, log_file):
        for status, app in (('active', online_app), ('inactive',
                                                     to_deploy_app)):
            script = app.get('blue_green', {}).get('hooks',
                                                   {}).get(script_name, None)
            if script:
                script_path = os.path.join(get_path_from_app_with_color(app),
                                           script_name)
                with open(script_path, 'w') as f:
                    f.write(b64decode_utf8(script))

                script_env = os.environ.copy()
                script_env.update(get_ghost_env_variables(app))

                gcall('bash {}'.format(script_path),
                      '{}: Execute'.format(
                          script_message.format(status=status)),
                      log_file,
                      env=script_env)
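
Hook bodies are stored base64-encoded in the app document and written to disk before being run with bash. b64decode_utf8 is not defined in these examples; a minimal sketch of such a helper (an assumption about its behavior, not the project's actual code) would be:

import base64


def b64decode_utf8(encoded):
    # Decode a base64-encoded script body back into unicode text so it can
    # be written to a file and executed.
    return base64.b64decode(encoded).decode('utf-8')
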
Example #7
    def execute(self):
        """Execute all checks and preparations."""
        log(_green("STATE: Started"), self._log_file)

        online_app, offline_app = get_blue_green_apps(self._app, self._db.apps,
                                                      self._log_file)
        if not online_app:
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    self._app,
                    "Blue/green is not enabled on this app or not well configured"
                ))
            return

        copy_ami_option = (self._job['options'][0] if 'options' in self._job
                           and len(self._job['options']) > 0 else
                           get_blue_green_copy_ami_config(self._config))
        copy_ami_option = boolify(copy_ami_option)

        app_region = self._app['region']
        as_conn3 = self._cloud_connection.get_connection(app_region,
                                                         ['autoscaling'],
                                                         boto_version='boto3')

        as_group, as_group_processes_to_suspend = get_autoscaling_group_and_processes_to_suspend(
            as_conn3, offline_app, self._log_file)
        suspend_autoscaling_group_processes(as_conn3, as_group,
                                            as_group_processes_to_suspend,
                                            self._log_file)

        try:
            lb_mgr = load_balancing.get_lb_manager(
                self._cloud_connection, self._app['region'],
                online_app["safe-deployment"]["load_balancer_type"])

            # check if app is online
            if not online_app:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        self._app,
                        "Blue/green is not enabled on this app or not well configured"
                    ))
                return

            running_jobs = get_running_jobs(self._db, online_app['_id'],
                                            offline_app['_id'],
                                            self._job['_id'])
            if abort_if_other_bluegreen_job(
                    running_jobs, self._worker,
                    self._get_notification_message_aborted(
                        self._app,
                        "Please wait until the end of the current jobs before triggering a Blue/green operation"
                    ), self._log_file):
                return

            # Check if app has up to date AMI
            if ((not copy_ami_option and 'ami' not in offline_app)
                    or (copy_ami_option and 'ami' not in online_app)):
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app,
                        "Please run `Buildimage` first or use the `copy_ami` option"
                    ))
                return

            # Check if app has AS
            if offline_app['autoscale']['name'] and online_app['autoscale'][
                    'name']:
                if not (check_autoscale_exists(
                        self._cloud_connection,
                        offline_app['autoscale']['name'],
                        offline_app['region']) and check_autoscale_exists(
                            self._cloud_connection,
                            online_app['autoscale']['name'],
                            online_app['region'])):
                    self._worker.update_status(
                        "aborted",
                        message=self._get_notification_message_aborted(
                            offline_app,
                            "Please check that the configured AutoScale on both green and blue app exists."
                        ))
                    return
            else:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app,
                        "Please set an AutoScale on both green and blue app."))
                return

            # Check if we have two different AS !
            if offline_app['autoscale']['name'] == online_app['autoscale'][
                    'name']:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app,
                        "Please set a different AutoScale on green and blue app."
                    ))
                return

            if copy_ami_option:
                log(
                    "Copy AMI option activated. AMI used by [{0}] will be reused by [{1}]"
                    .format(online_app['autoscale']['name'],
                            offline_app['autoscale']['name']), self._log_file)

            # Check if modules have been deployed
            if get_blue_green_config(self._config, 'preparebluegreen',
                                     'module_deploy_required', False):
                if not check_app_manifest(
                        offline_app, self._config, self._log_file,
                        get_path_from_app_with_color(offline_app)):
                    self._worker.update_status(
                        "aborted",
                        message=self._get_notification_message_aborted(
                            offline_app, "Please deploy your app's modules"))
                    return

            # Check if instances are already running
            if get_instances_from_autoscaling(offline_app['autoscale']['name'],
                                              as_conn3):
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app,
                        "Autoscaling Group of offline app should be empty."))
                return

            # Get the online ELB
            online_elbs = lb_mgr.list_lbs_from_autoscale(
                online_app['autoscale']['name'], self._log_file)
            if len(online_elbs) == 0:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app,
                        "Online app AutoScale is not attached to a valid Elastic Load Balancer"
                    ))
                return

            # Create the temporary ELB: ghost-bluegreentemp-{original ELB name}, duplicated from the online ELB
            temp_elb_name, new_elb_dns = (None, None)
            create_temporary_elb_option = (
                self._job['options'][1]
                if 'options' in self._job and len(self._job['options']) > 1
                else get_blue_green_create_temporary_elb_config(self._config))
            if boolify(create_temporary_elb_option):
                online_elb = online_elbs[0]
                temp_elb_name = "bgtmp-{0}".format(
                    offline_app['_id'])[:31]  # ELB name is 32 char long max
                log(
                    _green(
                        "Creating the temporary ELB [{0}] by copying parameters from [{1}]"
                        .format(temp_elb_name, online_elb)), self._log_file)
                new_elb_dns = lb_mgr.copy_lb(
                    temp_elb_name, online_elb, {
                        'app_id': str(offline_app['_id']),
                        'bluegreen-temporary': 'true'
                    }, self._log_file)

                # Register the temporary ELB into the AutoScale
                log(
                    _green("Attaching ELB [{0}] to the AutoScale [{1}]".format(
                        temp_elb_name, offline_app['autoscale']['name'])),
                    self._log_file)
                lb_mgr.register_lbs_into_autoscale(
                    offline_app['autoscale']['name'], [], [temp_elb_name],
                    self._log_file)

            offline_app['autoscale']['min'] = online_app['autoscale']['min']
            offline_app['autoscale']['max'] = online_app['autoscale']['max']
            if copy_ami_option:
                offline_app['ami'] = online_app['ami']
                offline_app['build_infos']['ami_name'] = online_app[
                    'build_infos']['ami_name']
                log(
                    "Copying AMI [{0}]({1}) into offline app [{2}]".format(
                        offline_app['ami'],
                        offline_app['build_infos']['ami_name'],
                        str(offline_app['_id'])), self._log_file)
                self._update_app_ami(offline_app)
            # Update AutoScale properties in DB App
            self._update_app_autoscale_options(offline_app, online_app,
                                               self._log_file)

            # Update AutoScale properties and starts instances
            if copy_ami_option:
                try:
                    if not create_userdata_launchconfig_update_asg(
                            offline_app['ami'],
                            self._cloud_connection,
                            offline_app,
                            self._config,
                            self._log_file,
                            update_as_params=True):
                        self._worker.update_status(
                            "failed",
                            message=self._get_notification_message_failed(
                                online_app, offline_app, ""))
                        return
                except Exception:
                    traceback.print_exc(self._log_file)
                    self._worker.update_status(
                        "failed",
                        message=self._get_notification_message_failed(
                            online_app, offline_app, ""))
                    return
            else:
                update_auto_scale(self._cloud_connection,
                                  offline_app,
                                  None,
                                  self._log_file,
                                  update_as_params=True)

            log(
                _green(
                    "Starting at least [{0}] instance(s) into the AutoScale [{1}]"
                    .format(offline_app['autoscale']['min'],
                            offline_app['autoscale']['name'])), self._log_file)

            self._worker.update_status(
                "done",
                message=self._get_notification_message_done(
                    offline_app, temp_elb_name, new_elb_dns))
        except GCallException as e:
            self._worker.update_status(
                "failed",
                message=self._get_notification_message_failed(
                    online_app, offline_app, e))
        finally:
            resume_autoscaling_group_processes(as_conn3, as_group,
                                               as_group_processes_to_suspend,
                                               self._log_file)
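
Both copy_ami_option and create_temporary_elb_option arrive as raw job option values (typically strings) and are normalized with boolify. A minimal sketch of such a helper, assuming it only needs to recognize common truthy spellings, could be:

def boolify(value):
    # Hypothetical sketch: interpret raw job option values such as 'true',
    # '1' or an actual bool as a boolean flag.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', '1', 'yes', 'y')
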
Example #8
    def execute(self):
        log(_green("STATE: Started"), self._log_file)
        swap_execution_strategy = (
            self._job['options'][0] if 'options' in self._job
            and len(self._job['options']) > 0 else "isolated")
        online_app, to_deploy_app = get_blue_green_apps(
            self._app, self._worker._db.apps, self._log_file)
        if not online_app:
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    self._app,
                    "Blue/green is not enabled on this app or not well configured"
                ))
            return

        running_jobs = get_running_jobs(self._db, online_app['_id'],
                                        to_deploy_app['_id'], self._job['_id'])
        if abort_if_other_bluegreen_job(
                running_jobs, self._worker,
                self._get_notification_message_aborted(
                    self._app,
                    "Please wait until the end of the current jobs before triggering a Blue/green operation"
                ), self._log_file):
            return

        try:
            lb_mgr = load_balancing.get_lb_manager(
                self._cloud_connection, self._app['region'],
                online_app["safe-deployment"]["load_balancer_type"])

            # Check AMI
            if 'ami' not in to_deploy_app:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app, "Please run `Buildimage` first"))
                return
            # Check if modules have been deployed
            if not check_app_manifest(
                    to_deploy_app, self._config, self._log_file,
                    get_path_from_app_with_color(to_deploy_app)):
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app, "Please deploy your app's modules"))
                return
            # Check ASG
            if to_deploy_app['autoscale']['name'] and online_app['autoscale'][
                    'name']:
                if not (check_autoscale_exists(
                        self._cloud_connection,
                        to_deploy_app['autoscale']['name'],
                        to_deploy_app['region']) and check_autoscale_exists(
                            self._cloud_connection,
                            online_app['autoscale']['name'],
                            online_app['region'])):
                    self._worker.update_status(
                        "aborted",
                        message=self._get_notification_message_aborted(
                            to_deploy_app,
                            "Please set an AutoScale on both green and blue app"
                        ))
                    return
            else:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app,
                        "Please set an AutoScale on both green and blue app."))
                return

            # Check if we have two different AS !
            if to_deploy_app['autoscale']['name'] == online_app['autoscale'][
                    'name']:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app,
                        "Please set a different AutoScale on green and blue app."
                    ))
                return

            # Check if we're ready to swap. If an instance is out of service
            # into the ELB pool raise an exception
            elb_instances = lb_mgr.get_instances_status_from_autoscale(
                to_deploy_app['autoscale']['name'], self._log_file)
            if len(elb_instances) == 0:
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        to_deploy_app,
                        "The offline application [{0}] doesn't have a valid Load Balancer associated.'"
                        .format(to_deploy_app['_id'])))
                return
            for e in elb_instances.values():
                if len(e.values()) == 0:
                    self._worker.update_status(
                        "aborted",
                        message=self._get_notification_message_aborted(
                            to_deploy_app,
                            "An ELB of the offline application [{0}] has no instances associated.'"
                            .format(to_deploy_app['_id'])))
                    return

            if len([
                    i for i in elb_instances.values()
                    if 'outofservice' in i.values()
            ]):
                raise GCallException(
                    'Cannot continue because one or more instances are in the out of service state in the temp ELB'
                )
            else:
                log(
                    _green(
                        "AutoScale blue [{0}] and green [{1}] ready for swap".
                        format(online_app['autoscale']['name'],
                               to_deploy_app['autoscale']['name'])),
                    self._log_file)

            self._execute_swap_hook(
                online_app, to_deploy_app, 'pre_swap',
                'Pre swap script for current {status} application',
                self._log_file)

            # Swap !
            elb_name, elb_dns = self._swap_asg(lb_mgr, swap_execution_strategy,
                                               online_app, to_deploy_app,
                                               self._log_file)
            if not elb_name:
                self._worker.update_status(
                    "failed",
                    message=self._get_notification_message_failed(
                        online_app, to_deploy_app,
                        'Unable to make blue-green swap'))
                return

            self._execute_swap_hook(
                online_app, to_deploy_app, 'post_swap',
                'Post swap script for previously {status} application',
                self._log_file)

            # All good
            done_notif = self._get_notification_message_done(
                online_app, online_app['autoscale']['name'],
                to_deploy_app['autoscale']['name'], elb_name, elb_dns)
            self._worker.update_status("done", message=done_notif)
        except GCallException as e:
            self._worker.update_status(
                "failed",
                message=self._get_notification_message_failed(
                    online_app, to_deploy_app, str(e)))
Example #9
    def _execute_deploy(self, module, fabric_execution_strategy,
                        safe_deployment_strategy):
        """
        Returns the deployment id
        """

        now = datetime.datetime.utcnow()
        ts = calendar.timegm(now.timetuple())

        git_repo, clone_path, revision, commit, commit_message = self._get_module_sources(
            module)

        # Store predeploy script in tarball
        if 'pre_deploy' in module:
            log("Create pre_deploy script for inclusion in target package",
                self._log_file)
            predeploy_source = b64decode_utf8(module['pre_deploy'])
            with io.open(clone_path + '/predeploy', mode='w',
                         encoding='utf-8') as f:
                f.write(predeploy_source)
            gcall('du -hs .', 'Display current build directory disk usage',
                  self._log_file)

        # Execute buildpack
        execute_module_script_on_ghost(self._app, module, 'build_pack',
                                       'Buildpack', clone_path, self._log_file,
                                       self._job, self._config)

        # Store postdeploy script in tarball
        if 'post_deploy' in module:
            log("Create post_deploy script for inclusion in target package",
                self._log_file)
            postdeploy_source = b64decode_utf8(module['post_deploy'])
            with io.open(clone_path + '/postdeploy',
                         mode='w',
                         encoding='utf-8') as f:
                f.write(postdeploy_source)
            gcall('du -hs .', 'Display current build directory disk usage',
                  self._log_file)

        # Store after_all_deploy script in tarball
        if 'after_all_deploy' in module:
            log(
                "Create after_all_deploy script for inclusion in target package",
                self._log_file)
            afteralldeploy_source = b64decode_utf8(module['after_all_deploy'])
            with io.open(clone_path + '/after_all_deploy',
                         mode='w',
                         encoding='utf-8') as f:
                f.write(afteralldeploy_source)
            gcall('du -hs .', 'Display current build directory disk usage',
                  self._log_file)

        # Store module metadata in tarball
        log("Create metadata file for inclusion in target package",
            self._log_file)
        module_metadata = u"""
#!/bin/bash

GHOST_MODULE_REPO="{repo}"
GHOST_MODULE_REV="{rev}"
GHOST_MODULE_COMMIT="{commit}"
GHOST_MODULE_COMMIT_MESSAGE="{commitmsg}"
GHOST_MODULE_USER="{user}"

"""
        metavars = {
            "repo": git_repo,
            "rev": revision,
            "commit": commit,
            "commitmsg": commit_message,
            "user": self._job['user']
        }
        module_metadata = module_metadata.format(**metavars)
        custom_env_vars = self._app.get('env_vars', None)
        if custom_env_vars and len(custom_env_vars):
            module_metadata = module_metadata + u''.join([
                u'export {key}="{val}" \n'.format(
                    key=env_var['var_key'], val=env_var.get('var_value', ''))
                for env_var in custom_env_vars
            ])
        with io.open(clone_path + '/.ghost-metadata',
                     mode='w',
                     encoding='utf-8') as f:
            f.write(module_metadata)
        gcall('du -hs .', 'Display current build directory disk usage',
              self._log_file)

        # Create tar archive
        pkg_name = self._package_module(module, ts, commit)

        before_update_manifest = update_app_manifest(self._app, self._config,
                                                     module, pkg_name,
                                                     self._log_file)
        try:
            all_app_modules_list = get_app_module_name_list(
                self._app['modules'])
            clean_local_module_workspace(
                get_path_from_app_with_color(self._app), all_app_modules_list,
                self._log_file)
            self._deploy_module(module, fabric_execution_strategy,
                                safe_deployment_strategy)
        except GCallException as e:
            log(
                "Deploy error occured, app manifest will be restored to its previous state",
                self._log_file)
            rollback_app_manifest(self._app, self._config,
                                  before_update_manifest, self._log_file)
            raise e

        if 'after_all_deploy' in module:
            log(
                "After all deploy script found for '{0}'. Executing it.".
                format(module['name']), self._log_file)
            execute_module_script_on_ghost(self._app, module,
                                           'after_all_deploy',
                                           'After all deploy', clone_path,
                                           self._log_file, self._job,
                                           self._config)

        now = datetime.datetime.utcnow()
        deployment = {
            'app_id': self._app['_id'],
            'job_id': self._job['_id'],
            'module': module['name'],
            'revision': revision,
            'commit': commit,
            'commit_message': commit_message,
            'timestamp': ts,
            'package': pkg_name,
            'module_path': module['path'],
            '_created': now,
            '_updated': now,
        }
        return self._worker._db.deploy_histories.insert(deployment)
Example #10
    def execute(self):
        try:
            ami_id, ami_name = self._aws_image_builder.start_builder()
            if ami_id is "ERROR":
                self._worker.update_status(
                    "failed",
                    message=
                    "ERROR: ami_id not found. The packer process may have failed."
                )
                return
        except (GalaxyNoMatchingRolesException,
                GalaxyBadRequirementPathException, GCallException) as e:
            self._worker.update_status("aborted", message=str(e))
            return

        # Update AMI in current object as this is used for creating the new launch configuration's root block device mapping
        self._app['ami'] = ami_id

        touch_app_manifest(self._app, self._config, self._log_file)
        log("Update app in MongoDB to update AMI: {0}".format(ami_id),
            self._log_file)
        self._update_app_ami(ami_id, ami_name)
        if self._aws_image_builder.purge_old_images():
            log("Old AMIs removed for this app", self._log_file)
        else:
            log("Purge old AMIs failed", self._log_file)

        if self._lxd_image_builder:
            log("Generating a new container", self._log_file)
            try:
                self._lxd_image_builder.set_source_hooks(
                    get_path_from_app_with_color(self._app))
                self._lxd_image_builder.start_builder()
            except Exception as e:
                traceback.print_exc(self._log_file)
                self._worker.update_status(
                    "failed",
                    message="An error occured during container process ({})".
                    format(e))
                return
            log("Update app in MongoDB to update container source image",
                self._log_file)
            self._update_container_source(self._job["_id"])

        if self._app['autoscale']['name']:
            try:
                if create_userdata_launchconfig_update_asg(
                        ami_id, self._cloud_connection, self._app,
                        self._config, self._log_file):
                    self._worker.update_status(
                        "done",
                        message=self._get_notification_message_done(ami_id))
                else:
                    self._worker.update_status(
                        "failed", message="Autoscaling group update failed")
            except Exception as e:
                traceback.print_exc(self._log_file)
                self._worker.update_status(
                    "failed",
                    message="Autoscaling group update failed: {0}".format(e))
        else:
            log("No autoscaling group name was set", self._log_file)
            self._worker.update_status(
                "done", message=self._get_notification_message_done(ami_id))
Example #11
    def _execute_deploy(self, module, fabric_execution_strategy, safe_deployment_strategy):
        """
        Returns the deployment id
        """

        now = datetime.datetime.utcnow()
        ts = calendar.timegm(now.timetuple())

        git_repo = module['git_repo'].strip()
        mirror_path = get_mirror_path_from_module(module)
        clone_path = get_buildpack_clone_path_from_module(self._app, module)
        lock_path = get_lock_path_from_repo(git_repo)
        revision = self._get_module_revision(module['name'])

        try:
            git_acquire_lock(lock_path, self._log_file)

            if not os.path.exists(mirror_path):
                gcall('git --no-pager clone --bare --mirror {r} {m}'.format(r=git_repo, m=mirror_path),
                      'Create local git mirror for remote {r}'.format(r=git_repo),
                      self._log_file)

            # Update existing git mirror
            os.chdir(mirror_path)
            gcall('git --no-pager gc --auto',
                  'Cleanup local mirror before update {r}'.format(r=git_repo),
                  self._log_file)
            gcall('git --no-pager fetch --all --tags --prune',
                  'Update local git mirror from remote {r}'.format(r=git_repo),
                  self._log_file)
        finally:
            git_release_lock(lock_path, self._log_file)

        # Resolve HEAD symbolic reference to identify the default branch
        head = git('--no-pager', 'symbolic-ref', '--short', 'HEAD', _tty_out=False).strip()

        # If revision is HEAD, replace it by the default branch
        if revision == 'HEAD':
            revision = head

        # If revision is a commit hash, a full intermediate clone is required before getting a shallow clone
        if self._is_commit_hash(revision):
            # Create intermediate clone from the local git mirror, chdir into it and fetch all commits
            source_path = get_intermediate_clone_path_from_module(self._app, module)
            if os.path.exists(source_path):
                gcall('chmod -R u+rwx {p}'.format(p=source_path), 'Update rights on previous intermediate clone', self._log_file)
                gcall('rm -rf {p}'.format(p=source_path), 'Removing previous intermediate clone', self._log_file)
            os.makedirs(source_path)
            os.chdir(source_path)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager init', 'Git init intermediate clone', self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager remote add origin file://{m}'.format(m=mirror_path), 'Git add local mirror as origin for intermediate clone', self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager fetch origin', 'Git fetch all commits from origin', self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager checkout {r}'.format(r=revision), 'Git checkout revision into intermediate clone: {r}'.format(r=revision), self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

            # Create shallow clone from the intermediate clone, chdir into it and retrieve submodules
            if os.path.exists(clone_path):
                gcall('chmod -R u+rwx {p}'.format(p=clone_path), 'Update rights on previous clone', self._log_file)
                gcall('rm -rf {p}'.format(p=clone_path), 'Removing previous clone', self._log_file)
            os.makedirs(clone_path)
            os.chdir(clone_path)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager clone file://{s} .'.format(s=source_path), 'Git clone from intermediate clone', self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager submodule update --init --recursive', 'Git update submodules', self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

            # Destroy intermediate clone
            gcall('chmod -R u+rwx {p}'.format(p=source_path), 'Update rights on previous intermediate clone', self._log_file)
            gcall('rm -rf {p}'.format(p=source_path), 'Removing intermediate clone', self._log_file)
        else:
            # Create clone from the local git mirror, chdir into it, fetch requested revision and retrieve submodules
            if os.path.exists(clone_path):
                gcall('chmod -R u+rwx {p}'.format(p=clone_path), 'Update rights on previous clone', self._log_file)
                gcall('rm -rf {p}'.format(p=clone_path), 'Removing previous clone', self._log_file)
            os.makedirs(clone_path)
            os.chdir(clone_path)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager clone --depth=10 file://{m} -b {r} .'.format(m=mirror_path, r=revision), 'Git clone from local mirror with depth limited to 10 from a specific revision: {r}'.format(r=revision), self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)
            gcall('git --no-pager submodule update --init --recursive', 'Git update submodules', self._log_file)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

        # Extract commit information
        commit = git('--no-pager', 'rev-parse', '--short', 'HEAD', _tty_out=False).strip()
        commit_message = git('--no-pager', 'log', '--max-count=1', '--format=%s', 'HEAD', _tty_out=False).strip()

        # At last, reset remote origin URL
        gcall('git --no-pager remote set-url origin {r}'.format(r=git_repo), 'Git reset remote origin to {r}'.format(r=git_repo), self._log_file)

        # Store predeploy script in tarball
        if 'pre_deploy' in module:
            log("Create pre_deploy script for inclusion in target package", self._log_file)
            predeploy_source = b64decode_utf8(module['pre_deploy'])
            with io.open(clone_path + '/predeploy', mode='w', encoding='utf-8') as f:
                f.write(predeploy_source)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

        # Execute buildpack
        execute_module_script_on_ghost(self._app, module, 'build_pack', 'Buildpack', clone_path,
                                       self._log_file, self._job, self._config)

        # Store postdeploy script in tarball
        if 'post_deploy' in module:
            log("Create post_deploy script for inclusion in target package", self._log_file)
            postdeploy_source = b64decode_utf8(module['post_deploy'])
            with io.open(clone_path + '/postdeploy', mode='w', encoding='utf-8') as f:
                f.write(postdeploy_source)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

        # Store after_all_deploy script in tarball
        if 'after_all_deploy' in module:
            log("Create after_all_deploy script for inclusion in target package", self._log_file)
            afteralldeploy_source = b64decode_utf8(module['after_all_deploy'])
            with io.open(clone_path + '/after_all_deploy', mode='w', encoding='utf-8') as f:
                f.write(afteralldeploy_source)
            gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

        # Store module metadata in tarball
        log("Create metadata file for inclusion in target package", self._log_file)
        module_metadata = u"""
#!/bin/bash

GHOST_MODULE_REPO="{repo}"
GHOST_MODULE_REV="{rev}"
GHOST_MODULE_COMMIT="{commit}"
GHOST_MODULE_COMMIT_MESSAGE="{commitmsg}"
GHOST_MODULE_USER="{user}"

"""
        metavars = {
            "repo": git_repo,
            "rev": revision,
            "commit": commit,
            "commitmsg": commit_message,
            "user": self._job['user']
        }
        module_metadata = module_metadata.format(**metavars)
        custom_env_vars = self._app.get('env_vars', None)
        if custom_env_vars and len(custom_env_vars):
            module_metadata = module_metadata + u''.join([u'export {key}="{val}" \n'.format(key=env_var['var_key'], val=env_var.get('var_value', '')) for env_var in custom_env_vars])
        with io.open(clone_path + '/.ghost-metadata', mode='w', encoding='utf-8') as f:
            f.write(module_metadata)
        gcall('du -hs .', 'Display current build directory disk usage', self._log_file)

        # Create tar archive
        pkg_name = self._package_module(module, ts, commit)

        before_update_manifest = update_app_manifest(self._app, self._config, module, pkg_name, self._log_file)
        try:
            all_app_modules_list = get_app_module_name_list(self._app['modules'])
            clean_local_module_workspace(get_path_from_app_with_color(self._app), all_app_modules_list, self._log_file)
            self._deploy_module(module, fabric_execution_strategy, safe_deployment_strategy)
        except GCallException as e:
            log("Deploy error occured, app manifest will be restored to its previous state", self._log_file)
            rollback_app_manifest(self._app, self._config, before_update_manifest, self._log_file)
            raise e

        if 'after_all_deploy' in module:
            log("After all deploy script found for '{0}'. Executing it.".format(module['name']), self._log_file)
            execute_module_script_on_ghost(self._app, module, 'after_all_deploy', 'After all deploy', clone_path,
                                           self._log_file, self._job, self._config)

        now = datetime.datetime.utcnow()
        deployment = {
            'app_id': self._app['_id'],
            'job_id': self._job['_id'],
            'module': module['name'],
            'revision': revision,
            'commit': commit,
            'commit_message': commit_message,
            'timestamp': ts,
            'package': pkg_name,
            'module_path': module['path'],
            '_created': now,
            '_updated': now,
        }
        return self._worker._db.deploy_histories.insert(deployment)
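
The mirror update at the top of this example is guarded by git_acquire_lock and git_release_lock, which are not shown. One plausible sketch, assuming a simple exclusive lock file is enough to serialize concurrent mirror updates between deployments, is:

import errno
import os
import time


def git_acquire_lock(lock_path, log_file=None):
    # Hypothetical sketch: serialize access to the shared git mirror by
    # spinning until an exclusive lock file can be created.
    while True:
        try:
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            return
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            time.sleep(1)


def git_release_lock(lock_path, log_file=None):
    # Remove the lock file so the next deployment can update the mirror.
    os.remove(lock_path)
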