def post_fetched_app(response):
    """Eve post-fetch hook for a single app document.

    Reads the `embedded` query parameter to decide whether each module's
    last deployment must be embedded, then delegates to _post_fetched_app.
    """
    embed_spec = json.loads(request.args.get('embedded', '{}'))
    with_last_deployment = boolify(embed_spec.get('modules.last_deployment', False))
    _post_fetched_app(response, with_last_deployment)
def post_fetched_deployments(response):
    """Eve post-fetch hook for deployment lists.

    When the `embedded` query parameter requests `app_id`, normalize the
    embedded app document of every fetched deployment.
    """
    embed_spec = json.loads(request.args.get('embedded', '{}'))
    if boolify(embed_spec.get('app_id', False)):
        for item in response['_items']:
            normalize_app(item.get('app_id'), False)
def execute(self):
    """Execute a user-provided shell script on the app's instances.

    Job options layout:
        options[0] -> base64-encoded script (required)
        options[1] -> module name (optional)
        options[2] -> execution strategy ('single' or a fabric strategy)
        options[3] -> single host IP when strategy == 'single',
                      otherwise an optional safe-deployment group

    Aborts when the command is disabled, the script is missing, cannot be
    decoded, or does not start with an allowed shell shebang.
    """
    if not boolify(self._config.get('enable_executescript_command', True)):
        return self._abort(
            "This command has been disabled by your administrator.")

    def job_option(index):
        # Safely fetch an optional positional job option (None when absent).
        options = self._job.get('options', [])
        return options[index] if len(options) > index else None

    script = job_option(0)
    module_name = job_option(1)
    execution_strategy = job_option(2)
    if execution_strategy == 'single':
        # options[3] is a single host IP
        fabric_execution_strategy = None
        safe_deployment_strategy = None
        single_host_ip = job_option(3)
    else:
        # options[2] is the fabric type, options[3] may be a safe-deploy group
        fabric_execution_strategy = execution_strategy
        safe_deployment_strategy = job_option(3)
        single_host_ip = None
    try:
        log(_green("STATE: Started"), self._log_file)
        try:
            if not script or not script.strip():
                return self._abort("No valid script provided")
            script_data = b64decode_utf8(script)
            allowed_shebang = ('#!/bin/bash', '#! /bin/bash',
                               '#!/bin/sh', '#! /bin/sh')
            if not script_data.startswith(allowed_shebang):
                return self._abort(
                    "No valid shell script provided (shebang missing)")
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; decode errors still abort cleanly.
            return self._abort("No valid script provided")
        if single_host_ip:
            log(_yellow("Executing script on a single host: %s" % single_host_ip),
                self._log_file)
            self._exec_script_single_host(script_data, module_name, single_host_ip)
        else:
            log(_yellow("Executing script on every running instance"),
                self._log_file)
            self._exec_script(script_data, module_name,
                              fabric_execution_strategy, safe_deployment_strategy)
        self._worker.update_status(
            "done", message=self._get_notification_message_done())
        log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status(
            "failed", message=self._get_notification_message_failed(e))
        log(_red("STATE: End"), self._log_file)
def _package_module(self, module, ts, commit):
    """Create the module tarball, upload it to S3 and purge old packages.

    :param module: dict module configuration (name, optional uid/gid overrides)
    :param ts: timestamp used as the package name prefix
    :param commit: revision identifier appended to the package name
    :return: str the package file name
    """
    path = get_buildpack_clone_path_from_module(self._app, module)
    os.chdir(path)
    pkg_name = "{0}_{1}_{2}".format(ts, module['name'], commit)
    pkg_path = '../{0}'.format(pkg_name)
    # Default file ownership inside the tarball to the current process ids.
    uid = module.get('uid', os.geteuid())
    gid = module.get('gid', os.getegid())
    tar_exclude_git = "--exclude '.git'" if boolify(
        self._config.get('deployment_package_exclude_git_metadata', False)) else ''
    gcall("tar czf {0} --owner={1} --group={2} {3} .".format(
        pkg_path, uid, gid, tar_exclude_git),
        "Creating package: %s" % pkg_name, self._log_file)
    log("Uploading package: %s" % pkg_name, self._log_file)
    cloud_connection = cloud_connections.get(
        self._app.get('provider', DEFAULT_PROVIDER))(self._log_file)
    conn = cloud_connection.get_connection(
        self._config.get('bucket_region', self._app['region']), ["s3"])
    bucket = conn.get_bucket(self._config['bucket_s3'])
    key_path = '{path}/{pkg_name}'.format(path=path, pkg_name=pkg_name)
    # BUGFIX: the lookup used `path` (the local clone directory) instead of
    # the computed S3 `key_path`, so an existing package key was never found
    # and a new key object was always created.
    key = bucket.get_key(key_path)
    if not key:
        key = bucket.new_key(key_path)
    key.set_contents_from_filename(pkg_path)
    gcall("rm -f {0}".format(pkg_path),
          "Deleting local package: %s" % pkg_name, self._log_file)
    retention_config = self._config.get('deployment_package_retention', None)
    if retention_config and self._app['env'] in retention_config:
        # 42 is the fallback retention count if the env key vanished meanwhile.
        retention = retention_config.get(self._app['env'], 42)
        self._purge_s3_package(path, bucket, module, pkg_name, retention)
    return pkg_name
def post_fetched_apps(response):
    """Eve post-fetch hook for app lists.

    Normalizes every fetched app, optionally embedding each module's last
    deployment when requested via the `embedded` query parameter.
    """
    embed_spec = json.loads(request.args.get('embedded', '{}'))
    with_last_deployment = boolify(embed_spec.get('modules.last_deployment', False))
    for app in response['_items']:
        normalize_app(app, with_last_deployment)
def _lxd_bootstrap(self):
    """Bootstrap the LXD container: export Ghost env vars and, unless salt
    bootstrapping is skipped, download and install Salt inside the container."""
    log("Bootstrap container", self._log_file)
    self._set_ghost_env_vars()
    # Guard clauses instead of nested conditionals: nothing else to do when
    # salt bootstrap is skipped or salt is not among the provisioners.
    if boolify(self.skip_salt_bootstrap_option):
        return
    if 'salt' not in self.provisioners:
        return
    download_result = self.container.execute(
        ["wget", "-O", "bootstrap-salt.sh", "https://bootstrap.saltstack.com"])
    self._container_log(download_result)
    self._container_execution_error(download_result, "Salt Download")
    install_result = self.container.execute(["sh", "bootstrap-salt.sh"])
    self._container_log(install_result)
    self._container_execution_error(install_result, "Salt Install")
def post_fetched_deployment(response):
    """Eve post-fetch hook for a single deployment document.

    When the `embedded` query parameter requests `app_id`, normalize the
    embedded app document.
    """
    embed_spec = json.loads(request.args.get('embedded', '{}'))
    if boolify(embed_spec.get('app_id', False)):
        normalize_app(response.get('app_id'), False)
def execute(self):
    """Execute all checks and preparations for a blue/green swap.

    Verifies blue/green configuration, AutoScale setup, deployed modules and
    ELB attachment; optionally duplicates the online ELB into a temporary one
    and copies the online AMI; finally updates and starts the offline
    AutoScale. Any failed precondition aborts with an explanatory message,
    and suspended AutoScaling processes are always resumed.
    """
    log(_green("STATE: Started"), self._log_file)
    online_app, offline_app = get_blue_green_apps(self._app, self._db.apps,
                                                 self._log_file)
    if not online_app:
        self._worker.update_status(
            "aborted",
            message=self._get_notification_message_aborted(
                self._app,
                "Blue/green is not enabled on this app or not well configured"))
        return
    # options[0] overrides the configured default for the copy-AMI behavior.
    copy_ami_option = (self._job['options'][0]
                       if 'options' in self._job and len(self._job['options']) > 0
                       else get_blue_green_copy_ami_config(self._config))
    copy_ami_option = boolify(copy_ami_option)
    app_region = self._app['region']
    as_conn3 = self._cloud_connection.get_connection(
        app_region, ['autoscaling'], boto_version='boto3')
    as_group, as_group_processes_to_suspend = get_autoscaling_group_and_processes_to_suspend(
        as_conn3, offline_app, self._log_file)
    suspend_autoscaling_group_processes(as_conn3, as_group,
                                        as_group_processes_to_suspend,
                                        self._log_file)
    try:
        lb_mgr = load_balancing.get_lb_manager(
            self._cloud_connection, self._app['region'],
            online_app["safe-deployment"]["load_balancer_type"])
        # NOTE: the redundant second `if not online_app` check that used to
        # live here was removed — it was already handled before the try block.
        running_jobs = get_running_jobs(self._db, online_app['_id'],
                                        offline_app['_id'], self._job['_id'])
        if abort_if_other_bluegreen_job(
                running_jobs, self._worker,
                self._get_notification_message_aborted(
                    self._app,
                    "Please wait until the end of the current jobs before triggering a Blue/green operation"),
                self._log_file):
            return
        # Check if app has up to date AMI
        if ((not copy_ami_option and 'ami' not in offline_app)
                or (copy_ami_option and 'ami' not in online_app)):
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    offline_app,
                    "Please run `Buildimage` first or use the `copy_ami` option"))
            return
        # Check if app has an AutoScale configured on both sides
        if offline_app['autoscale']['name'] and online_app['autoscale']['name']:
            if not (check_autoscale_exists(self._cloud_connection,
                                           offline_app['autoscale']['name'],
                                           offline_app['region'])
                    and check_autoscale_exists(self._cloud_connection,
                                               online_app['autoscale']['name'],
                                               online_app['region'])):
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app,
                        "Please check that the configured AutoScale on both green and blue app exists."))
                return
        else:
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    offline_app,
                    "Please set an AutoScale on both green and blue app."))
            return
        # Check if we have two different AS !
        if offline_app['autoscale']['name'] == online_app['autoscale']['name']:
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    offline_app,
                    "Please set a different AutoScale on green and blue app."))
            return
        if copy_ami_option:
            log("Copy AMI option activated. AMI used by [{0}] will be reused by [{1}]".format(
                online_app['autoscale']['name'],
                offline_app['autoscale']['name']), self._log_file)
        # Check if modules have been deployed
        if get_blue_green_config(self._config, 'preparebluegreen',
                                 'module_deploy_required', False):
            if not check_app_manifest(offline_app, self._config, self._log_file,
                                      get_path_from_app_with_color(offline_app)):
                self._worker.update_status(
                    "aborted",
                    message=self._get_notification_message_aborted(
                        offline_app, "Please deploy your app's modules"))
                return
        # Check if instances are already running
        if get_instances_from_autoscaling(offline_app['autoscale']['name'],
                                          as_conn3):
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    offline_app,
                    "Autoscaling Group of offline app should be empty."))
            return
        # Get the online ELB
        online_elbs = lb_mgr.list_lbs_from_autoscale(
            online_app['autoscale']['name'], self._log_file)
        if len(online_elbs) == 0:
            self._worker.update_status(
                "aborted",
                message=self._get_notification_message_aborted(
                    offline_app,
                    "Online app AutoScale is not attached to a valid Elastic Load Balancer"))
            return
        # Create the temporary ELB: ghost-bluegreentemp-{original ELB name},
        # duplicated from the online ELB
        temp_elb_name, new_elb_dns = (None, None)
        create_temporary_elb_option = (
            self._job['options'][1]
            if 'options' in self._job and len(self._job['options']) > 1
            else get_blue_green_create_temporary_elb_config(self._config))
        if boolify(create_temporary_elb_option):
            online_elb = online_elbs[0]
            # ELB name is 32 char long max
            temp_elb_name = "bgtmp-{0}".format(offline_app['_id'])[:31]
            log(_green("Creating the temporary ELB [{0}] by copying parameters from [{1}]".format(
                temp_elb_name, online_elb)), self._log_file)
            new_elb_dns = lb_mgr.copy_lb(
                temp_elb_name, online_elb,
                {'app_id': str(offline_app['_id']),
                 'bluegreen-temporary': 'true'},
                self._log_file)
            # Register the temporary ELB into the AutoScale
            log(_green("Attaching ELB [{0}] to the AutoScale [{1}]".format(
                temp_elb_name, offline_app['autoscale']['name'])),
                self._log_file)
            lb_mgr.register_lbs_into_autoscale(
                offline_app['autoscale']['name'], [], [temp_elb_name],
                self._log_file)
        offline_app['autoscale']['min'] = online_app['autoscale']['min']
        offline_app['autoscale']['max'] = online_app['autoscale']['max']
        if copy_ami_option:
            offline_app['ami'] = online_app['ami']
            offline_app['build_infos']['ami_name'] = online_app['build_infos']['ami_name']
            log("Copying AMI [{0}]({1}) into offline app [{2}]".format(
                offline_app['ami'], offline_app['build_infos']['ami_name'],
                str(offline_app['_id'])), self._log_file)
            self._update_app_ami(offline_app)
        # Update AutoScale properties in DB App
        self._update_app_autoscale_options(offline_app, online_app,
                                           self._log_file)
        # Update AutoScale properties and starts instances
        if copy_ami_option:
            try:
                if not create_userdata_launchconfig_update_asg(
                        offline_app['ami'], self._cloud_connection,
                        offline_app, self._config, self._log_file,
                        update_as_params=True):
                    self._worker.update_status(
                        "failed",
                        message=self._get_notification_message_failed(
                            online_app, offline_app, ""))
                    return
            except Exception:
                # BUGFIX: print_exc's first positional parameter is `limit`,
                # so the log file must be passed with the `file=` keyword;
                # also narrowed from a bare `except:`.
                traceback.print_exc(file=self._log_file)
                self._worker.update_status(
                    "failed",
                    message=self._get_notification_message_failed(
                        online_app, offline_app, ""))
                return
        else:
            update_auto_scale(self._cloud_connection, offline_app, None,
                              self._log_file, update_as_params=True)
        log(_green("Starting at least [{0}] instance(s) into the AutoScale [{1}]".format(
            offline_app['autoscale']['min'],
            offline_app['autoscale']['name'])), self._log_file)
        self._worker.update_status(
            "done",
            message=self._get_notification_message_done(
                offline_app, temp_elb_name, new_elb_dns))
    except GCallException as e:
        self._worker.update_status(
            "failed",
            message=self._get_notification_message_failed(
                online_app, offline_app, e))
    finally:
        resume_autoscaling_group_processes(as_conn3, as_group,
                                           as_group_processes_to_suspend,
                                           self._log_file)
def update_auto_scale(cloud_connection, app, launch_config, log_file, update_as_params=False):
    """ Update the AutoScaling parameters.

        :param cloud_connection: cloud connection factory
        :param app: dict The app config define in Ghost.
        :param launch_config: boto obj The new launch configuration (may be None).
        :param log_file: log file obj
        :param update_as_params: bool If set to True the
            desired_capacity/min_size/max_size/subnets will be updated,
            along with the group's tags.
        :return: None
    """
    as_conn = cloud_connection.get_connection(app['region'], ['autoscaling'],
                                              boto_version='boto3')
    connvpc = cloud_connection.get_connection(app['region'], ["vpc"])
    # Availability zones are derived from the app's configured subnets.
    az = [i.availability_zone
          for i in connvpc.get_all_subnets(
              subnet_ids=app['environment_infos']['subnet_ids'])]
    as_group = get_autoscaling_group_object(as_conn, app['autoscale']['name'])
    if launch_config:
        as_conn.update_auto_scaling_group(
            AutoScalingGroupName=app['autoscale']['name'],
            LaunchConfigurationName=launch_config.name)
    if update_as_params:
        as_conn.update_auto_scaling_group(
            AutoScalingGroupName=app['autoscale']['name'],
            MinSize=app['autoscale']['min'],
            MaxSize=app['autoscale']['max'],
            AvailabilityZones=az,
            VPCZoneIdentifier=','.join(app['environment_infos']['subnet_ids']))
    asg_metrics = [
        "GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity",
        "GroupInServiceInstances", "GroupPendingInstances",
        "GroupStandbyInstances", "GroupTerminatingInstances",
        "GroupTotalInstances"
    ]
    if boolify(app['autoscale'].get('enable_metrics', True)):
        log("Enabling Autoscaling group [{0}] metrics ({1}).".format(
            app['autoscale']['name'], asg_metrics), log_file)
        as_conn.enable_metrics_collection(
            AutoScalingGroupName=app['autoscale']['name'],
            Granularity='1Minute',
        )
    else:
        log("Disabling Autoscaling group [{0}] metrics ({1}).".format(
            app['autoscale']['name'], asg_metrics), log_file)
        as_conn.disable_metrics_collection(
            AutoScalingGroupName=app['autoscale']['name'],
        )
    log("Autoscaling group [{0}] updated.".format(app['autoscale']['name']),
        log_file)
    if update_as_params:
        app_tags = get_app_tags(app, log_file)
        as_tags = get_autoscale_tags(as_group, log_file)
        # Drop AutoScale tags that no longer exist in the app configuration.
        # Idiom fix: membership test on the dict itself instead of .keys().
        to_delete_tags = [v for k, v in as_tags.items()
                          if k and v and k not in app_tags]
        # Idiom fix: `to_delete_tags and len(to_delete_tags)` was redundant.
        if to_delete_tags:
            as_conn.delete_tags(Tags=to_delete_tags)
        # list() for Python 3 compatibility: boto3 parameter validation
        # rejects dict views; on Python 2 .values() was already a list.
        as_conn.create_or_update_tags(Tags=list(app_tags.values()))
        log("Autoscaling tags [{0}] updated.".format(app['autoscale']['name']),
            log_file)
def is_available(app_context=None):
    """Report whether the executescript command is enabled in the Ghost config."""
    enabled = ghost_config.get('enable_executescript_command', True)
    return boolify(enabled)