def launch_deploy(app, module, hosts_list, fabric_execution_strategy, log_file):
    """ Launch fabric tasks on remote hosts.

        :param app: dict: Ghost object which describes the application parameters.
        :param module: dict: Ghost object which describes the module parameters.
        :param hosts_list: list: Instances private IPs.
        :param fabric_execution_strategy: string: Deployment strategy (serial or parallel).
        :param log_file: object used for logging.
    """
    # Clone the deploy task function to avoid modifying the original shared instance
    task = copy(deploy)
    task, app_ssh_username, key_filename, fabric_execution_strategy = _get_fabric_params(
        app, fabric_execution_strategy, task, log_file)

    bucket_region = config.get('bucket_region', app['region'])
    notification_endpoint = config.get('notification_endpoint', '')
    stage2 = render_stage2(config, bucket_region)

    log("Updating current instances in {}: {}".format(fabric_execution_strategy, hosts_list), log_file)
    result = fab_execute(task, module, app_ssh_username, key_filename, stage2, notification_endpoint,
                         log_file, hosts=hosts_list)
    _handle_fabric_errors(result, "Deploy error")
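# Usage sketch (illustrative only; the host IPs and variable names below are hypothetical
# placeholders, not taken from the original code), assuming `app` and `module` are the Ghost
# documents already loaded by the calling command:
#
#     launch_deploy(app, module,
#                   hosts_list=['10.0.1.12', '10.0.1.13'],
#                   fabric_execution_strategy='serial',
#                   log_file=log_file)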
def _get_fabric_params(app, fabric_execution_strategy, task, log_file):
    app_region = app['region']
    app_assumed_account_id = app.get('assumed_account_id', '')
    # FIXME: key_name and ssh_username should be dynamically retrieved from each EC2 instance.
    # Indeed, in case of mixed deployments they may differ from one to another.
    # This can happen when these values are changed on the Ghost app
    # but not all live instances are replaced to use the new values.
    app_key_name = app['environment_infos']['key_name']
    app_ssh_username = app['build_infos']['ssh_username']
    key_filename = get_key_path(config, app_region, app_assumed_account_id, app_key_name, log_file)

    if fabric_execution_strategy not in ['serial', 'parallel']:
        fabric_execution_strategy = config.get('fabric_execution_strategy', 'serial')

    if fabric_execution_strategy == 'parallel':
        setattr(task, 'serial', False)
        setattr(task, 'parallel', True)
    else:
        setattr(task, 'serial', True)
        setattr(task, 'parallel', False)

    return task, app_ssh_username, key_filename, fabric_execution_strategy
def _get_app_manifest_from_s3(app, config, log_file):
    key_path = get_path_from_app_with_color(app) + '/MANIFEST'
    cloud_connection = cloud_connections.get(app.get('provider', DEFAULT_PROVIDER))(config)
    conn = cloud_connection.get_connection(config.get('bucket_region', app['region']), ["s3"])
    bucket = conn.get_bucket(config['bucket_s3'])
    key = bucket.get_key(key_path)
    return key, key_path, bucket
def get_blue_green_config(config, command, key, default_value):
    """ Return the Blue/Green option of the given command from the global config """
    blue_green_section = config.get('blue_green', None)
    if not blue_green_section:
        return default_value
    command_section = blue_green_section.get(command, None)
    if not command_section:
        return default_value
    return command_section.get(key, default_value)
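# Illustrative sketch (the config layout and key names below are assumptions for the example,
# not taken from the original source). Given a global config such as
#
#     blue_green:
#       enabled: true
#       preparebluegreen:
#         module_pull_url: 'http://example.invalid/pull'
#
# a hypothetical call like
#
#     get_blue_green_config(config, 'preparebluegreen', 'module_pull_url', '')
#
# returns the nested value, and falls back to the provided default whenever the 'blue_green'
# section, the command sub-section or the key is missing.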
def get_regions(self, services):
    regions = []
    try:
        for partition_name in config.get('aws_partitions', ['aws']):
            for region in Session().get_available_regions(services[0], partition_name):
                regions.append(region)
    except Exception:
        if self._log_file:
            log("An error occurred when creating the connection, check the exception error message for more details",
                self._log_file)
        raise
    return regions
def download_s3_object(app, source_url, working_directory, revision, log_file):
    cloud_connection = cloud_connections.get(app.get('provider', DEFAULT_PROVIDER))(log_file)
    conn = cloud_connection.get_connection(config.get('bucket_region', app['region']), ["s3"],
                                           boto_version='boto3')
    # Extract the bucket name and key path from an s3:// URL
    pattern = re.compile('s3://([a-z0-9][a-z0-9-.]*)?/(.*)')
    matches = pattern.search(source_url)
    bucket_name = matches.group(1)
    bucket_key_path = matches.group(2)
    conn.download_file(bucket_name, bucket_key_path,
                       os.path.join(working_directory, os.path.basename(bucket_key_path)),
                       ExtraArgs={'VersionId': revision})
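# Illustrative sketch (the URL, directory and revision values are hypothetical): for a call such as
#
#     download_s3_object(app, 's3://my-app-bucket/releases/app.tar.gz',
#                        '/tmp/ghost-work', 'VERSION_ID', log_file)
#
# the regular expression above yields bucket_name='my-app-bucket' and
# bucket_key_path='releases/app.tar.gz', and the object is saved as
# /tmp/ghost-work/app.tar.gz at the requested S3 object version.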
def generate_userdata(bucket_s3, s3_region, ghost_root_path):
    """ Generates an EC2 userdata script using the Ghost's "stage1" script.

        :return: The formatted stage1 script, or an empty string if the template is missing.
    """
    jinja_templates_path = '%s/scripts' % ghost_root_path
    if os.path.exists('%s/stage1' % jinja_templates_path):
        loader = FileSystemLoader(jinja_templates_path)
        jinja_env = Environment(loader=loader)
        template = jinja_env.get_template('stage1')
        userdata = template.render(bucket_s3=bucket_s3, bucket_region=s3_region,
                                   notification_endpoint=config.get('notification_endpoint', ''))
        return userdata
    else:
        return ""
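# Illustrative sketch (hypothetical bucket name; the path matches the default ghost_root_path
# used elsewhere in this code): renders <ghost_root_path>/scripts/stage1 with Jinja, or returns
# '' when the template is absent.
#
#     userdata = generate_userdata('my-ghost-bucket', 'eu-west-1', '/usr/local/share/ghost')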
def check_app_manifest(app, config, log_file, app_path):
    key_path = app_path + '/MANIFEST'
    cloud_connection = cloud_connections.get(app.get('provider', DEFAULT_PROVIDER))(config)
    conn = cloud_connection.get_connection(config.get('bucket_region', app['region']), ["s3"])
    bucket = conn.get_bucket(config['bucket_s3'])
    key = bucket.get_key(key_path)
    if not key:
        log("ERROR: MANIFEST [{0}] not found.".format(key_path), log_file)
        return False
    manifest = key.get_contents_as_string()
    if sys.version > '3':
        manifest = manifest.decode('utf-8')

    deployed_modules = manifest.strip().split('\n')
    nb_deployed_modules = len(deployed_modules)
    nb_app_modules = len(app['modules'])
    if not nb_deployed_modules == nb_app_modules:
        log("ERROR: Number of modules configured in the app [{0}] doesn't match the number "
            "of deployed modules according to the MANIFEST.".format(app['_id']), log_file)
        return False

    for idx, up_module in enumerate(deployed_modules):
        mod = up_module.strip().split(':')
        # Deployed and configured modules should have the same names, in the same order
        if not mod[0] == app['modules'][idx]['name']:
            log("ERROR: Deployed module name ({0}) doesn't match "
                "the configured module name ({1})".format(mod[0], app['modules'][idx]['name']), log_file)
            return False
    return True
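# Illustrative sketch of the MANIFEST layout this check relies on (inferred from the parsing
# above, not a specification): one line per deployed module, colon-separated, with the module
# name in the first field; the remaining fields are not inspected by this check.
#
#     wordpress:<other-deployment-fields>
#     api:<other-deployment-fields>
#
# The check passes only if the line count matches the app's configured modules and each line's
# module name matches the configured module at the same index.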
    'http://docs.cloud-deploy.io',
    'https://docs.cloud-deploy.io',
    'http://docs.cloudeploy.io',
    'https://docs.cloudeploy.io',
    'http://editor.swagger.io',
]
X_HEADERS = ['Authorization', 'Content-Type', 'If-Match']

API_NAME = 'GHOST API'

# Let's just use the local mongod instance. Edit as needed.
# Please note that MONGO_HOST and MONGO_PORT could very well be left
# out as they already default to a bare bones local 'mongod' instance.
# Get env var first (e.g. from docker-compose and the like), then config file, then default
MONGO_HOST = os.getenv('MONGO_HOST', config.get('mongo_host', 'localhost'))
MONGO_PORT = 27017
#MONGO_USERNAME = '******'
#MONGO_PASSWORD = '******'
MONGO_DBNAME = 'apitest'
MONGO_QUERY_BLACKLIST = ['$where']

# RQ Workers params
RQ_JOB_TIMEOUT = config.get('rq_worker_job_timeout', 3600)
REDIS_HOST = os.getenv('REDIS_HOST', config.get('redis_host', 'localhost'))

# Enable reads (GET) and inserts (POST) for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST']
def ghost_has_blue_green_enabled():
    """ Return whether Ghost has the Blue/Green option enabled globally """
    return config.get('blue_green') and config.get('blue_green').get('enabled', False)
def create_ec2_instance(cloud_connection, app, app_color, config, private_ip_address, subnet_id, log_file):
    """ Creates an EC2 instance and returns it.

        :param cloud_connection: The app Cloud Connection object
        :param app: Ghost app document
        :param app_color: Color value if BlueGreen application type
        :param config: Ghost config settings
        :param private_ip_address: Private IP address to use when creating the instance
        :param subnet_id: Subnet to use when creating the instance
        :param log_file: Logging file
        :return the EC2 instance object with all its details
    """
    log(_yellow(" INFO: Creating User-Data"), log_file)
    ghost_root_path = config.get('ghost_root_path', '/usr/local/share/ghost/')
    userdata = generate_userdata(config['bucket_s3'], config.get('bucket_region', app['region']), ghost_root_path)

    log(_yellow(" INFO: Creating EC2 instance"), log_file)
    if app['ami']:
        log(" CONF: AMI: {0}".format(app['ami']), log_file)
        log(" CONF: Region: {0}".format(app['region']), log_file)

        conn = cloud_connection.get_connection(app['region'], ["ec2"])
        interface = cloud_connection.launch_service(
            ["ec2", "networkinterface", "NetworkInterfaceSpecification"],
            subnet_id=subnet_id,
            groups=app['environment_infos']['security_groups'],
            associate_public_ip_address=app['environment_infos'].get('public_ip_address', True),
            private_ip_address=private_ip_address)
        interfaces = cloud_connection.launch_service(
            ["ec2", "networkinterface", "NetworkInterfaceCollection"],
            interface)

        devices = get_block_devices_mapping(cloud_connection, app)
        bdm = cloud_connection.launch_service(
            ["ec2", "blockdevicemapping", "BlockDeviceMapping"],
            connection=conn)
        for device in devices:
            for path, sda in device.iteritems():
                bdm[path] = sda

        reservation = conn.run_instances(
            image_id=app['ami'],
            key_name=app['environment_infos']['key_name'],
            network_interfaces=interfaces,
            instance_type=app['instance_type'],
            instance_profile_name=app['environment_infos']['instance_profile'],
            user_data=userdata,
            block_device_map=bdm,
        )

        # Getting instance metadata
        instance = reservation.instances[0]
        if instance.id:
            # Checking if instance is ready before tagging
            while not instance.state == u'running':
                log('Instance not running, waiting 10s before tagging.', log_file)
                time.sleep(10)
                instance.update()

            # Tagging
            for ghost_tag_key, ghost_tag_val in {'app': 'name', 'app_id': '_id', 'env': 'env', 'role': 'role'}.iteritems():
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                    id=instance.id, tk=ghost_tag_key, tv=str(app[ghost_tag_val])), log_file)
                conn.create_tags([instance.id], {ghost_tag_key: str(app[ghost_tag_val])})
            if app_color:
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                    id=instance.id, tk='color', tv=app_color), log_file)
                conn.create_tags([instance.id], {"color": app_color})

            tag_ec2_name = False
            if 'instance_tags' in app['environment_infos']:
                for app_tag in app['environment_infos']['instance_tags']:
                    log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                        id=instance.id, tk=app_tag['tag_name'], tv=app_tag['tag_value']), log_file)
                    conn.create_tags([instance.id], {app_tag['tag_name']: app_tag['tag_value']})
                    if app_tag['tag_name'] == 'Name':
                        tag_ec2_name = True
            if not tag_ec2_name:
                ec2_name = "ec2.{0}.{1}.{2}".format(app['env'], app['role'], app['name'])
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                    id=instance.id, tk='Name', tv=ec2_name), log_file)
                conn.create_tags([instance.id], {'Name': ec2_name})

            log(" CONF: Private IP: %s" % instance.private_ip_address, log_file)
            log(" CONF: Public IP: %s" % instance.ip_address, log_file)
            log(" CONF: Public DNS: %s" % instance.public_dns_name, log_file)
            return instance
        else:
            log(_red("ERROR: Cannot get instance metadata. Please check the AWS Console."), log_file)
            raise GCallException("ERROR: Cannot get instance metadata. Please check the AWS Console.")
    else:
        log(_red("ERROR: No AMI set, please use buildimage before"), log_file)
        raise GCallException("ERROR: No AMI set, please use buildimage before")
def is_available(app_context=None):
    return boolify(ghost_config.get('enable_executescript_command', True))
def get_key_path(config, region, account, key_name, log_file):
    """
    Maps an AWS EC2 key pair name to a local private key path

    :param key_name: string: the name of the key as defined in AWS EC2 Key Pairs

    Without any configuration, an empty path is returned:

    >>> from StringIO import StringIO
    >>> import yaml

    >>> get_key_path({}, None, None, None, StringIO())
    ''

    Given a configuration with a single key path for all EC2 instances in all regions:

    >>> config = yaml.load(\"\"\"
    ... key_path: /home/admin/key/claranet.pem
    ... \"\"\")

    The same key path is returned, whatever the region or key name:

    >>> get_key_path(config, None, None, None, StringIO())
    '/home/admin/key/claranet.pem'
    >>> get_key_path(config, 'eu-west-1', 'account', None, StringIO())
    '/home/admin/key/claranet.pem'
    >>> get_key_path(config, 'eu-west-1', 'account', 'claranet', StringIO())
    '/home/admin/key/claranet.pem'

    Given a configuration with a mapping from regions to key paths:

    >>> config = yaml.load(\"\"\"
    ... key_path:
    ...   eu-west-1: /home/admin/key/claranet-eu-west-1.pem
    ...   us-west-2: /home/admin/key/claranet-us-west-2.pem
    ... \"\"\")

    The key path corresponding to the requested region is returned, whatever the key name:

    >>> get_key_path(config, 'eu-west-1', 'account', None, StringIO())
    '/home/admin/key/claranet-eu-west-1.pem'
    >>> get_key_path(config, 'eu-west-1', 'account', 'claranet', StringIO())
    '/home/admin/key/claranet-eu-west-1.pem'
    >>> get_key_path(config, 'us-west-2', 'account', None, StringIO())
    '/home/admin/key/claranet-us-west-2.pem'
    >>> get_key_path(config, 'us-west-2', 'account', 'claranet', StringIO())
    '/home/admin/key/claranet-us-west-2.pem'

    If a mapping is missing, an empty key path is returned:

    >>> get_key_path(config, 'us-west-1', 'account', 'claranet', StringIO())
    ''

    Given a configuration with mappings from regions to accounts to key paths:

    >>> config = yaml.load(\"\"\"
    ... key_path:
    ...   eu-west-1:
    ...     '123456789': /home/admin/key/claranet-account-1-eu-west-1.pem
    ...     '987654321': /home/admin/key/claranet-account-2-eu-west-1.pem
    ...   us-west-2:
    ...     '123456789': /home/admin/key/claranet-account-1-us-west-2.pem
    ...     '987654321': /home/admin/key/claranet-account-2-us-west-2.pem
    ... \"\"\")

    The key path corresponding to the requested region and account is returned:

    >>> get_key_path(config, 'eu-west-1', '123456789', 'claranet-key', StringIO())
    '/home/admin/key/claranet-account-1-eu-west-1.pem'
    >>> get_key_path(config, 'eu-west-1', '987654321', 'claranet-key', StringIO())
    '/home/admin/key/claranet-account-2-eu-west-1.pem'
    >>> get_key_path(config, 'us-west-2', '123456789', 'claranet-key', StringIO())
    '/home/admin/key/claranet-account-1-us-west-2.pem'
    >>> get_key_path(config, 'us-west-2', '987654321', 'claranet-key', StringIO())
    '/home/admin/key/claranet-account-2-us-west-2.pem'

    If a mapping is missing, an empty key path is returned:

    >>> get_key_path(config, 'us-west-2', 'claranet-account-3', 'claranet-key', StringIO())
    ''
    >>> get_key_path(config, 'us-west-1', 'claranet-123456789', 'claranet-key', StringIO())
    ''

    Given a configuration with mappings from regions to accounts to key names to key paths:

    >>> config = yaml.load(\"\"\"
    ... key_path:
    ...   eu-west-1:
    ...     default:
    ...       claranet-key-1: /home/admin/key/claranet-default-key-1-eu-west-1.pem
    ...       claranet-key-2: /home/admin/key/claranet-default-key-2-eu-west-1.pem
    ...     # Account 1
    ...     '123456789':
    ...       claranet-key-1: /home/admin/key/claranet-account-1-key-1-eu-west-1.pem
    ...       claranet-key-2: /home/admin/key/claranet-account-1-key-2-eu-west-1.pem
    ...     # Account 2
    ...     '987654321':
    ...       claranet-key-1: /home/admin/key/claranet-account-2-key-1-eu-west-1.pem
    ...       claranet-key-2: /home/admin/key/claranet-account-2-key-2-eu-west-1.pem
    ...   us-west-2:
    ...     default: /home/admin/key/claranet-default-us-west-2.pem
    ...     # Account 1
    ...     '123456789':
    ...       claranet-key-1: /home/admin/key/claranet-account-1-key-1-us-west-2.pem
    ...       claranet-key-2: /home/admin/key/claranet-account-1-key-2-us-west-2.pem
    ...     '987654321':
    ...       claranet-key-1: /home/admin/key/claranet-account-2-key-1-us-west-2.pem
    ...       claranet-key-2: /home/admin/key/claranet-account-2-key-2-us-west-2.pem
    ... \"\"\")

    The key path corresponding to the requested region, account and key name is returned:

    >>> get_key_path(config, 'eu-west-1', '123456789', 'claranet-key-1', StringIO())
    '/home/admin/key/claranet-account-1-key-1-eu-west-1.pem'
    >>> get_key_path(config, 'eu-west-1', '123456789', 'claranet-key-2', StringIO())
    '/home/admin/key/claranet-account-1-key-2-eu-west-1.pem'
    >>> get_key_path(config, 'us-west-2', '987654321', 'claranet-key-1', StringIO())
    '/home/admin/key/claranet-account-2-key-1-us-west-2.pem'
    >>> get_key_path(config, 'us-west-2', '987654321', 'claranet-key-2', StringIO())
    '/home/admin/key/claranet-account-2-key-2-us-west-2.pem'

    If a mapping is missing, an empty key path is returned:

    >>> get_key_path(config, 'us-west-1', '666666666', 'claranet-key-1', StringIO())
    ''
    >>> get_key_path(config, 'us-west-1', '123456789', 'claranet-key-3', StringIO())
    ''

    Defaults are also available in case no assumed account id is defined on the Ghost application:

    >>> get_key_path(config, 'eu-west-1', '', 'claranet-key-1', StringIO())
    '/home/admin/key/claranet-default-key-1-eu-west-1.pem'
    >>> get_key_path(config, 'eu-west-1', '', 'claranet-key-2', StringIO())
    '/home/admin/key/claranet-default-key-2-eu-west-1.pem'
    >>> get_key_path(config, 'us-west-2', '', 'claranet-key-3', StringIO())
    '/home/admin/key/claranet-default-us-west-2.pem'
    """
    key_path = config.get('key_path', '')
    if isinstance(key_path, dict):
        key_path = key_path.get(region, '')
    if isinstance(key_path, dict):
        key_path = key_path.get(account if account else 'default', '')
    if isinstance(key_path, dict):
        key_path = key_path.get(key_name, '')
    # Uncomment the following lines for debugging purposes locally (do not commit this change)
    # log("Selected '{}' key path for '{}' keypair name in '{}' region of '{}' account"
    #     .format(key_path, key_name, region, account), log_file)
    return key_path