def fetch_all(self, credentials, regions=None, partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service.

    :param credentials:     AWS credentials dict (opinel format) -- TODO confirm exact shape
    :param regions:         Names of regions to fetch data from (defaults to all)
    :param partition_name:  AWS partition to connect to
    :param targets:         Type of resources to be fetched; defaults to all.
    """
    global status, formatted_string
    # Fix: avoid the mutable-default-argument pitfall (regions=[] is shared across calls)
    regions = [] if regions is None else regions
    # Initialize targets
    if not targets:
        targets = type(self).targets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    formatted_string = None
    # Connect to the service
    if self.service in ['s3']:
        # S3 namespace is global but APIs aren't....
        api_clients = {}
        for region in build_region_list(self.service, regions, partition_name):
            api_clients[region] = connect_service('s3', credentials, region)
        api_client = api_clients[list(api_clients.keys())[0]]
    elif self.service == 'route53domains':
        # route53domains is only available in us-east-1
        api_client = connect_service(self.service, credentials, 'us-east-1')  # TODO: use partition's default region
    else:
        api_client = connect_service(self.service, credentials)
    # Threading to fetch & parse resources (queue consumer)
    params = {'api_client': api_client}
    if self.service in ['s3']:
        params['api_clients'] = api_clients
    q = self._init_threading(self.__fetch_target, params, 20)
    # Threading to list resources (queue feeder)
    params = {'api_client': api_client, 'q': q}
    if self.service in ['s3']:
        params['api_clients'] = api_clients
    qt = self._init_threading(self.__fetch_service, params, 10)
    # Init display
    self.fetchstatuslogger = FetchStatusLogger(targets)
    # Go: feed each target type to the listing threads
    for target in targets:
        qt.put(target)
    # Join: wait until both queues have been fully drained
    qt.join()
    q.join()
    # Show completion and force newline (IAM manages its own display)
    if self.service != 'iam':
        self.fetchstatuslogger.show(True)
def fetch_all(self, credentials, regions=None, partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service.

    :param credentials:     AWS credentials dict (opinel format) -- TODO confirm exact shape
    :param regions:         Names of regions to fetch data from (defaults to all)
    :param partition_name:  AWS partition to connect to
    :param targets:         Type of resources to be fetched; defaults to all.
    """
    # Fix: avoid the mutable-default-argument pitfall (regions=[] is shared across calls)
    regions = [] if regions is None else regions
    # Initialize targets
    if not targets:
        try:
            targets = type(self).targets  # TODO: remove this case eventually
        except AttributeError:
            # Fix: was a bare "except:"; only a missing class attribute is expected here
            targets = self.targets
    # Tweak params: rebuild each target tuple with credential-aware API parameters
    realtargets = ()
    for target in targets:
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets = realtargets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    self.fetchstatuslogger = FetchStatusLogger(targets, True)
    # VPC data is fetched through the EC2 API
    api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
    # Init regions
    regions = build_region_list(api_service, regions, partition_name)  # TODO: move this code within this class
    self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
    # Threading to fetch & parse resources (queue consumer)
    q = self._init_threading(self._fetch_target, {}, 20)
    # Threading to list resources (queue feeder)
    qr = self._init_threading(self._fetch_region,
                              {'api_service': api_service,
                               'credentials': credentials,
                               'q': q,
                               'targets': targets},
                              10)
    # Go: feed one work item per region
    for region in regions:
        qr.put(region)
    # Join: wait until both queues have been fully drained
    qr.join()
    q.join()
    # Show completion and force newline
    self.fetchstatuslogger.show(True)
def fetch_all(self, credentials, regions=None, partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service.

    :param credentials:     AWS credentials dict (opinel format) -- TODO confirm exact shape
    :param regions:         Names of regions to fetch data from (defaults to all)
    :param partition_name:  AWS partition to connect to
    :param targets:         Type of resources to be fetched; defaults to all.
    """
    # Fix: avoid the mutable-default-argument pitfall (regions=[] is shared across calls);
    # this makes the block consistent with the other fetch_all variant in this file.
    regions = [] if regions is None else regions
    # Initialize targets
    if not targets:
        targets = self.targets
    # Tweak params: rebuild each target tuple with credential-aware API parameters.
    # The two identical loops over 'first_region' / 'other_regions' are deduplicated.
    for target_group in ('first_region', 'other_regions'):
        realtargets = ()
        for target in targets[target_group]:
            params = self.tweak_params(target[3], credentials)
            realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
        targets[target_group] = realtargets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True)
    # VPC data is fetched through the EC2 API
    api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
    # Init regions
    regions = build_region_list(api_service, regions, partition_name)  # TODO: move this code within this class
    self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
    # Threading to fetch & parse resources (queue consumer)
    q = self._init_threading(self._fetch_target, {}, self.thread_config['parse'])
    # Threading to list resources (queue feeder)
    qr = self._init_threading(self._fetch_region,
                              {'api_service': api_service,
                               'credentials': credentials,
                               'q': q,
                               'targets': ()},
                              self.thread_config['list'])
    # Go: the first region fetches the full target set, the others a reduced one
    for i, region in enumerate(regions):
        qr.put((region, targets['first_region'] if i == 0 else targets['other_regions']))
    # Join: wait until both queues have been fully drained
    qr.join()
    q.join()
    # Show completion and force newline
    self.fetchstatuslogger.show(True)
def fetch_all(self, credentials, regions=None, partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service and provider.

    :param credentials:     Provider credentials -- format depends on aws/gcp/azure; TODO confirm
    :param regions:         Names of regions to fetch data from (AWS only; defaults to all)
    :param partition_name:  AWS partition to connect to
    :param targets:         Type of resources to be fetched; defaults to all.
    :return:
    """
    global status, formatted_string
    # Fix: avoid the mutable-default-argument pitfall (regions=[] is shared across calls)
    regions = [] if regions is None else regions
    # Initialize targets
    if not targets:
        targets = type(self).targets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    formatted_string = None
    # FIXME the below should be in moved to each provider's code
    # Connect to the service
    if self._is_provider('aws'):
        if self.service in ['s3']:
            # S3 namespace is global but APIs aren't....
            api_clients = {}
            for region in build_region_list(self.service, regions, partition_name):
                api_clients[region] = connect_service('s3', credentials, region, silent=True)
            api_client = api_clients[list(api_clients.keys())[0]]
        elif self.service == 'route53domains':
            api_client = connect_service(self.service, credentials, 'us-east-1', silent=True)  # TODO: use partition's default region
        else:
            api_client = connect_service(self.service, credentials, silent=True)
    elif self._is_provider('gcp'):
        api_client = gcp_connect_service(service=self.service, credentials=credentials)
    elif self._is_provider('azure'):
        api_client = azure_connect_service(service=self.service, credentials=credentials)
    else:
        # Fix: previously fell through and raised NameError on 'api_client' below
        raise Exception('Unsupported cloud provider')
    # Threading to fetch & parse resources (queue consumer)
    params = {'api_client': api_client}
    if self._is_provider('aws') and self.service in ['s3']:
        params['api_clients'] = api_clients
    # Threading to parse resources (queue feeder)
    target_queue = self._init_threading(self.__fetch_target, params, self.thread_config['parse'])
    # Threading to list resources (queue feeder)
    params = {'api_client': api_client, 'q': target_queue}
    if self._is_provider('aws') and self.service in ['s3']:
        params['api_clients'] = api_clients
    service_queue = self._init_threading(self.__fetch_service, params, self.thread_config['list'])
    # Init display
    self.fetchstatuslogger = FetchStatusLogger(targets)
    # Go: feed each target type to the listing threads
    for target in targets:
        service_queue.put(target)
    # Join: wait until both queues have been fully drained
    service_queue.join()
    target_queue.join()
    if self._is_provider('aws'):
        # Show completion and force newline (AWS IAM manages its own display)
        if self.service != 'iam':
            self.fetchstatuslogger.show(True)
    else:
        self.fetchstatuslogger.show(True)
def main():
    """
    Build an ip-ranges-<profile>.json file for each profile, sourcing CIDRs from one of
    three modes: interactive prompts, CSV file(s), or live AWS data (EC2 public IPs + EIPs).
    Returns 42 on any fatal error (opinel convention -- TODO confirm).
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('force')
    parser.add_argument('dry-run')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.parser.add_argument('--interactive',
                               dest='interactive',
                               default=False,
                               action='store_true',
                               help='Interactive prompt to manually enter CIDRs.')
    parser.parser.add_argument('--csv-ip-ranges',
                               dest='csv_ip_ranges',
                               default=[],
                               nargs='+',
                               help='CSV file(s) containing CIDRs information.')
    parser.parser.add_argument('--skip-first-line',
                               dest='skip_first_line',
                               default=False,
                               action='store_true',
                               help='Skip first line when parsing CSV file.')
    parser.parser.add_argument('--attributes',
                               dest='attributes',
                               default=[],
                               nargs='+',
                               help='Name of the attributes to enter for each CIDR.')
    parser.parser.add_argument('--mappings',
                               dest='mappings',
                               default=[],
                               nargs='+',
                               help='Column number matching attributes when headers differ.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Initialize the list of regions to work with
    regions = build_region_list('ec2', args.regions, args.partition_name)

    # For each profile/environment...
    for profile_name in args.profile:
        # Interactive mode
        if args.interactive:
            # Initalize prefixes
            attributes = args.attributes
            filename = 'ip-ranges-%s.json' % profile_name
            if os.path.isfile(filename):
                printInfo('Loading existing IP ranges from %s' % filename)
                prefixes = read_ip_ranges(filename)
                # Initialize attributes from existing values
                if attributes == []:
                    for prefix in prefixes:
                        for key in prefix:
                            if key not in attributes:
                                attributes.append(key)
            else:
                prefixes = []
            # IP prefix does not need to be specified as an attribute
            attributes = [a for a in attributes if a != 'ip_prefix']
            # Prompt for new entries
            while prompt_4_yes_no('Add a new IP prefix to the ip ranges'):
                ip_prefix = prompt_4_value('Enter the new IP prefix:')
                obj = {}
                for a in attributes:
                    obj[a] = prompt_4_value('Enter the \'%s\' value:' % a)
                prefixes.append(new_prefix(ip_prefix, obj))
        # Support loading from CSV file
        elif len(args.csv_ip_ranges) > 0:
            # Initalize prefixes
            prefixes = []
            # Load CSV file contents
            for filename in args.csv_ip_ranges:
                with open(filename, 'rt') as f:
                    csv_contents = f.readlines()
                # Initialize mappings (attribute name -> column index)
                attributes = args.attributes
                mappings = {}
                if attributes == []:
                    # Follow structure of first line
                    headers = csv_contents.pop(0).strip().split(',')
                    for index, attribute in enumerate(headers):
                        mappings[attribute] = index
                elif attributes and args.mappings == []:
                    # Follow structure of first line but only map a subset of fields
                    headers = csv_contents.pop(0).strip().split(',')
                    attributes.append('ip_prefix')
                    for attribute in set(attributes):
                        mappings[attribute] = headers.index(attribute)
                else:
                    # Indices of columns are provided as an argument
                    for index, attribute in enumerate(attributes):
                        mappings[attribute] = int(args.mappings[index])
                if args.skip_first_line:
                    csv_contents.pop(0)
                # For each line...
                for line in csv_contents:
                    ip_prefix = {}
                    values = line.strip().split(',')
                    # Skip lines with fewer columns than mapped attributes
                    if len(values) < len(mappings):
                        continue
                    for attribute in mappings:
                        ip_prefix[attribute] = values[mappings[attribute]]
                    # Combine separate ip + mask columns into a single CIDR value
                    if 'ip_prefix' in mappings and 'mask' in mappings:
                        ip = ip_prefix.pop('ip_prefix')
                        mask = ip_prefix.pop('mask')
                        ip_prefix['ip_prefix'] = '%s/%s' % (ip, mask.replace('/',''))
                    prefixes.append(ip_prefix)
        # AWS mode
        else:
            # Initialize IP addresses
            printInfo('Fetching public IP information for the \'%s\' environment...' % profile_name)
            ip_addresses = {}
            # Search for AWS credentials
            credentials = read_creds(profile_name)
            if not credentials['AccessKeyId']:
                return 42
            # For each region...
            for region in regions:
                # Connect to EC2
                ec2_client = connect_service('ec2', credentials, region)
                if not ec2_client:
                    continue
                # Get public IP addresses associated with EC2 instances
                printInfo('...in %s: EC2 instances' % region)
                reservations = handle_truncated_response(ec2_client.describe_instances, {}, ['Reservations'])
                for reservation in reservations['Reservations']:
                    for i in reservation['Instances']:
                        if 'PublicIpAddress' in i:
                            ip_addresses[i['PublicIpAddress']] = new_ip_info(region, i['InstanceId'], False)
                            get_name(i, ip_addresses[i['PublicIpAddress']], 'InstanceId')
                        if 'NetworkInterfaces' in i:
                            for eni in i['NetworkInterfaces']:
                                if 'Association' in eni:
                                    # At that point, we don't know whether it's an EIP or not...
                                    ip_addresses[eni['Association']['PublicIp']] = new_ip_info(region, i['InstanceId'], False)
                                    get_name(i, ip_addresses[eni['Association']['PublicIp']], 'InstanceId')
                # Get all EIPs (to handle unassigned cases)
                printInfo('...in %s: Elastic IP addresses' % region)
                eips = handle_truncated_response(ec2_client.describe_addresses, {}, ['Addresses'])
                for eip in eips['Addresses']:
                    instance_id = eip['InstanceId'] if 'InstanceId' in eip else None
                    # EC2-Classic non associated EIPs have an empty string for instance ID (instead of lacking the attribute in VPC)
                    if instance_id == '':
                        instance_id = None
                    ip_addresses[eip['PublicIp']] = new_ip_info(region, instance_id, True)
                    ip_addresses[eip['PublicIp']]['name'] = instance_id
            # Format
            prefixes = []
            for ip in ip_addresses:
                prefixes.append(new_prefix(ip, ip_addresses[ip]))
        # Generate an ip-ranges-<profile>.json file
        save_ip_ranges(profile_name, prefixes, args.force_write, args.debug)
def main():
    """
    Download CloudTrail logs from S3 for a date range and profile, then decompress them.
    Returns 42 on any fatal error (opinel convention -- TODO confirm).
    Relies on module-level globals: cloudtrail_log_path, download_folder.
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.add_argument('bucket-name')
    parser.parser.add_argument('--aws-account-id',
                               dest='aws_account_id',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    parser.parser.add_argument('--from',
                               dest='from_date',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    parser.parser.add_argument('--to',
                               dest='to_date',
                               default=[ None ],
                               nargs='+',
                               help='Bleh.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Arguments
    profile_name = args.profile[0]
    try:
        from_date = datetime.datetime.strptime(args.from_date[0], "%Y/%m/%d").date()
        to_date = datetime.datetime.strptime(args.to_date[0], "%Y/%m/%d").date()
        delta = to_date - from_date
    except Exception as e:
        printException(e)
        printError('Error: dates must be formatted of the following format YYYY/MM/DD')
        return 42
    if delta.days < 0:
        printError('Error: your \'to\' date is earlier than your \'from\' date')
        return 42

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Fetch AWS account ID
    if not args.aws_account_id[0]:
        printInfo('Fetching the AWS account ID...')
        aws_account_id = get_aws_account_id(credentials)
    else:
        aws_account_id = args.aws_account_id[0]
    # Substitute the account ID into the module-level log-path template
    global cloudtrail_log_path
    cloudtrail_log_path = cloudtrail_log_path.replace('AWS_ACCOUNT_ID', aws_account_id)

    # Create download dir
    if not os.path.exists(download_folder):
        os.makedirs(download_folder)

    # Iterate through regions
    s3_clients = {}
    for region in build_region_list('cloudtrail', args.regions, args.partition_name):
        # Connect to CloudTrail
        cloudtrail_client = connect_service('cloudtrail', credentials, region)
        if not cloudtrail_client:
            continue
        # Get information about the S3 bucket that receives CloudTrail logs
        trails = cloudtrail_client.describe_trails()
        # NOTE(review): only the last trail's bucket/prefix is kept -- presumably a
        # single-trail-per-region assumption; confirm against original source layout.
        for trail in trails['trailList']:
            bucket_name = trail['S3BucketName']
            prefix = trail['S3KeyPrefix'] if 'S3KeyPrefix' in trail else ''
        # Connect to S3 (both in this region and in the bucket's home region)
        manage_dictionary(s3_clients, region, connect_service('s3', credentials, region))
        target_bucket_region = get_s3_bucket_location(s3_clients[region], bucket_name)
        manage_dictionary(s3_clients, target_bucket_region, connect_service('s3', credentials, target_bucket_region))
        s3_client = s3_clients[target_bucket_region]
        # Generate base path for files
        log_path = os.path.join(prefix, cloudtrail_log_path.replace('REGION', region))
        # Download files
        printInfo('Downloading log files in %s... ' % region, False)
        keys = []
        # One S3 listing per day in the requested range (inclusive)
        for i in range(delta.days + 1):
            day = from_date + timedelta(days = i)
            folder_path = os.path.join(log_path, day.strftime("%Y/%m/%d"))
            try:
                objects = handle_truncated_response(s3_client.list_objects,
                                                    {'Bucket': bucket_name, 'Prefix': folder_path},
                                                    ['Contents'])
                for o in objects['Contents']:
                    keys.append([o['Key'], 0])
            except Exception as e:
                # Best-effort: a day with no logs raises and is skipped
                printException(e)
                pass
        thread_work(keys, download_object, params = {'Bucket': bucket_name, 'S3Client': s3_client}, num_threads = 100)
        printInfo('Done')

    # Iterate through files and gunzip 'em
    printInfo('Decompressing files...')
    gzlogs = []
    for root, dirnames, filenames in os.walk(download_folder):
        for filename in filenames:
            gzlogs.append(filename)
    thread_work(gzlogs, gunzip_file, num_threads = 30)
def main():
    """
    List all EC2 instances across regions (optionally filtered) and write their private
    and public IPs to targets-<profile>-prv.txt and targets-<profile>-pub.txt.
    Returns 42 on any fatal error (opinel convention -- TODO confirm).
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.parser.add_argument('--filters', dest='filters', default=None, help='')
    args = parser.parse_args()
    # Configure the debug level
    configPrintException(args.debug)
    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42
    # Get profile name
    profile_name = args.profile[0]
    # Build list of region
    regions = build_region_list('ec2', args.regions, args.partition_name)
    printInfo(str(regions))
    # Build filters
    filters = json.loads(args.filters) if args.filters else None
    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42
    # List all EC2 instances
    instances = []
    for region in regions:
        printInfo('Fetching instances in %s...' % region)
        ec2_client = connect_service('ec2', credentials, region_name=region)
        # Fix: the original rebound "args" (the argparse namespace) to the request
        # parameters here, shadowing it for the rest of the function
        describe_params = {'Filters': filters} if filters else {}
        reservations = handle_truncated_response(
            ec2_client.describe_instances, describe_params, ['Reservations'])['Reservations']
        for r in reservations:
            instances += r['Instances']
    printInfo(' Found %d instances' % len(instances))
    # Build list of private and public IPs
    prvips = {}
    pubips = {}
    for i in instances:
        security_groups = i['SecurityGroups']
        for eni in i['NetworkInterfaces']:
            for prvip in eni['PrivateIpAddresses']:
                prvips[prvip['PrivateIpAddress']] = {'security_groups': security_groups}
                # A private IP with an Association carries a public IP too
                if 'Association' in prvip:
                    pubips[prvip['Association']['PublicIp']] = {'security_groups': security_groups}
    # Create target files
    with open('targets-%s-prv.txt' % profile_name, 'wt') as f:
        for prvip in prvips:
            f.write('%s\n' % prvip)
    with open('targets-%s-pub.txt' % profile_name, 'wt') as f:
        for pubip in pubips:
            f.write('%s\n' % pubip)
def fetch_all(self, credentials, regions = None, partition_name = 'aws', targets = None):
    """
    Fetch all the configuration supported by Scout2 for a given service

    :param credentials:     F
    :param service:         Name of the service
    :param regions:         Name of regions to fetch data from
    :param partition_name:  AWS partition to connect to
    :param targets:         Type of resources to be fetched; defaults to all.
    """
    # Initialize targets
    # Tweak params
    regions = [] if regions is None else regions
    realtargets = ()
    if not targets:
        targets = self.targets
    # Rebuild each target tuple with credential-aware API parameters (first region set)
    for i, target in enumerate(targets['first_region']):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets['first_region'] = realtargets
    realtargets = ()
    # Same for the reduced target set used by all other regions
    for i, target in enumerate(targets['other_regions']):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets['other_regions'] = realtargets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True)
    # VPC data is fetched through the EC2 API
    api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
    # Init regions
    regions = build_region_list(api_service, regions, partition_name)  # TODO: move this code within this class
    self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
    # Threading to fetch & parse resources (queue consumer)
    q = self._init_threading(self._fetch_target, {}, self.thread_config['parse'])
    # Threading to list resources (queue feeder)
    qr = self._init_threading(self._fetch_region,
                              {'api_service': api_service,
                               'credentials': credentials,
                               'q': q,
                               'targets': ()},
                              self.thread_config['list'])
    # Go: the first region fetches the full target set, the others a reduced one
    for i, region in enumerate(regions):
        qr.put((region, targets['first_region'] if i == 0 else targets['other_regions']))
    # Blocks until all items in the queue have been gotten and processed.
    qr.join()
    q.join()
    # Show completion and force newline
    self.fetchstatuslogger.show(True)
    # Threads should stop running as queues are empty
    self.run_qr_threads = False
    self.run_q_threads = False
    # Put x items in the queues to ensure threads run one last time (and exit)
    for i in range(self.thread_config['parse']):
        q.put(None)
    for j in range(self.thread_config['list']):
        qr.put(None)
def main():
    """
    Create or update a CloudFormation stack from a template in one or more regions.
    Returns 42 on any fatal error (opinel convention -- TODO confirm).
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions', help='Regions where the stack(s) will be created.')
    parser.add_argument('partition-name')
    parser.parser.add_argument('--template',
                               dest='template',
                               default=None,
                               required=True,
                               help='Path to the CloudFormation template.')
    parser.parser.add_argument('--parameters',
                               dest='parameters',
                               default=None,
                               nargs='+',
                               help='Optional parameters for the stack.')
    args = parser.parse_args()
    # Configure the debug level
    configPrintException(args.debug)
    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42
    # Get profile name
    profile_name = args.profile[0]
    # Search for AWS credentials
    credentials = read_creds(profile_name)
    # Fix: the original had a stray "if __name__ == '__main__':" wrapped around this
    # check inside main(), which is invalid ("return" outside a function at module level)
    if not credentials['AccessKeyId']:
        return 42
    # Validate the regions
    regions = build_region_list('cloudformation', args.regions, args.partition_name)
    if len(args.regions) == 0 and not prompt_4_yes_no(
            'You didn\'t specify a region for this stack, do you want to create it in all regions ?'):
        return 42
    for region in regions:
        try:
            # Create stack
            api_client = connect_service('cloudformation', credentials, region)
            params = {}
            params['api_client'] = api_client
            # Relative template paths are resolved against this script's directory
            if not args.template.startswith('/'):
                params['template_path'] = os.path.join((os.path.dirname(os.path.realpath(__file__))), args.template)
            else:
                params['template_path'] = args.template
            if args.parameters:
                params['template_parameters'] = args.parameters
            params['stack_name'] = make_awsrecipes_stack_name(params['template_path'])
            create_or_update_stack(**params)
        except Exception as e:
            # Best-effort per region: report and continue with the next one
            printException(e)
def main():
    """
    Configure CloudWatch Events forwarding from every stack-set-ready account in an AWS
    Organization to a central monitoring account: grants event-bus permissions per region,
    then deploys a CloudFormation stack set with one stack instance per account/region.
    Returns 42 on any fatal error (opinel convention -- TODO confirm).
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions', help = 'Regions where stack instances will be created.')
    parser.add_argument('partition-name')
    parser.parser.add_argument('--stack-set-region',
                               dest='stack_set_region',
                               default=None,
                               required=True,
                               help='Region where the stack set will be created.')
    args = parser.parse_args()
    # Configure the debug level
    configPrintException(args.debug)
    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42
    # Get profile name
    profile_name = args.profile[0]
    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42
    # Validate the stack set region
    regions = build_region_list('events', args.regions, args.partition_name)
    if args.stack_set_region not in regions:
        printError('Error, the stack set region \'%s\' is not valid. Acceptable values are:' % args.stack_set_region)
        printError(', '.join(regions))
        return 42
    # Determine the master account id to exclude it from the list of accounts to be configured for event forwarding
    monitoring_account_id = get_aws_account_id(credentials)
    # Connect to the AWS Organizations API
    api_client = connect_service('organizations', credentials)
    # List all accounts in the organization
    org_account_ids = []
    org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts']
    org_account_ids = [account['Id'] for account in org_accounts
                       if account['Status'] == 'ACTIVE' and account['Id'] != monitoring_account_id]
    printInfo('Found %d accounts in the organization.' % len(org_account_ids))
    printDebug(str(org_account_ids))
    # Verify that the account has been configured for stack sets by attempting to assume the stack set execution role
    api_client = connect_service('sts', credentials, silent = True)
    configured_org_account_ids = []
    for account_id in org_account_ids:
        try:
            role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
            api_client.assume_role(RoleArn = role_arn, RoleSessionName = 'foobar')
            configured_org_account_ids.append(account_id)
        except Exception as e:
            # Deliberate best-effort: accounts without the role are simply excluded
            pass
    if len(configured_org_account_ids) != len(org_account_ids):
        printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_org_account_ids))
        printInfo(str(configured_org_account_ids))
    # For each region with cloudwatch events, put a permission for each account
    printInfo('Adding permissions on the default event buses...')
    for region in regions:
        api_client = connect_service('events', credentials, region)
        for account in org_accounts:
            account_id = account['Id']
            if account_id not in configured_org_account_ids:
                continue
            # NOTE(review): account_name is assigned but never used below -- dead local?
            account_name = account['Name']
            api_client.put_permission(Action = 'events:PutEvents',
                                      Principal = account_id,
                                      StatementId = 'AWSRecipesAllow%s' % account_id)
    # Create the stack set
    try:
        stack_set_name = 'CloudwatchEventsForwarding'
        api_client = connect_service('cloudformation', credentials, args.stack_set_region)
        # TBD: need for the region where the stack set is created and the regions where the stack instances are created...
        template_path = os.path.join((os.path.dirname(os.path.realpath(__file__))),
                                     '../CloudFormationTemplates/CloudwatchEventsForwarding.region.yml')
        with open(template_path, 'rt') as f:
            template_body = f.read()
        template_parameters = [{'ParameterKey': 'EventsMonitoringAccountID',
                                'ParameterValue': get_aws_account_id(credentials)}]
        printInfo('Creating the stack set...')
        response = api_client.create_stack_set(StackSetName = stack_set_name,
                                               TemplateBody = template_body,
                                               Parameters = template_parameters)
    except Exception as e:
        # An already-existing stack set is fine; anything else is fatal
        if e.response['Error']['Code'] != 'NameAlreadyExistsException':
            printException(e)
            printError('Failed to create the stack set.')
            return 42
    # Create the stack instances: one per region in every account
    operation_preferences = {'FailureTolerancePercentage': 100,
                             'MaxConcurrentPercentage': 100}
    response = api_client.create_stack_instances(StackSetName = stack_set_name,
                                                 Accounts = configured_org_account_ids,
                                                 Regions = regions,
                                                 OperationPreferences = operation_preferences)
    printInfo('Successfully started operation Id %s' % response['OperationId'])
def main():
    """
    Deploy/refresh a set of prefixed CloudFormation stacks and stack sets in a master
    region: creates missing resources and updates those older than their template files.
    Returns 42 on any fatal error (opinel convention -- TODO confirm).
    """
    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions', help='Regions where stack instances will be created.')
    parser.add_argument('partition-name')
    parser.parser.add_argument('--master-region',
                               dest='master_region',
                               default=None,
                               required=True,
                               help='Region where the global stacks and stack sets will be created.')
    parser.parser.add_argument('--stack-prefix',
                               dest='stack_prefix',
                               default=None,
                               required=True,
                               help='Prefix of the CF Templates to be used when creating/updating stacks.')
    args = parser.parse_args()
    # Configure the debug level
    configPrintException(args.debug)
    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42
    # Get profile name
    profile_name = args.profile[0]
    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42
    # Get the master AWS account ID
    master_account_id = get_aws_account_id(credentials)
    # Get list of accounts ready for Stack sets
    api_client = connect_service('organizations', credentials, silent=True)
    try:
        org_account_ids = get_organization_account_ids(api_client, quiet=False)
    except Exception:
        # Fix: was a bare "except:"; not part of an AWS Organization -> master account only
        org_account_ids = [master_account_id]
    configured_org_account_ids = get_stackset_ready_accounts(credentials, org_account_ids, quiet=False)
    # Validate the stack set region
    regions = build_region_list('cloudformation', args.regions, args.partition_name)
    if args.master_region not in regions:
        printError('Error, the stack set region \'%s\' is not valid. Acceptable values are:' % args.master_region)
        printError(', '.join(regions))
        return 42
    # Connect
    printInfo('')
    api_client = connect_service('cloudformation', credentials, args.master_region, silent=True)
    # Establish the list of existing stacks and stack sets
    deployed_resources = {'stacks': {}, 'stack_sets': {}}
    printInfo('Fetching existing stacks and stack sets in %s in %s...' % (args.master_region, master_account_id))
    for stack in handle_truncated_response(api_client.list_stacks, {}, ['StackSummaries'])['StackSummaries']:
        if stack['StackStatus'] not in ['CREATE_FAILED', 'DELETE_COMPLETE']:
            deployed_resources['stacks'][stack['StackName']] = stack
    for stack_set in handle_truncated_response(api_client.list_stack_sets, {'Status': 'ACTIVE'}, ['Summaries'])['Summaries']:
        stack_set = api_client.describe_stack_set(StackSetName=stack_set['StackSetName'])['StackSet']
        deployed_resources['stack_sets'][stack_set['StackSetName']] = stack_set
    printInfo(' - Found %d stacks.' % len(deployed_resources['stacks']))
    for stack_name in deployed_resources['stacks']:
        printInfo(' - %s' % stack_name)
    printInfo(' - Found %d stacks sets.' % len(deployed_resources['stack_sets']))
    for stack_set_name in deployed_resources['stack_sets']:
        printInfo(' - %s' % stack_set_name)
    # Create the list of stacks to deploy
    templates = get_cloudformation_templates(args.stack_prefix)
    # Master stacks: create missing ones, update those older than their template
    for stack_name in sorted(templates['master_stacks'].keys()):
        if stack_name not in deployed_resources['stacks']:
            create_stack(api_client,
                         stack_name,
                         templates['master_stacks'][stack_name]['file_path'],
                         wait_for_completion=templates['master_stacks'][stack_name]['wait_for_completion'])
        elif resource_older_than_template('stack',
                                          deployed_resources['stacks'][stack_name],
                                          templates['master_stacks'][stack_name]['file_path']):
            update_stack(api_client,
                         stack_name,
                         templates['master_stacks'][stack_name]['file_path'],
                         wait_for_completion=templates['master_stacks'][stack_name]['wait_for_completion'])
    if len(configured_org_account_ids) == 0:
        printInfo('\nNo account IDs that support stack sets were found, skipping stack set configuration.')
        # Fix: the original contained a duplicated (unreachable) second "return" here
        return
    # Stack sets: same create/update logic as the master stacks above
    for stack_set_name in sorted(templates['master_stack_sets'].keys()):
        if stack_set_name not in deployed_resources['stack_sets']:
            create_stack_set(api_client,
                             stack_set_name,
                             templates['master_stack_sets'][stack_set_name]['file_path'],
                             wait_for_completion=True)
        elif resource_older_than_template('stack_set',
                                          deployed_resources['stack_sets'][stack_set_name],
                                          templates['master_stack_sets'][stack_set_name]['file_path']):
            update_stack_set(api_client,
                             stack_set_name,
                             templates['master_stack_sets'][stack_set_name]['file_path'],
                             wait_for_completion=True)