def __init__(self, args):
    self.parser = import_from_string('pmcf.parsers', args['parser'])()
    self.policy = import_from_string('pmcf.policy', args['policy'])(
        json_file=args['policyfile']
    )
    self.output = import_from_string('pmcf.outputs', args['output'])()
    self.args = args

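# Illustrative only: a minimal args dict of the shape __init__ above expects,
# based on the keys it reads ('parser', 'policy', 'policyfile', 'output').
# Apart from BaseParser (seen in the tests below), the class names are
# placeholders, not values mandated by pmcf.
example_args = {
    'parser': 'BaseParser',               # resolved from pmcf.parsers
    'policy': 'BasePolicy',               # hypothetical name, resolved from pmcf.policy
    'policyfile': '/etc/pmcf/policy.json',
    'output': 'BaseOutput',               # hypothetical name, resolved from pmcf.outputs
}
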
def do_audit(self, data, metadata):
    """
    Records audit logs for the current transaction

    :param data: Stack definition
    :type data: str.
    :param metadata: Additional information for stack launch (tags, etc).
    :type metadata: dict.
    """

    try:
        audit = import_from_string('pmcf.audit', metadata['audit'])()
        creds = {}
        if metadata.get('use_iam_profile'):
            creds['use_iam_profile'] = metadata['use_iam_profile']
        else:
            creds['access'] = metadata['access']
            creds['secret'] = metadata['secret']
        creds['audit_output'] = metadata.get('audit_output', None)
        dest = 'audit/%s/%s/%s-%s' % (
            metadata['name'],
            metadata['environment'],
            metadata['name'],
            time.strftime('%Y%m%dT%H%M%S'))
        audit.record_stack(data, dest, creds)
    except AuditException as exc:
        LOG.error(exc)

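# Illustrative sketch (not part of pmcf): shows the audit key layout that
# do_audit above builds, using made-up metadata values.
import time

example_metadata = {'name': 'web', 'environment': 'prod'}
example_dest = 'audit/%s/%s/%s-%s' % (
    example_metadata['name'],
    example_metadata['environment'],
    example_metadata['name'],
    time.strftime('%Y%m%dT%H%M%S'))
# e.g. 'audit/web/prod/web-20240101T120000'
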
def _add_instances(self, data, instances, config, sgs, lbs):
    """
    Iterates over instance definitions and creates the AWS launch
    configurations, autoscaling groups and associated resources

    :param data: Template object
    :type data: :class:`troposphere.Template`
    :param instances: list of instance definitions
    :type instances: list.
    :param config: Config key/value pairs
    :type config: dict.
    :param sgs: list of security group definitions
    :type sgs: list.
    :param lbs: list of load balancer definitions
    :type lbs: list.
    :returns: dict.
    :raises: :class:`pmcf.exceptions.ProvisionerException`
    """

    for inst in instances:
        udata = None
        cfni = None
        args = inst['provisioner']['args']
        args.update({
            'environment': config['environment'],
            'name': inst['name'],
            'stackname': config['name'],
            'resource': "LC%s" % inst['name'],
        })
        args['appname'] = args.get('appname', config['name'])
        if config.get("version", None):
            args["version"] = config["version"]

        if inst.get('nat'):
            # Allocate one EIP per instance, plus per-instance and
            # round-robin DNS records for them
            args['eip'] = []
            records = []
            for idx in range(0, inst['count']):
                eip = ec2.EIP(
                    "EIP%s%s" % (inst['name'], idx),
                    Domain='vpc',
                )
                args['eip'].append(eip)
                records.append(Ref(eip))
                data.add_resource(eip)
                data.add_resource(route53.RecordSetType(
                    "EIPDNS%s%02d" % (inst['name'], idx + 1),
                    HostedZoneName="%s.%s" % (
                        config['environment'],
                        inst['dnszone']
                    ),
                    Comment="EIP for %s in %s" % (
                        inst['name'], config['environment']),
                    Name="%s%02d.%s.%s" % (
                        inst['name'],
                        idx + 1,
                        config['environment'],
                        inst['dnszone']
                    ),
                    Type="A",
                    TTL="300",
                    ResourceRecords=[Ref(eip)],
                ))
            data.add_resource(route53.RecordSetType(
                "EIPDNS%s" % inst['name'],
                HostedZoneName="%s.%s" % (
                    config['environment'],
                    inst['dnszone']
                ),
                Comment="EIP for %s in %s" % (
                    inst['name'], config['environment']),
                Name="%s.%s.%s" % (
                    inst['name'],
                    config['environment'],
                    inst['dnszone']
                ),
                Type="A",
                TTL="300",
                ResourceRecords=records,
            ))

        provider = inst['provisioner']['provider']
        provisioner = import_from_string('pmcf.provisioners', provider)()

        if provisioner.wants_wait():
            waithandle = cfn.WaitConditionHandle(
                "Handle%s" % inst['name'],
            )
            args['WaitHandle'] = waithandle
            data.add_resource(waithandle)
            if inst['count'] > 0:
                cnt = 1
            else:
                cnt = 0
            data.add_resource(cfn.WaitCondition(
                "Wait%s" % inst['name'],
                DependsOn="ASG%s" % inst['name'],
                Handle=Ref(waithandle),
                Count=cnt,
                Timeout=3600
            ))

        if provisioner.wants_profile():
            assume_policy_doc = {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            }
            iam_role = iam.Role(
                "Role%s" % inst['name'],
                AssumeRolePolicyDocument=assume_policy_doc,
                Path='/%s/%s/' % (inst['name'], config['environment'])
            )
            data.add_resource(iam_role)
            args.update({'role': Ref(iam_role)})
            policy_doc = provisioner.provisioner_policy(args)
            if policy_doc:
                data.add_resource(iam.PolicyType(
                    "Policy%s" % inst['name'],
                    PolicyName='iam-%s-%s' % (
                        inst['name'], config['environment']),
                    PolicyDocument=policy_doc,
                    Roles=[Ref(iam_role)]
                ))
            iip = iam.InstanceProfile(
                "Profile%s" % inst['name'],
                Path="/%s/%s/" % (
                    inst['name'], config['environment']),
                Roles=[Ref(iam_role)]
            )
            data.add_resource(iip)
            args.update({'profile': Ref(iip)})

        udata = provisioner.userdata(args)
        cfni = provisioner.cfn_init(args)

        lcargs = {
            'ImageId': inst['image'],
            'InstanceType': inst['size'],
            'KeyName': inst['sshKey'],
            'InstanceMonitoring': inst['monitoring'],
        }

        # Number of ephemeral (instance store) volumes per instance type
        extra_disk_table = {
            "c1.medium": 1,
            "c1.xlarge": 4,
            "c3.large": 2,
            "c3.xlarge": 2,
            "c3.2xlarge": 2,
            "c3.4xlarge": 2,
            "c3.8xlarge": 2,
            "cc2.8xlarge": 4,
            "cg1.4xlarge": 2,
            "cr1.8xlarge": 2,
            "g2.2xlarge": 1,
            "hi1.4xlarge": 2,
            "hs1.8xlarge": 24,
            "i2.xlarge": 1,
            "i2.2xlarge": 2,
            "i2.4xlarge": 4,
            "i2.8xlarge": 8,
            "m1.small": 1,
            "m1.medium": 1,
            "m1.large": 2,
            "m1.xlarge": 4,
            "m2.xlarge": 1,
            "m2.2xlarge": 1,
            "m2.4xlarge": 2,
            "m3.medium": 1,
            "m3.large": 1,
            "m3.xlarge": 2,
            "m3.2xlarge": 2,
            "r3.large": 1,
            "r3.xlarge": 1,
            "r3.2xlarge": 1,
            "r3.4xlarge": 1,
            "r3.8xlarge": 2,
        }

        block_devs = []
        if inst['size'] in extra_disk_table:
            for disk in range(extra_disk_table[inst['size']]):
                # Magic Number - ASCII 'b' is 98
                block_devs.append(autoscaling.BlockDeviceMapping(
                    VirtualName="ephemeral%d" % disk,
                    DeviceName="/dev/xvd%s" % chr(98 + disk)
                ))
        for block_dev in inst.get('block_device', []):
            block_devs.append(autoscaling.BlockDeviceMapping(
                DeviceName=block_dev['device'],
                Ebs=autoscaling.EBSBlockDevice(
                    VolumeSize=block_dev['size'],
                    VolumeType=block_dev['type'],
                )
            ))
        if block_devs:
            lcargs['BlockDeviceMappings'] = block_devs

        inst_sgs = []
        for secg in inst['sg']:
            if sgs.get(secg):
                inst_sgs.append(Ref(sgs[secg]))
            else:
                inst_sgs.append(secg)
        lcargs['SecurityGroups'] = inst_sgs

        if udata is not None:
            lcargs['UserData'] = Base64(udata)
        if cfni is not None:
            lcargs['Metadata'] = cfni
        if inst.get('public'):
            lcargs['AssociatePublicIpAddress'] = True
        if args.get('profile'):
            lcargs['IamInstanceProfile'] = args['profile']

        lcfg = autoscaling.LaunchConfiguration(
            'LC%s' % inst['name'],
            **lcargs
        )
        LOG.debug('Adding lc: %s', lcfg.JSONrepr())
        data.add_resource(lcfg)

        # 'propogate' (sic) is the argument name used by the troposphere
        # autoscaling.Tag helper
        asgtags = [
            autoscaling.Tag(
                key='Name',
                value='%s::%s::%s' % (
                    config['name'],
                    inst['name'],
                    config['environment']
                ),
                propogate=True,
            ),
            autoscaling.Tag(
                key='App',
                value=inst['name'],
                propogate=True,
            )
        ]
        custom_tags = inst['provisioner']['args'].get('custom_tags', {})
        for k, v in custom_tags.items():
            asgtags.append(
                autoscaling.Tag(
                    key=k,
                    value=v,
                    propogate=True,
                )
            )
        if inst.get('dns'):
            dnstag = {
                'r': inst['dns'].get('record', inst['name']),
                'z': "%s.%s" % (
                    config['environment'],
                    inst['dns']['zone'],
                ),
                't': inst['dns']['type'],
            }
            asgtags.append(
                autoscaling.Tag(
                    key='DNS',
                    value=json.dumps(dnstag, sort_keys=True),
                    propogate=True,
                ))

        strategy = import_from_string(
            'pmcf.strategy',
            config.get('strategy', 'BlueGreen')
        )()

        inst['min'] = inst.get('min', inst['count'])
        inst['max'] = inst.get('max', inst['count'])
        asgargs = {
            'AvailabilityZones': inst.get('zones', GetAZs('')),
            'DesiredCapacity': inst['count'],
            'LaunchConfigurationName': Ref(lcfg),
            'MaxSize': inst['max'],
            'MinSize': inst['min'],
            'Tags': asgtags,
            'HealthCheckType': inst.get('healthcheck', 'EC2'),
            'HealthCheckGracePeriod': 600,
        }
        if strategy.termination_policy() != ['Default']:
            asgargs['TerminationPolicies'] = strategy.termination_policy()
        if config.get('vpcid') and inst.get('subnets'):
            asgargs['VPCZoneIdentifier'] = inst['subnets']
        if inst.get('lb'):
            asgargs['LoadBalancerNames'] = [
                Ref(lbs["ELB" + x]) for x in inst['lb']
            ]
        if inst.get('depends'):
            asgargs['DependsOn'] = inst['depends']
        if inst.get('notify'):
            ncfg = autoscaling.NotificationConfiguration(
                TopicARN=inst['notify'],
                NotificationTypes=[
                    "autoscaling:EC2_INSTANCE_LAUNCH",
                    "autoscaling:EC2_INSTANCE_LAUNCH_ERROR",
                    "autoscaling:EC2_INSTANCE_TERMINATE",
                    "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
                ]
            )
            asgargs['NotificationConfiguration'] = ncfg
        asg = autoscaling.AutoScalingGroup(
            'ASG%s' % inst['name'],
            **asgargs
        )
        LOG.debug('Adding asg: %s', asg.JSONrepr())
        data.add_resource(asg)

        if inst.get('timed_scaling_policy'):
            pol = inst['timed_scaling_policy']['up']
            scaleuppolargs = {
                "AutoScalingGroupName": Ref("ASG%s" % inst['name']),
                "Recurrence": pol['recurrence'],
            }
            if pol.get('count', None) is not None:
                scaleuppolargs['DesiredCapacity'] = pol['count']
            if pol.get('min', None) is not None:
                scaleuppolargs['MinSize'] = pol['min']
            if pol.get('max', None) is not None:
                scaleuppolargs['MaxSize'] = pol['max']
            data.add_resource(autoscaling.ScheduledAction(
                "ASGTimedScaleUp%s" % inst['name'],
                **scaleuppolargs
            ))

            pol = inst['timed_scaling_policy'].get('down', None)
            if pol:
                scaledownpolargs = {
                    "AutoScalingGroupName": Ref("ASG%s" % inst['name']),
                    "Recurrence": pol['recurrence']
                }
                if pol.get('count', None) is not None:
                    scaledownpolargs['DesiredCapacity'] = pol['count']
                if pol.get('min', None) is not None:
                    scaledownpolargs['MinSize'] = pol['min']
                if pol.get('max', None) is not None:
                    scaledownpolargs['MaxSize'] = pol['max']
                data.add_resource(autoscaling.ScheduledAction(
                    "ASGTimedScaleDown%s" % inst['name'],
                    **scaledownpolargs
                ))

        if inst.get('scaling_policy'):
            pol = inst['scaling_policy']
            scaleuppolargs = {
                "AutoScalingGroupName": Ref("ASG%s" % inst['name']),
                "Cooldown": pol['up'].get('wait', 300),
            }
            amount = pol['up']['change']
            if pol['up']['change'].find('%') != -1:
                scaleuppolargs["AdjustmentType"] = "PercentChangeInCapacity"
                amount = amount.replace('%', '')
            else:
                scaleuppolargs["AdjustmentType"] = "ChangeInCapacity"
            scaleuppolargs["ScalingAdjustment"] = amount
            data.add_resource(autoscaling.ScalingPolicy(
                "ASGScaleUp%s" % inst['name'],
                **scaleuppolargs
            ))

            scaledownpolargs = {
                "AutoScalingGroupName": Ref("ASG%s" % inst['name']),
                "Cooldown": pol['up'].get('wait', 300),
            }
            amount = pol['down']['change']
            if pol['down']['change'].find('%') != -1:
                scaledownpolargs["AdjustmentType"] = "PercentChangeInCapacity"
                amount = amount.replace('%', '')
            else:
                scaledownpolargs["AdjustmentType"] = "ChangeInCapacity"
            scaledownpolargs["ScalingAdjustment"] = amount
            data.add_resource(autoscaling.ScalingPolicy(
                "ASGScaleDown%s" % inst['name'],
                **scaledownpolargs
            ))

            def str_cond(cond):
                """Helper method used locally"""
                # expect '>= 40'
                cond = cond.split()[0]
                lookup = {
                    '>=': 'GreaterThanOrEqualToThreshold',
                    '>': 'GreaterThanThreshold',
                    '<': 'LessThanThreshold',
                    '<=': 'LessThanOrEqualToThreshold',
                }
                return lookup[cond]

            upalarmargs = {
                "ActionsEnabled": True,
                "AlarmActions": [Ref("ASGScaleUp%s" % inst['name'])],
                "ComparisonOperator": str_cond(pol['up']['condition']),
                "Namespace": '/'.join(pol['metric'].split('/')[:2]),
                "MetricName": pol['metric'].split('/')[2],
                "Dimensions": [
                    cloudwatch.MetricDimension(
                        Name="AutoScalingGroupName",
                        Value=Ref("ASG%s" % inst['name']),
                    )
                ],
                "EvaluationPeriods": pol.get('wait', 5),
                "Statistic": pol['up']['stat'],
                "Threshold": pol['up']['condition'].split()[1],
                "Unit": pol['unit'],
            }
            if inst['monitoring']:
                upalarmargs['Period'] = 60
            else:
                upalarmargs['Period'] = 300
            data.add_resource(cloudwatch.Alarm(
                "CloudwatchUp%s" % inst['name'],
                **upalarmargs
            ))

            downalarmargs = {
                "ActionsEnabled": True,
                "OKActions": [Ref("ASGScaleDown%s" % inst['name'])],
                "ComparisonOperator": str_cond(pol['down']['condition']),
                "Namespace": '/'.join(pol['metric'].split('/')[:2]),
                "MetricName": pol['metric'].split('/')[2],
                "Dimensions": [
                    cloudwatch.MetricDimension(
                        Name="AutoScalingGroupName",
                        Value=Ref("ASG%s" % inst['name']),
                    )
                ],
                "EvaluationPeriods": pol.get('wait', 5),
                "Statistic": pol['down']['stat'],
                "Threshold": pol['down']['condition'].split()[1],
                "Unit": pol['unit'],
            }
            if inst['monitoring']:
                downalarmargs['Period'] = 60
            else:
                downalarmargs['Period'] = 300
            data.add_resource(cloudwatch.Alarm(
                "CloudwatchDown%s" % inst['name'],
                **downalarmargs
            ))

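# Illustrative only: an instance-definition fragment of the shape the
# scaling_policy handling above consumes. The keys are inferred from the
# code; the values are made up.
example_scaling_policy = {
    'metric': 'AWS/EC2/CPUUtilization',  # split into Namespace and MetricName
    'unit': 'Percent',
    'wait': 5,                           # EvaluationPeriods for the alarms
    'up': {
        'stat': 'Average',
        'condition': '>= 60',            # comparison operator + threshold
        'change': '2',                   # absolute count, or e.g. '10%' for percent
        'wait': 300,                     # policy Cooldown
    },
    'down': {
        'stat': 'Average',
        'condition': '<= 20',
        'change': '-2',
    },
}
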
def test_import_from_string_success(self):
    kls = utils.import_from_string('pmcf.parsers', 'BaseParser')
    from pmcf.parsers import BaseParser
    assert_equals(BaseParser, kls)

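# A minimal sketch of an import_from_string consistent with the test above,
# assuming it simply resolves an attribute from a module by name; the real
# pmcf.utils implementation may differ (for example in its error handling).
import importlib


def import_from_string(module, klass):
    # e.g. import_from_string('pmcf.parsers', 'BaseParser') -> the BaseParser class
    return getattr(importlib.import_module(module), klass)
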
def run(self, data, metadata=None, poll=False, action='create', upload=False):
    """
    Interfaces with public and private cloud providers - responsible for
    actual stack creation and update in AWS.

    :param data: Stack definition
    :type data: str.
    :param metadata: Additional information for stack launch (tags, etc).
    :type metadata: dict.
    :param poll: Whether to poll until completion
    :type poll: boolean.
    :param action: Action to take on the stack
    :type action: str.
    :param upload: Whether to upload stack definition to s3 before launch
    :type upload: boolean.
    :raises: :class:`pmcf.exceptions.ProvisionerException`
    :returns: boolean
    """

    metadata = metadata or {}
    LOG.debug('metadata is %s', metadata)
    if metadata.get('region', None) is None:
        raise ProvisionerException('Need to supply region in metadata')

    cfn = None
    for region in boto.regioninfo.get_regions('cloudformation'):
        if region.name == metadata['region']:
            if metadata.get('use_iam_profile'):
                cfn = boto.connect_cloudformation(region=region)
            else:
                cfn = boto.connect_cloudformation(
                    aws_access_key_id=metadata['access'],
                    aws_secret_access_key=metadata['secret'],
                    region=region
                )
    if cfn is None:
        raise ProvisionerException("Can't find a valid region")

    strategy = import_from_string(
        'pmcf.strategy',
        metadata.get('strategy', 'BlueGreen')
    )()

    tags = metadata.get('tags', {})
    capabilities = None
    if self._need_iam_caps(data):
        capabilities = ['CAPABILITY_IAM']

    try:
        if action == 'delete':
            if self._stack_exists(cfn, metadata['name']):
                LOG.info('stack %s exists, deleting', metadata['name'])
                if strategy.should_prompt('delete'):
                    answer = self._get_input(
                        "Proceed with deletion of %s? [Yn]: " %
                        metadata['name']
                    )
                    if answer.lower().startswith('n'):
                        return False
                cfn.delete_stack(metadata['name'])
                return self.do_poll(cfn, metadata['name'], poll, action)
            else:
                LOG.info("stack %s doesn't exist", metadata['name'])
                return True

        if upload:
            creds = {
                'access': metadata['access'],
                'secret': metadata['secret'],
                'audit_output': metadata.get('audit_output', None)
            }
            dest = 'launch/%s/%s/%s-%s' % (
                metadata['name'],
                metadata['environment'],
                metadata['name'],
                time.strftime('%Y%m%dT%H%M%S'))
            url = 'https://%s.%s/%s' % (
                metadata['audit_output'],
                's3.amazonaws.com',
                dest
            )
        else:
            creds = {}
            dest = ''
            url = ''

        data = json.dumps(json.loads(data))

        if action == 'trigger':
            if self._stack_updatable(cfn, metadata['name']):
                LOG.info('stack %s exists, triggering', metadata['name'])
                allowed_update = strategy.allowed_update()
                diff = self._get_difference(cfn, metadata['name'], data)
                if len(diff) == 0:
                    LOG.warning('No difference, not updating')
                    return True
                for change in diff:
                    if not allowed_update.match(change):
                        raise ProvisionerException(
                            'Not updating: %s not allowed field' % change)
                if upload:
                    self._upload_stack(data, dest, creds)
                    cfn.validate_template(template_url=url)
                    cfn.update_stack(metadata['name'],
                                     template_url=url,
                                     capabilities=capabilities,
                                     tags=tags)
                else:
                    cfn.validate_template(data)
                    cfn.update_stack(metadata['name'], data,
                                     capabilities=capabilities,
                                     tags=tags)
            else:
                LOG.info("stack %s not updatable", metadata['name'])
                return True

        elif action == 'create':
            if self._stack_exists(cfn, metadata['name']):
                LOG.info("stack %s already exists", metadata['name'])
                return True
            LOG.info("stack %s doesn't exist, creating", metadata['name'])
            if upload:
                self._upload_stack(data, dest, creds)
                cfn.validate_template(template_url=url)
                cfn.create_stack(metadata['name'],
                                 template_url=url,
                                 capabilities=capabilities,
                                 tags=tags)
            else:
                cfn.validate_template(data)
                cfn.create_stack(metadata['name'], data,
                                 capabilities=capabilities,
                                 tags=tags)

        elif action == 'update':
            if not self._stack_exists(cfn, metadata['name']):
                LOG.info("stack %s doesn't exist", metadata['name'])
                return True
            if self._stack_updatable(cfn, metadata['name']):
                if not strategy.should_update(action):
                    raise ProvisionerException(
                        'Stack exists but strategy does not allow update')
                allowed_update = strategy.allowed_update()
                diff = self._get_difference(cfn, metadata['name'], data)
                if len(diff) == 0:
                    LOG.warning('No difference, not updating')
                    return True
                changes = 0
                for change in diff:
                    if allowed_update.match(change):
                        continue
                    changes += 1
                if changes == 0:
                    LOG.warning('No difference, not updating')
                    return True
                if strategy.should_prompt('update'):
                    if not self._show_prompt(
                            cfn, metadata['name'], data, allowed_update):
                        return True
                LOG.info("stack %s exists, updating", metadata['name'])
                if upload:
                    self._upload_stack(data, dest, creds)
                    cfn.validate_template(template_url=url)
                    cfn.update_stack(metadata['name'],
                                     template_url=url,
                                     capabilities=capabilities,
                                     tags=tags)
                else:
                    cfn.validate_template(data)
                    cfn.update_stack(metadata['name'], data,
                                     capabilities=capabilities,
                                     tags=tags)
            else:
                LOG.info("stack %s not updatable", metadata['name'])
                return True

        self.do_audit(data, metadata)
        return self.do_poll(cfn, metadata['name'], poll, action)
    except boto.exception.BotoServerError as exc:
        raise ProvisionerException(str(exc))

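# Illustrative only: one way run() above might be invoked. The metadata keys
# shown are the ones run() and do_audit() read; every value here is a
# placeholder, and 'S3Audit' is a hypothetical audit backend name.
launch_metadata = {
    'name': 'web',
    'environment': 'prod',
    'region': 'eu-west-1',
    'access': 'AKIA...',               # or set 'use_iam_profile': True instead
    'secret': '...',
    'audit': 'S3Audit',                # class resolved from pmcf.audit
    'audit_output': 'my-audit-bucket',
    'strategy': 'BlueGreen',
    'tags': {'team': 'platform'},
}
# output.run(stack_json, launch_metadata, poll=True, action='create', upload=True)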