class CreateLoadBalancer(ELBRequest, TabifyingMixin):
    # Create a new ELB and print its DNS name.  Instances are registered
    # with the new balancer by a separate command.
    DESCRIPTION = ('Create a load balancer\n\nAfter the load balancer is '
                   'created, instances must be registered with it separately.')
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the new load balancer (required)'),
            # Exactly one of --subnets (VPC) or --availability-zones
            # (non-VPC) must be supplied.
            MutuallyExclusiveArgList(
                Arg('-s', '--subnets', metavar='SUBNET1,SUBNET2,...',
                    dest='Subnets.member', type=delimited_list(','),
                    help='''[VPC only] subnets the load balancer should run in (required)'''),
                Arg('-z', '--availability-zones', metavar='ZONE1,ZONE2,...',
                    dest='AvailabilityZones.member',
                    type=delimited_list(','),
                    help='''[Non-VPC only] availability zones the load balancer should run in (required)''')).required(),
            Arg('-l', '--listener', dest='Listeners.member', action='append',
                metavar=('"lb-port=PORT, protocol={HTTP,HTTPS,SSL,TCP}, '
                         'instance-port=PORT, instance-protocol={HTTP,HTTPS,'
                         'SSL,TCP}, cert-id=ARN"'),
                required=True, type=listener,
                help='''port/protocol settings for the load balancer, where lb-port is the external port number, protocol is the external protocol, instance-port is the back end server port number, instance-protocol is the protocol to use for routing traffic to back end instances, and cert-id is the ARN of the server certificate to use for encrypted connections. lb-port, protocol, and instance-port are required. This option may be used multiple times. (at least 1 required)'''),
            Arg('-i', '--scheme', dest='Scheme', choices=('internal',),
                metavar='internal',
                help='''[VPC only] "internal" to make the new load balancer private to a VPC'''),
            Arg('-g', '--security-groups', dest='SecurityGroups.member',
                metavar='GROUP1,GROUP2,...', type=delimited_list(','),
                help='''[VPC only] IDs of the security groups to assign to the new load balancer''')]

    def print_result(self, result):
        # Parenthesized single-argument print behaves identically on
        # python 2 and python 3 (the original statement form is a
        # SyntaxError on python 3).
        print(self.tabify(('DNS_NAME', result.get('DNSName'))))
# Build a CloudWatch PutMetricAlarm request.  Every option routes
# directly to the query parameter named by its ``dest``; the class adds
# no behavior beyond the declarations.
class PutMetricAlarm(CloudWatchRequest):
    DESCRIPTION = 'Create or update an alarm'
    ARGS = [Arg('AlarmName', metavar='ALARM',
                help='name of the alarm (required)'),
            Arg('--comparison-operator', dest='ComparisonOperator',
                choices=('GreaterThanOrEqualToThreshold',
                         'GreaterThanThreshold', 'LessThanThreshold',
                         'LessThanOrEqualToThreshold'), required=True,
                help='''arithmetic operator with which the comparison with the threshold will be made (required)'''),
            Arg('--evaluation-periods', dest='EvaluationPeriods', type=int,
                metavar='COUNT', required=True,
                help='''number of consecutive periods for which the value of the metric needs to be compared to the threshold (required)'''),
            Arg('--metric-name', dest='MetricName', metavar='METRIC',
                required=True,
                help="name for the alarm's associated metric (required)"),
            Arg('--namespace', dest='Namespace', metavar='NAMESPACE',
                required=True,
                help="namespace for the alarm's associated metric (required)"),
            Arg('--period', dest='Period', metavar='SECONDS', type=int,
                required=True,
                help='''period over which the specified statistic is applied (required)'''),
            Arg('--statistic', dest='Statistic',
                choices=('Average', 'Maximum', 'Minimum', 'SampleCount',
                         'Sum'), required=True,
                help='statistic on which to alarm (required)'),
            Arg('--threshold', dest='Threshold', metavar='FLOAT', type=float,
                required=True,
                help='value to compare the statistic against (required)'),
            # The service takes the strings 'true'/'false', not a flag.
            Arg('--actions-enabled', dest='ActionsEnabled',
                choices=('true', 'false'),
                help='''whether this alarm's actions should be executed when it changes state'''),
            Arg('--alarm-actions', dest='AlarmActions.member',
                metavar='ARN1,ARN2,...', type=delimited_list(','),
                help='''ARNs of SNS topics to publish to when the alarm changes to the ALARM state'''),
            Arg('--alarm-description', dest='AlarmDescription',
                metavar='DESCRIPTION', help='description of the alarm'),
            Arg('-d', '--dimensions', dest='Dimensions.member',
                metavar='KEY1=VALUE1,KEY2=VALUE2,...',
                type=delimited_list(',', item_type=cloudwatch_dimension),
                help="dimensions for the alarm's associated metric"),
            Arg('--insufficient-data-actions', metavar='ARN1,ARN2,...',
                dest='InsufficientDataActions.member',
                type=delimited_list(','),
                help='''ARNs of SNS topics to publish to when the alarm changes to the INSUFFICIENT_DATA state'''),
            Arg('--ok-actions', dest='OKActions.member',
                metavar='ARN1,ARN2,...', type=delimited_list(','),
                help='''ARNs of SNS topics to publish to when the alarm changes to the OK state'''),
            Arg('--unit', dest='Unit',
                help="unit for the alarm's associated metric")]
# Build a CloudWatch PutMetricData request for one metric datum.  The
# ``MetricData.member.1.*`` dests address the first (and only) member of
# the request's MetricData list.
class PutMetricData(CloudWatchRequest):
    DESCRIPTION = 'Add data points or statistics to a metric'
    ARGS = [Arg('-m', '--metric-name', dest='MetricData.member.1.MetricName',
                metavar='METRIC', required=True,
                help='name of the metric to add data points to (required)'),
            Arg('-n', '--namespace', dest='Namespace', required=True,
                help="the metric's namespace (required)"),
            # A datum carries either a plain value or a statistic set,
            # never both.
            MutuallyExclusiveArgList(
                Arg('-v', '--value', dest='MetricData.member.1.Value',
                    metavar='FLOAT', type=float,
                    help='data value for the metric'),
                Arg('-s', '--statistic-values', '--statisticValues',
                    dest='MetricData.member.1.StatisticValues',
                    metavar=('Maximum=FLOAT,Minimum=FLOAT,SampleCount=FLOAT,'
                             'Sum=FLOAT'), type=_statistic_set,
                    help='''statistic values for the metric. Maximum, Minimum, SampleCount, and Sum values are all required.'''))
            .required(),
            Arg('-d', '--dimensions', dest='Dimensions.member',
                metavar='KEY1=VALUE1,KEY2=VALUE2,...',
                type=delimited_list(',', item_type=cloudwatch_dimension),
                help='the dimensions of the metric to add data points to'),
            Arg('-t', '--timestamp', dest='MetricData.member.1.Timestamp',
                metavar='YYYY-MM-DDThh:mm:ssZ',
                help='timestamp of the data point'),
            Arg('-u', '--unit', dest='MetricData.member.1.Unit',
                metavar='UNIT',
                help='unit the metric is being reported in')]
# Replace the set of policies associated with one listener port.
class SetLoadBalancerPoliciesOfListener(ELBRequest):
    DESCRIPTION = 'Change the policy associated with a load balancer listener'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-l', '--lb-port', dest='LoadBalancerPort', metavar='PORT',
                type=int, required=True,
                help='port of the listener to modify (required)'),
            Arg('-p', '--policy-names', dest='PolicyNames.member',
                metavar='POLICY1,POLICY2,...', type=delimited_list(','),
                required=True,
                help='''list of policies to associate with the listener (required)''')]

    def preprocess(self):
        # When no policies were parsed, send an explicitly empty
        # PolicyNames element so the service clears the listener's
        # policies rather than rejecting the request.
        if not self.args.get('PolicyNames.member'):
            self.params['PolicyNames'] = EMPTY
class CreateStack(CloudFormationRequest):
    # Launch a new CloudFormation stack and print the new stack's ID.
    DESCRIPTION = 'Create a new stack'
    ARGS = [Arg('StackName', metavar='STACK',
                help='name of the new stack (required)'),
            # The template comes from exactly one of a local file or a URL.
            MutuallyExclusiveArgList(
                Arg('--template-file', dest='TemplateBody', metavar='FILE',
                    type=open,
                    help="file containing the new stack's JSON template"),
                Arg('--template-url', dest='TemplateURL', metavar='URL',
                    help="URL pointing to the new stack's JSON template"))
            .required(),
            Arg('-d', '--disable-rollback', dest='DisableRollback',
                action='store_true', help='disable rollback on failure'),
            Arg('-n', '--notification-arns', dest='NotificationARNs',
                metavar='ARN[,...]', type=delimited_list(','),
                action='append',
                help='''SNS ARNs to publish stack actions to'''),
            Arg('-p', '--parameter', dest='param_sets', route_to=None,
                metavar='KEY=VALUE', type=parameter_list, action='append',
                help='''key and value of the parameters to use with the new stack's template, separated by an "=" character'''),
            Arg('-t', '--timeout', dest='TimeoutInMinutes', type=int,
                metavar='MINUTES', help='timeout for stack creation'),
            Arg('--tag', dest='Tags.member', metavar='KEY[=VALUE]',
                type=binary_tag_def, action='append',
                help='''key and optional value of the tag to create, separated by an "=" character. If no value is given the tag's value is set to an empty string.''')]

    def configure(self):
        CloudFormationRequest.configure(self)
        # Each -p use yields a list of parameters; flatten them into one
        # request-level list (empty when -p was never given).
        stack_params = sum(self.args.get('param_sets') or [], [])
        self.params['Parameters.member'] = stack_params

    def print_result(self, result):
        # Parenthesized single-argument print behaves identically on
        # python 2 and 3 (the original statement form is py2-only).
        print(result.get('StackId'))
# Describe the alarms that watch one specific metric.  Responses are
# paginated via NextToken.
class DescribeAlarmsForMetric(CloudWatchRequest, TabifyingMixin):
    DESCRIPTION = ('Describe alarms for a single metric.\n\nNote that all '
                   "of an alarm's metrics must match exactly to obtain any "
                   'results.')
    ARGS = [Arg('--metric-name', dest='MetricName', metavar='METRIC',
                required=True, help='name of the metric (required)'),
            Arg('--namespace', dest='Namespace', metavar='NAMESPACE',
                required=True, help='namespace of the metric (required)'),
            # --alarm-description is supported by the tool, but not the service
            Arg('--alarm-description', route_to=None, help=argparse.SUPPRESS),
            Arg('--dimensions', dest='Dimensions.member',
                metavar='KEY1=VALUE1,KEY2=VALUE2,...',
                type=delimited_list(',', item_type=cloudwatch_dimension),
                help='dimensions of the metric'),
            Arg('--period', dest='Period', metavar='SECONDS',
                help='period over which statistics are applied'),
            Arg('--show-long', action='store_true', route_to=None,
                help="show all of the alarms' info"),
            Arg('--statistic', dest='Statistic',
                choices=('Average', 'Maximum', 'Minimum', 'SampleCount',
                         'Sum'),
                help='statistic of the metric on which to trigger alarms'),
            Arg('--unit', dest='Unit',
                help='unit of measurement for statistics')]
    LIST_TAGS = ['MetricAlarms', 'AlarmActions', 'Dimensions',
                 'InsufficientDataActions', 'OKActions']

    def main(self):
        return PaginatedResponse(self, (None, ), ('MetricAlarms', ))

    def prepare_for_page(self, page):
        # ``page`` is the NextToken of the previous response (or None for
        # the first page).
        self.params['NextToken'] = page

    # pylint: disable=no-self-use
    def get_next_page(self, response):
        return response.get('NextToken') or None
    # pylint: enable=no-self-use

    def print_result(self, result):
        # NOTE(review): print_alarm is not defined here -- presumably
        # inherited from CloudWatchRequest; confirm in the base class.
        for alarm in result.get('MetricAlarms', []):
            self.print_alarm(alarm)
# Turn on collection of group metrics for an auto-scaling group.
class EnableMetricsCollection(AutoScalingRequest):
    DESCRIPTION = "Enable monitoring of an auto-scaling group's group metrics"
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to update (required)'),
            Arg('-g', '--granularity', dest='Granularity', required=True,
                help='''granularity at which to collect metrics (e.g., '1Minute') (required)'''),
            Arg('-m', '--metrics', dest='Metrics.member',
                metavar='METRIC1,METRIC2,...', type=delimited_list(','),
                help='list of metrics to collect (default: all metrics)')]
class UpdateStack(CloudFormationRequest):
    # Update an existing CloudFormation stack and print its ID.
    DESCRIPTION = 'Update a stack with a new template'
    ARGS = [Arg('StackName', metavar='STACK',
                help='name of the stack to update (required)'),
            # The new template comes from exactly one of a file or a URL.
            MutuallyExclusiveArgList(
                Arg('--template-file', dest='TemplateBody', metavar='FILE',
                    type=open,
                    help='file containing a new JSON template for the stack'),
                Arg('--template-url', dest='TemplateURL', metavar='URL',
                    help='URL pointing to a new JSON template for the stack'))
            .required(),
            Arg('--capabilities', dest='Capabilities.member',
                metavar='CAP[,...]', type=delimited_list(','),
                help='capabilities needed to update the stack'),
            Arg('-p', '--parameter', dest='param_sets', route_to=None,
                metavar='KEY=VALUE', type=parameter_list, action='append',
                help='''key and value of the parameters to use with the stack's template, separated by an "=" character'''),
            # Adding tags and deleting all tags are mutually exclusive.
            MutuallyExclusiveArgList(
                Arg('--tag', dest='Tags.member', metavar='KEY[=VALUE]',
                    type=binary_tag_def, action='append',
                    help='''key and optional value of a tag to add, separated by an "=" character. If no value is given the tag's value is set to an empty string.'''),
                Arg('--delete-tags', dest='Tags', action='store_const',
                    const=EMPTY,
                    help='remove all tags associated with the stack'))]

    def configure(self):
        CloudFormationRequest.configure(self)
        # Flatten the per-option parameter lists into one request-level
        # list (empty when -p was never given).
        stack_params = sum(self.args.get('param_sets') or [], [])
        self.params['Parameters.member'] = stack_params

    # pylint: disable=no-self-use
    def print_result(self, result):
        # Parenthesized single-argument print behaves identically on
        # python 2 and 3 (the original statement form is py2-only).
        print(result.get('StackId'))
# Set (or replace) which SNS topic is notified of auto-scaling events
# for a group, and for which event types.
class PutNotificationConfiguration(AutoScalingRequest):
    DESCRIPTION = ("Create or replace an auto-scaling group's notification "
                   "configuration")
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to update (required)'),
            Arg('-n', '--notification-types',
                dest='NotificationTypes.member', metavar='TYPE1,TYPE2,...',
                type=delimited_list(','), required=True,
                help=('''comma-separated list of event types that will trigger notification (required)''')),
            Arg('-t', '--topic-arn', dest='TopicARN', metavar='TOPIC',
                required=True,
                help='''ARN of the SNS topic to publish notifications to (required)''')]
# Update the mutable parameters of an existing auto-scaling group; only
# the options the caller supplies are sent to the service.
class UpdateAutoScalingGroup(AutoScalingRequest):
    DESCRIPTION = "Update an auto-scaling group's parameters"
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to update (required)'),
            Arg('--default-cooldown', dest='DefaultCooldown',
                metavar='SECONDS', type=int,
                help='''amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities may start'''),
            Arg('--desired-capacity', dest='DesiredCapacity',
                metavar='COUNT', type=int,
                help='number of running instances the group should contain'),
            Arg('--grace-period', dest='HealthCheckGracePeriod',
                metavar='SECONDS', type=int,
                help='''number of seconds to wait before starting health checks on newly-created instances'''),
            Arg('--health-check-type', dest='HealthCheckType',
                choices=('EC2', 'ELB'),
                help='service to obtain health check status from'),
            Arg('-l', '--launch-configuration',
                dest='LaunchConfigurationName', metavar='LAUNCHCONFIG',
                help='''name of the launch configuration to use with the new group (required)'''),
            Arg('-M', '--max-size', dest='MaxSize', metavar='COUNT',
                type=int, help='maximum group size (required)'),
            Arg('-m', '--min-size', dest='MinSize', metavar='COUNT',
                type=int, help='minimum group size (required)'),
            Arg('--placement-group', dest='PlacementGroup',
                help='placement group in which to launch new instances'),
            Arg('--termination-policies', dest='TerminationPolicies.member',
                metavar='POLICY1,POLICY2,...', type=delimited_list(','),
                help='''ordered list of termination policies. The first has the highest precedence.'''),
            # NOTE: no delimited_list here -- the raw comma-separated
            # string is passed through as a single VPCZoneIdentifier value.
            Arg('--vpc-zone-identifier', dest='VPCZoneIdentifier',
                metavar='ZONE1,ZONE2,...',
                help='''comma-separated list of subnet identifiers. If you specify availability zones as well, ensure the subnets' availability zones match the ones you specified'''),
            Arg('-z', '--availability-zones', dest='AvailabilityZones.member',
                metavar='ZONE1,ZONE2,...', type=delimited_list(','),
                help='''comma-separated list of availability zones for the new group (required unless subnets are supplied)''')]
# Resume previously-suspended scaling processes for a group.
class ResumeProcesses(AutoScalingRequest):
    DESCRIPTION = "Resume an auto-scaling group's auto-scaling processes"
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to update (required)'),
            Arg('--processes', dest='ScalingProcesses.member',
                metavar='PROCESS1,PROCESS2,...', type=delimited_list(','),
                help='''comma-separated list of auto-scaling processes to resume (default: all processes)''')]
class DisableAvailabilityZonesForLoadBalancer(ELBRequest, TabifyingMixin):
    # Detach a load balancer from availability zones and print the zones
    # that remain attached.
    DESCRIPTION = 'Remove a load balancer from one or more availability zones'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-z', '--availability-zones',
                dest='AvailabilityZones.member', metavar='ZONE1,ZONE2,...',
                type=delimited_list(','), required=True,
                help='''availability zones to remove the load balancer from (required)''')]
    LIST_TAGS = ['AvailabilityZones']

    def print_result(self, result):
        # Parenthesized single-argument print behaves identically on
        # python 2 and 3 (the original statement form is py2-only).
        print(self.tabify(('AVAILABILITY_ZONES',
                           ', '.join(result.get('AvailabilityZones', [])))))
# Turn off collection of group metrics for an auto-scaling group.
class DisableMetricsCollection(AutoScalingRequest):
    DESCRIPTION = "Disable monitoring of an auto-scaling group's group metrics"
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to update (required)'),
            Arg('-m', '--metrics', dest='Metrics.member',
                metavar='METRIC1,METRIC2,...', type=delimited_list(','),
                help='list of metrics to disable (default: all metrics)')]
class ListMetrics(CloudWatchRequest, TabifyingMixin):
    # Paginated listing of CloudWatch metrics, printed sorted one metric
    # per line.
    DESCRIPTION = 'Show a list of monitoring metrics'
    ARGS = [Arg('-d', '--dimensions', dest='Dimensions.member',
                metavar='KEY1=VALUE1,KEY2=VALUE2,...',
                type=delimited_list(',', item_type=cloudwatch_dimension),
                help='limit results to metrics with specific dimensions'),
            Arg('-m', '--metric-name', dest='MetricName', metavar='METRIC',
                help='limit results to a specific metric'),
            Arg('-n', '--namespace', dest='Namespace', metavar='NAMESPACE',
                help='limit results to metrics in a specific namespace')]
    LIST_TAGS = ['Metrics', 'Dimensions']

    def main(self):
        return PaginatedResponse(self, (None, ), ('Metrics', ))

    def prepare_for_page(self, page):
        # ``page`` is the NextToken of the previous response (or None for
        # the first page).
        self.params['NextToken'] = page

    # pylint: disable=no-self-use
    def get_next_page(self, response):
        return response.get('NextToken') or None
    # pylint: enable=no-self-use

    def print_result(self, result):
        out_lines = []
        # Fix: the original iterated sorted(result.get('Metrics', [])),
        # sorting dicts themselves.  That was redundant -- out_lines is
        # sorted below anyway -- and dict-vs-dict ordering is rejected
        # outright by python 3.
        for metric in result.get('Metrics', []):
            dimensions = metric.get('Dimensions', [])
            if len(dimensions) > 0:
                formatted_dims = ['{0}={1}'.format(dimension.get('Name'),
                                                   dimension.get('Value'))
                                  for dimension in dimensions]
                out_lines.append((metric.get('MetricName'),
                                  metric.get('Namespace'),
                                  '{{{0}}}'.format(','.join(formatted_dims))))
            else:
                out_lines.append((metric.get('MetricName'),
                                  metric.get('Namespace'), None))
        for out_line in sorted(out_lines):
            # Parenthesized single-argument print behaves identically on
            # python 2 and 3 (the original statement form is py2-only).
            print(self.tabify(out_line))
class RegisterInstancesWithLoadBalancer(ELBRequest, TabifyingMixin):
    # Register instances with a load balancer and print the registered set.
    DESCRIPTION = 'Add one or more instances to a load balancer'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('--instances', dest='Instances.member', required=True,
                metavar='INSTANCE1,INSTANCE2,...',
                type=delimited_list(',', item_type=instance_id),
                help='''IDs of the instances to register with the load balancer (required)''')]
    LIST_TAGS = ['Instances']

    def print_result(self, result):
        for instance in result.get('Instances', []):
            # Parenthesized single-argument print behaves identically on
            # python 2 and 3 (the original statement form is py2-only).
            print(self.tabify(('INSTANCE', instance.get('InstanceId'))))
# Add a new policy to a load balancer.  Policy attributes may be given
# one at a time (-a), as comma-delimited lists (-A), and/or read from a
# file (--attributes-from-file); preprocess() merges all three sources.
class CreateLoadBalancerPolicy(ELBRequest):
    DESCRIPTION = 'Add a new policy to a load balancer'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-n', '--policy-name', dest='PolicyName', metavar='POLICY',
                required=True, help='name of the new policy (required)'),
            Arg('-t', '--policy-type', dest='PolicyTypeName',
                metavar='POLTYPE', required=True,
                help='''type of the new policy. For a list of policy types, use eulb-describe-lb-policy-types. (required)'''),
            Arg('-a', '--attribute', dest='PolicyAttributes.member',
                action='append', metavar='"name=NAME, value=VALUE"',
                type=attribute,
                help='''name and value for each attribute associated with the new policy. Use this option multiple times to supply multiple attributes.'''),
            Arg('-A', '--attributes', dest='new_attr_lists', route_to=None,
                metavar='NAME=VALUE,...', action='append',
                type=delimited_list(',', item_type=key_value_attribute),
                help='''a comma-delimited list of attribute names and values to associate with the new policy, each pair of which is separated by "=". This is a more concise alternative to the -a/--attribute option.'''),
            Arg('--attributes-from-file', dest='attr_filename',
                metavar='FILE', route_to=None,
                help='''a file containing attribute names and values to associate with the new policy, one per line, each pair of which is separated by "=". Lines that are blank or begin with "#" are ignored.''')]

    def preprocess(self):
        # Start from the -a attributes (already routed into params by the
        # framework), then fold in -A lists and the attribute file.
        if not self.params.get('PolicyAttributes.member'):
            self.params['PolicyAttributes.member'] = []
        for attr_list in self.args.get('new_attr_lists') or []:
            self.params['PolicyAttributes.member'].extend(attr_list or [])
        if self.args.get('attr_filename'):
            # '-' means read attributes from standard input.
            if self.args['attr_filename'] == '-':
                attr_file = sys.stdin
            else:
                attr_file = open(self.args['attr_filename'])
            with attr_file:
                for line_no, line in enumerate(attr_file, 1):
                    # Skip blank lines and comments.
                    if line.strip() and not line.startswith('#'):
                        try:
                            self.params['PolicyAttributes.member'].append(
                                key_value_attribute(line.strip()))
                        except ArgumentError as err:
                            # Re-raise with the file name and line number
                            # so the user can find the bad entry.
                            raise ValueError(
                                'error on {0} line {1}: {2}'
                                .format(self.args['attr_filename'],
                                        line_no, err.args[0]))
class ApplySecurityGroupsToLoadBalancer(ELBRequest, TabifyingMixin):
    # Replace a load balancer's security group associations and print the
    # resulting set.
    DESCRIPTION = ('[VPC only] Associate one or more security groups with a '
                   'load balancer. All previous associations with security '
                   'groups will be replaced.')
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-g', '--security-groups', dest='SecurityGroups.member',
                metavar='GROUP1,GROUP2,...', type=delimited_list(','),
                required=True,
                help='''security groups to associate the load balancer with (required)''')]
    LIST_TAGS = ['SecurityGroups']

    def print_result(self, result):
        # Parenthesized single-argument print behaves identically on
        # python 2 and 3 (the original statement form is py2-only).
        print(self.tabify(('SECURITY_GROUPS',
                           ', '.join(result.get('SecurityGroups', [])))))
# Replace the policies bound to a back-end server port.
class SetLoadBalancerPoliciesForBackendServer(ELBRequest):
    DESCRIPTION = ('Change the policies associated with a port on which load-'
                   'balanced back end servers listen.')
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-i', '--instance-port', dest='InstancePort', metavar='PORT',
                type=int, required=True,
                help='port number of the back end server (required)'),
            Arg('-p', '--policy-names', dest='PolicyNames.member',
                metavar='POLICY1,POLICY2,...', type=delimited_list(','),
                required=True,
                help='''list of policies to associate with the back end server (required)''')]

    def preprocess(self):
        # When no policies were parsed, send an explicitly empty
        # PolicyNames element so the service clears the port's policies
        # rather than rejecting the request.
        if not self.args.get('PolicyNames.member'):
            self.params['PolicyNames'] = EMPTY
class AttachLoadBalancerToSubnets(ELBRequest, TabifyingMixin):
    # Attach a load balancer to subnets and print the resulting subnet set.
    DESCRIPTION = '[VPC only] Add a load balancer to one or more subnets'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-s', '--subnets', dest='Subnets.member', required=True,
                metavar='SUBNET1,SUBNET2,...', type=delimited_list(','),
                help='''IDs of the subnets to add the load balancer to (required)''')]
    LIST_TAGS = ['Subnets']

    def print_result(self, result):
        # Parenthesized single-argument print behaves identically on
        # python 2 and 3 (the original statement form is py2-only).
        print(self.tabify(('SUBNETS', ', '.join(result.get('Subnets', [])))))
# Delete listeners from a load balancer by port number.
class DeleteLoadBalancerListeners(ELBRequest):
    DESCRIPTION = ('Delete one or more listeners from a load balancer\n\nIf '
                   'a listener named with -l/--lb-ports does not exist, this '
                   'command still succeeds.')
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='name of the load balancer to modify (required)'),
            Arg('-l', '--lb-ports', dest='LoadBalancerPorts.member',
                metavar='PORT1,PORT2,...', required=True,
                type=delimited_list(',', item_type=int),
                help='port numbers of the listeners to remove (required)'),
            # Hidden, unrouted option accepted only so existing scripts
            # that pass --force keep working.
            Arg('--force', action='store_true', route_to=None,
                help=argparse.SUPPRESS)]  # for compatibility
class DescribeInstanceHealth(ELBRequest, TabifyingMixin):
    # Show the health of instances registered with a load balancer, one
    # INSTANCE row per instance.
    DESCRIPTION = 'Show the state of instances registered with a load balancer'
    ARGS = [Arg('LoadBalancerName', metavar='ELB',
                help='''name of the load balancer to describe instances for (required)'''),
            Arg('--instances', dest='Instances.member',
                metavar='INSTANCE1,INSTANCE2,...',
                type=delimited_list(',', item_type=instance_id),
                help='limit results to specific instances'),
            Arg('--show-long', action='store_true', route_to=None,
                help="show all of the instances' info")]
    LIST_TAGS = ['InstanceStates']

    def print_result(self, result):
        for instance in result.get('InstanceStates', []):
            bits = ['INSTANCE',
                    instance.get('InstanceId'),
                    instance.get('State')]
            # --show-long adds the service's explanation columns.
            if self.args['show_long']:
                bits.append(instance.get('Description'))
                bits.append(instance.get('ReasonCode'))
            # Parenthesized single-argument print behaves identically on
            # python 2 and 3 (the original statement form is py2-only).
            print(self.tabify(bits))
class DescribeLoadBalancerPolicies(ELBRequest, TabifyingMixin):
    # Show load balancer policies, one POLICY row per policy; --show-long
    # appends the policy's attribute name/value pairs.
    DESCRIPTION = 'Show information about load balancer policies'
    ARGS = [Arg('LoadBalancerName', metavar='ELB', nargs='?',
                help='''show policies associated with a specific load balancer (default: only describe sample policies provided by the service)'''),
            Arg('-p', '--policy-names', dest='PolicyNames.member',
                metavar='POLICY1,POLICY2,...', type=delimited_list(','),
                help='limit results to specific policies'),
            Arg('--show-long', action='store_true', route_to=None,
                help="show all of the policies' info")]
    LIST_TAGS = ['PolicyDescriptions', 'PolicyAttributeDescriptions']

    def print_result(self, result):
        for policy in result.get('PolicyDescriptions', []):
            bits = ['POLICY', policy.get('PolicyName'),
                    policy.get('PolicyTypeName')]
            if self.args['show_long']:
                attrs = []
                for attr in policy.get('PolicyAttributeDescriptions', []):
                    attrs.append('{{name={0},value={1}}}'.format(
                        attr.get('AttributeName'), attr.get('AttributeValue')))
                # Keep the column present (but empty) when the policy has
                # no attributes so the output stays tabular.
                if len(attrs) > 0:
                    bits.append(','.join(attrs))
                else:
                    bits.append(None)
            # Parenthesized single-argument print behaves identically on
            # python 2 and 3 (the original statement form is py2-only).
            print(self.tabify(bits))
# Shared base class for commands that create image bundles.  It declares
# the common bundling options and validates the credential arguments;
# subclasses perform the actual bundling work.
class BundleCreator(BaseCommand, FileTransferProgressBarMixin):
    SUITE = Euca2ools
    ARGS = [Arg('-r', '--arch', choices=('i386', 'x86_64', 'armhf'),
                required=True,
                help="the image's processor architecture (required)"),
            Arg('-c', '--cert', metavar='FILE',
                help='file containing your X.509 certificate.'),
            Arg('-k', '--privatekey', metavar='FILE',
                help='''file containing the private key to sign the bundle's manifest with. This private key will also be required to unbundle the image in the future.'''),
            Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'),
            Arg('--region', dest='userregion', metavar='USER@REGION',
                help='''use encryption keys and the account ID specified for a user and/or region in configuration files'''),
            Arg('--ec2cert', metavar='FILE',
                help='''file containing the cloud's X.509 certificate'''),
            Arg('--kernel', metavar='IMAGE',
                help='''ID of the kernel image to associate with the machine bundle'''),
            Arg('--ramdisk', metavar='IMAGE',
                help='''ID of the ramdisk image to associate with the machine bundle'''),
            Arg('-B', '--block-device-mappings',
                metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...',
                type=manifest_block_device_mappings,
                help='''default block device mapping scheme with which to launch instances of this machine image'''),
            Arg('-d', '--destination', metavar='DIR',
                help='''location to place the bundle's files (default: dir named by TMPDIR, TEMP, or TMP environment variables, or otherwise /var/tmp)'''),
            Arg('--part-size', type=filesize, default=10485760,  # 10m
                help=argparse.SUPPRESS),
            Arg('--productcodes', metavar='CODE1,CODE2,...',
                type=delimited_list(','), default=[],
                help='comma-separated list of product codes'),
            Arg('--batch', action='store_true', help=argparse.SUPPRESS)]

    # noinspection PyExceptionInherit
    def configure(self):
        BaseCommand.configure(self)
        # NOTE(review): --region is applied before the EUCA_REGION
        # environment variable -- confirm the intended precedence in
        # set_userregion.
        set_userregion(self.config, self.args.get('userregion'))
        set_userregion(self.config, os.getenv('EUCA_REGION'))

        # Get creds
        add_bundle_creds(self.args, self.config)
        # These four are validated here rather than by argparse because
        # add_bundle_creds may also fill them in from configuration.
        if not self.args.get('cert'):
            raise ArgumentError(
                'missing certificate; please supply one with -c')
        self.log.debug('certificate: %s', self.args['cert'])
        if not self.args.get('privatekey'):
            raise ArgumentError(
                'missing private key; please supply one with -k')
        self.log.debug('private key: %s', self.args['privatekey'])
        if not self.args.get('ec2cert'):
            raise ArgumentError(
                'missing cloud certificate; please supply one with --ec2cert')
        self.log.debug('cloud certificate: %s', self.args['ec2cert'])
        if not self.args.get('user'):
            raise ArgumentError(
                'missing account ID; please supply one with --user')
        self.log.debug('account ID: %s', self.args['user'])
        # A nonexistent destination is allowed (it may be created later);
        # an existing non-directory is not.
        if (self.args.get('destination') and
                os.path.exists(self.args['destination']) and
                not os.path.isdir(self.args['destination'])):
            raise ArgumentError("argument -d/--destination: '{0}' is not a "
                                "directory".format(self.args['destination']))
class PutMetricData(CloudWatchRequest):
    """
    Add data values or statistics to a CloudWatch metric

    A metric datum consists of a metric name, any of several attributes,
    and either a simple, numeric value (-v) or a set of statistical
    values (-s). All metric data in a given invocation of %(prog)s must
    belong to one namespace.

    %(prog)s supports the following attributes (and equivalent aliases,
    in parentheses) for all data. Each of these attributes has a
    corresponding command line option that specifies that attribute for
    all metric data.

    * MetricName (metric)
    * Dimensions (dim)
    * Timestamp (ts)
    * Unit (unit)

    Simple metric data have one additional attribute for their values:

    * Value (val)

    Statistical metric data have four additional attributes:

    * Maximum (max)
    * Minimum (min)
    * SampleCount (count)
    * Sum (sum)

    The -v/--value option allows you to specify the value of a simple
    metric datum. To specify other attributes for data given using this
    option, use the options that correspond to them, such as
    -d/--dimensions. In particular, the -m/--metric-name option is
    required when -v/--value is used.

    The -s/--metric-datum option allows for full control of each data
    point's attributes. This is necessary for statistical data points.
    To specify a metric datum using this option, join each attribute's
    name or alias from the lists above with its value using an '='
    character, and join each of those pairs with ',' characters. If a
    value contains a ',' character, surround the entire attribute with
    '"' characters. For example, each of the following is a valid string
    to pass to -s/--metric-datum:

        MetricName=MyMetric,Value=1.5
        MetricName=MyMetric,Maximum=5,Minimum=1,SampleCount=5,Sum=10
        metric=MyMetric,val=9,"dim=InstanceId:i-12345678,Volume:/dev/sda"

    Attributes specified via -s/--metric-datum take precedence over
    those specified with attribute-specific command line options, such
    as -d/--dimensions.

    Timestamps must use a format specified in ISO 8601, such as
    "1989-11-09T19:17:45.000+01:00". Note that the CloudWatch service
    does not accept data with timestamps more than two weeks in the
    past.

    Dimensions' attributes are specified as a comma-separated list of
    dimension names and values that are themselves separated by ':'
    characters. This means that when more than one dimension is
    necessary, the entire Dimensions attribute must be enclosed in '"'
    characters. Most shell environments require this to be escaped.
    """

    ARGS = [Arg('-n', '--namespace', dest='Namespace', required=True,
                help='namespace for the new metric data (required)'),
            Arg('-v', '--value', dest='simple_values', route_to=None,
                metavar='FLOAT', type=float, action='append',
                help='''a simple value for a metric datum. Each use specifies a new metric datum.'''),
            Arg('-s', '--metric-datum', dest='attr_values', route_to=None,
                action='append', metavar='KEY1=VALUE1,KEY2=VALUE2,...',
                help='''names and values of the attributes for a metric datum. When values include ',' characters, enclose the entire name/value pair in '"' characters.'''),
            # Euca2ools 3.4 extended the "key=value"-based syntax to allow
            # one to supply arbitrary attributes of each datum.  Since
            # this format is a strict superset of the original format for
            # statistic values we silently treat the old option names as
            # aliases for the newer, generic one.
            Arg('--statistic-values', '--statisticValues', action='append',
                dest='attr_values', route_to=None, help=argparse.SUPPRESS),
            Arg('-m', '--metric-name', route_to=None, metavar='METRIC',
                help='name of the metric to add metric data to'),
            Arg('-d', '--dimensions', metavar='KEY1=VALUE1,KEY2=VALUE2,...',
                route_to=None,
                type=delimited_list(',', item_type=cloudwatch_dimension),
                help='''one or more dimensions to associate with the new metric data'''),
            Arg('-t', '--timestamp', route_to=None,
                metavar='YYYY-MM-DDThh:mm:ssZ',
                help='timestamp for the new metric data'),
            Arg('-u', '--unit', route_to=None, metavar='UNIT',
                help='''unit in which to report the new metric data points (e.g. Bytes)''')]

    def configure(self):
        """Translate the -v and -s options into a list of datum dicts
        stored in self.args['data']."""
        CloudWatchRequest.configure(self)
        data = []
        # Plain values
        for val in self.args.get('simple_values') or ():
            data.append(self.__build_datum_from_value(val))
        # Key/value-based data
        for val in self.args.get('attr_values') or ():
            data.append(self.__build_datum_from_pairs(val))
        self.args['data'] = data

    def main(self):
        # The API limits us to 20 points per request.  There are also
        # limits of 40 KB per POST request and 8 KB per GET request
        # that we do not consider here.
        data = self.args.get('data') or []
        for slice_start in range(0, len(data), POINTS_PER_REQUEST):
            slice_end = min(slice_start + POINTS_PER_REQUEST, len(data))
            self.params['MetricData'] = {'member': data[slice_start:slice_end]}
            self.send()
        return self.args['data']

    def __build_datum_from_value(self, value):
        """Build a datum dict from a bare -v/--value number, filling the
        remaining attributes from the request-level options."""
        datum = {}
        try:
            datum['Value'] = float(value)
        except ValueError:
            raise argparse.ArgumentTypeError(
                "argument -v/--value: value '{0}' must be numeric".format(
                    value))
        self.__complete_datum(datum)
        if not datum.get('MetricName'):
            raise requestbuilder.exceptions.ArgumentError(
                'argument -v/--value requires -m/--metric-name')
        return datum

    def __build_datum_from_pairs(self, pairs_as_str):
        """Parse a -s/--metric-datum KEY=VALUE,... string into a datum
        dict and validate its attribute combinations."""
        statistic_set_keys = ['Maximum', 'Minimum', 'SampleCount', 'Sum']
        datum = {}
        if not pairs_as_str.strip():
            raise argparse.ArgumentTypeError(
                "argument -s/--metric-datum: value must not be empty")
        # The csv module handles the quoting rules for values that
        # contain ',' characters.
        for pair in next(csv.reader(io.BytesIO(pairs_as_str))):
            try:
                key, val = pair.split('=')
            except ValueError:
                # A common mistake is using '=' instead of ':' inside a
                # Dimensions value; call that out specifically.
                if pair.startswith('dim=') or pair.startswith('Dimensions='):
                    raise argparse.ArgumentTypeError(
                        "argument -s/--metric-datum: dimension names and "
                        "values in datum '{0}' must be separated with ':', "
                        "not '='".format(pairs_as_str))
                raise argparse.ArgumentTypeError(
                    "argument -s/--metric-datum: '{0}' in datum '{1}' must "
                    "have format KEY=VALUE,...".format(pair, pairs_as_str))
            # Map short aliases (metric, val, max, ...) to service names.
            key = DATUM_KEYS.get(key, key)
            if key in statistic_set_keys:
                try:
                    datum.setdefault('StatisticValues', {})[key] = float(val)
                except ValueError:
                    raise argparse.ArgumentTypeError(
                        "argument -s/--metric-datum: {0} value for datum "
                        "'{1}' must be numeric".format(key, pairs_as_str))
            elif key == 'Value':
                try:
                    datum[key] = float(val)
                except ValueError:
                    raise argparse.ArgumentTypeError(
                        "argument -s/--metric-datum: {0} value for datum "
                        "'{1}' must be numeric".format(key, pairs_as_str))
            elif key == 'Dimensions':
                datum.setdefault(key, {'member': []})
                for dim_pair in val.split(','):
                    try:
                        dim_name, dim_val = dim_pair.split(':')
                    except ValueError:
                        raise argparse.ArgumentTypeError(
                            "argument -s/--metric-datum: dimension '{0}' for "
                            "datum '{1}' must have format KEY:VALUE,...".
                            format(dim_pair, pairs_as_str))
                    datum[key]['member'].append({'Name': dim_name,
                                                 'Value': dim_val})
            elif key in ('MetricName', 'Timestamp', 'Unit'):
                datum[key] = val
            else:
                raise argparse.ArgumentTypeError(
                    "argument -s/--metric-datum: datum '{0}' contains "
                    "unrecognized attribute '{1}'".format(pairs_as_str, key))
        self.__complete_datum(datum)
        # Validate
        if not datum.get('MetricName'):
            raise argparse.ArgumentTypeError(
                "argument -s/--metric-datum: datum '{0}' must have a "
                "metric name; supply one individually with 'MetricName=NAME' "
                "or set a default for this request with -m/--metric-name".
                format(pairs_as_str))
        if 'StatisticValues' in datum:
            if 'Value' in datum:
                # Fix: the original called next() directly on
                # datum['StatisticValues'].values(), which is a list/view,
                # not an iterator, so a TypeError masked this error.  Use
                # next(iter(...)) over the keys to name one of the
                # offending statistic attributes in the message.
                raise argparse.ArgumentTypeError(
                    "argument -s/--metric-datum: datum '{0}' must not "
                    "contain both Value and {1} attributes".format(
                        pairs_as_str, next(iter(datum['StatisticValues']))))
            for key in statistic_set_keys:
                if key not in datum['StatisticValues']:
                    raise argparse.ArgumentTypeError(
                        "argument -s/--metric-datum: a {0} is required for "
                        "statistic datum '{1}'".format(key, pairs_as_str))
        elif 'Value' not in datum:
            raise argparse.ArgumentTypeError(
                "argument -s/--metric-datum: datum '{0}' must contain "
                "either a Value or a Maximum, Minimum, SampleCount, and Sum".
                format(pairs_as_str))
        return datum

    def __complete_datum(self, datum):
        """Fill unset datum attributes from the request-level command
        line options (-m, -t, -u, -d); per-datum values win."""
        attr_map = {'MetricName': 'metric_name',
                    'Timestamp': 'timestamp',
                    'Unit': 'unit'}
        for key, val in attr_map.items():
            if self.args.get(val):
                datum.setdefault(key, self.args.get(val))
        if self.args.get('dimensions'):
            datum.setdefault('Dimensions',
                             {'member': self.args.get('dimensions')})
class BundleVolume(BaseCommand, FileTransferProgressBarMixin): SUITE = Euca2ools DESCRIPTION = ("Prepare this machine's filesystem for use in the cloud\n\n" "This command must be run as the superuser.") REGION_ENVVAR = 'AWS_DEFAULT_REGION' ARGS = [Arg('-p', '--prefix', help='''the file name prefix to give the bundle's files (default: image)'''), Arg('-d', '--destination', metavar='DIR', help='''location to place the bundle's files (default: dir named by TMPDIR, TEMP, or TMP environment variables, or otherwise /var/tmp)'''), # -r/--arch is required, but to keep the UID check we do at the # beginning of configure() first we enforce that there instead. Arg('-r', '--arch', help="the image's architecture (required)", choices=('i386', 'x86_64', 'armhf', 'ppc', 'ppc64')), Arg('-e', '--exclude', metavar='PATH,...', type=delimited_list(','), help='comma-separated list of paths to exclude'), Arg('-i', '--include', metavar='PATH,...', type=delimited_list(','), help='comma-separated list of paths to include'), Arg('-s', '--size', metavar='MiB', type=int, default=10240, help='size of the image to create (default: 10240 MiB)'), Arg('--no-filter', action='store_true', help='do not filter out sensitive/system files'), Arg('--all', action='store_true', help='''include all filesystems regardless of type (default: only include local filesystems)'''), MutuallyExclusiveArgList( Arg('--inherit', dest='inherit', action='store_true', help='''use the metadata service to provide metadata for the bundle (this is the default)'''), Arg('--no-inherit', dest='inherit', action='store_false', help='''do not use the metadata service for bundle metadata''')), Arg('-v', '--volume', metavar='DIR', default='/', help='''location of the volume from which to create the bundle (default: /)'''), Arg('-P', '--partition', choices=('mbr', 'gpt', 'none'), help='''the type of partition table to create (default: attempt to guess based on the existing disk)'''), Arg('-S', '--script', metavar='FILE', 
help='''location of a script to run immediately before bundling. It will receive the volume's mount point as its only argument.'''), MutuallyExclusiveArgList( Arg('--fstab', metavar='FILE', help='''location of an fstab(5) file to copy into the bundled image'''), Arg('--generate-fstab', action='store_true', help='''automatically generate an fstab(5) file for the bundled image''')), Arg('--grub-config', metavar='FILE', help='''location of a GRUB 1 configuration file to copy to /boot/grub/menu.lst on the bundled image'''), # Bundle-related stuff Arg('-k', '--privatekey', metavar='FILE', help='''file containing your private key to sign the bundle's manifest with. This private key will also be required to unbundle the image in the future.'''), Arg('-c', '--cert', metavar='FILE', help='file containing your X.509 certificate'), Arg('--ec2cert', metavar='FILE', help='''file containing the cloud's X.509 certificate'''), Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'), Arg('--kernel', metavar='IMAGE', help='''ID of the kernel image to associate with this machine image'''), Arg('--ramdisk', metavar='IMAGE', help='''ID of the ramdisk image to associate with this machine image'''), Arg('-B', '--block-device-mappings', metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...', type=manifest_block_device_mappings, help='''block device mapping scheme with which to launch instances of this machine image'''), Arg('--productcodes', metavar='CODE1,CODE2,...', type=delimited_list(','), default=[], help='comma-separated list of product codes for the image'), Arg('--part-size', type=filesize, default=10485760, help=argparse.SUPPRESS), Arg('--enc-key', type=(lambda s: int(s, 16)), help=argparse.SUPPRESS), Arg('--enc-iv', type=(lambda s: int(s, 16)), help=argparse.SUPPRESS)] def configure(self): if os.geteuid() != 0: raise RuntimeError('must be superuser') if not self.args.get('arch'): raise ArgumentError('argument -r/--arch is required') # Farm all the bundle arg validation out to 
BundleImage self.__build_bundle_command('/dev/null', image_size=1) root_device = _get_root_device() if self.args.get('inherit'): self.__populate_args_from_metadata() if not self.args.get('partition'): self.args['partition'] = _get_partition_table_type(root_device) if not self.args['partition']: self.log.warn('could not determine the partition table type ' 'for root device %s', root_device) raise ArgumentError( 'could not determine the type of partition table to use; ' 'specify one with -P/--partition'.format(root_device)) self.log.info('discovered partition table type %s', self.args['partition']) if not self.args.get('fstab') and not self.args.get('generate_fstab'): self.args['fstab'] = '/etc/fstab' def main(self): if self.args.get('destination'): destdir = self.args['destination'] else: destdir = euca2ools.util.mkdtemp_for_large_files(prefix='bundle-') image = os.path.join(destdir, self.args.get('prefix') or 'image') mountpoint = tempfile.mkdtemp(prefix='target-', dir=destdir) # Prepare the disk image device = self.__create_disk_image(image, self.args['size']) try: self.__create_and_mount_filesystem(device, mountpoint) try: # Copy files exclude_opts = self.__get_exclude_and_include_args() exclude_opts.extend(['--exclude', image, '--exclude', mountpoint]) self.__copy_to_target_dir(mountpoint, exclude_opts) self.__insert_fstab(mountpoint) self.__insert_grub_config(mountpoint) if self.args.get('script'): cmd = [self.args['script'], mountpoint] self.log.info("running user script ``%s''", _quote_cmd(cmd)) subprocess.check_call(cmd) except KeyboardInterrupt: self.log.info('received ^C; skipping to cleanup') msg = ('Cleaning up after ^C -- pressing ^C again will ' 'result in the need for manual device cleanup') print >> sys.stderr, msg raise # Cleanup finally: time.sleep(0.2) self.__unmount_filesystem(device) os.rmdir(mountpoint) finally: self.__detach_disk_image(image, device) bundle_cmd = self.__build_bundle_command(image) result = bundle_cmd.main() os.remove(image) 
return result def print_result(self, result): for manifest_filename in result[1]: print 'Wrote manifest', manifest_filename def __build_bundle_command(self, image_filename, image_size=None): bundle_args = ('prefix', 'destination', 'arch', 'privatekey', 'cert', 'ec2cert', 'user', 'kernel', 'ramdisk', 'block_device_mappings', 'productcodes', 'part_size', 'enc_key', 'enc_iv', 'show_progress') bundle_args_dict = dict((key, self.args.get(key)) for key in bundle_args) return BundleImage.from_other(self, image=image_filename, image_size=image_size, image_type='machine', **bundle_args_dict) # INSTANCE METADATA # def __read_metadata_value(self, path): self.log.debug("reading metadata service value '%s'", path) url = 'http://169.254.169.254/2012-01-12/meta-data/' + path response = requests.get(url, timeout=1) if response.status_code == 200: return response.text return None def __read_metadata_list(self, path): value = self.__read_metadata_value(path) if value: return [line.rstrip('/') for line in value.splitlines() if line] return [] def __read_metadata_dict(self, path): metadata = {} if not path.endswith('/'): path += '/' keys = self.__read_metadata_list(path) for key in keys: if key: metadata[key] = self.__read_metadata_value(path + key) return metadata def __populate_args_from_metadata(self): """ Populate missing/empty values in self.args using info obtained from the metadata service. 
""" try: if not self.args.get('kernel'): self.args['kernel'] = self.__read_metadata_value('kernel-id') self.log.info('inherited kernel: %s', self.args['kernel']) if not self.args.get('ramdisk'): self.args['ramdisk'] = self.__read_metadata_value('ramdisk-id') self.log.info('inherited ramdisk: %s', self.args['ramdisk']) if not self.args.get('productcodes'): self.args['productcodes'] = self.__read_metadata_list( 'product-codes') if self.args['productcodes']: self.log.info('inherited product codes: %s', ','.join(self.args['productcodes'])) if not self.args.get('block_device_mappings'): self.args['block_device_mappings'] = {} for key, val in (self.__read_metadata_dict( 'block-device-mapping') or {}).iteritems(): if not key.startswith('ebs'): self.args['block_device_mappings'][key] = val for key, val in self.args['block_device_mappings'].iteritems(): self.log.info('inherited block device mapping: %s=%s', key, val) except requests.exceptions.Timeout: raise ClientError('metadata service is absent or unresponsive; ' 'use --no-inherit to proceed without it') # DISK MANAGEMENT # def __create_disk_image(self, image, size_in_mb): subprocess.check_call(['dd', 'if=/dev/zero', 'of={0}'.format(image), 'bs=1M', 'count=1', 'seek={0}'.format(int(size_in_mb) - 1)]) if self.args['partition'] == 'mbr': # Why use sfdisk when we can use parted? 
:-) parted_script = ( b'unit s', b'mklabel msdos', b'mkpart primary 64 -1s', b'set 1 boot on', b'print', b'quit') subprocess.check_call(['parted', '-s', image, '--', ' '.join(parted_script)]) elif self.args['partition'] == 'gpt': # type 0xef02 == BIOS boot (we'll put it at the end of the list) subprocess.check_call( ['sgdisk', '--new', '128:1M:+1M', '--typecode', '128:ef02', '--change-name', '128:BIOS Boot', image]) # type 0x8300 == Linux filesystem data subprocess.check_call( ['sgdisk', '--largest-new=1', '--typecode', '1:8300', '--change-name', '1:Image', image]) subprocess.check_call(['sgdisk', '--print', image]) mapped = self.__map_disk_image(image) assert os.path.exists(mapped) return mapped def __map_disk_image(self, image): if self.args['partition'] in ('mbr', 'gpt'): # Create /dev/mapper/loopXpY and return that. # We could do this with losetup -Pf as well, but that isn't # available on RHEL 6. self.log.debug('mapping partitioned image %s', image) kpartx = subprocess.Popen(['kpartx', '-s', '-v', '-a', image], stdout=subprocess.PIPE) try: for line in kpartx.stdout.readlines(): line_split = line.split() if line_split[:2] == ['add', 'map']: device = line_split[2] if device.endswith('p1'): return '/dev/mapper/{0}'.format(device) self.log.error('failed to get usable map output from kpartx') raise RuntimeError('device mapping failed') finally: # Make sure the process exits kpartx.communicate() else: # No partition table self.log.debug('mapping unpartitioned image %s', image) losetup = subprocess.Popen(['losetup', '-f', image, '--show'], stdout=subprocess.PIPE) loopdev, _ = losetup.communicate() return loopdev.strip() def __create_and_mount_filesystem(self, device, mountpoint): root_device = _get_root_device() fsinfo = _get_filesystem_info(root_device) self.log.info('creating filesystem on %s using metadata from %s: %s', device, root_device, fsinfo) fs_cmds = [['mkfs', '-t', fsinfo['type']]] if fsinfo.get('label'): fs_cmds[0].extend(['-L', fsinfo['label']]) elif 
fsinfo['type'] in ('ext2', 'ext3', 'ext4'): if fsinfo.get('uuid'): fs_cmds[0].extend(['-U', fsinfo['uuid']]) # Time-based checking doesn't make much sense for cloud images fs_cmds.append(['tune2fs', '-i', '0']) elif fsinfo['type'] == 'jfs': if fsinfo.get('uuid'): fs_cmds.append(['jfs_tune', '-U', fsinfo['uuid']]) elif fsinfo['type'] == 'xfs': if fsinfo.get('uuid'): fs_cmds.append(['xfs_admin', '-U', fsinfo['uuid']]) for fs_cmd in fs_cmds: fs_cmd.append(device) self.log.info("formatting with ``%s''", _quote_cmd(fs_cmd)) subprocess.check_call(fs_cmd) self.log.info('mounting %s filesystem %s at %s', fsinfo['type'], device, mountpoint) subprocess.check_call(['mount', '-t', fsinfo['type'], device, mountpoint]) def __unmount_filesystem(self, device): self.log.info('unmounting %s', device) subprocess.check_call(['sync']) time.sleep(0.2) subprocess.check_call(['umount', device]) def __detach_disk_image(self, image, device): if self.args['partition'] in ('mbr', 'gpt'): self.log.debug('unmapping partitioned image %s', image) cmd = ['kpartx', '-s', '-d', image] else: self.log.debug('unmapping unpartitioned device %s', device) cmd = ['losetup', '-d', device] subprocess.check_call(cmd) # FILE MANAGEMENT # def __get_exclude_and_include_args(self): args = [] for exclude in self.args.get('exclude') or []: args.extend(['--exclude', exclude]) for include in self.args.get('include') or []: args.extend(['--include', include]) # Exclude remote filesystems if not self.args.get('all'): for device, mountpoint, fstype in _get_all_mounts(): if fstype not in ALLOWED_FILESYSTEM_TYPES: self.log.debug('excluding %s filesystem %s at %s', fstype, device, mountpoint) args.extend(['--exclude', os.path.join(mountpoint, '**')]) # Add pre-defined exclusions if not self.args.get('no_filter') and os.path.isfile(EXCLUDES_FILE): self.log.debug('adding path exclusions from %s', EXCLUDES_FILE) args.extend(['--exclude-from', EXCLUDES_FILE]) return args def __copy_to_target_dir(self, dest, exclude_opts): 
source = self.args.get('volume') or '/' if not source.endswith('/'): source += '/' if not dest.endswith('/'): dest += '/' rsync_opts = ['-rHlpogDtS'] if self.args.get('show_progress'): rsync = subprocess.Popen(['rsync', '--version'], stdout=subprocess.PIPE) out, _ = rsync.communicate() rsync_version = (out.partition('version ')[2] or '\0').split()[0] if rsync_version >= '3.1.0': # Use the new summarizing version rsync_opts.append('--info=progress2') else: rsync_opts.append('--progress') else: rsync_opts.append('--quiet') cmd = ['rsync', '-X'] + rsync_opts + exclude_opts + [source, dest] self.log.info("copying files with ``%s''", _quote_cmd(cmd)) print 'Copying files...' rsync = subprocess.Popen(cmd) rsync.wait() if rsync.returncode == 1: # Try again without xattrs self.log.info('rsync exited with code %i; retrying without xattrs', rsync.returncode) print 'Retrying without extended attributes' cmd = ['rsync'] + rsync_opts + exclude_opts + [source, dest] rsync = subprocess.Popen(cmd) rsync.wait() if rsync.returncode not in (0, 23): self.log.error('rsync exited with code %i', rsync.returncode) raise subprocess.CalledProcessError(rsync.returncode, 'rsync') def __insert_fstab(self, mountpoint): fstab_filename = os.path.join(mountpoint, 'etc', 'fstab') if os.path.exists(fstab_filename): fstab_bak = fstab_filename + '.bak' self.log.debug('backing up original fstab file as %s', fstab_bak) _copy_with_xattrs(fstab_filename, fstab_bak) if self.args.get('generate_fstab'): # This isn't really a template, but if the need arises we # can add something of that sort later. 
self.log.info('generating fstab file from %s', self.args['fstab']) _copy_with_xattrs(FSTAB_TEMPLATE_FILE, fstab_filename) elif self.args.get('fstab'): self.log.info('using fstab file %s', self.args['fstab']) _copy_with_xattrs(self.args['fstab'], fstab_filename) def __insert_grub_config(self, mountpoint): if self.args.get('grub_config'): grub_filename = os.path.join(mountpoint, 'boot', 'grub', 'menu.lst') if os.path.exists(grub_filename): grub_back = grub_filename + '.bak' self.log.debug('backing up original grub1 config file as %s', grub_back) _copy_with_xattrs(grub_filename, grub_back) self.log.info('using grub1 config file %s', self.args['grub_config']) _copy_with_xattrs(self.args['grub_config'], grub_filename)
class BundleCreatingMixin(object):
    """Mixin providing the argument set and helpers shared by commands
    that create image bundles (credential lookup, input/output setup,
    encryption key generation, and manifest construction)."""

    ARGS = [Arg('-i', '--image', metavar='FILE', required=True,
                help='file containing the image to bundle (required)'),
            Arg('-p', '--prefix',
                help='''the file name prefix to give the bundle's files
                (required when bundling stdin; otherwise defaults to the
                image's file name)'''),
            Arg('-d', '--destination', metavar='DIR',
                help='''location to place the bundle's files (default:  dir
                named by TMPDIR, TEMP, or TMP environment variables, or
                otherwise /var/tmp)'''),
            Arg('-r', '--arch', required=True,
                choices=('i386', 'x86_64', 'armhf', 'ppc', 'ppc64'),
                help="the image's architecture (required)"),

            # User- and cloud-specific stuff
            Arg('-k', '--privatekey', metavar='FILE',
                help='''file containing your private key to sign the
                bundle's manifest with.  This private key will also be
                required to unbundle the image in the future.'''),
            Arg('-c', '--cert', metavar='FILE',
                help='file containing your X.509 certificate'),
            Arg('--ec2cert', metavar='FILE',
                help='''file containing the cloud's X.509 certificate'''),
            Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'),
            Arg('--kernel', metavar='IMAGE',
                help='''ID of the kernel image to associate with this
                machine image'''),
            Arg('--ramdisk', metavar='IMAGE',
                help='''ID of the ramdisk image to associate with this
                machine image'''),

            # Obscurities
            Arg('-B', '--block-device-mappings',
                metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...',
                type=manifest_block_device_mappings,
                help='''block device mapping scheme with which to launch
                instances of this machine image'''),
            Arg('--productcodes', metavar='CODE1,CODE2,...',
                type=delimited_list(','), default=[],
                help='comma-separated list of product codes for the image'),
            Arg('--image-type', choices=('machine', 'kernel', 'ramdisk'),
                default='machine', help=argparse.SUPPRESS),

            # Stuff needed to fill out TarInfo when input comes from stdin.
            #
            # We technically could ask for a lot more, but most of it is
            # unnecessary since owners/modes/etc will be ignored at
            # unbundling time anyway.
            #
            # When bundling stdin we interpret --prefix as the image's file
            # name.
            Arg('--image-size', type=filesize,
                help='''the image's size (required when bundling stdin)'''),

            # Overrides for debugging and other entertaining uses
            Arg('--part-size', type=filesize, default=10485760,  # 10M
                help=argparse.SUPPRESS),
            Arg('--enc-key', type=(lambda s: int(s, 16)),
                help=argparse.SUPPRESS),  # a hex string
            Arg('--enc-iv', type=(lambda s: int(s, 16)),
                help=argparse.SUPPRESS),  # a hex string

            # Noop, for compatibility
            Arg('--batch', action='store_true', help=argparse.SUPPRESS)]

    # CONFIG METHODS #

    def configure_bundle_creds(self):
        """Resolve bundling credentials from args, environment variables,
        and the config file (in that order of precedence), then validate
        that all of them are present.

        Raises ArgumentError when a required credential is missing.
        """
        # User's X.509 certificate (user-level in config)
        if not self.args.get('cert'):
            config_cert = self.config.get_user_option('certificate')
            if 'EC2_CERT' in os.environ:
                self.args['cert'] = os.getenv('EC2_CERT')
            elif 'EUCA_CERT' in os.environ:  # used by the NC
                self.args['cert'] = os.getenv('EUCA_CERT')
            elif config_cert:
                self.args['cert'] = config_cert
        if self.args.get('cert'):
            self.args['cert'] = os.path.expanduser(
                os.path.expandvars(self.args['cert']))
            _assert_is_file(self.args['cert'], 'user certificate')

        # User's private key (user-level in config)
        if not self.args.get('privatekey'):
            config_privatekey = self.config.get_user_option('private-key')
            if 'EC2_PRIVATE_KEY' in os.environ:
                self.args['privatekey'] = os.getenv('EC2_PRIVATE_KEY')
            # Fixed: this was a plain "if", which let the config-file value
            # clobber EC2_PRIVATE_KEY whenever EUCA_PRIVATE_KEY was unset.
            # Now it mirrors the certificate lookup above.
            elif 'EUCA_PRIVATE_KEY' in os.environ:  # used by the NC
                self.args['privatekey'] = os.getenv('EUCA_PRIVATE_KEY')
            elif config_privatekey:
                self.args['privatekey'] = config_privatekey
        if self.args.get('privatekey'):
            self.args['privatekey'] = os.path.expanduser(
                os.path.expandvars(self.args['privatekey']))
            _assert_is_file(self.args['privatekey'], 'private key')

        # Cloud's X.509 cert (region-level in config)
        if not self.args.get('ec2cert'):
            # Renamed from config_privatekey, which misleadingly shadowed
            # the private-key local above.
            config_ec2cert = self.config.get_region_option('certificate')
            if 'EUCALYPTUS_CERT' in os.environ:
                # This has no EC2 equivalent since they just bundle their
                # cert.
                self.args['ec2cert'] = os.getenv('EUCALYPTUS_CERT')
            elif config_ec2cert:
                self.args['ec2cert'] = config_ec2cert
        if self.args.get('ec2cert'):
            self.args['ec2cert'] = os.path.expanduser(
                os.path.expandvars(self.args['ec2cert']))
            _assert_is_file(self.args['ec2cert'], 'cloud certificate')

        # User's account ID (user-level)
        if not self.args.get('user'):
            config_account_id = self.config.get_user_option('account-id')
            if 'EC2_USER_ID' in os.environ:
                self.args['user'] = os.getenv('EC2_USER_ID')
            elif config_account_id:
                self.args['user'] = config_account_id

        # Now validate everything
        if not self.args.get('cert'):
            raise ArgumentError(
                'missing certificate; please supply one with -c')
        self.log.debug('certificate: %s', self.args['cert'])
        if not self.args.get('privatekey'):
            raise ArgumentError(
                'missing private key; please supply one with -k')
        self.log.debug('private key: %s', self.args['privatekey'])
        if not self.args.get('ec2cert'):
            raise ArgumentError(
                'missing cloud certificate; please supply one with --ec2cert')
        self.log.debug('cloud certificate: %s', self.args['ec2cert'])
        if not self.args.get('user'):
            raise ArgumentError(
                'missing account ID; please supply one with --user')
        self.log.debug('account ID: %s', self.args['user'])

    def configure_bundle_output(self):
        """Normalize the image input (path, stdin, or file object) and
        ensure prefix and image size are known.

        Raises ArgumentError for an unusable destination or missing
        prefix/size information.
        """
        if (self.args.get('destination') and
                os.path.exists(self.args['destination']) and not
                os.path.isdir(self.args['destination'])):
            raise ArgumentError("argument -d/--destination: '{0}' is not a "
                                "directory".format(self.args['destination']))
        if self.args['image'] == '-':
            # dup() so closing the file object doesn't close real stdin
            self.args['image'] = os.fdopen(os.dup(sys.stdin.fileno()))
            if not self.args.get('prefix'):
                raise ArgumentError(
                    'argument --prefix is required when bundling stdin')
            if not self.args.get('image_size'):
                raise ArgumentError(
                    'argument --image-size is required when bundling stdin')
        elif isinstance(self.args['image'], basestring):
            if not self.args.get('prefix'):
                self.args['prefix'] = os.path.basename(self.args['image'])
            if not self.args.get('image_size'):
                self.args['image_size'] = euca2ools.util.get_filesize(
                    self.args['image'])
            self.args['image'] = open(self.args['image'])
        else:
            # Assume it is already a file object
            if not self.args.get('prefix'):
                raise ArgumentError('argument --prefix is required when '
                                    'bundling a file object')
            if not self.args.get('image_size'):
                raise ArgumentError('argument --image-size is required when '
                                    'bundling a file object')
        if self.args['image_size'] > EC2_BUNDLE_SIZE_LIMIT:
            self.log.warn(
                'image is incompatible with EC2 due to its size (%i > %i)',
                self.args['image_size'], EC2_BUNDLE_SIZE_LIMIT)

    def configure_bundle_properties(self):
        """Derive the image type from --kernel/--ramdisk "true" values and
        reject option combinations that conflict with that type."""
        if self.args.get('kernel') == 'true':
            self.args['image_type'] = 'kernel'
        if self.args.get('ramdisk') == 'true':
            self.args['image_type'] = 'ramdisk'
        if self.args['image_type'] == 'kernel':
            if self.args.get('kernel') and self.args['kernel'] != 'true':
                raise ArgumentError("argument --kernel: not compatible with "
                                    "image type 'kernel'")
            if self.args.get('ramdisk'):
                raise ArgumentError("argument --ramdisk: not compatible with "
                                    "image type 'kernel'")
            if self.args.get('block_device_mappings'):
                raise ArgumentError("argument -B/--block-device-mappings: "
                                    "not compatible with image type 'kernel'")
        if self.args['image_type'] == 'ramdisk':
            if self.args.get('kernel'):
                raise ArgumentError("argument --kernel: not compatible with "
                                    "image type 'ramdisk'")
            if self.args.get('ramdisk') and self.args['ramdisk'] != 'true':
                raise ArgumentError("argument --ramdisk: not compatible with "
                                    "image type 'ramdisk'")
            if self.args.get('block_device_mappings'):
                raise ArgumentError("argument -B/--block-device-mappings: "
                                    "not compatible with image type "
                                    "'ramdisk'")

    def generate_encryption_keys(self):
        """Fill self.args['enc_key'] and ['enc_iv'] with 32-digit hex
        strings, generating random 128-bit values for any not supplied."""
        srand = random.SystemRandom()
        if self.args.get('enc_key'):
            self.log.info('using preexisting encryption key')
            enc_key_i = self.args['enc_key']
        else:
            enc_key_i = srand.getrandbits(128)
        if self.args.get('enc_iv'):
            self.log.info('using preexisting encryption IV')
            enc_iv_i = self.args['enc_iv']
        else:
            enc_iv_i = srand.getrandbits(128)
        self.args['enc_key'] = '{0:0>32x}'.format(enc_key_i)
        self.args['enc_iv'] = '{0:0>32x}'.format(enc_iv_i)

    # MANIFEST GENERATION METHODS #

    def build_manifest(self, digest, partinfo):
        """Assemble a BundleManifest from self.args plus the image digest
        and per-part info; returns the manifest object."""
        manifest = euca2ools.bundle.manifest.BundleManifest(
            loglevel=self.log.level)
        manifest.image_arch = self.args['arch']
        manifest.kernel_id = self.args.get('kernel')
        manifest.ramdisk_id = self.args.get('ramdisk')
        if self.args.get('block_device_mappings'):
            manifest.block_device_mappings.update(
                self.args['block_device_mappings'])
        if self.args.get('productcodes'):
            manifest.product_codes.extend(self.args['productcodes'])
        manifest.image_name = self.args['prefix']
        manifest.account_id = self.args['user']
        manifest.image_type = self.args['image_type']
        manifest.image_digest = digest
        manifest.image_digest_algorithm = 'SHA1'  # shouldn't be hardcoded here
        manifest.image_size = self.args['image_size']
        manifest.bundled_image_size = sum(part.size for part in partinfo)
        manifest.enc_key = self.args['enc_key']
        manifest.enc_iv = self.args['enc_iv']
        manifest.enc_algorithm = 'AES-128-CBC'  # shouldn't be hardcoded here
        manifest.image_parts = partinfo
        return manifest

    def dump_manifest_to_file(self, manifest, filename, pretty_print=False):
        """Write the signed manifest XML to *filename*."""
        with open(filename, 'w') as manifest_file:
            manifest_file.write(self.dump_manifest_to_str(
                manifest, pretty_print=pretty_print))

    def dump_manifest_to_str(self, manifest, pretty_print=False):
        """Return the signed manifest XML as a string."""
        return manifest.dump_to_str(self.args['privatekey'],
                                    self.args['cert'],
                                    self.args['ec2cert'],
                                    pretty_print=pretty_print)
class GetMetricStatistics(CloudWatchRequest, TabifyingMixin): DESCRIPTION = "Show a metric's statistics" ARGS = [ Arg('MetricName', metavar='METRIC', help='name of the metric to get statistics for (required)'), Arg('-n', '--namespace', dest='Namespace', required=True, help="the metric's namespace (required)"), Arg('-s', '--statistics', dest='Statistics.member', required=True, metavar='STAT1,STAT2,...', type=delimited_list(','), help='the metric statistics to show (at least 1 required)'), Arg('--dimensions', dest='Dimensions.member', metavar='KEY1=VALUE1,KEY2=VALUE2,...', type=delimited_list(',', item_type=cloudwatch_dimension), help='the dimensions of the metric to show'), Arg('--start-time', dest='StartTime', metavar='YYYY-MM-DDThh:mm:ssZ', help='''earliest time to retrieve data points for (default: one hour ago)'''), Arg('--end-time', dest='EndTime', metavar='YYYY-MM-DDThh:mm:ssZ', help='''latest time to retrieve data points for (default: now)'''), Arg('--period', dest='Period', metavar='SECONDS', type=int, help='''granularity of the returned data points (must be a multiple of 60)'''), Arg('--unit', dest='Unit', help='unit the metric is reported in') ] LIST_TAGS = ['Datapoints'] # noinspection PyExceptionInherit def configure(self): CloudWatchRequest.configure(self) if self.args.get('period'): if self.args['period'] <= 0: raise ArgumentError( 'argument --period: value must be positive') elif self.args['period'] % 60 != 0: raise ArgumentError( 'argument --period: value must be a multiple of 60') def main(self): now = datetime.datetime.utcnow() then = now - datetime.timedelta(hours=1) if not self.args.get('StartTime'): self.params['StartTime'] = then.strftime('%Y-%m-%dT%H:%M:%SZ') if not self.args.get('EndTime'): self.params['EndTime'] = now.strftime('%Y-%m-%dT%H:%M:%SZ') return PaginatedResponse(self, (None, ), ('Datapoints', )) def prepare_for_page(self, page): self.params['NextToken'] = page def get_next_page(self, response): return response.get('NextToken') or 
None def print_result(self, result): points = [] for point in result.get('Datapoints', []): timestamp = point.get('Timestamp', '') try: parsed = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ') timestamp = parsed.strftime('%Y-%m-%d %H:%M:%S') except ValueError: # We'll just print it verbatim pass points.append( (timestamp, point.get('SampleCount'), point.get('Average'), point.get('Sum'), point.get('Minimum'), point.get('Maximum'), point.get('Unit'))) for point in sorted(points): print self.tabify(point)
class CreateLaunchConfiguration(AutoScalingRequest):
    """Request command that creates an auto-scaling launch configuration,
    handling user data supplied literally, forcibly, or from a file."""

    DESCRIPTION = 'Create a new auto-scaling instance launch configuration'
    ARGS = [Arg('LaunchConfigurationName', metavar='LAUNCHCONFIG',
                help='name of the new launch configuration (required)'),
            Arg('-i', '--image-id', dest='ImageId', metavar='IMAGE',
                required=True,
                help='machine image to use for instances (required)'),
            Arg('-t', '--instance-type', dest='InstanceType', metavar='TYPE',
                required=True,
                help='instance type for use for instances (required)'),
            Arg('--block-device-mapping', dest='BlockDeviceMappings.member',
                metavar='DEVICE1=MAPPED1,DEVICE2=MAPPED2,...',
                type=delimited_list(','),
                help='''a comma-separated list of block device mappings for
                the image, in the form DEVICE=MAPPED, where "MAPPED" is
                "none", "ephemeral(0-3)", or
                "[SNAP-ID]:[SIZE]:[true|false]'''),
            Arg('--ebs-optimized', dest='EbsOptimized', action='store_const',
                const='true',
                help='whether the instance is optimized for EBS I/O'),
            Arg('--group', dest='SecurityGroups.member',
                metavar='GROUP1,GROUP2,...', type=delimited_list(','),
                help='''a comma-separated list of security groups with which
                to associate instances.  Either all group names or all group
                IDs are allowed, but not both.'''),
            Arg('--iam-instance-profile', dest='IamInstanceProfile',
                metavar='PROFILE',
                help='''ARN of the instance profile associated with
                instances' IAM roles'''),
            Arg('--kernel', dest='KernelId', metavar='KERNEL',
                help='kernel image to use for instances'),
            Arg('--key', dest='KeyName', metavar='KEYPAIR',
                help='name of the key pair to use for instances'),
            Arg('--monitoring-enabled', dest='InstanceMonitoring.Enabled',
                action='store_const', const='true',
                help='enable detailed monitoring (enabled by default)'),
            Arg('--monitoring-disabled', dest='InstanceMonitoring.Enabled',
                action='store_const', const='false',
                help='disable detailed monitoring (enabled by default)'),
            Arg('--ramdisk', dest='RamdiskId', metavar='RAMDISK',
                help='ramdisk image to use for instances'),
            Arg('--spot-price', dest='SpotPrice', metavar='PRICE',
                help='maximum hourly price for any spot instances launched'),
            MutuallyExclusiveArgList(
                Arg('-d', '--user-data', metavar='DATA', route_to=None,
                    help='user data to make available to instances'),
                Arg('--user-data-force', metavar='DATA', route_to=None,
                    help='''same as -d/--user-data, but without checking if a
                    file by that name exists first'''),
                Arg('-f', '--user-data-file', metavar='FILE', route_to=None,
                    help='''file containing user data to make available to
                    instances'''))]

    # noinspection PyExceptionInherit
    def configure(self):
        """Run the base configuration, then base64-encode whichever form
        of user data was supplied into the UserData parameter.

        Raises ArgumentError if -d/--user-data names an existing file,
        to guard against passing a file path where contents were meant.
        """
        AutoScalingRequest.configure(self)
        if self.args.get('user_data'):
            if os.path.isfile(self.args['user_data']):
                # Fixed: the message contains a '{0}' placeholder but the
                # .format call was missing, so users saw a literal '{0}'
                # instead of the value they passed.
                raise ArgumentError(
                    'argument -d/--user-data: to pass the contents of a file '
                    'as user data, use -f/--user-data-file. To pass the '
                    "literal value '{0}' as user data even though it matches "
                    'the name of a file, use --user-data-force.'.format(
                        self.args['user_data']))
            else:
                self.params['UserData'] = base64.b64encode(
                    self.args['user_data'])
        elif self.args.get('user_data_force'):
            self.params['UserData'] = base64.b64encode(
                self.args['user_data_force'])
        elif self.args.get('user_data_file'):
            with open(self.args['user_data_file']) as user_data_file:
                self.params['UserData'] = base64.b64encode(
                    user_data_file.read())

    def preprocess(self):
        """Convert any block device mappings into request parameters."""
        if self.args.get('block_device_mapping'):
            mappings = map(ec2_block_device_mapping,
                           self.args['block_device_mapping'])
            self.params['BlockDeviceMappings.member'] = mappings
class BundleVol(BundleCreator): DESCRIPTION = ("Create a bundled iamge based on the running machine's " 'filesystem\n\nThis command must be run as the superuser.') ARGS = [Arg('-s', '--size', metavar='MB', type=filesize, default=IMAGE_MAX_SIZE_IN_MB, help='''Size of the image in MB (default: {0}; recommended maximum: {0}).'''.format(IMAGE_MAX_SIZE_IN_MB)), Arg('-p', '--prefix', metavar='PREFIX', default='image', help='''the file name prefix to give the bundle's files (defaults to 'image').'''), Arg('-a', '--all', dest="bundle_all_dirs", action='store_true', help='''Bundle all directories (including mounted filesystems).'''), MutuallyExclusiveArgList( Arg('--no-inherit', dest='inherit', action='store_false', default=True, help='''Do not add instance metadata to the bundled image (defaults to inheriting metadata).'''), Arg('--inherit', dest='inherit', action='store_true', default=True, help='''Explicitly inherit instance metadata and add it to the bundled image (this is the default behavior)''')), Arg('-i', '--include', metavar='FILE1,FILE2,...', type=delimited_list(','), help='''Comma-separated list of absolute file paths to include.'''), Arg('-e', '--exclude', metavar='DIR1,DIR2,...', type=delimited_list(','), help='''Comma-separated list of directories to exclude.'''), Arg('--volume', metavar='PATH', default='/', help='''Path to mounted volume to bundle (defaults to '/').'''), Arg('--no-filter', dest='filter', action='store_false', help='''Do not use the default filtered files list.'''), MutuallyExclusiveArgList( Arg('--fstab', metavar='PATH', help='''Path to the fstab to be bundled with image.'''), Arg('--generate-fstab', action='store_true', help='Generate fstab to bundle in image.'))] def __init__(self, **kwargs): if (os.geteuid() != 0 and '--help' not in sys.argv and '-h' not in sys.argv): # Inform people with insufficient privileges before parsing args # so they don't have to wade through required arg messages and # whatnot first. 
raise Exception("must be superuser") BundleCreator.__init__(self, **kwargs) def _inherit_metadata(self): """Read instance metadata which we will propagate to the BundleImage command. These values are used for generating a manifest once we have a bundled image. """ try: check_metadata() if not self.args.get('ramdisk'): self.args['ramdisk'] = get_metadata('ramdisk-id') self.log.debug("inheriting ramdisk: {0}" .format(self.args.get('ramdisk'))) if not self.args.get('kernel'): self.args['kernel'] = get_metadata('kernel-id') self.log.debug("inheriting kernel: {0}" .format(self.args.get('kernel'))) if not self.args.get('block_device_mappings'): self.args['block_device_mappings'] = \ get_metadata_dict('block-device-mapping') self.log.debug("inheriting block device mappings: {0}".format( self.args.get('block_device_mappings'))) # # Product codes and ancestor ids are special cases since they # aren't always there. # try: productcodes = get_metadata_list('product-codes') self.args['productcodes'].extend(productcodes) self.log.debug("inheriting product codes: {0}" .format(productcodes)) except (ClientError, ServerError): msg = 'unable to read product codes from metadata.' print sys.stderr, msg self.log.warn(msg) try: if not self.args.get('ancestor_image_ids'): self.args['ancestor_image_ids'] = [] ancestor_ids = get_metadata_list('ancestor-ami-ids') self.args['ancestor_image_ids'].extend(ancestor_ids) self.log.debug("inheriting ancestor ids: {0}" .format(ancestor_ids)) except (ClientError, ServerError): msg = 'unable to read ancestor ids from metadata.' print sys.stderr, msg self.log.warn(msg) except (ClientError, ServerError): msg = ('Unable to read instance metadata. Use --no-inherit if ' 'you want to proceed without the metadata service.') print >> sys.stderr, msg self.log.warn(msg) raise def _filter_args_for_bundle_image(self): """Make a complete copy of args to pass along to BundleImage. We first need to remove any arguments that BundleImage would not know about. 
""" args = copy.deepcopy(self.args) for arg in BUNDLE_IMAGE_ARG_FILTER: try: del args[arg] except KeyError: pass return args def configure(self): BundleCreator.configure(self) self.args['user'] = self.args.get('user').replace('-', '') def main(self): if self.args.get('inherit'): self._inherit_metadata() image_file = ImageCreator(log=self.log, **self.args).run() try: image_args = self._filter_args_for_bundle_image() image_args.update(image=image_file, image_type='machine') self.log.info("bundling image: {0}".format(image_file)) return BundleImage(**image_args).main() finally: if os.path.exists(image_file): os.remove(image_file) if len(os.listdir(os.path.dirname(image_file))) == 0: os.rmdir(os.path.dirname(image_file)) def print_result(self, result): for part_filename in result[0]: print 'Wrote', part_filename print 'Wrote manifest', result[1]
class BundleCreatingMixin(object):
    """Mixin supplying the arguments and helper methods needed to create
    an image bundle: credential/certificate resolution, input/output
    validation, encryption key generation, and manifest construction.
    """

    ARGS = [
        Arg('-i', '--image', metavar='FILE', required=True,
            help='file containing the image to bundle (required)'),
        Arg('-p', '--prefix', help='''the file name prefix to give the
            bundle's files (required when bundling stdin; otherwise
            defaults to the image's file name)'''),
        Arg('-d', '--destination', metavar='DIR', help='''location to
            place the bundle's files (default: dir named by TMPDIR, TEMP,
            or TMP environment variables, or otherwise /var/tmp)'''),
        Arg('-r', '--arch', required=True,
            choices=('i386', 'x86_64', 'armhf', 'ppc', 'ppc64', 'ppc64le'),
            help="the image's architecture (required)"),

        # User- and cloud-specific stuff
        Arg('-k', '--privatekey', metavar='FILE', help='''file containing
            your private key to sign the bundle's manifest with. If one
            is not available the bundle will not be signed.'''),
        Arg('-c', '--cert', metavar='FILE', help='''file containing your
            X.509 certificate. If one is not available it will not be
            possible to unbundle the bundle without cloud administrator
            assistance.'''),
        Arg('--ec2cert', metavar='FILE', help='''file containing the
            cloud's X.509 certificate. If one is not available locally
            it must be available from the bootstrap service.'''),
        Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'),
        Arg('--kernel', metavar='IMAGE', help='''ID of the kernel image to
            associate with this machine image'''),
        Arg('--ramdisk', metavar='IMAGE', help='''ID of the ramdisk image
            to associate with this machine image'''),
        # BUG FIX: help text previously had an unclosed parenthesis.
        Arg('--bootstrap-url', route_to=None,
            help='''[Eucalyptus only] bootstrap service endpoint URL
            (used for obtaining --ec2cert automatically)'''),
        Arg('--bootstrap-service', route_to=None, help=argparse.SUPPRESS),
        Arg('--bootstrap-auth', route_to=None, help=argparse.SUPPRESS),

        # Obscurities
        Arg('-B', '--block-device-mappings',
            metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...',
            type=manifest_block_device_mappings,
            help='''block device mapping scheme with which to launch
            instances of this machine image'''),
        Arg('--productcodes', metavar='CODE1,CODE2,...',
            type=delimited_list(','), default=[],
            help='comma-separated list of product codes for the image'),
        Arg('--image-type', choices=('machine', 'kernel', 'ramdisk'),
            default='machine', help=argparse.SUPPRESS),

        # Stuff needed to fill out TarInfo when input comes from stdin.
        #
        # We technically could ask for a lot more, but most of it is
        # unnecessary since owners/modes/etc will be ignored at unbundling
        # time anyway.
        #
        # When bundling stdin we interpret --prefix as the image's file
        # name.
        Arg('--image-size', type=filesize,
            help='''the image's size (required when bundling stdin)'''),

        # Overrides for debugging and other entertaining uses
        Arg('--part-size', type=filesize, default=10485760,  # 10M
            help=argparse.SUPPRESS),
        Arg('--enc-key', type=(lambda s: int(s, 16)),
            help=argparse.SUPPRESS),  # a hex string
        Arg('--enc-iv', type=(lambda s: int(s, 16)),
            help=argparse.SUPPRESS),  # a hex string

        # Noop, for compatibility
        Arg('--batch', action='store_true', help=argparse.SUPPRESS)
    ]

    # CONFIG METHODS #

    def configure_bundle_creds(self):
        """Resolve the account ID, user certificate, private key, and
        cloud certificate from (in order of precedence) explicit
        arguments, the environment, and the configuration -- fetching the
        cloud certificate from the bootstrap service as a last resort.

        Raises ArgumentError when the account ID or cloud certificate
        cannot be determined.
        """
        # User's account ID (user-level)
        if not self.args.get('user'):
            config_val = self.config.get_user_option('account-id')
            if 'EC2_USER_ID' in os.environ:
                self.log.debug('using account ID from environment')
                self.args['user'] = os.getenv('EC2_USER_ID')
            elif config_val:
                self.log.debug('using account ID from configuration')
                self.args['user'] = config_val
        if self.args.get('user'):
            self.args['user'] = self.args['user'].replace('-', '')
        if not self.args.get('user'):
            raise ArgumentError(
                'missing account ID; please supply one with --user')
        self.log.debug('account ID: %s', self.args['user'])

        # User's X.509 certificate (user-level in config)
        if not self.args.get('cert'):
            config_val = self.config.get_user_option('certificate')
            if 'EC2_CERT' in os.environ:
                self.log.debug('using certificate from environment')
                self.args['cert'] = os.getenv('EC2_CERT')
            elif 'EUCA_CERT' in os.environ:  # used by the NC
                self.log.debug('using certificate from environment')
                self.args['cert'] = os.getenv('EUCA_CERT')
            elif config_val:
                self.log.debug('using certificate from configuration')
                self.args['cert'] = config_val
        if self.args.get('cert'):
            self.args['cert'] = os.path.expanduser(
                os.path.expandvars(self.args['cert']))
            _assert_is_file(self.args['cert'], 'user certificate')
        self.log.debug('certificate: %s', self.args.get('cert'))

        # User's private key (user-level in config)
        if not self.args.get('privatekey'):
            config_val = self.config.get_user_option('private-key')
            if 'EC2_PRIVATE_KEY' in os.environ:
                self.log.debug('using private key from environment')
                self.args['privatekey'] = os.getenv('EC2_PRIVATE_KEY')
            # BUG FIX: this was a separate "if", so a key taken from
            # EC2_PRIVATE_KEY was clobbered by the configuration value
            # whenever EUCA_PRIVATE_KEY was unset.  "elif" matches the
            # certificate and account-ID lookups above.
            elif 'EUCA_PRIVATE_KEY' in os.environ:  # used by the NC
                self.log.debug('using private key from environment')
                self.args['privatekey'] = os.getenv('EUCA_PRIVATE_KEY')
            elif config_val:
                self.log.debug('using private key from configuration')
                self.args['privatekey'] = config_val
        if self.args.get('privatekey'):
            self.args['privatekey'] = os.path.expanduser(
                os.path.expandvars(self.args['privatekey']))
            _assert_is_file(self.args['privatekey'], 'private key')
        self.log.debug('private key: %s', self.args.get('privatekey'))

        # Cloud's X.509 cert (region-level in config)
        if not self.args.get('ec2cert'):
            config_val = self.config.get_region_option('certificate')
            if 'EUCALYPTUS_CERT' in os.environ:
                # This has no EC2 equivalent since they just bundle
                # their cert.
                self.log.debug('using cloud certificate from environment')
                self.args['ec2cert'] = os.getenv('EUCALYPTUS_CERT')
            elif config_val:
                self.log.debug('using cloud certificate from configuration')
                self.args['ec2cert'] = config_val
            elif (self.args.get('bootstrap_service') and
                  self.args.get('bootstrap_auth')):
                # Sending requests during configure() can be precarious.
                # Pay close attention to ordering to ensure all of this
                # request's dependencies have been fulfilled.
                try:
                    fetched_cert = self.__get_bundle_certificate(
                        self.args['bootstrap_service'],
                        self.args['bootstrap_auth'])
                except AWSError as err:
                    self.log.debug('failed to fetch ec2cert', exc_info=True)
                    if err.response.status_code == 403:
                        msg = ('permission error retrieving cloud '
                               'certificate; please supply one with '
                               '--ec2cert or obtain an IAM policy that '
                               'allows "euserv:DescribeServiceCertificates"')
                    else:
                        msg = ('error retrieving cloud certificate; please '
                               'supply one with --ec2cert')
                    six.raise_from(ArgumentError(msg), err)
                if fetched_cert:
                    self.log.debug('using cloud certificate from '
                                   'bootstrap service')
                    self.args['ec2cert'] = fetched_cert
        if self.args.get('ec2cert'):
            self.args['ec2cert'] = os.path.expanduser(
                os.path.expandvars(self.args['ec2cert']))
            _assert_is_file(self.args['ec2cert'], 'cloud certificate')
        if not self.args.get('ec2cert'):
            raise ArgumentError(
                'missing cloud certificate; please supply one with '
                '--ec2cert or use --bootstrap-url and access keys to '
                'fetch one automatically')
        self.log.debug('cloud certificate: %s', self.args['ec2cert'])

    def configure_bundle_output(self):
        """Validate the destination and normalize the image input (path,
        stdin, or file object) into an open file plus prefix and size.
        """
        if (self.args.get('destination') and
                os.path.exists(self.args['destination']) and
                not os.path.isdir(self.args['destination'])):
            raise ArgumentError("argument -d/--destination: '{0}' is not a "
                                "directory".format(self.args['destination']))
        if self.args['image'] == '-':
            # Duplicate stdin's fd so the original stream stays usable.
            self.args['image'] = os.fdopen(os.dup(sys.stdin.fileno()))
            if not self.args.get('prefix'):
                raise ArgumentError(
                    'argument --prefix is required when bundling stdin')
            if not self.args.get('image_size'):
                raise ArgumentError(
                    'argument --image-size is required when bundling stdin')
        elif isinstance(self.args['image'], six.string_types):
            if not self.args.get('prefix'):
                self.args['prefix'] = os.path.basename(self.args['image'])
            if not self.args.get('image_size'):
                self.args['image_size'] = euca2ools.util.get_filesize(
                    self.args['image'])
            self.args['image'] = open(self.args['image'])
        else:
            # Assume it is already a file object
            if not self.args.get('prefix'):
                raise ArgumentError('argument --prefix is required when '
                                    'bundling a file object')
            if not self.args.get('image_size'):
                raise ArgumentError('argument --image-size is required when '
                                    'bundling a file object')
        if self.args['image_size'] > EC2_BUNDLE_SIZE_LIMIT:
            self.log.warn(
                'image is incompatible with EC2 due to its size (%i > %i)',
                self.args['image_size'], EC2_BUNDLE_SIZE_LIMIT)

    def configure_bundle_properties(self):
        """Infer the image type from legacy --kernel/--ramdisk "true"
        values and reject option combinations that conflict with
        kernel/ramdisk image types.
        """
        if self.args.get('kernel') == 'true':
            self.args['image_type'] = 'kernel'
        if self.args.get('ramdisk') == 'true':
            self.args['image_type'] = 'ramdisk'
        if self.args['image_type'] == 'kernel':
            if self.args.get('kernel') and self.args['kernel'] != 'true':
                raise ArgumentError("argument --kernel: not compatible "
                                    "with image type 'kernel'")
            if self.args.get('ramdisk'):
                raise ArgumentError("argument --ramdisk: not compatible "
                                    "with image type 'kernel'")
            if self.args.get('block_device_mappings'):
                raise ArgumentError("argument -B/--block-device-mappings: "
                                    "not compatible with image type "
                                    "'kernel'")
        if self.args['image_type'] == 'ramdisk':
            if self.args.get('kernel'):
                raise ArgumentError("argument --kernel: not compatible "
                                    "with image type 'ramdisk'")
            if self.args.get('ramdisk') and self.args['ramdisk'] != 'true':
                raise ArgumentError("argument --ramdisk: not compatible "
                                    "with image type 'ramdisk'")
            if self.args.get('block_device_mappings'):
                raise ArgumentError("argument -B/--block-device-mappings: "
                                    "not compatible with image type "
                                    "'ramdisk'")

    def generate_encryption_keys(self):
        """Fill in enc_key and enc_iv as 32-digit hex strings, generating
        random 128-bit values where none were supplied.
        """
        srand = random.SystemRandom()
        if self.args.get('enc_key'):
            self.log.info('using preexisting encryption key')
            enc_key_i = self.args['enc_key']
        else:
            enc_key_i = srand.getrandbits(128)
        if self.args.get('enc_iv'):
            self.log.info('using preexisting encryption IV')
            enc_iv_i = self.args['enc_iv']
        else:
            enc_iv_i = srand.getrandbits(128)
        self.args['enc_key'] = '{0:0>32x}'.format(enc_key_i)
        self.args['enc_iv'] = '{0:0>32x}'.format(enc_iv_i)

    def __get_bundle_certificate(self, bootstrap_service, bootstrap_auth):
        """Fetch the cloud's image-bundling certificate from the
        bootstrap service and write it to a temp file that is removed at
        exit.  Returns the temp file's name, or None if no matching
        certificate was offered.
        """
        self.log.info('attempting to obtain cloud certificate from '
                      'bootstrap service')
        req = DescribeServiceCertificates(
            config=self.config, loglevel=self.log.level,
            service=bootstrap_service, auth=bootstrap_auth,
            Format='pem', FingerprintDigest='SHA-256')
        response = req.main()
        for cert in response.get('serviceCertificates') or []:
            if (cert.get('certificateUsage') == 'image-bundling' and
                    cert.get('serviceType') == 'compute'):
                cert_file = tempfile.NamedTemporaryFile(delete=False)
                cert_file.write(cert['certificate'])
                cert_file.file.flush()
                self.args['ec2cert'] = cert_file.name
                atexit.register(os.remove, cert_file.name)
                return cert_file.name

    # MANIFEST GENERATION METHODS #

    def build_manifest(self, digest, partinfo):
        """Build a BundleManifest from the configured args plus the
        bundled image's digest and part info.
        """
        manifest = euca2ools.bundle.manifest.BundleManifest(
            loglevel=self.log.level)
        manifest.image_arch = self.args['arch']
        manifest.kernel_id = self.args.get('kernel')
        manifest.ramdisk_id = self.args.get('ramdisk')
        if self.args.get('block_device_mappings'):
            manifest.block_device_mappings.update(
                self.args['block_device_mappings'])
        if self.args.get('productcodes'):
            manifest.product_codes.extend(self.args['productcodes'])
        manifest.image_name = self.args['prefix']
        manifest.account_id = self.args['user']
        manifest.image_type = self.args['image_type']
        manifest.image_digest = digest
        manifest.image_digest_algorithm = 'SHA1'  # shouldn't be hardcoded here
        manifest.image_size = self.args['image_size']
        manifest.bundled_image_size = sum(part.size for part in partinfo)
        manifest.enc_key = self.args['enc_key']
        manifest.enc_iv = self.args['enc_iv']
        manifest.enc_algorithm = 'AES-128-CBC'  # shouldn't be hardcoded here
        manifest.image_parts = partinfo
        return manifest

    def dump_manifest_to_file(self, manifest, filename, pretty_print=False):
        with open(filename, 'w') as manifest_file:
            manifest_file.write(self.dump_manifest_to_str(
                manifest, pretty_print=pretty_print))

    def dump_manifest_to_str(self, manifest, pretty_print=False):
        return manifest.dump_to_str(self.args['privatekey'],
                                    self.args['cert'],
                                    self.args['ec2cert'],
                                    pretty_print=pretty_print)