def _info_command(args): _display_package_info() info = get_package_info() conf = get_drift_config() name = info['name'] print "Config info:" d = conf.table_store.get_table('deployable-names').find( {'deployable_name': name}) if d: print "\t{deployable_name}".format( **d[0]).ljust(25), "{display_name}".format(**d[0]).ljust(25) else: print "\t(not registered. run 'deployable register' command to register it.)" deployables = conf.table_store.get_table('deployables') d = deployables.find({'deployable_name': name}) if d: deps = deployables.find({ 'deployable_name': d[0]['deployable_name'], 'is_active': True }) tiers = ", ".join(dep['tier_name'] for dep in deps) if tiers: print "\tActive on tiers:".ljust(25), tiers
def _find_latest_ami(service_name, release=None): name = get_app_name() tier_name = get_tier_name() conf = get_drift_config(tier_name=tier_name, deployable_name=name) domain = conf.domain.get() aws_region = conf.tier['aws']['region'] ec2 = boto3.resource('ec2', region_name=aws_region) filters = [ { 'Name': 'tag:service-name', 'Values': [name] }, { 'Name': 'tag:domain-name', 'Values': [domain['domain_name']] }, ] if release: filters.append({'Name': 'tag:git-release', 'Values': [release]}, ) amis = list(ec2.images.filter(Owners=['self'], Filters=filters)) if not amis: criteria = {d['Name']: d['Values'][0] for d in filters} print "No '{}' AMI found using the search criteria {}.".format( UBUNTU_BASE_IMAGE_NAME, criteria) sys.exit(1) ami = max(amis, key=operator.attrgetter("creation_date")) return ami
def _copy_image(ami_id): conf = get_drift_config() domain = conf.domain.get() aws_region = domain['aws']['ami_baking_region'] # Grab the source AMI source_ami = boto3.resource('ec2', region_name=aws_region).Image(ami_id) # Create a list of all regions that are active active_tiers = conf.table_store.get_table('tiers').find( {'state': 'active'}) regions = set( [tier['aws']['region'] for tier in active_tiers if 'aws' in tier]) if aws_region in regions: regions.remove(aws_region) # This is the source region print "Distributing {} to region(s) {}.".format(source_ami.id, ', '.join(regions)) jobs = [] for region_id in regions: ec2_client = boto3.client('ec2', region_name=region_id) ret = ec2_client.copy_image( SourceRegion=aws_region, SourceImageId=source_ami.id, Name=source_ami.name or "", Description=source_ami.description or "", ) job = { 'id': ret['ImageId'], 'region_id': region_id, 'client': ec2_client, } jobs.append(job) # Wait on jobs and copy tags for job in jobs: ami = boto3.resource('ec2', region_name=job['region_id']).Image(job['id']) print "Waiting on {}...".format(ami.id) ami.wait_until_exists(Filters=[{ 'Name': 'state', 'Values': ['available'] }]) if ami.state != 'available': continue print "AMI {id} in {region_id} is available. Copying tags...".format( **job) job['client'].create_tags(Resources=[job['id']], Tags=source_ami.tags) print "All done."
def xxxxcreate_command(args):
    """Create/recreate a tenant, or just report on it when no action is given.

    NOTE(review): the 'xxxx' prefix suggests this command is parked/disabled —
    confirm before wiring it back up.
    """
    tenant_name = args.tenant
    if not tenant_name:
        # No tenant specified: list them all instead.
        tenants_report()
        return

    os.environ['DRIFT_DEFAULT_TENANT'] = tenant_name

    # Minor hack:
    from drift.flaskfactory import load_flask_config

    try:
        conf = get_drift_config(
            tier_name=get_tier_name(),
            tenant_name=tenant_name,
            drift_app=load_flask_config(),
        )
    except TenantNotConfigured as e:
        # Let the caller handle "tenant not configured" explicitly.
        raise
    except Exception as e:
        print Fore.RED + "'tenant {}' command failed: {}".format(
            args.action, e)
        return

    if not args.action:
        # No action given: just report on this tenant.
        tenant_report(conf)
        return

    if args.action in ['create', 'recreate']:
        # Provision resources
        with TSTransaction() as ts:
            conf = get_config(ts=ts)
            resources = conf.drift_app.get("resources")
            for module_name in resources:
                # Each resource module may expose a 'provision' hook.
                m = importlib.import_module(module_name)
                if hasattr(m, "provision"):
                    provisioner_name = m.__name__.split('.')[-1]
                    print "Provisioning '%s' for tenant '%s' on tier '%s'" % (
                        provisioner_name, tenant_name, conf.tier['tier_name'])
                    if 0:  # THIS IS BONKERS LOGIC! FIIIIX!
                        conf.tier['resource_defaults'].append({
                            'resource_name': provisioner_name,
                            'parameters': getattr(m, 'NEW_TIER_DEFAULTS', {}),
                        })
                    # 'recreate' forces re-provisioning; otherwise existing resources are skipped.
                    recreate = 'recreate' if args.action == 'recreate' else 'skip'
                    m.provision(conf, {}, recreate=recreate)

            # Mark the tenant active once provisioning succeeds.
            row = ts.get_table('tenants').get(conf.tenant)
            row['state'] = 'active'

    tenant_report(conf)
def _get_info():
    """
    Return package info using command line arguments if set, otherwise
    assume we are in a package location.
    """
    return {
        'name': get_app_name(),
        'version': get_app_version(),
        'conf': get_drift_config(),
    }
def _list_command(args): conf = get_drift_config() deployables = conf.table_store.get_table('deployables') print "Deployables:" for d in conf.table_store.get_table('deployable-names').find(): deps = deployables.find({ 'deployable_name': d['deployable_name'], 'is_active': True }) tiers = ", ".join(dep['tier_name'] for dep in deps) if tiers: tiers = "active on " + tiers print "\t{deployable_name}".format( **d).ljust(25), "{display_name}".format(**d).ljust(25), tiers
def before_request(self):
    """Resolve the drift config for the incoming request and stash it on flask.g.

    Aborts with 404 when the tenant (derived from the hostname) is not
    configured for this tier/deployable.
    """
    try:
        conf = get_drift_config(
            ts=current_app.extensions['driftconfig'].table_store,
            tenant_name=tenant_from_hostname,
            tier_name=get_tier_name(),
            deployable_name=current_app.config['name'])
    except TenantNotConfigured as e:
        # Unknown tenant -> plain 404 with the original message.
        abort(httplib.NOT_FOUND, description=str(e))

    if 0:  # Disabling this as it needs to be refactored into a JIT like feature. This simply blocks everything.
        if conf.tenant and conf.tenant[
                'state'] != 'active' and request.endpoint != "admin.adminprovisionapi":
            raise TenantNotFoundError(
                "Tenant '{}' for tier '{}' and deployable '{}' is not active, but in state '{}'."
                .format(conf.tenant['tenant_name'], get_tier_name(),
                        current_app.config['name'], conf.tenant['state']))

    # Add applicable config tables to 'g'
    g.conf = conf
def setUpClass(cls):
    """Build a small test config domain and cache commonly used names on the class."""
    ts = create_test_domain({
        'num_org': 5,
        'num_tiers': 2,
        'num_deployables': 4,
        'num_products': 2,
        'num_tenants': 2,
    })
    cls.ts = ts

    # Convenience handles for the first tier/product and its tenants.
    cls.tier_name = ts.get_table('tiers').find()[0]['tier_name']
    cls.product_name = ts.get_table('products').find()[0]['product_name']
    tenant_rows = ts.get_table('tenant-names').find(
        {'product_name': cls.product_name})
    cls.tenant_name_1 = tenant_rows[0]['tenant_name']
    cls.tenant_name_2 = tenant_rows[1]['tenant_name']
    cls.deployable_1 = ts.get_table('deployables').find(
        {'tier_name': cls.tier_name})[0]['deployable_name']

    cls.conf = get_drift_config(
        ts=ts,
        tenant_name=cls.tenant_name_1,
        tier_name=cls.tier_name,
        deployable_name=cls.deployable_1,
    )
    cls._add_rules()
def post(self):
    """Provision all configured resources for the current tenant and mark it 'active'.

    Only tenants in state 'initializing' may be provisioned (400 otherwise).
    The updated config is saved both to the origin backend and to the local
    file cache, and the in-process config is refreshed afterwards.
    """
    # NOTE(review): subscripting 'tenant_name' looks odd — presumably
    # g.conf.tenant_name is a row/dict here; confirm against driftconfig.
    tenant_name = g.conf.tenant_name['tenant_name']
    tier_name = g.conf.tier['tier_name']

    # quick check for tenant state before downloading config
    if g.conf.tenant["state"] != "initializing":
        abort(
            httplib.BAD_REQUEST,
            message=
            "You can only provision tenants which are in state 'initializing'. Tenant '%s' is in state '%s'"
            % (tenant_name, g.conf.tenant["state"]))

    # Optional per-provisioner arguments from the request body, shaped as
    # [{"provisioner": <name>, "arguments": {...}}, ...].
    args_per_provisioner = {}
    if request.json:
        for arg in request.json.get("provisioners", {}):
            if "provisioner" not in arg or "arguments" not in arg:
                log.warning(
                    "Provisioner argument missing 'provisioner' or 'arguments'"
                )
                continue
            args_per_provisioner[arg["provisioner"]] = arg["arguments"]

    # Re-fetch a fresh table store straight from the origin so we don't act
    # on a stale local copy.
    origin = g.conf.domain['origin']
    ts = get_store_from_url(origin)
    conf = get_drift_config(ts=ts,
                            tenant_name=tenant_name,
                            tier_name=tier_name,
                            deployable_name=current_app.config['name'])

    if conf.tenant["state"] != "initializing":
        # State changed between the quick check above and the origin fetch.
        raise RuntimeError("Tenant unexpectedly found in state '%s': %s" %
                           (conf.tenant["state"], conf.tenant))

    # Run the 'provision' hook of every resource module configured for this app.
    resources = current_app.config.get("resources")
    for module_name in resources:
        m = importlib.import_module(module_name)
        if hasattr(m, "provision"):
            provisioner_name = m.__name__.split('.')[-1]
            log.info("Provisioning '%s' for tenant '%s' on tier '%s'",
                     provisioner_name, tenant_name, tier_name)
            args = args_per_provisioner.get(provisioner_name, {})
            m.provision(conf, args)

    # Mark the tenant as ready
    conf.tenant["state"] = "active"

    # Save out config
    log.info("Saving config to %s", origin)
    origin_backend = create_backend(origin)
    origin_backend.save_table_store(ts)

    local_origin = 'file://~/.drift/config/' + g.conf.domain['domain_name']
    log.info("Saving config to %s", local_origin)
    local_store = create_backend(local_origin)
    local_store.save_table_store(ts)

    # invalidate flask config
    current_app.extensions['driftconfig'].refresh()

    return "OK"
def _run_command(args):
    """Launch this deployable on its tier, either via an autoscaling group or a single EC2 instance.

    Builds the instance user-data (env vars + ami-run.sh), resolves the AMI,
    subnets, security group and SSH key, then either creates/updates an
    autoscaling group + launch configuration, or launches one EC2 instance
    directly and tags it.
    """
    # Always autoscale!
    # NOTE(review): because autoscale is forced True here, passing --launch
    # always trips the error below — confirm this is intended.
    args.autoscale = True

    if args.launch and args.autoscale:
        print "Error: Can't use --launch and --autoscale together."
        sys.exit(1)

    name = get_app_name()
    tier_name = get_tier_name()
    conf = get_drift_config(tier_name=tier_name,
                            deployable_name=name,
                            drift_app=load_flask_config())
    aws_region = conf.tier['aws']['region']

    print "AWS REGION:", aws_region
    print "DOMAIN:\n", json.dumps(conf.domain.get(), indent=4)
    print "DEPLOYABLE:\n", json.dumps(conf.deployable, indent=4)

    # boto (v2) for instance launch / IAM, boto3 for everything else.
    ec2_conn = boto.ec2.connect_to_region(aws_region)
    iam_conn = boto.iam.connect_to_region(aws_region)

    if conf.tier['is_live']:
        print "NOTE! This tier is marked as LIVE. Special restrictions may apply. Use --force to override."

    # Autoscaling defaults, overridable from the deployable's config.
    # NOTE(review): this dict is always truthy (it always has defaults), so
    # the '--launch ... and autoscaling' guard below always fires — confirm.
    autoscaling = {
        "min": 1,
        "max": 1,
        "desired": 1,
        "instance_type": args.instance_type,
    }
    autoscaling.update(conf.deployable.get('autoscaling', {}))
    release = conf.deployable.get('release', '')

    if args.launch and autoscaling and not args.force:
        print "--launch specified, but tier config specifies 'use_autoscaling'. Use --force to override."
        sys.exit(1)
    if args.autoscale and not autoscaling and not args.force:
        print "--autoscale specified, but tier config doesn't specify 'use_autoscaling'. Use --force to override."
        sys.exit(1)

    print "Launch an instance of '{}' on tier '{}'".format(name, tier_name)
    if release:
        print "Using AMI with release tag: ", release
    else:
        print "Using the newest AMI baked (which may not be what you expect)."

    ami = _find_latest_ami(name, release)
    print "Latest AMI:", ami

    # An explicitly specified AMI overrides the latest-baked one,
    # with a safety check on live tiers.
    if args.ami:
        print "Using a specified AMI:", args.ami
        ec2 = boto3.resource('ec2', region_name=aws_region)
        if ami.id != args.ami:
            print "AMI found is different from AMI specified on command line."
            if conf.tier['is_live'] and not args.force:
                print "This is a live tier. Can't run mismatched AMI unless --force is specified"
                sys.exit(1)
        try:
            ami = ec2.Image(args.ami)
        except Exception as e:
            raise RuntimeError("Ami '%s' not found or broken: %s" %
                               (args.ami, e))

    if not ami:
        sys.exit(1)

    ami_info = dict(
        ami_id=ami.id,
        ami_name=ami.name,
        ami_created=ami.creation_date,
        ami_tags={d['Key']: d['Value'] for d in ami.tags},
    )
    print "AMI Info:\n", pretty(ami_info)

    if autoscaling:
        print "Autoscaling group:\n", pretty(autoscaling)
    else:
        print "EC2:"
        print "\tInstance Type:\t{}".format(args.instance_type)

    ec2 = boto3.resource('ec2', region_name=aws_region)

    # Get all 'private' subnets
    filters = {'tag:tier': tier_name, 'tag:realm': 'private'}
    subnets = list(ec2.subnets.filter(Filters=filterize(filters)))
    if not subnets:
        print "Error: No subnet available matching filter", filters
        sys.exit(1)

    print "Subnets:"
    for subnet in subnets:
        print "\t{} - {}".format(fold_tags(subnet.tags)['Name'], subnet.id)

    # Get the "one size fits all" security group
    filters = {
        'tag:tier': tier_name,
        'tag:Name': '{}-private-sg'.format(tier_name)
    }
    security_group = list(
        ec2.security_groups.filter(Filters=filterize(filters)))[0]
    print "Security Group:\n\t{} [{} {}]".format(
        fold_tags(security_group.tags)["Name"], security_group.id,
        security_group.vpc_id)

    # The key pair name for SSH
    key_name = conf.tier['aws']['ssh_key']
    if "." in key_name:
        key_name = key_name.split(
            ".", 1)[0]  # TODO: Distinguish between key name and .pem key file name
    print "SSH Key:\t", key_name

    '''
    autoscaling group:
    Name            LIVENORTH-themachines-backend-auto
    api-port        10080
    api-target      themachines-backend
    service-name    themachines-backend
    service-type    rest-api
    tier            LIVENORTH

    ec2:
    Name            DEVNORTH-drift-base
    launched-by     nonnib
    api-port        10080
    api-target      drift-base
    service-name    drift-base
    service-type    rest-api
    tier            DEVNORTH
    '''
    target_name = "{}-{}".format(tier_name, name)
    if autoscaling:
        target_name += "-auto"

    # To auto-generate Redis cache url, we create the Redis backend using our config,
    # and then ask for a url representation of it:
    drift_config_url = get_redis_cache_backend(conf.table_store,
                                               tier_name).get_url()

    # Specify the app
    app_root = '/etc/opt/{service_name}'.format(service_name=name)

    # Tags applied to the autoscaling group / instance; api-router uses some
    # of these for routing.
    tags = {
        "Name": target_name,
        "tier": tier_name,
        "service-name": name,
        "service-type": conf.drift_app.get('service_type', 'web-app'),
        "config-url": drift_config_url,
        "app-root": app_root,
        "launched-by": iam_conn.get_user().user_name,
    }

    if tags['service-type'] == 'web-app':
        # Make instance part of api-router round-robin load balancing
        tags.update({
            "api-target": name,
            "api-port": str(conf.drift_app.get('PORT', 10080)),
            "api-status": "online",
        })

    tags.update(fold_tags(ami.tags))

    print "Tags:"
    for k in sorted(tags.keys()):
        print "  %s: %s" % (k, tags[k])

    # The instance bootstrap: exported env vars followed by ami-run.sh.
    user_data = '''#!/bin/bash
# Environment variables set by drift-admin run command:
export DRIFT_CONFIG_URL={drift_config_url}
export DRIFT_TIER={tier_name}
export DRIFT_APP_ROOT={app_root}
export DRIFT_SERVICE={service_name}
export AWS_REGION={aws_region}

# Shell script from ami-run.sh:
'''.format(drift_config_url=drift_config_url,
           tier_name=tier_name,
           app_root=app_root,
           service_name=name,
           aws_region=aws_region)

    user_data += pkg_resources.resource_string(__name__, "ami-run.sh")

    # Append the app's own ami-run.sh if it ships one.
    custom_script_name = os.path.join(conf.drift_app['app_root'], 'scripts',
                                      'ami-run.sh')
    if os.path.exists(custom_script_name):
        print "Using custom shell script", custom_script_name
        user_data += "\n# Custom shell script from {}\n".format(
            custom_script_name)
        user_data += open(custom_script_name, 'r').read()
    else:
        print "Note: No custom ami-run.sh script found for this application."

    print "user_data:"
    from drift.utils import pretty as poo
    print poo(user_data, 'bash')

    if args.preview:
        print "--preview specified, exiting now before actually doing anything."
        sys.exit(0)

    if autoscaling:
        client = boto3.client('autoscaling', region_name=aws_region)
        # Launch config names must be unique; timestamp + release tag, with
        # ':' replaced as it's not allowed in the name.
        launch_config_name = '{}-{}-launchconfig-{}-{}'.format(
            tier_name, name, datetime.utcnow(), release)
        launch_config_name = launch_config_name.replace(':', '.')

        kwargs = dict(
            LaunchConfigurationName=launch_config_name,
            ImageId=ami.id,
            KeyName=key_name,
            SecurityGroups=[security_group.id],
            InstanceType=autoscaling['instance_type'] or args.instance_type,
            IamInstanceProfile=IAM_ROLE,
            InstanceMonitoring={'Enabled': True},
            UserData=user_data,
        )
        print "Creating launch configuration using params:\n", pretty(kwargs)
        client.create_launch_configuration(**kwargs)

        # Update current autoscaling group or create a new one if it doesn't exist.
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[target_name])

        kwargs = dict(
            AutoScalingGroupName=target_name,
            LaunchConfigurationName=launch_config_name,
            MinSize=autoscaling['min'],
            MaxSize=autoscaling['max'],
            DesiredCapacity=autoscaling['desired'],
            VPCZoneIdentifier=','.join([subnet.id for subnet in subnets]),
        )

        if not groups['AutoScalingGroups']:
            print "Creating a new autoscaling group using params:\n", pretty(
                kwargs)
            client.create_auto_scaling_group(**kwargs)
        else:
            print "Updating current autoscaling group", target_name
            client.update_auto_scaling_group(**kwargs)

        # Prepare tags which get propagated to all new instances
        tagsarg = [{
            'ResourceId': tags['Name'],
            'ResourceType': 'auto-scaling-group',
            'Key': k,
            'Value': v,
            'PropagateAtLaunch': True,
        } for k, v in tags.items()]
        print "Updating tags on autoscaling group that get propagated to all new instances."
        client.create_or_update_tags(Tags=tagsarg)

        # Define a 2 min termination cooldown so api-router can drain the connections.
        response = client.put_lifecycle_hook(
            LifecycleHookName='Wait-2-minutes-on-termination',
            AutoScalingGroupName=target_name,
            LifecycleTransition='autoscaling:EC2_INSTANCE_TERMINATING',
            HeartbeatTimeout=120,
            DefaultResult='CONTINUE')
        print "Configuring lifecycle hook, response:", response.get(
            'ResponseMetadata')

        print "Done!"
        print "YOU MUST TERMINATE THE OLD EC2 INSTANCES YOURSELF!"
    else:
        # Pick a random subnet from list of available subnets
        subnet = random.choice(subnets)
        print "Randomly picked this subnet to use: ", subnet

        print "Launching EC2 instance..."
        reservation = ec2_conn.run_instances(
            ami.id,
            instance_type=args.instance_type,
            subnet_id=subnet.id,
            security_group_ids=[security_group.id],
            key_name=key_name,
            instance_profile_name=IAM_ROLE,
            user_data=user_data,
        )

        if len(reservation.instances) == 0:
            print "No instances in reservation!"
            sys.exit(1)

        instance = reservation.instances[0]
        print "{} starting up...".format(instance)

        # Check up on its status every so often
        status = instance.update()
        while status == 'pending':
            time.sleep(10)
            status = instance.update()

        if status == 'running':
            # Tag only once the instance is actually running.
            for k, v in tags.items():
                instance.add_tag(k, v)
            print "{} running at {}".format(instance,
                                            instance.private_ip_address)
            slackbot.post_message(
                "Started up AMI '{}' for '{}' on tier '{}' with ip '{}'".
                format(ami.id, name, tier_name, instance.private_ip_address))
        else:
            print "Instance was not created correctly"
            sys.exit(1)
def _bake_command(args): if args.ubuntu: name = UBUNTU_BASE_IMAGE_NAME else: name = get_app_name() name = get_app_name() tier_name = get_tier_name() conf = get_drift_config(tier_name=tier_name, deployable_name=name, drift_app=load_flask_config()) domain = conf.domain.get() aws_region = domain['aws']['ami_baking_region'] ec2 = boto3.resource('ec2', region_name=aws_region) print "DOMAIN:\n", json.dumps(domain, indent=4) if not args.ubuntu: print "DEPLOYABLE:", name print "AWS REGION:", aws_region # Create a list of all regions that are active if args.ubuntu: # Get all Ubuntu images from the appropriate region and pick the most recent one. # The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS. print "Finding the latest AMI on AWS that matches", UBUNTU_RELEASE filters = [ { 'Name': 'name', 'Values': [UBUNTU_RELEASE] }, ] amis = list( ec2.images.filter(Owners=[AMI_OWNER_CANONICAL], Filters=filters)) if not amis: print "No AMI found matching '{}'. Not sure what to do now.".format( UBUNTU_RELEASE) sys.exit(1) ami = max(amis, key=operator.attrgetter("creation_date")) else: filters = [ { 'Name': 'tag:service-name', 'Values': [UBUNTU_BASE_IMAGE_NAME] }, { 'Name': 'tag:domain-name', 'Values': [domain['domain_name']] }, ] amis = list(ec2.images.filter(Owners=['self'], Filters=filters)) if not amis: criteria = {d['Name']: d['Values'][0] for d in filters} print "No '{}' AMI found using the search criteria {}.".format( UBUNTU_BASE_IMAGE_NAME, criteria) print "Bake one using this command: {} ami bake --ubuntu".format( sys.argv[0]) sys.exit(1) ami = max(amis, key=operator.attrgetter("creation_date")) print "Using source AMI:" print "\tID:\t", ami.id print "\tName:\t", ami.name print "\tDate:\t", ami.creation_date if args.ubuntu: manifest = None packer_vars = { 'setup_script': pkg_resources.resource_filename(__name__, "ubuntu-packer.sh"), 'ubuntu_release': UBUNTU_RELEASE, } else: current_branch = get_branch() if not args.tag: args.tag = current_branch print 
"Using branch/tag", args.tag # Wrap git branch modification in RAII. checkout(args.tag) try: setup_script = "" setup_script_custom = "" with open( pkg_resources.resource_filename(__name__, "driftapp-packer.sh"), 'r') as f: setup_script = f.read() custom_script_name = os.path.join(conf.drift_app['app_root'], 'scripts', 'ami-bake.sh') if os.path.exists(custom_script_name): print "Using custom bake shell script", custom_script_name setup_script_custom = "echo Executing custom bake shell script from {}\n".format( custom_script_name) setup_script_custom += open(custom_script_name, 'r').read() setup_script_custom += "\necho Custom bake shell script completed\n" else: print "Note: No custom ami-bake.sh script found for this application." # custom setup needs to happen first because we might be installing some requirements for the regular setup setup_script = setup_script_custom + setup_script tf = tempfile.NamedTemporaryFile(delete=False) tf.write(setup_script) tf.close() setup_script_filename = tf.name manifest = create_deployment_manifest('ami', comment=None) packer_vars = { 'version': get_app_version(), 'setup_script': setup_script_filename, } if not args.preview: cmd = ['python', 'setup.py', 'sdist', '--formats=zip'] ret = subprocess.call(cmd) if ret != 0: print "Failed to execute build command:", cmd sys.exit(ret) cmd = ["zip", "-r", "dist/aws.zip", "aws"] ret = subprocess.call(cmd) if ret != 0: print "Failed to execute build command:", cmd sys.exit(ret) finally: print "Reverting to ", current_branch checkout(current_branch) user = boto.iam.connect_to_region( aws_region).get_user() # The current IAM user running this command packer_vars.update({ "service": name, "region": aws_region, "source_ami": ami.id, "user_name": user.user_name, "domain_name": domain['domain_name'], }) print "Packer variables:\n", pretty(packer_vars) # See if Packer is installed and generate sensible error code if something is off. 
# This will also write the Packer version to the terminal which is useful info. try: subprocess.call(['packer', 'version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) except Exception as e: print "Error:", e print "'packer version' command failed. Please install it if it's missing." sys.exit(127) cmd = "packer build " if args.debug: cmd += "-debug " cmd += "-only=amazon-ebs " for k, v in packer_vars.iteritems(): cmd += "-var {}=\"{}\" ".format(k, v) # Use generic packer script if project doesn't specify one pkg_resources.cleanup_resources() if args.ubuntu: scriptfile = pkg_resources.resource_filename(__name__, "ubuntu-packer.json") cmd += scriptfile elif os.path.exists("config/packer.json"): cmd += "config/packer.json" else: scriptfile = pkg_resources.resource_filename(__name__, "driftapp-packer.json") cmd += scriptfile print "Baking AMI with: {}".format(cmd) if args.preview: print "Not building or packaging because --preview is on. Exiting now." return start_time = time.time() try: # Execute Packer command and parse the output to find the ami id. p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: line = p.stdout.readline() print line, if line == '' and p.poll() is not None: break # The last lines from the packer execution look like this: # ==> Builds finished. 
The artifacts of successful builds are: # --> amazon-ebs: AMIs were created: # # eu-west-1: ami-0ee5eb68 if 'ami-' in line: ami_id = line[line.rfind('ami-'):].strip() ami = ec2.Image(ami_id) print "" print "AMI ID: %s" % ami.id print "" finally: pkg_resources.cleanup_resources() if p.returncode != 0: print "Failed to execute packer command:", cmd sys.exit(p.returncode) duration = time.time() - start_time if manifest: print "Adding manifest tags to AMI:" pretty(manifest) prefix = "drift:manifest:" tags = [] for k, v in manifest.iteritems(): tag_name = "{}{}".format(prefix, k) tags.append({'Key': tag_name, 'Value': v or ''}) ami.create_tags(DryRun=False, Tags=tags) if not args.skipcopy: _copy_image(ami.id) print "Done after %.0f seconds" % (duration) slackbot.post_message( "Successfully baked a new AMI for '{}' in %.0f seconds".format( name, duration))
def _register_command(args): info = get_package_info() conf = get_drift_config() name = info['name'] is_active = not args.inactive print "Registering/updating deployable: ", name _display_package_info() if not is_active: print "Marking the deployable as inactive!" with TSTransaction(commit_to_origin=not args.preview) as ts: # Insert or update name row = {'deployable_name': name, 'display_name': info['description']} if 'long-description' in info and info['long-description'] != "UNKNOWN": row['description'] = info['long-cdescription'] ts.get_table('deployable-names').update(row) # Make deployable (in)active on all tiers deployables = ts.get_table('deployables') for tier in ts.get_table('tiers').find(): row = { 'tier_name': tier['tier_name'], 'deployable_name': name, 'is_active': is_active } deployables.update(row) # Now let's do some api-router specific stuff which is by no means my concern! if name == 'drift-base': api = 'drift' elif name == 'themachines-backend': api = 'themachines' elif name == 'themachines-admin': api = 'admin' elif name == 'kaleo-web': api = 'kaleo' elif name == 'kards-backend': api = 'kards' else: api = name row = { 'tier_name': tier['tier_name'], 'deployable_name': name, 'api': api } ts.get_table('routing').update(row) # Now let's do some drift-base specific stuff which is by no means my concern! 
# Generate RSA key pairs from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import serialization from cryptography.hazmat.backends import default_backend private_key = rsa.generate_private_key(public_exponent=65537, key_size=1024, backend=default_backend()) public_key = private_key.public_key() private_pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()) public_pem = public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo) now = datetime.datetime.utcnow() row = { 'tier_name': tier['tier_name'], 'deployable_name': name, 'keys': [{ 'issued': now.isoformat() + "Z", 'expires': (now + datetime.timedelta(days=365)).isoformat() + "Z", 'public_key': public_pem, 'private_key': private_pem, }] } ts.get_table('public-keys').update(row) if args.preview: print "Preview changes only, not committing to origin." # Display the diff _diff_ts(ts, get_default_drift_config())