def test_create_instance_from_image(host):
    """End-to-end check: boot from image, snapshot it, boot from the snapshot, clean up."""
    flavor = 'm1.tiny'
    network = 'PRIVATE_NET'
    image_name = 'Cirros-0.3.5'
    instance_name = 'test_instance_01'
    new_instance_name = 'test_instance_02'
    snapshot_name = 'test_snapshot_02'

    def _boot_payload(server, source):
        # Shared request shape consumed by utils.create_instance.
        return {
            "instance_name": server,
            "from_source": 'image',
            "source_name": source,
            "flavor": flavor,
            "network_name": network,
        }

    data_image = _boot_payload(instance_name, image_name)
    data_snapshot = _boot_payload(new_instance_name, snapshot_name)

    # Boot the first instance from the base image and wait for ACTIVE.
    utils.create_instance(data_image, host)
    utils.verify_asset_in_list('server', instance_name, host)
    utils.get_expected_status('server', instance_name, "ACTIVE", host)

    # Stop it so a consistent snapshot can be taken.
    utils.stop_server_instance(instance_name, host)
    utils.get_expected_status('server', instance_name, "SHUTOFF", host)

    # Snapshot the stopped instance and wait until the image is usable.
    utils.create_snapshot_from_instance(snapshot_name, instance_name, host)
    utils.get_expected_status('image', snapshot_name, 'active', host)

    # Boot a second instance from the snapshot and verify it comes up.
    utils.create_instance(data_snapshot, host)
    utils.get_expected_status('server', new_instance_name, "ACTIVE", host)

    # Tear down everything created by this test.
    utils.delete_it('server', instance_name, host)
    utils.stop_server_instance(new_instance_name, host)
    utils.delete_it('server', new_instance_name, host)
    utils.delete_it('image', snapshot_name, host)
def create_dataloader(name, dataset, sampler=None, collate=None, **kwargs):
    """Instantiate the dataloader class *name*, forwarding an optional collate function."""
    extra = dict(kwargs)
    if collate:
        # Only forward collate_fn when a truthy collate was supplied so the
        # target class falls back to its own default otherwise.
        extra['collate_fn'] = collate
    return create_instance(name, globals(), dataset=dataset, sampler=sampler,
                           **extra)
def __init__(self, arch_config, warper_config, pred_score_threshold,
             nms_iou_threshold, card_area_threshold, weight_path, image_size,
             cuda):
    """Build the card-extraction pipeline: a card warper plus a detection model.

    Args:
        arch_config: config spec for the detection model (utils.create_instance).
        warper_config: config spec for the card warper.
        pred_score_threshold: minimum prediction score to keep a detection.
        nms_iou_threshold: IoU threshold used during non-maximum suppression.
        card_area_threshold: minimum area for a detected card to be accepted.
        weight_path: path to the model state_dict (resolved via utils.abs_path).
        image_size: input size the model expects.  # presumably int or (H, W) — confirm
        cuda: if truthy run the model on GPU, otherwise CPU.
    """
    super(CardExtractor, self).__init__()
    self.image_size = image_size
    self.nms_iou_threshold = nms_iou_threshold
    self.card_area_threshold = card_area_threshold
    self.pred_score_threshold = pred_score_threshold
    self.device = torch.device('cuda' if cuda else 'cpu')
    self.card_warper = utils.create_instance(warper_config)
    self.model = utils.create_instance(arch_config)
    # Load weights onto CPU first, then move the whole model to the target device.
    self.model.load_state_dict(
        torch.load(utils.abs_path(weight_path), map_location='cpu'))
    self.model.to(self.device)
    # Inference-only usage: freeze dropout / batch-norm behavior.
    self.model.eval()
def test_container_firewall(self):
    """Only ports opened in the firewall section should be reachable.

    Spins an EC2 host with two containers (ports 80 and 8080) but a firewall
    allowing only 8080: port 80 must be unreachable while 8080 serves its
    MESSAGE.
    """
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  containers:
    test80:
      image: andreasjansson/hello
      ports:
        - 80
      environment:
        PORT: 80
        MESSAGE: foo
    test8080:
      image: andreasjansson/hello
      ports:
        - 8080
      environment:
        PORT: 8080
        MESSAGE: bar
  firewall:
    8080: "*"
''' % {'name': self.name}
    self.instance = create_instance(config)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(self.instance.call_port(80), None)
    self.assertEqual(self.instance.call_port(8080), "bar")
    self.instance.terminate()
def test_container_dependencies(self):
    """A container may reference another container's fields via ${host.containers...}."""
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  containers:
    test1:
      image: andreasjansson/hello
      ports:
        - 80
      environment:
        PORT: 80
        MESSAGE: foo
    test2:
      image: andreasjansson/hello
      ports:
        - 8080
      environment:
        PORT: 8080
        MESSAGE: ${host.containers.test1.ip}
''' % {'name': self.name}
    self.instance = create_instance(config)
    test1_ip = self.instance.containers['test1'].fields['ip']
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(self.instance.call_port(8080), test1_ip)
    self.assertEqual(check_changes(config), {})
def text_to_pdf(text, font, size, out_file):
    """Render *text* into a PDF file.

    Args:
        text (str): content to write.
        font (str): font family name.
        size (int): font size.
        out_file (str): destination path for the PDF.

    Returns:
        bool: True when the PDF was saved successfully.
    """
    document = utl.create_instance()
    document = utl.add_new_page(document)
    document = utl.set_font_size(document, font, size)
    document = utl.page_config(text, document)
    return utl.save_pdf(out_file, document)
def save_data(data, company, model, unique, name_cleaner, value_cleaner,
              exclude):
    """Persist rows of imported data as *model* instances.

    Returns a dict with:
        dups: rows skipped as duplicates (decoded for JSON serialization),
        saved: deconstructed field dicts of successfully saved instances,
        key_errors: bad column names (aborts the whole file),
        value_errors: (exception name, row index, message) per failed save.
    """
    # Identity defaults; 'is None' instead of '== None' per PEP 8.
    if name_cleaner is None:
        name_cleaner = lambda name: name
    if value_cleaner is None:
        value_cleaner = lambda name, value: value
    if unique is None:
        unique = lambda instance: instance
    model = apps.get_model(*model.split('.'))
    # The field-name set is a property of the model — compute it once instead
    # of per saved row.
    field_names = set(f.name for f in model._meta.fields)
    dups = []
    saved = []
    value_errors = []
    for index, row in enumerate(data):
        dirty = dirty_key(row, model, name_cleaner=name_cleaner,
                          value_cleaner=value_cleaner)
        if dirty:
            # A wrong key makes the whole file fail anyway: bail out now.
            return dict(dups=dups, saved=saved, key_errors=dirty,
                        value_errors=value_errors)
        unique_instance = create_instance(row, model,
                                          name_cleaner=name_cleaner,
                                          value_cleaner=value_cleaner,
                                          unique=unique, company=company,
                                          exclude=exclude)
        if unique_instance:
            try:
                unique_instance.save()
            except Exception as e:
                # Best-effort import: record the failure and keep going.
                message = getattr(e, 'messages', [row])[0]
                value_errors.append((e.__class__.__name__, index, message))
                continue
            # Deconstruct the model instance for later JSON serialization.
            saved.append({k: v for k, v in unique_instance.__dict__.items()
                          if k in field_names})
        else:
            # NOTE(review): iteritems()/decode() are Python 2 idioms — confirm
            # this module still targets Python 2 before porting.
            decoded = dict((k.decode('utf-8', 'ignore'),
                            v.decode('utf-8', 'ignore'))
                           for k, v in row.iteritems())
            dups.append(decoded)
    return dict(dups=dups, saved=saved, key_errors=[],
                value_errors=value_errors)
def create_sampler(name, dataset, kwargs):
    """Instantiate the sampler *name*, eval-ing config value expressions first.

    Each value in *kwargs* is evaluated as a Python expression with the local
    names (e.g. ``dataset``) visible, so configs can reference them; values
    that fail name resolution are passed through unchanged.
    """
    loc = locals()
    nargs = {}
    for key, expr in kwargs.items():
        try:
            # SECURITY: eval on config-supplied strings — only safe with
            # trusted configuration files.
            nargs[key] = eval(str(expr), globals(), loc)
        except NameError:
            # Plain literal values (numbers, strings) stay as-is.
            nargs[key] = expr
    return create_instance(name, globals(), **nargs)
def test_modify_container(self):
    """Changed container config is detected by check_changes and applied by make_changes."""
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  containers:
    test:
      image: andreasjansson/hello
      ports:
        - 80
      environment:
        PORT: 80
        MESSAGE: foo
''' % {'name': self.name}
    self.instance = create_instance(config)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(check_changes(config), {})

    # Change an environment variable: the container must be flagged as changing.
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  containers:
    test:
      image: andreasjansson/hello
      ports:
        - 80
      environment:
        PORT: 80
        MESSAGE: bar
''' % {'name': self.name}
    self.assertEqual(
        list(check_changes(config)['changing_containers'])[0].host.name,
        self.name)
    make_changes(config)
    self.assertEqual(check_changes(config), {})

    # Add a port: again detected, applied, then reported clean.
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  containers:
    test:
      image: andreasjansson/hello
      ports:
        - 80
        - 81
      environment:
        PORT: 80
        MESSAGE: bar
''' % {'name': self.name}
    self.assertEqual(
        list(check_changes(config)['changing_containers'])[0].host.name,
        self.name)
    make_changes(config)
    self.assertEqual(check_changes(config), {})
def __init__(self, mode=None, config_path=None, *args, **kwargs):
    """Pick a processor implementation from a YAML config.

    Args:
        mode: key into the config selecting which processor to build; when
            None a default Processor() is used and the config is ignored.
        config_path: path to the YAML config; defaults to 'config.yaml'
            sitting beside the subclass's source file.
            # NOTE(review): a caller-supplied value must already support
            # .exists() (Path-like); a plain str would fail — confirm.
        *args, **kwargs: forwarded to the configured processor class.

    Raises:
        FileNotFoundError: mode was given but the config file does not exist.
    """
    if config_path is None:
        # Default config lives next to the module defining self's class.
        config_path = utils.Path(inspect.getfile(
            self.__class__)).with_name('config.yaml')
    if mode is None:
        self.processor = Processor()
    elif config_path.exists():
        config = utils.load_yaml(config_path)
        self.processor = utils.create_instance(config[mode], *args, **kwargs)
    else:
        raise FileNotFoundError('{} not found.'.format(config_path))
def test_ec2(self):
    """An EC2 instance comes up pingable with SSH open, and stops responding after terminate."""
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
''' % {'name': self.name}
    instance = create_instance(config)
    self.instance = instance
    self.assertTrue(instance.is_pingable())
    self.assertTrue(instance.has_open_port(22))
    instance.terminate()
    # Give the provider time to actually tear the instance down.
    time.sleep(20)
    self.assertFalse(instance.is_pingable())
def test_digitalocean(self):
    """A DigitalOcean droplet comes up pingable with SSH open, and dies on terminate."""
    config = '''
%(name)s:
  provider: digitalocean
  image: Ubuntu 14.04 x64
  size: 512MB
''' % {'name': self.name}
    instance = create_instance(config)
    self.instance = instance
    self.assertTrue(instance.is_pingable())
    self.assertTrue(instance.has_open_port(22))
    instance.terminate()
    # Allow time for the droplet to actually go away.
    time.sleep(20)
    self.assertFalse(instance.is_pingable())
def test_instance_firewall(self):
    """Instance-level firewall: only whitelisted ports are reachable."""
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  firewall:
    8080: "*"
''' % {'name': self.name}
    instance = create_instance(config)
    self.instance = instance
    # Listen on both ports; only 8080 is allowed through the firewall.
    instance.netcat_listen(80)
    instance.netcat_listen(8080)
    self.assertTrue(instance.is_pingable())
    self.assertFalse(instance.has_open_port(80))
    self.assertTrue(instance.has_open_port(8080))
    instance.terminate()
def test_modify_server(self):
    """Changing the instance size is detected and applied."""
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
''' % {'name': self.name}
    self.instance = create_instance(config)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(check_changes(config), {})
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.medium
''' % {'name': self.name}
    self.assertEqual(
        list(check_changes(config)['changing_servers'])[0].name, self.name)
    make_changes(config)
    self.assertEqual(check_changes(config), {})
def test_modify_firewall(self):
    """Changing a firewall rule's source CIDR is detected and applied."""
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  firewall:
    12345: "*"
''' % {'name': self.name}
    self.instance = create_instance(config)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(check_changes(config), {})
    config = '''
%(name)s:
  provider: ec2
  image: ami-a427efcc
  size: m1.small
  firewall:
    12345: 1.1.1.1
''' % {'name': self.name}
    self.assertEqual(
        list(check_changes(config)['changing_firewalls'])[0].host.name,
        self.name)
    make_changes(config)
    self.assertEqual(check_changes(config), {})
def save_data(data, company, model, unique, name_cleaner, value_cleaner,
              exclude):
    """Persist rows of imported data as *model* instances.

    Returns a dict with:
        dups: rows skipped as duplicates (decoded for JSON serialization),
        saved: deconstructed field dicts of successfully saved instances,
        key_errors: bad column names (aborts the whole file),
        value_errors: (exception name, row index, message) per failed save.
    """
    # Identity defaults; 'is None' instead of '== None' per PEP 8.
    if name_cleaner is None:
        name_cleaner = lambda name: name
    if value_cleaner is None:
        value_cleaner = lambda name, value: value
    if unique is None:
        unique = lambda instance: instance
    model = apps.get_model(*model.split('.'))
    # The field-name set is a property of the model — compute it once instead
    # of per saved row.
    field_names = set(f.name for f in model._meta.fields)
    dups = []
    saved = []
    value_errors = []
    for index, row in enumerate(data):
        dirty = dirty_key(row, model, name_cleaner=name_cleaner,
                          value_cleaner=value_cleaner)
        if dirty:
            # A wrong key makes the whole file fail anyway: bail out now.
            return dict(dups=dups, saved=saved, key_errors=dirty,
                        value_errors=value_errors)
        unique_instance = create_instance(row, model,
                                          name_cleaner=name_cleaner,
                                          value_cleaner=value_cleaner,
                                          unique=unique, company=company,
                                          exclude=exclude)
        if unique_instance:
            try:
                unique_instance.save()
            except Exception as e:
                # Best-effort import: record the failure and keep going.
                message = getattr(e, 'messages', [row])[0]
                value_errors.append((e.__class__.__name__, index, message))
                continue
            # Deconstruct the model instance for later JSON serialization.
            saved.append({k: v for k, v in unique_instance.__dict__.items()
                          if k in field_names})
        else:
            # NOTE(review): iteritems()/decode() are Python 2 idioms — confirm
            # this module still targets Python 2 before porting.
            decoded = dict((k.decode('utf-8', 'ignore'),
                            v.decode('utf-8', 'ignore'))
                           for k, v in row.iteritems())
            dups.append(decoded)
    return dict(dups=dups, saved=saved, key_errors=[],
                value_errors=value_errors)
def __init__(self, *args, **kwargs):
    """Validate EC parameters and load the backend EC driver.

    Recognized kwargs:
        k: number of data fragments (positive int).
        m: number of parity fragments (positive int).
        ec_type: erasure-code backend name (flat_xor_hd_3/4 collapse to flat_xor_hd).
        chksum_type: fragment-header checksum type.
        library_import_str: dotted path of the driver class to instantiate
            (default: pyeclib.core.ECPyECLibDriver).

    Raises:
        ECDriverError: on invalid parameters or when the loaded driver is
            missing required methods.
    """
    self.k = -1
    self.m = -1
    self.ec_type = None
    self.chksum_type = None
    for (key, value) in kwargs.items():
        if key == "k":
            try:
                self.k = positive_int_value(value)
            except ValueError:
                raise ECDriverError("Invalid number of data fragments (k)")
        elif key == "m":
            try:
                self.m = positive_int_value(value)
            except ValueError:
                # Fixed copy-paste: m is the parity fragment count, not data.
                raise ECDriverError("Invalid number of parity fragments (m)")
        elif key == "ec_type":
            # The two flat_xor_hd variants share one backend implementation.
            if value in ["flat_xor_hd_3", "flat_xor_hd_4"]:
                value = "flat_xor_hd"
            if PyECLib_EC_Types.has_enum(value):
                self.ec_type = PyECLib_EC_Types.get_by_name(value)
            else:
                raise ECDriverError(
                    "%s is not a valid EC type for PyECLib!" % value)
        elif key == "chksum_type":
            if PyECLib_FRAGHDRCHKSUM_Types.has_enum(value):
                self.chksum_type = PyECLib_FRAGHDRCHKSUM_Types.get_by_name(value)
            else:
                raise ECDriverError(
                    "%s is not a valid checksum type for PyECLib!" % value)
    self.library_import_str = kwargs.pop("library_import_str",
                                         "pyeclib.core.ECPyECLibDriver")
    # Instantiate the EC backend driver.
    self.ec_lib_reference = create_instance(
        self.library_import_str,
        k=self.k,
        m=self.m,
        ec_type=self.ec_type,
        chksum_type=self.chksum_type)
    # Verify that the imported library implements the required interface.
    # (The old dir() scan also inserted every callable attribute into the
    # tracking dict; a direct membership check avoids that.)
    required_methods = [
        "decode",
        "encode",
        "reconstruct",
        "fragments_needed",
        "min_parity_fragments_needed",
        "get_metadata",
        "verify_stripe_metadata",
        "get_segment_info",
    ]
    missing = [name for name in required_methods
               if not callable(getattr(self.ec_lib_reference, name, None))]
    if missing:
        raise ECDriverError(
            "The following required methods are not implemented "
            "in %s: %s" % (self.library_import_str,
                           " ".join(missing) + " "))
def __init__(self, *args, **kwargs):
    """Validate EC parameters and load the backend EC driver.

    Recognized kwargs:
        k: number of data fragments (positive int).
        m: number of parity fragments (positive int).
        ec_type: erasure-code backend name (flat_xor_hd_3/4 collapse to flat_xor_hd).
        chksum_type: fragment-header checksum type.
        library_import_str: dotted path of the driver class to instantiate
            (default: pyeclib.core.ECPyECLibDriver).

    Raises:
        ECDriverError: on invalid parameters or when the loaded driver is
            missing required methods.
    """
    self.k = -1
    self.m = -1
    self.ec_type = None
    self.chksum_type = None
    for (key, value) in kwargs.items():
        if key == "k":
            try:
                self.k = positive_int_value(value)
            except ValueError:
                raise ECDriverError(
                    "Invalid number of data fragments (k)")
        elif key == "m":
            try:
                self.m = positive_int_value(value)
            except ValueError:
                # Fixed copy-paste: m is the parity fragment count, not data.
                raise ECDriverError(
                    "Invalid number of parity fragments (m)")
        elif key == "ec_type":
            # The two flat_xor_hd variants share one backend implementation.
            if value in ["flat_xor_hd_3", "flat_xor_hd_4"]:
                value = "flat_xor_hd"
            if PyECLib_EC_Types.has_enum(value):
                self.ec_type = \
                    PyECLib_EC_Types.get_by_name(value)
            else:
                raise ECDriverError(
                    "%s is not a valid EC type for PyECLib!" % value)
        elif key == "chksum_type":
            if PyECLib_FRAGHDRCHKSUM_Types.has_enum(value):
                self.chksum_type = \
                    PyECLib_FRAGHDRCHKSUM_Types.get_by_name(value)
            else:
                raise ECDriverError(
                    "%s is not a valid checksum type for PyECLib!" % value)
    self.library_import_str = kwargs.pop('library_import_str',
                                         'pyeclib.core.ECPyECLibDriver')
    # Instantiate the EC backend driver.
    self.ec_lib_reference = create_instance(
        self.library_import_str,
        k=self.k,
        m=self.m,
        ec_type=self.ec_type,
        chksum_type=self.chksum_type)
    # Verify that the imported library implements the required interface.
    # (The old dir() scan also inserted every callable attribute into the
    # tracking dict; a direct membership check avoids that.)
    required_methods = [
        'decode',
        'encode',
        'reconstruct',
        'fragments_needed',
        'min_parity_fragments_needed',
        'get_metadata',
        'verify_stripe_metadata',
        'get_segment_info',
    ]
    missing = [name for name in required_methods
               if not callable(getattr(self.ec_lib_reference, name, None))]
    if missing:
        raise ECDriverError(
            "The following required methods are not implemented "
            "in %s: %s" % (self.library_import_str,
                           " ".join(missing) + " "))
def create_loss(loss_name, **kwargs):
    """Look up the loss class *loss_name* in this module and instantiate it."""
    loss = create_instance(loss_name, globals(), **kwargs)
    return loss
def create_lr_schedule(name, optimizer, **kwargs):
    """Instantiate the learning-rate scheduler *name* bound to *optimizer*."""
    schedule = create_instance(name, globals(), optimizer=optimizer, **kwargs)
    return schedule
help='glob pattern if image_path is a dir.') parser.add_argument('--starting-file', default=1) args = parser.parse_args() if args.pattern: image_paths = natsorted(Path(args.input_dir).glob(args.pattern), key=lambda x: x.stem) else: image_paths = [Path(args.input_dir)] output_dir = Path(args.output_dir) if args.output_dir else Path('output') if not output_dir.exists(): output_dir.mkdir(parents=True) config = utils.load_yaml('config.yaml') card_extractor = utils.create_instance(config['card_extraction']) print('[INFO] mode: ', config['card_extraction']['CardExtraction']['mode'], '\n') if not output_dir.exists(): output_dir.mkdir(parents=True) for idx, image_path in enumerate(image_paths[int(args.starting_file) - 1:], int(args.starting_file)): print(f'[{idx} / {len(image_paths)}] image name: {image_path.name}') card_infos, = module_time(card_extractor, 'Card Extraction', cv2.imread(str(image_path))) if not len(card_infos): print(image_path.name)
def create_optimizer(name, parameters, **kwargs):
    """Instantiate the optimizer *name* over the given model parameters."""
    optimizer = create_instance(name, globals(), parameters, **kwargs)
    return optimizer
def create_dataset(name, **kwargs):
    """Look up the dataset class *name* in this module and instantiate it."""
    dataset = create_instance(name, globals(), **kwargs)
    return dataset
def spin_saltmaster(subnet_id, key_user, op_system=DEFAULT_OS,
                    aws_id=AWS_ID, aws_key=AWS_KEY, region=REGION):
    """
    Spin a saltmaster instance in the target subnet
    :param subnet_id: Target subnet
    :param key_user: A string to identify the PEM key you want to use
    :param op_system: The OS you want to install Saltmaster
    :param aws_id: Amazon Access Key ID
    :param aws_key: Amazon Secret Access Key
    :param region: Target VPC region
    :return: Nothing
    """
    # NOTE: defaults were written as 'None or DEFAULT', which evaluates to the
    # constant at definition time anyway; spelled directly for clarity.
    vpc_conn = boto.vpc.connect_to_region(region_name=region,
                                          aws_access_key_id=aws_id,
                                          aws_secret_access_key=aws_key)
    subnet = vpc_conn.get_all_subnets(subnet_ids=[subnet_id])[0]
    vpc = vpc_conn.get_all_vpcs(vpc_ids=subnet.vpc_id)[0]
    # Check if there is a proper Security Group already
    ec2_conn = boto.ec2.connect_to_region(region_name=region,
                                          aws_access_key_id=aws_id,
                                          aws_secret_access_key=aws_key)
    security_groups = ec2_conn.get_all_security_groups()
    saltmaster_security_group = [x for x in security_groups
                                 if 'Saltmaster_SG' in x.name]
    if not saltmaster_security_group:
        print('Creating Saltmaster Security Group...')
        saltmaster_security_group = ec2_conn.create_security_group(
            'Saltmaster_SG', 'Saltmaster Security Group', vpc_id=subnet.vpc_id)
        saltmaster_security_group.add_tag('Name', 'Saltmaster Security Group')
        # Salt master<->minion ports and ICMP inside the VPC, SSH from anywhere.
        saltmaster_security_group.authorize(ip_protocol='tcp', from_port=4505,
                                            to_port=4506,
                                            cidr_ip=vpc.cidr_block)
        saltmaster_security_group.authorize(ip_protocol='icmp', from_port=-1,
                                            to_port=-1, cidr_ip=vpc.cidr_block)
        saltmaster_security_group.authorize(ip_protocol='tcp', from_port=22,
                                            to_port=22, cidr_ip='0.0.0.0/0')
        print('Done')
    else:
        saltmaster_security_group = saltmaster_security_group[0]
        print('Saltmaster Security Group already exists: {}'.format(
            saltmaster_security_group.id))
    # Check how many Saltmaster instances already running
    saltmaster_reservations = ec2_conn.get_all_instances(
        filters={'tag:Name': 'saltmaster*', 'instance-state-name': 'running'})
    if not saltmaster_reservations:
        saltmaster_name = ('saltmaster.' + subnet.availability_zone + '.' +
                           DEFAULT_INTERNAL_DOMAIN)
        print('New Saltmaster instance name: {}'.format(saltmaster_name))
        print('New Saltmaster OS: {}'.format(op_system))
        saltmaster_key = [x for x in ec2_conn.get_all_key_pairs()
                          if key_user in x.name][0]
        # Run the instance
        saltmaster_instance = create_instance(
            ec2_conn=ec2_conn, name=saltmaster_name,
            image_id=AMI_LIST[op_system]['regions'][region],
            key_name=saltmaster_key.name, type_id='t2.micro',
            subnet_id=subnet.id,
            security_group_ids=saltmaster_security_group.id)
    else:
        saltmaster_instance = saltmaster_reservations[0].instances[0]
        print('Saltmaster instance {} already running'.format(
            saltmaster_instance.id))
    # Look for the appropriate NAT instance public ip
    nat_instance = find_subnet_nat_instance(subnet_id=subnet_id,
                                            ec2_conn=ec2_conn,
                                            vpc_conn=vpc_conn)
    print('Public IP to connect to: {}'.format(nat_instance.ip_address))
    conn_key = os.path.join(DEFAULT_SSH_DIR,
                            saltmaster_instance.key_name + '.pem')
    salt_script_folder = os.path.abspath(
        os.path.join(os.path.abspath(os.curdir), os.pardir, 'saltstack'))
    # BUG FIX: joining with a leading '/' discards salt_script_folder
    # (os.path.join('a', '/b') == '/b'); use a relative component instead.
    bootstrap_script = os.path.join(salt_script_folder, 'bootstrap_salt.sh')
    username = AMI_USER[op_system]
    with settings(gateway='ec2-user@' + nat_instance.ip_address,
                  host_string=username + '@' +
                  saltmaster_instance.private_ip_address,
                  user=username, key_filename=conn_key, forward_agent=True):
        # 0o700: Python 3-compatible octal literal (was the py2-only 0700).
        put(bootstrap_script, mode=0o700)
        run('./bootstrap_salt.sh master')
        run('service iptables stop')
def spin_instance(instance_tag, env_tag, subnet_id, key_name, security_group,
                  op_system='CentOS', instance_type='t2.micro',
                  internal_domain=DEFAULT_INTERNAL_DOMAIN,
                  aws_id=AWS_ID, aws_key=AWS_KEY, region=REGION):
    """
    Spin a generic instance and register it in the internal DNS zone.

    :param instance_tag: role tag used as hostname prefix (e.g. 'web')
    :param env_tag: environment tag (e.g. 'prd')
    :param subnet_id: target subnet id
    :param key_name: substring identifying the EC2 key pair to use
    :param security_group: base name of the security group to use or create
    :param op_system: OS key into AMI_LIST / AMI_USER
    :param instance_type: EC2 instance type
    :param internal_domain: internal DNS domain for the instance record
    :param aws_id: Amazon Access Key ID
    :param aws_key: Amazon Secret Access Key
    :param region: target VPC region
    :return: Nothing
    """
    vpc_conn = boto.vpc.connect_to_region(region_name=region,
                                          aws_access_key_id=aws_id,
                                          aws_secret_access_key=aws_key)
    ec2_conn = boto.ec2.connect_to_region(region_name=region,
                                          aws_access_key_id=aws_id,
                                          aws_secret_access_key=aws_key)
    subnet = vpc_conn.get_all_subnets(subnet_ids=[subnet_id])[0]
    vpc = vpc_conn.get_all_vpcs(vpc_ids=subnet.vpc_id)[0]
    print("Ready to spin a new {} instance".format(op_system))
    security_groups = ec2_conn.get_all_security_groups(
        filters={'description': security_group.upper() + '*'})
    if not security_groups:
        print('You specified a security group that not exists, I will create it')
        # Create a new security group based on the tag
        instance_security_group = ec2_conn.create_security_group(
            security_group.upper() + '_SG',
            security_group.upper() + ' Security Group',
            vpc_id=subnet.vpc_id)
        instance_security_group.add_tag(
            'Name', security_group.upper() + ' Security Group')
        instance_security_group.authorize(ip_protocol='icmp', from_port=-1,
                                          to_port=-1, cidr_ip=vpc.cidr_block)
        instance_security_group.authorize(ip_protocol='tcp', from_port=22,
                                          to_port=22, cidr_ip='0.0.0.0/0')
        if 'WEB' in instance_tag.upper():
            instance_security_group.authorize(ip_protocol='tcp', from_port=80,
                                              to_port=80, cidr_ip='0.0.0.0/0')
            instance_security_group.authorize(ip_protocol='tcp', from_port=443,
                                              to_port=443, cidr_ip='0.0.0.0/0')
        if 'MTA' in instance_tag.upper():
            # BUG FIX: protocol was misspelled 'tpc' for the submission port.
            instance_security_group.authorize(ip_protocol='tcp', from_port=587,
                                              to_port=587, cidr_ip='0.0.0.0/0')
            instance_security_group.authorize(ip_protocol='tcp', from_port=993,
                                              to_port=993, cidr_ip='0.0.0.0/0')
            instance_security_group.authorize(ip_protocol='tcp', from_port=995,
                                              to_port=995, cidr_ip='0.0.0.0/0')
        # BUG FIX: the format string had two placeholders but only one
        # argument, raising IndexError; pass both the name and the id.
        print('Security group {} [{}] created'.format(
            instance_security_group.name, instance_security_group.id))
    else:
        # Use the security group
        if len(security_groups) > 1:
            print(red('Error, there is more than one security group based on '
                      'your choice. Be more specific'))
            for sg in security_groups:
                print('\t{} ({})'.format(sg.description, sg.id))
            sys.exit(1)
        else:
            instance_security_group = security_groups[0]
            print("Security group {} selected".format(
                instance_security_group.id))
    keys = [k for k in ec2_conn.get_all_key_pairs() if key_name in k.name]
    if not keys:
        print(red('Error, there is no key with the string {}. '
                  'Be more specific'.format(key_name)))
        sys.exit(1)
    elif len(keys) > 1:
        # BUG FIX: was '> 2', which silently accepted two ambiguous matches
        # while the error message promises to reject "more than one".
        print(red('Error, there is more than one key based on your choice. '
                  'Be more specific'))
        for k in keys:
            print('\t{}'.format(k.name))
        sys.exit(1)
    else:
        instance_key = keys[0]
        print('Key {} selected'.format(instance_key.name))
    # How many instance of this type already running?
    instances = ec2_conn.get_all_instances(
        filters={'tag:Name': instance_tag + '*'})
    # Instance name: web.prd.001.eu-west-1a.example.com
    # BUG FIX: use the internal_domain parameter (was hard-wired to
    # DEFAULT_INTERNAL_DOMAIN, so the Route53 record below broke whenever a
    # custom domain was passed).
    instance_name = (instance_tag + '.' + env_tag + '.' +
                     str(len(instances) + 1).zfill(3) + '.' +
                     subnet.availability_zone + '.' + internal_domain)
    print('Creating instance {}'.format(instance_name))
    instance = create_instance(ec2_conn=ec2_conn, name=instance_name,
                               image_id=AMI_LIST[op_system]['regions'][region],
                               key_name=instance_key.name,
                               type_id=instance_type, subnet_id=subnet.id,
                               security_group_ids=instance_security_group.id)
    # Check if the subnet is Public or Private
    if 'Private' in subnet.tags['Name']:
        print("Instance in private subnet with IP {}".format(
            instance.private_ip_address))
    elif 'Public' in subnet.tags['Name']:
        elastic_ips = ec2_conn.get_all_addresses()
        # BUG FIX: at the default account limit of 5 EIPs, '> 5' let the
        # allocation proceed and fail; '>= 5' means none left to allocate.
        if len(elastic_ips) >= 5:
            print(red("You don't have any Elastic IP available"))
            print("Your public instance is without a public IP")
        else:
            new_ip = ec2_conn.allocate_address()
            ec2_conn.associate_address(instance_id=instance.id,
                                       public_ip=new_ip.public_ip)
            time.sleep(3)
            instance.update()
            print(green("Instance {} [{}] accessible at {}".format(
                instance_name, instance.id, instance.ip_address)))
    # Add the DNS entry
    route53_conn = boto.route53.connect_to_region(region_name=region,
                                                  aws_access_key_id=aws_id,
                                                  aws_secret_access_key=aws_key)
    zone_id = get_zone_id(route53_conn=route53_conn,
                          domain_name=internal_domain)
    if not zone_id:
        print(red("Error, can't find the domain {}".format(internal_domain)))
        print("Instance spinnded successfully but the DNS record creation failed")
        sys.exit(1)
    else:
        zone_changes = boto.route53.record.ResourceRecordSets(route53_conn,
                                                              zone_id)
        a_record = zone_changes.add_change(action='CREATE', name=instance_name,
                                           type='A')
        a_record.add_value(instance.private_ip_address)
        result = zone_changes.commit()
        result.update()
        while 'PENDING' in result.update():
            print("Propagating DNS record...")
            time.sleep(5)
    instance_ssh_user = find_ssh_user(instance_id=instance.id,
                                      ec2_conn=ec2_conn)
    # os.path.join instead of bare concatenation — consistent with the rest of
    # the module and correct whether DEFAULT_SSH_DIR ends in a separator or not.
    instance_ssh_key = os.path.join(DEFAULT_SSH_DIR,
                                    instance.key_name + '.pem')
    # Find the NAT parameters
    nat_instance = find_subnet_nat_instance(subnet_id=instance.subnet_id,
                                            ec2_conn=ec2_conn,
                                            vpc_conn=vpc_conn)
    if not nat_instance:
        print(red('Error, NAT instance for instance {} not found'.format(
            instance.id)))
        sys.exit(1)
    else:
        nat_ssh_user = find_ssh_user(instance_id=nat_instance.id,
                                     ec2_conn=ec2_conn)
    # Update hostname
    if 'Debian' in op_system:
        # NOTE(review): gateway lacks a 'user@' prefix and 'user' is the NAT's
        # ssh user while host_string embeds the instance user — this looks
        # inverted compared with spin_saltmaster; confirm before relying on it.
        with settings(gateway=nat_instance.ip_address,
                      host_string=instance_ssh_user + '@' +
                      instance.private_ip_address,
                      user=nat_ssh_user, key_filename=instance_ssh_key,
                      forward_agent=True, warn_only=True):
            sudo('echo ' + instance_name + ' > /etc/hostname')
            sudo('echo ' + instance.private_ip_address + ' ' + instance_name +
                 ' >> /etc/hosts')
    print(green("Instance {} spinned!".format(instance.id)))
def create_model(arch, **kwargs):
    """Instantiate the model architecture *arch* defined in this module."""
    return create_instance(arch, globals(), **kwargs)