def test_base_node_image(self):
    # Smoke test: building a NodeImage with minimal (even non-string)
    # id/name values must not raise.
    driver = FakeDriver()
    NodeImage(id=0, name=0, driver=driver)
def _to_image(self, image):
    # Map a provider image record (distro_code / distro_description)
    # onto a libcloud NodeImage.
    image_id = image['distro_code']
    label = image['distro_description']
    return NodeImage(id=image_id, name=label, driver=self.connection.driver)
def _to_image(self, img):
    """Convert a raw image dict into a libcloud NodeImage."""
    driver = self.connection.driver
    return NodeImage(
        id=img['id'],
        name=img['name'],
        driver=driver,
    )
def create(self, name=None, image=None, size=None, location=None,
           timeout=360, **kwargs):
    """
    Creates a named node on the current cloud.

    :param name: the name of the node
    :param image: the image used; defaults to
                  self.spec["default"]['flavor'] when not given
    :param size: the size (flavor) of the node; defaults to
                 self.spec["default"]['size'] when not given
    :param location: accepted for interface compatibility;
                     currently unused by this method
    :param timeout: a timeout in seconds intended for the case that the
                    image does not boot. The default is 360 s (6 minutes).
                    NOTE(review): the value is not currently consumed
                    anywhere in this method — confirm intended use.
    :param kwargs: additional arguments passed along at time of boot
    :return: the created node converted via update_dict (first element)
    """
    if image is None:
        image = self.spec["default"]['flavor']
    if size is None:
        size = self.spec["default"]['size']
    database = CmDatabase()
    image_use = None
    flavor_use = None
    # Resolve the image: from the local database cache for the generic
    # libcloud clouds, or directly from the provider for Azure ARM.
    if self.cloudtype in ["openstack", "aws", "google"]:
        image_dict = database.find(
            collection='{}-image'.format(self.cloudtype), name=image)[0]
        image_use = NodeImage(id=image_dict['id'],
                              name=image_dict['name'],
                              driver=self.driver)
    elif self.cloudtype == 'azure_arm':
        image_use = self.cloudman.get_image(image)
    # Resolve the flavor/size from the local database cache.
    flavor_dict = database.find(
        collection='{}-flavor'.format(self.cloudtype), name=size)[0]
    flavor_use = NodeSize(id=flavor_dict['id'],
                          name=flavor_dict['name'],
                          ram=flavor_dict['ram'],
                          disk=flavor_dict['disk'],
                          bandwidth=flavor_dict['bandwidth'],
                          price=flavor_dict['price'],
                          driver=self.driver)
    if self.cloudtype == "openstack":
        if "ex_security_groups" in kwargs:
            secgroupsobj = []
            # this gives existing secgroups in obj form
            secgroups = self.list_secgroups(raw=True)
            for secgroup in kwargs["ex_security_groups"]:
                for _secgroup in secgroups:
                    if _secgroup.name == secgroup:
                        secgroupsobj.append(_secgroup)
            # now secgroup name is converted to object which
            # is required by the libcloud api call
            kwargs["ex_security_groups"] = secgroupsobj
    if self.cloudtype in ["openstack", "aws"]:
        node = self.cloudman.create_node(name=name, image=image_use,
                                         size=flavor_use, **kwargs)
    elif self.cloudtype == 'azure_arm':
        auth = None
        if "sshpubkey" in kwargs:
            auth = NodeAuthSSHKey(kwargs["sshpubkey"])
        pubip = self.cloudman.ex_create_public_ip(
            name='{nodename}-ip'.format(nodename=name),
            resource_group=kwargs["resource_group"])
        networks = self.cloudman.ex_list_networks()
        network_use = None
        for network in networks:
            if network.name == kwargs["network"]:
                network_use = network
                break
        subnets = self.cloudman.ex_list_subnets(network_use)
        subnet_use = None
        for subnet in subnets:
            if subnet.name == kwargs["subnet"]:
                subnet_use = subnet
                break
        nic_use = self.cloudman.ex_create_network_interface(
            name='{nodename}-nic'.format(nodename=name),
            subnet=subnet_use,
            resource_group=kwargs["resource_group"],
            public_ip=pubip)
        node = self.cloudman.create_node(
            name=name,
            image=image_use,
            size=flavor_use,
            auth=auth,
            # the following three were created in azure portal
            ex_resource_group=kwargs["resource_group"],
            # for storage account, use the default v2 setting
            ex_storage_account=kwargs["storage_account"],
            # under the storage account, blobs services,
            # create 'vhds' container
            ex_blob_container=kwargs["blob_container"],
            ex_nic=nic_use)
    elif self.cloudtype == 'google':
        location_use = self.spec["credentials"]["datacenter"]
        metadata = {"items": [{"value": self.user + ":" + self.key_val,
                               "key": "ssh-keys"}]}
        node = self.cloudman.create_node(name=name,
                                         image=image_use,
                                         size=flavor_use,
                                         location=location_use,
                                         ex_metadata=metadata,
                                         **kwargs)
    else:
        sys.exit("this cloud is not yet supported")
    return self.update_dict(node, kind='node')[0]
def list_images(self, ex_project=None):
    """Fake driver: always report one placeholder image."""
    fake = NodeImage("fake_image_id", "fake_image_id", self)
    return [fake]
def _image_from_id(self, image_id=None):
    # Build a stub NodeImage that carries only the given id; name and
    # driver are deliberately left as empty strings.
    return NodeImage(id=image_id, name="", driver="")
def launch(self, inf, radl, requested_radl, num_vm, auth_data):
    """
    Launch ``num_vm`` VMs from the image referenced in the RADL.

    :param inf: infrastructure object the VMs belong to
    :param radl: deployed RADL document (systems[0] is used)
    :param requested_radl: RADL as originally requested by the user
    :param num_vm: number of VMs to create
    :param auth_data: credentials used to obtain the libcloud driver
    :return: list of (success, VirtualMachine-or-error-message) tuples
    """
    driver = self.get_driver(auth_data)
    system = radl.systems[0]
    image_id = self.get_image_id(system.getValue("disk.0.image.url"))
    image = NodeImage(id=image_id, name=None, driver=driver)
    instance_type = self.get_instance_type(driver.list_sizes(), system)

    name = system.getValue("instance_name")
    if not name:
        name = system.getValue("disk.0.image.name")
    if not name:
        name = "userimage"

    args = {'size': instance_type,
            'image': image,
            'name': "%s-%s" % (name, int(time.time() * 100))}

    keypair = None
    # BUGFIX: previously keypair_name was only assigned inside one branch,
    # so `vm.keypair = keypair_name` below raised NameError whenever a
    # public key was supplied or keypairs were not used at all.
    keypair_name = None
    public_key = system.getValue("disk.0.os.credentials.public_key")

    if self.driver_uses_keypair(driver):
        if public_key:
            keypair = driver.get_key_pair(public_key)
            if keypair:
                system.setUserKeyCredentials(
                    system.getCredentials().username, None,
                    keypair.private_key)
            else:
                if "ssh_key" in driver.features.get("create_node", []):
                    args["auth"] = NodeAuthSSHKey(public_key)
                else:
                    # BUGFIX: keypair is None on this path, so the
                    # original `keypair.name` raised AttributeError;
                    # use the name the lookup was performed with.
                    args["ex_keyname"] = public_key
        elif not system.getValue("disk.0.os.credentials.password"):
            keypair_name = "im-%d" % int(time.time() * 100.0)
            keypair = driver.create_key_pair(keypair_name)
            system.setUserKeyCredentials(system.getCredentials().username,
                                         None, keypair.private_key)
            if keypair.public_key and "ssh_key" in driver.features.get(
                    "create_node", []):
                args["auth"] = NodeAuthSSHKey(keypair.public_key)
            else:
                args["ex_keyname"] = keypair_name

    res = []
    for _ in range(num_vm):
        self.logger.debug("Creating node")
        node = driver.create_node(**args)
        if node:
            vm = VirtualMachine(inf, node.id, self.cloud, radl,
                                requested_radl,
                                self.cloud.getCloudConnector())
            vm.info.systems[0].setValue('instance_id', str(node.id))
            vm.info.systems[0].setValue('instance_name', str(node.name))
            # Add the keypair name to remove it later (None when this
            # call did not create a keypair).
            vm.keypair = keypair_name
            self.logger.debug("Node successfully created.")
            res.append((True, vm))
        else:
            res.append((False, "Error creating the node"))
    return res
def _to_image(self, img):
    # This provider keys its images by disk_id and labels them via 'label'.
    driver = self.connection.driver
    return NodeImage(id=img['disk_id'], name=img['label'], driver=driver)
def asLibcloudImage(self):
    """Wrap this object's image id in a libcloud NodeImage stub."""
    return NodeImage(
        id=self.image,
        name='dummy image',
        driver=self.driver,
        extra={},
    )
# Launch a single EC2 test node using credentials read from the local
# awscli-style credentials file.
# NOTE(review): Python 2 import — on Python 3 the module is `configparser`
# and SafeConfigParser was renamed to ConfigParser. Confirm target runtime.
from ConfigParser import SafeConfigParser

# Read the AWS access keys from section [docent] of the credentials file.
parser = SafeConfigParser()
parser.read('/home/docent/.aws/credentials')
ACCESS_ID = parser.get('docent', 'aws_access_key_id')
SECRET_KEY = parser.get('docent', 'aws_secret_access_key')

# Static launch configuration for the instance.
config = {
    'ami': 'ami-83cfd1ef',
    'instance_type': 't2.micro',
    'region': 'eu-central-1',
    'keypair': 'docent_ocado',
    'security_group': 'web-1'
}

cls = get_driver(Provider.EC2)
driver = cls(ACCESS_ID, SECRET_KEY, region=config['region'])

# Here we select
sizes = driver.list_sizes()
size = [s for s in sizes if s.id == config['instance_type']][0]
# AMIs are referenced directly by id, so construct the NodeImage by hand
# instead of listing all images.
image = NodeImage(id=config['ami'], name=None, driver=driver)

node = driver.create_node(
    name='test-node',
    image=image,
    size=size,
    ex_keyname=config['keypair'],
    ex_securitygroup=config['security_group'])
def upload(self, compose_meta):
    """ Registers the image in each EC2 region.

    Pipeline: boot a utility instance, stream the raw image onto a
    secondary EBS volume, snapshot it, register the snapshot as an AMI,
    boot and smoke-test the AMI, then (on success) copy the AMI to every
    other region and make the AMIs and snapshots public.

    :param compose_meta: compose metadata forwarded on every fedmsg message
    :return: 0 on success, 1 on any handled failure
    """
    log.info('EC2 upload process started')

    # Get a starting utility AMI in some region to use as an origin
    ami = self.util_amis[0]  # Select the starting AMI to begin
    self.destination = 'EC2 ({region})'.format(region=ami['region'])

    fedimg.messenger.message('image.upload', self.raw_url,
                             self.destination, 'started',
                             compose=compose_meta)

    try:
        # Connect to the region through the appropriate libcloud driver
        cls = ami['driver']
        driver = cls(fedimg.AWS_ACCESS_ID, fedimg.AWS_SECRET_KEY)

        # select the desired node attributes
        sizes = driver.list_sizes()
        reg_size_id = 'm1.xlarge'

        # check to make sure we have access to that size node
        # TODO: Add try/except if for some reason the size isn't
        # available?
        size = [s for s in sizes if s.id == reg_size_id][0]

        base_image = NodeImage(id=ami['ami'], name=None, driver=driver)

        # Name the utility node
        name = 'Fedimg AMI builder'

        # Block device mapping for the utility node
        # (Requires this second volume to write the image to for
        # future registration.)
        mappings = [{'VirtualName': None,  # cannot specify with Ebs
                     'Ebs': {'VolumeSize': fedimg.AWS_UTIL_VOL_SIZE,
                             'VolumeType': self.vol_type,
                             'DeleteOnTermination': 'false'},
                     'DeviceName': '/dev/sdb'}]

        # Read in the SSH key
        with open(fedimg.AWS_PUBKEYPATH, 'rb') as f:
            key_content = f.read()

        # Add key to authorized keys for root user
        step_1 = SSHKeyDeployment(key_content)

        # Add script for deployment
        # Device becomes /dev/xvdb on instance
        script = "touch test"  # this isn't so important for the util inst.
        step_2 = ScriptDeployment(script)

        # Create deployment object (will set up SSH key and run script)
        msd = MultiStepDeployment([step_1, step_2])

        log.info('Deploying utility instance')

        # Retry loop: recover from a missing keypair or missing 'ssh'
        # security group in this region, else re-raise.
        while True:
            try:
                self.util_node = driver.deploy_node(
                    name=name,
                    image=base_image,
                    size=size,
                    ssh_username=fedimg.AWS_UTIL_USER,
                    ssh_alternate_usernames=[''],
                    ssh_key=fedimg.AWS_KEYPATH,
                    deploy=msd,
                    kernel_id=ami['aki'],
                    ex_metadata={'build': self.build_name},
                    ex_keyname=fedimg.AWS_KEYNAME,
                    ex_security_groups=['ssh'],
                    ex_ebs_optimized=True,
                    ex_blockdevicemappings=mappings)
            except KeyPairDoesNotExistError:
                # The keypair is missing from the current region.
                # Let's install it and try again.
                log.exception('Adding missing keypair to region')
                driver.ex_import_keypair(fedimg.AWS_KEYNAME,
                                         fedimg.AWS_PUBKEYPATH)
                continue
            except Exception as e:
                # We might have an invalid security group, aka the 'ssh'
                # security group doesn't exist in the current region. The
                # reason this is caught here is because the related
                # exception that prints`InvalidGroup.NotFound is, for
                # some reason, a base exception.
                if 'InvalidGroup.NotFound' in str(e):
                    # NOTE(review): the implicit string concatenation
                    # below produces "Adding missing securitygroup to
                    # region" (a space is missing between the parts).
                    log.exception('Adding missing security'
                                  'group to region')
                    # Create the ssh security group
                    driver.ex_create_security_group('ssh', 'ssh only')
                    driver.ex_authorize_security_group('ssh', '22', '22',
                                                       '0.0.0.0/0')
                    continue
                else:
                    raise
            break

        # Wait until the utility node has SSH running
        while not ssh_connection_works(fedimg.AWS_UTIL_USER,
                                       self.util_node.public_ips[0],
                                       fedimg.AWS_KEYPATH):
            sleep(10)

        log.info('Utility node started with SSH running')

        # Connect to the utility node via SSH
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(self.util_node.public_ips[0],
                       username=fedimg.AWS_UTIL_USER,
                       key_filename=fedimg.AWS_KEYPATH)

        # Curl the .raw.xz file down from the web, decompressing it
        # and writing it to the secondary volume defined earlier by
        # the block device mapping.
        # curl with -L option, so we follow redirects
        cmd = "sudo sh -c 'curl -L {0} | xzcat > /dev/xvdb'".format(
            self.raw_url)
        chan = client.get_transport().open_session()
        chan.get_pty()  # Request a pseudo-term to get around require tty

        log.info('Executing utility script')

        # Run the above command and wait for its exit status
        chan.exec_command(cmd)
        status = chan.recv_exit_status()
        if status != 0:
            # There was a problem with the SSH command
            log.error('Problem writing volume with utility instance')

            data = "(no data)"
            if chan.recv_ready():
                data = chan.recv(1024 * 32)

            fedimg.messenger.message('image.upload', self.raw_url,
                                     self.destination, 'failed',
                                     extra={'data': data},
                                     compose=compose_meta)

            raise EC2UtilityException(
                "Problem writing image to utility instance volume. "
                "Command exited with status {0}.\n"
                "command: {1}\n"
                "output: {2}".format(status, cmd, data))

        client.close()

        # Get volume name that image was written to
        vol_id = [x['ebs']['volume_id']
                  for x in self.util_node.extra['block_device_mapping']
                  if x['device_name'] == '/dev/sdb'][0]

        log.info('Destroying utility node')

        # Terminate the utility instance
        driver.destroy_node(self.util_node)

        # Wait for utility node to be terminated
        while ssh_connection_works(fedimg.AWS_UTIL_USER,
                                   self.util_node.public_ips[0],
                                   fedimg.AWS_KEYPATH):
            sleep(10)

        # Wait a little longer since loss of SSH connectivity doesn't mean
        # that the node's destroyed
        # TODO: Check instance state rather than this lame sleep thing
        sleep(45)

        # Take a snapshot of the volume the image was written to
        self.util_volume = [v for v in driver.list_volumes()
                            if v.id == vol_id][0]
        snap_name = 'fedimg-snap-{0}'.format(self.build_name)

        log.info('Taking a snapshot of the written volume')

        self.snapshot = driver.create_volume_snapshot(self.util_volume,
                                                      name=snap_name)
        snap_id = str(self.snapshot.id)

        while self.snapshot.extra['state'] != 'completed':
            # Re-obtain snapshot object to get updates on its state
            self.snapshot = [s for s in driver.list_snapshots()
                             if s.id == snap_id][0]
            sleep(10)

        # Make the snapshot public, so that the AMIs can be copied
        is_snapshot_public = False
        while True:
            is_snapshot_public = driver.ex_modify_snapshot_attribute(
                self.snapshot,
                {'CreateVolumePermission.Add.1.Group': 'all'})
            if is_snapshot_public:
                break
            log.info('Snapshot is not public yet. Retry in 20')
            sleep(20)

        log.info('Snapshot taken & made public')

        # Delete the volume now that we've got the snapshot
        driver.destroy_volume(self.util_volume)
        # make sure Fedimg knows that the vol is gone
        self.util_volume = None

        log.info('Destroyed volume')

        # Actually register image
        log.info('Registering image as an AMI')

        if self.virt_type == 'paravirtual':
            image_name = "{0}-{1}-PV-{2}-0".format(self.build_name,
                                                   ami['region'],
                                                   self.vol_type)
            test_size_id = 'm1.xlarge'
            # test_amis will include AKIs of the appropriate arch
            registration_aki = [a['aki'] for a in self.test_amis
                                if a['region'] == ami['region']][0]
            reg_root_device_name = '/dev/sda'
        else:  # HVM
            image_name = "{0}-{1}-HVM-{2}-0".format(self.build_name,
                                                    ami['region'],
                                                    self.vol_type)
            test_size_id = 'm3.2xlarge'
            # Can't supply a kernel image with HVM
            registration_aki = None
            reg_root_device_name = '/dev/sda1'

        # For this block device mapping, we have our volume be
        # based on the snapshot's ID
        mapping = [{'DeviceName': reg_root_device_name,
                    'Ebs': {'SnapshotId': snap_id,
                            'VolumeSize': fedimg.AWS_TEST_VOL_SIZE,
                            'VolumeType': self.vol_type,
                            'DeleteOnTermination': 'true'}}]

        # Avoid duplicate image name by incrementing the number at the
        # end of the image name if there is already an AMI with that name.
        # TODO: This process could be written nicer.
        while True:
            try:
                if self.dup_count > 0:
                    # Remove trailing '-0' or '-1' or '-2' or...
                    image_name = '-'.join(image_name.split('-')[:-1])
                    # Re-add trailing dup number with new count
                    image_name += '-{0}'.format(self.dup_count)
                # Try to register with that name
                self.images.append(driver.ex_register_image(
                    image_name,
                    description=self.image_desc,
                    root_device_name=reg_root_device_name,
                    block_device_mapping=mapping,
                    virtualization_type=self.virt_type,
                    kernel_id=registration_aki,
                    architecture=self.image_arch))
            except Exception as e:
                # Check if the problem was a duplicate name
                if 'InvalidAMIName.Duplicate' in str(e):
                    # Keep trying until an unused name is found
                    self.dup_count += 1
                    continue
                else:
                    raise
            break

        log.info('Completed image registration')

        # Emit success fedmsg
        # TODO: Can probably move this into the above try/except,
        # to avoid just dumping all the messages at once.
        for image in self.images:
            fedimg.messenger.message('image.upload', self.raw_url,
                                     self.destination, 'completed',
                                     extra={'id': image.id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type},
                                     compose=compose_meta)

        # Now, we'll spin up a node of the AMI to test:

        # Add script for deployment
        # Device becomes /dev/xvdb on instance
        script = "touch test"
        step_2 = ScriptDeployment(script)

        # Create deployment object
        msd = MultiStepDeployment([step_1, step_2])

        log.info('Deploying test node')

        # Pick a name for the test instance
        name = 'Fedimg AMI tester'

        # Select the appropriate size for the instance
        size = [s for s in sizes if s.id == test_size_id][0]

        # Alert the fedmsg bus that an image test is starting
        fedimg.messenger.message('image.test', self.raw_url,
                                 self.destination, 'started',
                                 extra={'id': self.images[0].id,
                                        'virt_type': self.virt_type,
                                        'vol_type': self.vol_type},
                                 compose=compose_meta)

        # Actually deploy the test instance
        try:
            self.test_node = driver.deploy_node(
                name=name,
                image=self.images[0],
                size=size,
                ssh_username=fedimg.AWS_TEST_USER,
                ssh_alternate_usernames=['root'],
                ssh_key=fedimg.AWS_KEYPATH,
                deploy=msd,
                kernel_id=registration_aki,
                ex_metadata={'build': self.build_name},
                ex_keyname=fedimg.AWS_KEYNAME,
                ex_security_groups=['ssh'],
            )
        except Exception as e:
            fedimg.messenger.message('image.test', self.raw_url,
                                     self.destination, 'failed',
                                     extra={'id': self.images[0].id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type},
                                     compose=compose_meta)
            raise EC2AMITestException("Failed to boot test node %r." % e)

        # Wait until the test node has SSH running
        while not ssh_connection_works(fedimg.AWS_TEST_USER,
                                       self.test_node.public_ips[0],
                                       fedimg.AWS_KEYPATH):
            sleep(10)

        log.info('Starting AMI tests')

        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(self.test_node.public_ips[0],
                       username=fedimg.AWS_TEST_USER,
                       key_filename=fedimg.AWS_KEYPATH)

        # Run /bin/true on the test instance as a simple "does it
        # work" test
        cmd = "/bin/true"
        chan = client.get_transport().open_session()
        chan.get_pty()  # Request a pseudo-term to get around requiretty

        log.info('Running AMI test script')

        chan.exec_command(cmd)

        # Again, wait for the test command's exit status
        if chan.recv_exit_status() != 0:
            # There was a problem with the SSH command
            log.error('Problem testing new AMI')

            data = "(no data)"
            if chan.recv_ready():
                data = chan.recv(1024 * 32)

            fedimg.messenger.message('image.test', self.raw_url,
                                     self.destination, 'failed',
                                     extra={'id': self.images[0].id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type,
                                            'data': data},
                                     compose=compose_meta)

            raise EC2AMITestException("Tests on AMI failed.\n"
                                      "output: %s" % data)

        client.close()

        log.info('AMI test completed')
        fedimg.messenger.message('image.test', self.raw_url,
                                 self.destination, 'completed',
                                 extra={'id': self.images[0].id,
                                        'virt_type': self.virt_type,
                                        'vol_type': self.vol_type},
                                 compose=compose_meta)

        # Let this EC2Service know that the AMI test passed, so
        # it knows how to proceed.
        self.test_success = True

        log.info('Destroying test node')

        # Destroy the test node
        driver.destroy_node(self.test_node)

        # Make AMIs public
        for image in self.images:
            driver.ex_modify_image_attribute(
                image,
                {'LaunchPermission.Add.1.Group': 'all'})

    except EC2UtilityException as e:
        log.exception("Failure")
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    except EC2AMITestException as e:
        log.exception("Failure")
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    except DeploymentException as e:
        log.exception("Problem deploying node: {0}".format(e.value))
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    except Exception as e:
        # Just give a general failure message.
        log.exception("Unexpected exception")
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    else:
        self._clean_up(driver)

    if self.test_success:
        # Copy the AMI to every other region if tests passed
        copied_images = list()  # completed image copies (ami: image)

        # Use the AMI list as a way to cycle through the regions
        for ami in self.test_amis[1:]:  # we don't need the origin region
            # Choose an appropriate destination name for the copy
            alt_dest = 'EC2 ({region})'.format(region=ami['region'])

            fedimg.messenger.message('image.upload', self.raw_url,
                                     alt_dest, 'started',
                                     compose=compose_meta)

            # Connect to the libcloud EC2 driver for the region we
            # want to copy into
            alt_cls = ami['driver']
            alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                 fedimg.AWS_SECRET_KEY)

            # Construct the full name for the image copy
            if self.virt_type == 'paravirtual':
                image_name = "{0}-{1}-PV-{2}-0".format(self.build_name,
                                                       ami['region'],
                                                       self.vol_type)
            else:  # HVM
                image_name = "{0}-{1}-HVM-{2}-0".format(self.build_name,
                                                        ami['region'],
                                                        self.vol_type)

            log.info('AMI copy to {0} started'.format(ami['region']))

            # Avoid duplicate image name by incrementing the number at the
            # end of the image name if there is already an AMI with
            # that name.
            # TODO: Again, this could be written better
            while True:
                try:
                    if self.dup_count > 0:
                        # Remove trailing '-0' or '-1' or '-2' or...
                        image_name = '-'.join(image_name.split('-')[:-1])
                        # Re-add trailing dup number with new count
                        image_name += '-{0}'.format(self.dup_count)

                    # Actually run the image copy from the origin region
                    # to the current region.
                    for image in self.images:
                        image_copy = alt_driver.copy_image(
                            image,
                            self.test_amis[0]['region'],
                            name=image_name,
                            description=self.image_desc)
                        # Add the image copy to a list so we can work with
                        # it later.
                        copied_images.append(image_copy)

                        log.info('AMI {0} copied to AMI {1}'.format(
                            image, image_name))

                except Exception as e:
                    # Check if the problem was a duplicate name
                    if 'InvalidAMIName.Duplicate' in str(e):
                        # Keep trying until an unused name is found.
                        # This probably won't trigger, since it seems
                        # like EC2 doesn't mind duplicate AMI names
                        # when they are being copied, only registered.
                        # Strange, but apprently true.
                        self.dup_count += 1
                        continue
                    else:
                        # TODO: Catch a more specific exception
                        log.exception('Image copy to {0} failed'.format(
                            ami['region']))
                        fedimg.messenger.message('image.upload',
                                                 self.raw_url,
                                                 alt_dest, 'failed',
                                                 compose=compose_meta)
                break

        # Now cycle through and make all of the copied AMIs public
        # once the copy process has completed. Again, use the test
        # AMI list as a way to have region and arch data:
        # We don't need the origin region, since the AMI was made there:
        self.test_amis = self.test_amis[1:]

        for image in copied_images:
            ami = self.test_amis[copied_images.index(image)]
            alt_cls = ami['driver']
            alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                 fedimg.AWS_SECRET_KEY)

            # Get an appropriate name for the region in question
            alt_dest = 'EC2 ({region})'.format(region=ami['region'])

            # Need to wait until the copy finishes in order to make
            # the AMI public.
            is_image_public = False
            while True:
                try:
                    # Make the image public
                    is_image_public = alt_driver.ex_modify_image_attribute(
                        image,
                        {'LaunchPermission.Add.1.Group': 'all'})
                except Exception as e:
                    if 'InvalidAMIID.Unavailable' in str(e):
                        # The copy isn't done, so wait 20 seconds
                        # and try again.
                        sleep(20)
                        continue
                break

            if is_image_public:
                log.info('Made {0} public ({1}, {2}, {3}, {4})'.format(
                    image.id, self.build_name, self.virt_type,
                    self.vol_type, ami['region']))
            else:
                log.info('{0} is private ({1}, {2}, {3}, {4})'.format(
                    image.id, self.build_name, self.virt_type,
                    self.vol_type, ami['region']))

            # Make the snapshot for the image public.
            is_snapshot_public = False
            snapshot = None
            alt_ami = alt_driver.get_image(image.id)
            blk_device_mapping = alt_ami.extra['block_device_mapping']
            # NOTE(review): snapshot_id is only bound inside this branch;
            # the log calls after it would raise NameError when the AMI
            # has more than one block device mapping — confirm intent.
            if len(blk_device_mapping) == 1:
                snapshot_id = blk_device_mapping[0]['ebs']['snapshot_id']
                # The `list_snapshots` method requires a snapshot object.
                # which then fetches the id of the snapshot and fetches the
                # detail of the snapshot. So, I am making an empty snapshot
                # object here and attaching the value to the `id` attribute
                # so that the list_snapshots method just works
                snapshot_obj = type('', (), {})()
                snapshot_obj.id = snapshot_id
                snapshot = alt_driver.list_snapshots(snapshot=snapshot_obj)

                if snapshot is not None:
                    snapshot = snapshot[0]

                    while True:
                        is_snapshot_public = (
                            alt_driver.ex_modify_snapshot_attribute(
                                snapshot,
                                {'CreateVolumePermission.Add.1.Group':
                                 'all'}))
                        if is_snapshot_public:
                            break
                        log.info('Snapshot is not public yet. Retry in 20')
                        sleep(20)
                else:
                    is_snapshot_public = False
                    log.info('Search (%s, %s) returned no results'
                             % (snapshot_id, ami['region']))

            if is_snapshot_public:
                log.info('Snapshot (%s, %s) made public'
                         % (snapshot_id, ami['region']))
            else:
                log.info('Snapshot (%s, %s) still private'
                         % (snapshot_id, ami['region']))

            fedimg.messenger.message('image.upload', self.raw_url,
                                     alt_dest, 'completed',
                                     extra={'id': image.id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type},
                                     compose=compose_meta)

    return 0
def _to_image(self, data):
    # Keep the distribution name available to callers via extra.
    meta = {'distribution': data['distribution']}
    return NodeImage(id=data['id'], name=data['name'], extra=meta,
                     driver=self)
def _to_image(self, data):
    """Translate a slug-keyed API image payload into a NodeImage."""
    meta = {'distro': data['distro'], 'version': data['version']}
    image = NodeImage(id=data['slug'], name=data['name'], extra=meta,
                      driver=self)
    return image
def _to_image(self, element):
    # element is a mapping; .get() turns missing keys into None.
    image_id = element.get('id')
    image_name = element.get('name')
    return NodeImage(id=image_id, name=image_name, driver=self, extra={})
def _to_image(self, element):
    """Build a NodeImage from an XML element with <id> and <name> children."""
    image_id = int(element.findtext('id'))
    image_name = str(element.findtext('name'))
    return NodeImage(id=image_id, name=image_name,
                     driver=self.connection.driver)
def _to_image(self, data):
    # Carry the OS descriptor through in the image's extra metadata.
    os_meta = {"operating_system": data["operating_system"]}
    return NodeImage(id=data["slug"], name=data["name"], extra=os_meta,
                     driver=self)
def _to_image(self, data):
    """Convert a provider OS record (keyed by OSID) into a NodeImage."""
    details = {'arch': data['arch'], 'family': data['family']}
    return NodeImage(id=data['OSID'], name=data['name'], extra=details,
                     driver=self)
def _to_image(self, image):
    # The manifest URL is preserved under extra['parametersURL'].
    manifest = image.findtext('Manifest')
    return NodeImage(id=image.findtext('ID'),
                     name=image.findtext('Name'),
                     driver=self.connection.driver,
                     extra={'parametersURL': manifest})
def launch_libcloud(driver, num_instance, config, cluster_id=c.CLUSTER_ID,
                    assume_yes=False):
    """Launch num_instance instances on the desired provider given by the
    driver, using a provider depended config

    :param driver: the desired provider driver
    :param num_instance: the number of instances to be launched
    :param config: the configuration dictionary of the user
    :param cluster_id: label used in the confirmation prompt and in the
                       names of created Azure resources
    :param assume_yes: when True, skip the interactive confirmation
    :return: list of the created nodes, if provider AWS_SPOT also list of
             spot request is returned
    """
    # Ask for confirmation unless the caller opted out.
    proceed = True if assume_yes else query_yes_no(
        "Are you sure to launch " + str(num_instance) +
        " new instances on " + cluster_id + "?", "no")
    if proceed:
        if (c.PROVIDER == "AWS_SPOT"):
            check_spot_price(driver, config)

            # pick size and images
            sizes = driver.list_sizes()
            size = [s for s in sizes
                    if s.id == config["Aws"]["InstanceType"]][0]
            image = NodeImage(id=config["Aws"]["AMI"], name=None,
                              driver=driver)
            locations = driver.list_locations()
            location = [l for l in locations
                        if l.name == config["Aws"]["AZ"]][0]

            # Array of EC2SpotRequest
            spot_request = driver.ex_request_spot_instances(
                image=image,
                size=size,
                spot_price=config["Aws"]["Price"],
                instance_count=num_instance,
                type='one-time',
                location=location,
                keyname=config["Aws"]["KeyPair"],
                security_groups=[config["Aws"]["SecurityGroup"]],
                #ebs_optimized=config["Aws"]["EbsOptimized"],
                blockdevicemappings=[{
                    "DeviceName": "/dev/sda1",
                    "Ebs": {
                        "DeleteOnTermination": True,
                        "VolumeType": "gp2",
                        "VolumeSize": 200,
                        "SnapshotId": config["Aws"]["SnapshotId"]
                    }
                }, {
                    "DeviceName": "/dev/sdb",
                    "VirtualName": "ephemeral0"
                }])
            print(spot_request)

            # Request created
            spot_request_ids = [s.id for s in spot_request]
            print(spot_request_ids)
            # Block until every spot request has been fulfilled.
            wait_for_fulfillment_libcloud(driver, spot_request_ids,
                                          copy.deepcopy(spot_request_ids))

            # Spot Instances ACTIVE
            spot_request_updates = driver.ex_list_spot_requests(
                spot_request_ids)
            instance_ids = [s.instance_id for s in spot_request_updates]
            nodes = driver.list_nodes(ex_node_ids=instance_ids)
            return nodes, spot_request_updates
        if c.PROVIDER == "AZURE":
            # obtain size
            print("Collecting node size")
            sizes = driver.list_sizes()
            size = [s for s in sizes
                    if s.id == config["Azure"]["NodeSize"]][0]

            # obtain image
            print("Collecting node image")
            # image = driver.get_image('Canonical:UbuntuServer:14.04.5-LTS:14.04.201703230')
            image = AzureVhdImage(
                storage_account=config["Azure"]["NodeImage"]["StorageAccount"],
                blob_container=config["Azure"]["NodeImage"]["BlobContainer"],
                name=config["Azure"]["NodeImage"]["Name"],
                driver=driver)

            # create resource group
            print("Creating resource group")
            driver.ex_create_resource_group(
                resource_group=config["Azure"]["ResourceGroup"])

            # create storage account
            print("Creating storage account")
            driver.ex_create_storage_account(
                resource_group=config["Azure"]["ResourceGroup"],
                storage_account=config["Azure"]["StorageAccount"]["Name"],
                sku=config["Azure"]["StorageAccount"]["Sku"],
                kind=config["Azure"]["StorageAccount"]["Kind"])

            # create security group
            # Inbound rules: SSH plus the Hadoop/Spark service ports.
            print("Creating security group")
            security_rules = [{
                "name": "default-allow-ssh",
                "properties": {
                    "protocol": "TCP",
                    "sourcePortRange": "*",
                    "destinationPortRange": "22",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1000,
                    "direction": "Inbound"
                }
            }, {
                "name": "hadoop-9000",
                "properties": {
                    "protocol": "*",
                    "sourcePortRange": "*",
                    "destinationPortRange": "9000",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1010,
                    "direction": "Inbound"
                }
            }, {
                "name": "hadoop-50090",
                "properties": {
                    "protocol": "*",
                    "sourcePortRange": "*",
                    "destinationPortRange": "50090",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1020,
                    "direction": "Inbound"
                }
            }, {
                "name": "hadoop-50070",
                "properties": {
                    "protocol": "*",
                    "sourcePortRange": "*",
                    "destinationPortRange": "50070",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1030,
                    "direction": "Inbound"
                }
            }, {
                "name": "hadoop-50010",
                "properties": {
                    "protocol": "*",
                    "sourcePortRange": "*",
                    "destinationPortRange": "50010",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1040,
                    "direction": "Inbound"
                }
            }, {
                "name": "spark-7077",
                "properties": {
                    "protocol": "*",
                    "sourcePortRange": "*",
                    "destinationPortRange": "7077",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1050,
                    "direction": "Inbound"
                }
            }, {
                "name": "spark-webui-4040",
                "properties": {
                    "protocol": "*",
                    "sourcePortRange": "*",
                    "destinationPortRange": "4040",
                    "sourceAddressPrefix": "*",
                    "destinationAddressPrefix": "*",
                    "access": "allow",
                    "priority": 1060,
                    "direction": "Inbound"
                }
            }]
            driver.ex_create_security_group(
                resource_group=config["Azure"]["ResourceGroup"],
                security_group=config["Azure"]["SecurityGroup"],
                security_rules=security_rules)

            # create network and subnet
            print("Create network")
            network_parameters = {
                "addressSpace": {
                    "addressPrefixes": ["10.0.0.0/16"]
                },
                "subnets": [{
                    "name": config["Azure"]["Subnet"],
                    "properties": {
                        "addressPrefix": "10.0.0.0/24"
                    }
                }]
            }
            network = driver.ex_create_network(
                resource_group=config["Azure"]["ResourceGroup"],
                network=config["Azure"]["Network"],
                extra=network_parameters)

            # retrieve subnet
            print("Find default subnet")
            subnets = driver.ex_list_subnets(network)
            subnet = [s for s in subnets if s.name == "default"][0]

            # public ips
            print("Create public ips")
            public_ips = [
                driver.ex_create_public_ip(
                    name="{}ip{}".format(cluster_id, i),
                    #name="testip",
                    resource_group=config["Azure"]["ResourceGroup"])
                for i in range(num_instance)
            ]

            # network interface
            print("Create network interfaces")
            network_interfaces = [
                driver.ex_create_network_interface(
                    name="{}nic{}".format(cluster_id, i),
                    #name="testnic",
                    subnet=subnet,
                    resource_group=config["Azure"]["ResourceGroup"],
                    public_ip=public_ips[i]) for i in range(num_instance)
            ]

            # auth
            print("Load public SSH key")
            with open(config["Azure"]["PubKeyPath"], 'r') as pubkey:
                pubdata = pubkey.read()
            auth = NodeAuthSSHKey(pubdata)

            # create nodes
            print("Beginning node creation")
            nodes = [
                driver.create_node(
                    name="{}node{}".format(cluster_id, i),
                    #name="vm",
                    size=size,
                    image=image,
                    auth=auth,
                    ex_resource_group=config["Azure"]["ResourceGroup"],
                    ex_storage_account=config["Azure"]["StorageAccount"]
                    ["Name"],
                    ex_blob_container='vhds',
                    ex_user_name='ubuntu',
                    ex_network=None,
                    ex_subnet=None,
                    ex_nic=network_interfaces[i],
                    ex_customdata='') for i in range(num_instance)
            ]
            print("Created {} nodes".format(len(nodes)))
            return nodes
def _to_image(self, element):
    """Map a provider image dict (id / friendlyName) onto a NodeImage."""
    return NodeImage(id=element['id'],
                     name=element['friendlyName'],
                     driver=self.connection.driver)
def list_images(self):
    # Query the provider's OS catalogue and wrap every entry.
    response = self.connection.request("api/os")
    return [
        NodeImage(entry["Id"], entry["Name"], self)
        for entry in response.object["os"]
    ]
def _to_image(self, image):
    """Build a NodeImage from a mapping, using its href as the id."""
    converted = NodeImage(id=image.get('href'),
                          name=image.get('name'),
                          driver=self.connection.driver)
    return converted
# Demo: boot one EC2 node in us-west-1 from the Netflix Asgard AMI.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeImage

ACCESS_ID = 'your access id'
SECRET_KEY = 'your secret key'

# Image with Netflix Asgard available in us-west-1 region
# https://github.com/Answers4AWS/netflixoss-ansible/wiki/AMIs-for-NetflixOSS
AMI_ID = 'ami-c8052d8d'
SIZE_ID = 't1.micro'

# 'us-west-1' region is available in Libcloud under EC2_US_WEST provider
# constant
cls = get_driver(Provider.EC2)
driver = cls(ACCESS_ID, SECRET_KEY, region="us-west-1")

# Here we select
sizes = driver.list_sizes()
# NOTE(review): SIZE_ID is defined above but the literal is used here.
size = [s for s in sizes if s.id == 't1.micro'][0]
# AMIs are referenced by id, so build the NodeImage directly instead of
# listing all available images.
image = NodeImage(id=AMI_ID, name=None, driver=driver)

node = driver.create_node(name='test-node', image=image, size=size)
def launch(self, inf, radl, requested_radl, num_vm, auth_data):
    """Create ``num_vm`` nodes described by ``radl`` on this provider.

    Returns a list with one ``(success, VirtualMachine-or-error-str)``
    tuple per requested VM.  If every creation fails, the security
    groups and any keypair created here are deleted again.
    """
    driver = self.get_driver(auth_data)
    system = radl.systems[0]
    # Resolve image and flavor from the RADL description.
    image_id = self.get_image_id(system.getValue("disk.0.image.url"))
    image = NodeImage(id=image_id, name=None, driver=driver)
    instance_type = self.get_instance_type(driver.list_sizes(), system)
    if not instance_type:
        raise Exception(
            "No flavor found for the specified VM requirements.")
    # Pick a base name: explicit instance_name, then image name, then a default.
    name = system.getValue("instance_name")
    if not name:
        name = system.getValue("disk.0.image.name")
    if not name:
        name = "userimage"
    nets = self.get_networks(driver, radl)
    sgs = self.create_security_groups(driver, inf, radl)
    args = {'size': instance_type,
            'image': image,
            'networks': nets,
            'ex_security_groups': sgs,
            # timestamp suffix keeps node names unique across launches
            'name': "%s-%s" % (name, int(time.time() * 100))}
    keypair = None
    keypair_name = None
    keypair_created = False
    public_key = system.getValue("disk.0.os.credentials.public_key")
    if public_key:
        # NOTE(review): get_key_pair is given the key *value* here, not a
        # key name — confirm this is the intended lookup semantics.
        keypair = driver.get_key_pair(public_key)
        if keypair:
            system.setUserKeyCredentials(system.getCredentials().username,
                                         None, keypair.private_key)
        else:
            if "ssh_key" in driver.features.get("create_node", []):
                args["auth"] = NodeAuthSSHKey(public_key)
    elif not system.getValue("disk.0.os.credentials.password"):
        # No key or password supplied: create a throwaway keypair.
        keypair_name = "im-%s" % str(uuid.uuid1())
        self.log_info("Create keypair: %s" % keypair_name)
        keypair = driver.create_key_pair(keypair_name)
        keypair_created = True
        public_key = keypair.public_key
        system.setUserKeyCredentials(system.getCredentials().username,
                                     None, keypair.private_key)
        if keypair.public_key and "ssh_key" in driver.features.get(
                "create_node", []):
            args["auth"] = NodeAuthSSHKey(keypair.public_key)
        else:
            args["ex_keyname"] = keypair_name
    user = system.getValue('disk.0.os.credentials.username')
    if not user:
        user = self.DEFAULT_USER
        system.setValue('disk.0.os.credentials.username', user)
    cloud_init = self.get_cloud_init_data(radl)
    if public_key:
        # Inject the public key for `user` into the cloud-init payload.
        cloud_init = self.gen_cloud_config(public_key, user, cloud_init)
    if cloud_init:
        args['ex_userdata'] = cloud_init
    if self.CONFIG_DRIVE:
        args['ex_config_drive'] = self.CONFIG_DRIVE
    if system.getValue('availability_zone'):
        self.log_debug("Setting availability_zone: %s" %
                       system.getValue('availability_zone'))
        args['ex_availability_zone'] = system.getValue('availability_zone')
    res = []
    i = 0
    all_failed = True
    # Create the nodes one by one; a failure for one VM does not stop
    # the remaining creations.
    while i < num_vm:
        self.log_info("Creating node")
        node = None
        try:
            node = driver.create_node(**args)
            vm = VirtualMachine(inf, node.id, self.cloud, radl,
                                requested_radl,
                                self.cloud.getCloudConnector(inf))
            vm.info.systems[0].setValue('instance_id', str(node.id))
            vm.info.systems[0].setValue('instance_name', str(node.name))
            # Add the keypair name to remove it later
            if keypair_name:
                vm.keypair = keypair_name
            self.log_info("Node successfully created.")
            all_failed = False
            inf.add_vm(vm)
            res.append((True, vm))
        except Exception as ex:
            res.append((False, str(ex)))
        i += 1
    # if all the VMs have failed, remove the sgs and keypair
    if all_failed:
        if keypair_created:
            # only delete in case of the user do not specify the keypair name
            self.log_info("Deleting keypair: %s." % keypair_name)
            driver.delete_key_pair(keypair)
        for sg in sgs:
            self.log_info("Deleting security group: %s." % sg.id)
            driver.ex_delete_security_group(sg)
    return res
def launch(self, inf, radl, requested_radl, num_vm, auth_data):
    """Create ``num_vm`` nodes described by ``radl`` on this provider.

    Returns a list with one ``(success, VirtualMachine-or-error-str)``
    tuple per requested VM.
    """
    driver = self.get_driver(auth_data)
    system = radl.systems[0]
    # Resolve image and flavor from the RADL description.
    image_id = self.get_image_id(system.getValue("disk.0.image.url"))
    image = NodeImage(id=image_id, name=None, driver=driver)
    instance_type = self.get_instance_type(driver.list_sizes(), system)
    # Pick a base name: explicit instance_name, then image name, then a default.
    name = system.getValue("instance_name")
    if not name:
        name = system.getValue("disk.0.image.name")
    if not name:
        name = "userimage"
    sgs = self.create_security_groups(driver, inf, radl)
    args = {'size': instance_type,
            'image': image,
            'ex_security_groups': sgs,
            'ex_start_vm': True,
            # uuid suffix keeps node names unique across launches
            'name': "%s-%s" % (name, str(uuid.uuid1()))}
    if system.getValue('availability_zone'):
        args['location'] = system.getValue('availability_zone')
    keypair = None
    public_key = system.getValue("disk.0.os.credentials.public_key")
    if public_key and public_key.find('-----BEGIN CERTIFICATE-----') == -1:
        # NOTE(review): public_key is cleared *before* the keypair lookup,
        # so get_key_pair always receives None here — verify intent.
        public_key = None
        keypair = driver.get_key_pair(public_key)
        if keypair:
            system.setUserKeyCredentials(system.getCredentials().username,
                                         None, keypair.private_key)
        else:
            # NOTE(review): keypair is falsy in this branch, so
            # keypair.name would raise AttributeError — confirm.
            args["ex_keyname"] = keypair.name
    else:
        # No usable key given: generate a fresh keypair locally.
        public_key, private_key = self.keygen()
        system.setUserKeyCredentials(system.getCredentials().username,
                                     None, private_key)
    user = system.getValue('disk.0.os.credentials.username')
    if not user:
        user = self.DEFAULT_USER
        system.setValue('disk.0.os.credentials.username', user)
    tags = self.get_instance_tags(system)
    res = []
    i = 0
    while i < num_vm:
        self.log_debug("Creating node")
        # The VM is registered first with destroy=True so a failed
        # creation leaves it marked for removal.
        vm = VirtualMachine(inf, None, self.cloud, radl, requested_radl,
                            self.cloud.getCloudConnector(inf))
        vm.destroy = True
        inf.add_vm(vm)
        cloud_init = self.get_cloud_init_data(radl, vm, public_key, user)
        if cloud_init:
            args['ex_userdata'] = cloud_init
        msg = "Error creating the node"
        try:
            node = driver.create_node(**args)
        except Exception as ex:
            msg += ": %s" % str(ex)
            self.log_exception("Error creating node.")
            node = None
        if node:
            if tags:
                # Tagging is best-effort; a failure is logged, not fatal.
                try:
                    driver.ex_create_tags([node.id], tags)
                except Exception:
                    self.log_exception("Error adding tags to node %s." %
                                       node.id)
            vm.id = node.id
            vm.info.systems[0].setValue('instance_id', str(node.id))
            vm.info.systems[0].setValue('instance_name', str(node.name))
            if 'zone_name' in node.extra:
                vm.info.systems[0].setValue('availability_zone',
                                            node.extra["zone_name"])
            self.log_debug("Node successfully created.")
            vm.destroy = False
            # NOTE(review): add_vm was already called above for this vm —
            # confirm the second registration is intentional/idempotent.
            inf.add_vm(vm)
            res.append((True, vm))
        else:
            res.append((False, msg))
        i += 1
    return res
def _to_image(self, img):
    """Map a catalog entry onto a libcloud ``NodeImage``.

    The image id comes from the template's OS reference code and the
    display name from the priced item's description.
    """
    os_code = img['template']['operatingSystemReferenceCode']
    description = img['itemPrice']['item']['description']
    return NodeImage(id=os_code, name=description,
                     driver=self.connection.driver)
def _set_libcloud_mock(self):
    """Reset MockEC2NodeDriver and register two fixture nodes and images."""
    MockEC2NodeDriver.clear_mock()

    def _extra(instance_id, image_id):
        # Both fixture nodes share the same extra payload apart from ids;
        # each call builds fresh dict/list values, as the originals did.
        return {
            "dns_name": "ec2.example.com",
            "instanceId": instance_id,
            "imageId": image_id,
            "private_dns": "",
            "status": "running",
            "keyname": "",
            "launchindex": "",
            "productcode": "",
            "instancetype": "m1.small",
            "launchdatetime": "",
            "availability": "ap-northeast-1a",
            "kernelid": "",
            "ramdiskid": "",
            "clienttoken": "",
            "groups": [],
            "tags": {},
        }

    # Node with public/private addresses.
    MockEC2NodeDriver.add_mock_node(
        Node(id="i-aaaaaaaa",
             name="EC2",
             state=0,
             public_ips=["127.0.0.1"],
             private_ips=["127.0.0.1"],
             driver=self.driver,
             extra=_extra("i-aaaaaaaa", "ami-aaaaaaaa")))

    # Node with a 62-character name and no addresses.
    MockEC2NodeDriver.add_mock_node(
        Node(id="i-bbbbbbbb",
             name="a" * 62,
             state=0,
             public_ips=[],
             private_ips=[],
             driver=self.driver,
             extra=_extra("i-bbbbbbbb", "ami-bbbbbbbb")))

    # One Linux and one Windows image (platform distinguishes them).
    MockEC2NodeDriver.add_mock_image(
        NodeImage(id="ami-aaaaaaaa",
                  name="Linux Image",
                  driver=self.driver,
                  extra={"platform": None}))
    MockEC2NodeDriver.add_mock_image(
        NodeImage(id="ami-bbbbbbbb",
                  name="Windows Image",
                  driver=self.driver,
                  extra={"platform": "windows"}))
def _to_image(self, image):
    """Turn a provider image record into a libcloud ``NodeImage``."""
    # This provider exposes the human-readable label under 'description'.
    return NodeImage(id=image['id'],
                     name=image['description'],
                     driver=self.connection.driver)
def _to_image(self, obj):
    """Build a ``NodeImage``, preserving region/visibility in ``extra``."""
    meta = {
        'region': obj['region'],
        'visibility': obj['visibility'],
    }
    return NodeImage(id=obj['id'], name=obj['name'], driver=self, extra=meta)
def _to_image(self, image):
    """Wrap a raw image record (keyed by 'href'/'name') as a NodeImage."""
    attrs = {
        'id': image.get('href'),
        'name': image.get('name'),
        'driver': self.connection.driver,
    }
    return NodeImage(**attrs)
def do_newawsvm(self, args, arguments):
    """
    ::

      Usage:
            newawsvm default
            newawsvm status [NAMES] [--cloud=CLOUDS]
            newawsvm start [NAMES] [--cloud=CLOUD] [--dryrun]
            newawsvm stop [NAMES] [--cloud=CLOUD] [--dryrun]
            newawsvm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
            newawsvm boot [--n=COUNT]
                          [--name=NAME]
                          [--cloud=CLOUD]
                          [--username=USERNAME]
                          [--image=IMAGE]
                          [--flavor=FLAVOR]
                          [--public]
                          [--secgroup=SECGROUPS]
                          [--key=KEY]
                          [--dryrun]
            newawsvm boot [--name=NAME]
                          [--cloud=CLOUD]
                          [--username=USERNAME]
                          [--image=IMAGE]
                          [--flavor=FLAVOR]
                          [--public]
                          [--secgroup=SECGROUPs]
                          [--key=KEY]
                          [--dryrun]

      Arguments:
          COMMAND        positional arguments, the commands you want to
                         execute on the server(e.g. ls -a) separated by ';',
                         you will get a return of executing result instead of
                         login to the server, note that type in -- is
                         suggested before you input the commands
          NAME           server name. By default it is set to the name of
                         last vm from database.
          NAMES          server name. By default it is set to the name of
                         last vm from database.
          KEYPAIR_NAME   Name of the vm keypair to be used to create VM.
                         Note this is not a path to key.
          NEWNAMES       New names of the VM while renaming.
          OLDNAMES       Old names of the VM while renaming.

      Options:
          -H --modify-knownhosts  Do not modify ~/.ssh/known_hosts file
                                  when ssh'ing into a machine
          --username=USERNAME     the username to login into the vm. If not
                                  specified it will be guessed from the
                                  image name and the cloud
          --ip=IP                 give the public ip of the server
          --cloud=CLOUD           give a cloud to work on, if not given,
                                  selected or default cloud will be used
          --count=COUNT           give the number of servers to start
          --detail                for table print format, a brief version
                                  is used as default, use this flag to
                                  print detailed table
          --flavor=FLAVOR         give the name or id of the flavor
          --group=GROUP           give the group name of server
          --secgroup=SECGROUP     security group name for the server
          --image=IMAGE           give the name or id of the image
          --key=KEY               specify a key to use, input a string
                                  which is the full path to the private
                                  key file
          --keypair_name=KEYPAIR_NAME   Name of the vm keypair to be used
                                  to create VM. Note this is not a path
                                  to key.
          --user=USER             give the user name of the server that
                                  you want to use to login
          --name=NAME             give the name of the virtual machine
          --force                 rename/ delete vms without user's
                                  confirmation
          --command=COMMAND       specify the commands to be executed

      Description:
          commands used to boot, start or delete servers of a cloud

          newawsvm default [options...]
              Displays default parameters that are set for vm boot either
              on the default cloud or the specified cloud.

          newaws vm boot [options...]
              Boots servers on a cloud, user may specify flavor, image
              .etc, otherwise default values will be used, see how to set
              default values of a cloud: cloud help

          newawsvm stop [options...]
              Stops a vm instance .

          newawsvm status [options...]
              Retrieves status of VM booted on cloud and displays it.

      Tip:
          give the VM name, but in a hostlist style, which is very
          convenient when you need a range of VMs e.g. sample[1-3]
          => ['sample1', 'sample2', 'sample3']
          sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

      Quoting commands:
          cm vm login gvonlasz-004 --command=\"uname -a\"
    """
    # print(args, arguments)

    def map_parameters(arguments, *args):
        # Mirror each "--flag" value under a plain "flag" key (None if absent).
        for arg in args:
            flag = "--" + arg
            if flag in arguments:
                arguments[arg] = arguments[flag]
            else:
                arguments[arg] = None

    def get_clouds(arguments, variables):
        # Resolve the target cloud(s) from CLI args or session variables.
        clouds = arguments["cloud"] or arguments["--cloud"] or variables[
            "cloud"]
        if "active" == clouds:
            active = Active()
            clouds = active.clouds()
        elif "aws" == clouds:
            conf = Config("~/.cloudmesh/cloudmesh.yaml")["cloudmesh"]
            auth = conf["cloud"]['aws']
            # NOTE(review): `aws` is constructed but never used, and
            # `clouds` still holds the string "aws" in this branch.
            aws = AwsActions(
                aws_access_key_id=auth['credentials']['EC2_ACCESS_ID'],
                aws_secret_access_key=auth['credentials']['EC2_SECRET_KEY'],
                region_name=auth['default']['region']
            )
            pprint("loaded aws")
        else:
            clouds = Parameter.expand(clouds)
        return clouds

    def get_names(arguments, variables):
        # Expand hostlist-style names (e.g. sample[1-3]) into a list.
        names = arguments["NAME"] or arguments["NAMES"] or arguments[
            "--name"] or variables["vm"]
        if names is None:
            return None
        else:
            return Parameter.expand(names)

    def name_loop(names, label, f):
        # NOTE(review): currently unused; `f` is never called.
        names = get_names(arguments, variables)
        for name in names:
            Console.msg("{label} {name}".format(label=label, name=name))
            # r = f(name)

    def increment_string(strng):
        # Increment the trailing/embedded number in a VM name, e.g.
        # "vm01" -> "vm02"; used to find a free name when one is taken.
        ls = list(strng)
        numb_where = [0] * len(ls)
        numb = ""
        count_zeros = 0
        i = 0
        insert = len(ls) - 1
        while i < len(ls):
            try:
                int(ls[i])
                numb_where[i] = 1
                if int(ls[i]) == 0 and numb_where[i - 1] == 0:
                    numb_where[i] = 1
                    count_zeros += 1
                elif numb_where[i - 1] == 0:
                    # found a second number (disconnected)
                    if numb_where[i] == 1:
                        j = 0
                        while j < i:
                            # NOTE(review): '==' is a no-op comparison;
                            # an assignment (numb_where[j] = 0) was most
                            # likely intended to clear earlier marks.
                            numb_where[j] == 0
                            j += 1
                        numb = ""
                        numb = numb + ls[i]
                        insert = i
                else:
                    numb = numb + ls[i]
                i += 1
            except:
                # non-digit character: just advance
                i += 1
                continue
        if sum(numb_where) == 0:
            # no digits found: append "1" (dropping one leading zero if any)
            if count_zeros == 0:
                return strng + str(1)
            else:
                new_strng = ""
                i = 0
                rm_zero = 1
                while i < len(ls):
                    if ls[i] == str(0) and rm_zero == 1:
                        rm_zero = 0
                    else:
                        new_strng = new_strng + ls[i]
                    i += 1
                return new_strng + str(1)
        # increment the collected number; drop one padding zero when the
        # incremented number got longer (e.g. 09 -> 10)
        b4 = len(numb)
        numb = int(numb) + 1
        after = len(str(numb))
        if b4 != after:
            rm_zero = 1
        else:
            rm_zero = 0
        new_strng = ""
        new_numb_added = 0
        i = 0
        while i < len(numb_where):
            if numb_where[i] == 0:
                if numb_where[i + 1] == 1 and rm_zero == 1 and ls[i] == str(0):
                    rm_zero = 0
                else:
                    new_strng = new_strng + ls[i]
            elif new_numb_added == 0:
                if i == insert:
                    new_strng = new_strng + str(numb)
                    new_numb_added = 1
            else:
                new_strng = new_strng + ls[i]
            i += 1
        return new_strng

    map_parameters(arguments,
                   'active',
                   'cloud',
                   'command',
                   'dryrun',
                   'flavor',
                   'force',
                   'format',
                   'group',
                   'image',
                   'interval',
                   'ip',
                   'key',
                   'modify-knownhosts',
                   'n',
                   'name',
                   'public',
                   'quiet',
                   'refresh',
                   'secgroup',
                   'size',
                   'username')
    variables = Variables()

    # INITIALIZE
    # Read AWS credentials and boot defaults from the cloudmesh config.
    conf = Config("~/.cloudmesh/cloudmesh.yaml")["cloudmesh"]
    auth = conf["cloud"]['aws']
    aws_access_key_id = auth['credentials']['EC2_ACCESS_ID']
    aws_secret_access_key = auth['credentials']['EC2_SECRET_KEY']
    region_name = auth['default']['region']
    image_default = auth['default']['image']
    flavor_default = auth['default']['size']
    name_default = "test02_cloudmesh00"
    EC2Driver = get_driver(LibcloudProvider.EC2)
    driver_ec2 = EC2Driver(aws_access_key_id, aws_secret_access_key,
                           region='us-east-2')
    # drivers contains list of drivers, could work with multiple drivers
    drivers = [EC2Driver(aws_access_key_id, aws_secret_access_key,
                         region='us-east-2')]
    status_list = []
    current_status = []
    nodes = []
    # Gather every node reachable with these credentials up front; all
    # subcommands below work against this snapshot.
    for driver in drivers:
        nodes += driver.list_nodes()
    for node in nodes:
        status_dict = {
            "Name": node.name,
            "Status": node.state,
            "InstanceID": node.id,
        }
        current_status.append(status_dict.copy())
        status_list.append(node.name)
    order = ["Name", "Status", "InstanceID"]
    output = Printer.write(current_status,
                           order=order,
                           header=None,
                           output="table",
                           sort_keys=True)

    if arguments.status:
        if arguments["--cloud"]:
            clouds = get_clouds(arguments, variables)
        else:
            names = get_names(arguments, variables)
            if names == None:
                names = []
            # nodes contains all current nodes associated with aws_access_key_id
            numb_of_nodes = len(nodes)
            status_print = []
            found = 0
            if "all" in names:
                print("--Status on all nodes:")
                if numb_of_nodes == 1:
                    print("--Currently, there is", numb_of_nodes, "node.")
                else:
                    print("--Currently, there are", numb_of_nodes, "nodes.")
                print(output)
                return
            elif len(names) == 1:
                name = names[0]
                print("--Finding the status on:", name, "...")
                found = 0
                for node in nodes:
                    if node.name == name:
                        status_dict = {
                            "Name": node.name,
                            "Status": node.state,
                            "InstanceID": node.id,
                        }
                        print(" ", node.name, ": found")
                        status_print.append(status_dict.copy())
                        # print("   Status:",node.state)
                        found = 1
                if found == 0:
                    print(" ", name, ": not found")
            elif "all" not in names and len(names) > 1:
                for name in names:
                    print("--Finding the status on:", name, "...")
                    found = 0
                    for node in nodes:
                        if node.name == name:
                            status_dict = {
                                "Name": node.name,
                                "Status": node.state,
                                "InstanceID": node.id,
                            }
                            print(" ", node.name, ": found")
                            status_print.append(status_dict.copy())
                            found = 1
                    if found == 0:
                        print(" ", name, ": not found")
            if found == 0 or len(names) == 0:
                print("--You must specify at least one running node")
                print("--List of all nodes:")
                if numb_of_nodes == 1:
                    print("--Currently, there is", numb_of_nodes, "node.")
                else:
                    print("--Currently, there are", numb_of_nodes, "nodes.")
                i = 0
                while i < len(status_list):
                    print(" ", i + 1, ":", sorted(status_list)[i])
                    i += 1
            else:
                output = Printer.write(status_print,
                                       order=order,
                                       header=None,
                                       output="table",
                                       sort_keys=True)
                print(output)
                return
    elif arguments.boot:
        # NOTE(review): bare except silently maps any bad --n to 1.
        try:
            numb_of_nodes = int(arguments.n)
        except:
            numb_of_nodes = 1
        print("--Starting nodes")
        # numb_of_nodes=1
        # if name
        for number in range(numb_of_nodes):
            # Zero-pad numbering for the first ten nodes.
            if numb_of_nodes == 1:
                name = arguments.name or name_default
            elif number < 10:
                name = arguments.name or name_default + '0' + str(number)
            else:
                name = arguments.name or name_default + str(number)
            image = arguments.image or image_default or 'ami-0653e888ec96eab9b'
            flavor = arguments.flavor or flavor_default or 't2.micro'
            # NOTE(review): `key` is assigned but never used below.
            key = 'test_awskeys'
            sizes = driver_ec2.list_sizes()
            size = [s for s in sizes if s.id == flavor][0]
            node_image = NodeImage(id=image, name=None, driver=driver_ec2)
            # Bump the name until it does not collide with an existing node.
            found = 1
            while found == 1:
                if name in status_list:
                    print(" ", name, "already taken")
                    name = increment_string(name)
                    print(" using", name, "instead")
                else:
                    found = 0
            if arguments.dryrun == False:
                node = driver_ec2.create_node(name=name, image=node_image,
                                              size=size)
                print(name, node.id, "status=starting\n")
            else:
                print(name, "status=starting; DRY RUN\n")
                print(name, image, size, "\n")
            # optional wait here?
            # print('Waiting...')
            # node.wait_until_running()
            # current_status[name] = "running"
        print("--Started", numb_of_nodes, "node(s)")
        return
    elif arguments.ssh:
        # need to get name.pem from aws
        # ssh -i "name.pem" ubuntu@ec2-*.us-east-2.compute.amazonaws.com
        print("ssh")
        return
    elif arguments.stop:
        print("--Stopping node(s)")
        names = get_names(arguments, variables)
        # pprint(names)
        if names == None:
            names = []
            print("--Error: you need to specify a node to stop")
        elif len(names) > 2:
            # NOTE(review): threshold looks off — with exactly 2 names
            # the else-branch below only handles names[0].
            for name in names:
                found = 0
                for node in nodes:
                    if node.name == name:
                        print(" ", node.name, ": found")
                        print(" stopping", node.name)
                        if arguments.dryrun == False:
                            driver_ec2.ex_stop_node(node)
                            print(" ", node.name, "was stopped")
                        else:
                            print(" ", node.name, "was stopped; DRY RUN\n")
                        found = 1
                if found == 0:
                    print(" ", name, ": not found")
                    print(" ", name, "was not stopped")
        else:
            found = 0
            name = names[0]
            for node in nodes:
                if node.name == name:
                    print(" ", node.name, ": found")
                    print(" stopping", node.name)
                    if arguments.dryrun == False:
                        driver_ec2.ex_stop_node(node)
                        print(" ", node.name, "was stopped")
                    else:
                        print(" ", node.name, "was stopped; DRY RUN\n")
                    found = 1
            if found == 0:
                print(" ", name, ": not found")
                print(" ", name, "was not stopped")
        return
    elif arguments.terminate:
        print("--Terminating node(s)")
        names = get_names(arguments, variables)
        if names == None:
            names = []
            print("--Error: you need to specify a node to terminate")
        elif len(names) > 2:
            for name in names:
                found = 0
                for node in nodes:
                    if node.name == name:
                        print(" ", node.name, ": found")
                        if arguments.dryrun == False:
                            driver_ec2.destroy_node(node)
                            print(" ", node.name, "was terminated")
                        else:
                            print(" ", node.name, "was terminated; DRY RUN\n")
                        found = 1
                if found == 0:
                    print(" ", name, ": not found")
                    print(" ", name, "was not terminated")
        else:
            found = 0
            name = names[0]
            for node in nodes:
                if node.name == name:
                    print(" ", node.name, ": found")
                    print(" terminating", node.name)
                    if arguments.dryrun == False:
                        driver_ec2.destroy_node(node)
                        # NOTE(review): message says "stopped" though the
                        # node was terminated — likely copy/paste slip.
                        print(" ", node.name, "was stopped")
                    else:
                        print(" ", node.name, "was stopped; DRY RUN\n")
                    found = 1
            if found == 0:
                print(" ", name, ": not found")
                print(" ", name, "was not terminated")
        return
    elif arguments.start:
        print("--Restarting nodes")
        names = get_names(arguments, variables)
        if names == None:
            names = []
            print("--Error: you need to specify a node to restart")
        elif len(names) > 2:
            for name in names:
                found = 0
                for node in nodes:
                    if node.name == name:
                        print(" ", node.name, ": found")
                        print(" Attempting to restart", node.name)
                        if arguments.dryrun == False:
                            # only stopped nodes may be started again
                            if node.state == "stopped":
                                driver_ec2.ex_start_node(node)
                                print(" ", node.name, "was restarted")
                            else:
                                print(" ", node.name, "has status:",
                                      node.state,
                                      "and was therefore not restarted.")
                        else:
                            print(" ", node.name, "was restarted; DRY RUN\n")
                        found = 1
                if found == 0:
                    print(" ", name, ": not found")
                    print(" ", name, "was not restarted")
        else:
            found = 0
            name = names[0]
            for node in nodes:
                if node.name == name:
                    print(" ", node.name, ": found")
                    print(" starting", node.name)
                    if arguments.dryrun == False:
                        if node.state == "stopped":
                            driver_ec2.ex_start_node(node)
                            print(" ", node.name, "was restarted")
                        else:
                            print(" ", node.name, "has status:", node.state,
                                  "and was therefore not restarted.")
                    else:
                        print(" ", node.name, "was restarted; DRY RUN\n")
                    found = 1
            if found == 0:
                print(" ", name, ": not found")
                print(" ", name, "was not restarted")
        return
    elif arguments.default:
        print(
            "--Defaults used for booting vms in AWS: defaults can\n be changed in ~/.cloudmesh/cloudmesh.yaml file")
        defaults_dict = [{
            "Variable": "Name",
            "Value": name_default,
        }, {
            "Variable": "Region",
            "Value": region_name,
        }, {
            "Variable": "Image",
            "Value": image_default,
        }, {
            "Variable": "Flavor",
            "Value": flavor_default,
        }]
        order = ["Variable", "Value"]
        output = Printer.write(defaults_dict,
                               order=order,
                               header=None,
                               output="table",
                               sort_keys=True)
        print(output)
    else:
        print("not implemented")