def __init__(self):
    """Create authenticated Keystone, Nova and Glance API clients.

    Credentials come from the environment via the ``get_*_creds`` helpers;
    the Glance endpoint is resolved from Keystone's service catalog and the
    Glance client reuses the Keystone auth token.
    """
    self.kcreds = get_keystone_creds()
    self.keystone = ksclient.Client(**self.kcreds)
    self.ncreds = get_nova_creds()
    # Nova compute API pinned to version 1.1
    self.nova = novaclient.Client("1.1",**self.ncreds)
    self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image',endpoint_type='publicURL')
    # Glance image API v1, authenticated with the existing Keystone token
    self.glance = glanceclient.Client('1',self.glance_endpoint, token=self.keystone.auth_token)
def terminate_instances(self, instances): creds = get_nova_creds() # nova = client.Client(2.0, creds.get('username'), creds.get('api_key'),creds.get('project_id'), creds.get('auth_url')) nova = client.Client(2.0, username=creds.get('username'), password=self.utils.rc_pwd, project_name=creds.get('project_id'), auth_url=creds.get('auth_url')) deleted_instances = [] for i in instances: instance = nova.servers.get(i.id) # make sure the instance belongs to the cluster if not self.utils.cluster_name in str(instance): self.my_logger.error( "Tried to terminate an instance that was not in the cluster!" ) continue self.my_logger.debug("deleting instance: " + str(instance)) instance.delete() deleted_instances.append(instance) return deleted_instances
def describe_images(self, pattern=None):
    # Euca-describe-images
    """List Nova images, optionally filtered by exact name or id.

    :param pattern: if given, only images whose name or id equals it match.
    :returns: all images when ``pattern`` is None; a list of matches when a
        pattern is given; None when a pattern matches nothing.
    """
    creds = get_nova_creds()
    nova = client.Client(2.0, username=creds.get('username'), password=self.utils.rc_pwd, project_name=creds.get('project_id'), auth_url=creds.get('auth_url'))
    images = nova.images.list()
    # if you are using patterns, show only matching names and emi's
    matched_images = []
    if pattern:
        for image in images:
            if image.name == pattern or image.id == pattern:
                matched_images.append(image)
        if len(matched_images) > 0:
            return matched_images
        else:
            return None
    else:
        # correction in the returned variable!
        return images
def create_instances(test_properties):
    """Boot one VM per Ubuntu image for every flavor listed in the config.

    :param test_properties: dict whose key ``"num_instances_of_flavor"``
        maps flavor names to the number of instances to create per image.
    """
    # (sri) Add creation of test environment here
    default_name = "ubuntu"
    keystone_creds = credentials.get_keystone_creds()
    # Get images containing Ubuntu
    keystone = ksclient.Client(**keystone_creds)
    glance_endpoint = keystone.service_catalog.url_for( service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(glance_endpoint, token=keystone.auth_token)
    ubuntu_images = [ img for img in glance.images.list() if default_name in img["name"]]
    # Create instances from those images
    nova_creds = credentials.get_nova_creds()
    nova = nvclient.Client(**nova_creds)
    # Python 2 idiom (dict.iteritems)
    for flavor_name, num_instances_of_flavor in test_properties["num_instances_of_flavor"].iteritems():
        flavor = nova.flavors.find(name=flavor_name)
        created_instances = [ nova.servers.create(name=img["name"]+"_vm", image=img["id"], flavor=flavor, max_count=num_instances_of_flavor) for img in ubuntu_images ]
def connect(self):
    """Open a Nova connection and resolve the configured image and flavor.

    Populates ``self.creds``, ``self.conn``, ``self.image_id`` and
    ``self.instance_type`` from the ``cloud_config`` section named after
    this object.
    """
    self.creds = get_nova_creds()
    self.conn = nvclient.Client(**self.creds)
    # Resolve the image first, then the flavor, exactly as configured.
    wanted_image = self.cloud_config.get(self.name, "image_id")
    self.image_id = self.conn.images.find(name=wanted_image)
    wanted_flavor = self.cloud_config.get(self.name, "instance_type")
    self.instance_type = self.conn.flavors.find(name=wanted_flavor)
def build():
    """Provision the whole cluster: head-node services plus all compute nodes.

    Python 2 code; exits the process when done.
    """
    print ("Reading Config File (%s)...") %(CLUSTER_CFG)
    parse_conf(CLUSTER_CFG)
    print ("Initializing Cloud (%s)...") %(CLUSTER_NAME)
    create_cluster_db()
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    # Head-node prerequisites: keypair, security groups, munge, slurm, NFS.
    check_keypair(nova)
    create_secgroup(nova)
    headnode_secgroup(nova)
    check_munge()
    setup_slurmhead()
    create_hosts_file()
    prep_nfs_exports()
    print ("Launching total of %s Compute Nodes...\n") %(str(CLUSTER_COMPUTE_SIZE))
    strCloudInit=compute_cloud_init()
    for iCount in range(CLUSTER_RANGE_START,CLUSTER_COMPUTE_SIZE+1):
        print ("Launching %s") %(str(INSTANCE_NAME + str(iCount)))
        instance_id,instance_name,instance_ip = launch_instance(nova,INSTANCE_NAME+str(iCount),strCloudInit)
        update_db(instance_id, instance_name,instance_ip)
        #We copy the hosts file and configure clustersh. This ensures that in case of a failure to
        #launch a compute node, user still gets a working cluster. E.g. you requested 10 nodes
        #but only got 8 due to capacity or quota issues.
        # NOTE(review): per the comment above, this copy/config pair is assumed
        # to run once per launched node (inside the loop) — confirm intent.
        copyfile("/etc/hosts",CLUSTER_OPT+"/etc/hosts")
        config_clustersh()
    copyfile("/etc/hosts",CLUSTER_OPT+"/etc/hosts")
    #Not dealing with volumes automatically. Let user decide what is best.
    #c_creds = get_cinder_creds()
    #c_conn = ciclient.Client(**c_creds)
    #cinder_volume_create(c_conn)
    #print(c_conn.volumes.list())
    config_clustersh()
    print ("Cluster launched.... please wait for sometime and then run readyCluster -s to start the daemons\n")
    sys.exit(0)
def spindown(self):
    """Delete this object's server instance and its floating IP, with retries.

    Transient API errors are retried up to ``maxtries`` times with a 10 s
    pause; a persistent failure re-raises the last exception.

    :returns: True if an instance was deleted, False if none existed.
    """
    creds = get_nova_creds()
    nova = client.Client('2', **creds)
    maxtries = 10
    trycount = 0
    while 1:
        try:
            instance = nova.servers.find(name=self.name)
            # Release/delete the associated floating IP first
            try:
                floating_ip = nova.floating_ips.find( instance_id=instance.id)
                floating_ip.delete()
            except novaclient.exceptions.NotFound:
                # No floating IP assigned, it's alright
                pass
            # Then delete the instance
            instance.delete()
            return True
        except novaclient.exceptions.NotFound:
            # Instance does not exist, nothing to do
            return False
        except Exception as e:
            print('Problem retrieving/deleting server instance...')
            print(str(e))
            if trycount > maxtries:
                raise
            print('Retrying in 10s...')
            time.sleep(10)  # wait a bit and try again
            trycount += 1
def __init__(self, vm_name, image_id, vm_ram, vm_disk, vm_vcpus):
    """
    Constructor to create a connection and a Virtual Machine.

    :param vm_name: The name of the Virtual Machine to create.
    :param image_id: The id of the image from which the VM is created.
    :param vm_ram: The ram size of the Virtual Machine.
    :param vm_disk: The disk size of the Virtual Machine.
    :param vm_vcpus: Number of CPU's required by the Virtual Machine.
    """
    logging.basicConfig(filename='openstack.log', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' , datefmt='%m/%d/%Y %I:%M:%S %p')
    try:
        # Connections with novaClient(compute API) --------------------
        logging.info('Establishing Connection.')
        creds = get_nova_creds()
        self.nova = nvclient.Client(**creds)
    except:
        # NOTE(review): bare except re-raises, so behavior is safe, but
        # ``except Exception`` would avoid trapping KeyboardInterrupt.
        logging.error('Invalid Credentials.')
        logging.critical('Connection Aborted.')
        raise
    try:
        # Delegate actual VM creation to self.create()
        is_instance_present = False  # NOTE(review): local and unused here
        instance = self.create(vm_name, image_id, vm_ram, vm_disk, vm_vcpus)
        self.instance = instance
    except:
        logging.error('Virtual Machine not Created.')
        raise
def spinup(self):
    """Boot a Jenkins minion VM, wait for ACTIVE, then attach a floating IP.

    Python 2 code. :returns: the floating IP assigned to the new instance.
    :raises Exception: if the image, flavor or a free floating IP is missing.
    """
    # initialise novaclient instance
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    # ensure jenkins' pubkey is loaded
    if not nova.keypairs.findall(name="jenkins"):
        with open(os.path.expanduser( '/var/lib/jenkins/.ssh/id_rsa.pub')) as fpubkey:
            nova.keypairs.create(name=self.sshkeyname, public_key=fpubkey.read())
    image = nova.images.find(name=self.imagename)
    if not image:
        raise Exception('Could not find image...')
    flavor = nova.flavors.find(name=self.flavorname)
    if not flavor:
        raise Exception('Could not find flavor...')
    # spin up a cloud instance!!
    self.instance = nova.servers.create( name=self.name, image=image, flavor=flavor, key_name="jenkins", nics=[{ 'net-id': 'eadc7a9b-2ced-4b75-9915-552e6d09da3f' }])
    #TODO: figure out how to determine net-id
    # Poll at 5 second intervals, until the status is no longer 'BUILD'
    status = self.instance.status
    while status == 'BUILD':
        print 'Building minion %s' % self.name
        time.sleep(5)
        # Retrieve the instance again so the status field updates
        self.instance = nova.servers.get(self.instance.id)
        status = self.instance.status
    # assign floating ip
    floating_ips = nova.floating_ips.list()
    if not floating_ips:
        self.instance.delete()
        raise Exception( 'No floating ips in pool :(')  # todo: try creating some?
    for fip in floating_ips:
        if fip.instance_id is None:
            # not assigned to another instance
            self.ip = fip.ip
            self.instance.add_floating_ip(self.ip)
            break
    # If no free floating IP was found above, self.ip was never set.
    try:
        self.ip
    except:
        self.instance.delete()
        raise Exception( 'No available floating ips :(')  # todo: try creating some?
    #time.sleep(10) # sleep a bit, while the floating ip gets sorted
    return self.ip
def connect(self):
    """Connect to Nova and look up this host's configured image and flavor.

    Sets ``self.creds``, ``self.conn``, ``self.image_id`` and
    ``self.instance_type``.
    """
    self.creds = get_nova_creds()
    self.conn = nvclient.Client(**self.creds)
    # Image lookup, then flavor lookup, preserving the original order.
    img_name = self.cloud_config.get(self.name, "image_id")
    self.image_id = self.conn.images.find(name=img_name)
    flav_name = self.cloud_config.get(self.name, "instance_type")
    self.instance_type = self.conn.flavors.find(name=flav_name)
def __init__(self, image_name, image_path, vm_name, vm_ram, vm_disk, vm_vcpus):
    """
    Constructor to create a Connection, an Image Instance and a VM Instance.

    :param image_name: The name of the image to be created.
    :param image_path: The path of the image for creation.
    :param vm_name: The name of the virtual machine to create.
    :param vm_ram: The ram size of the virtual machine.
    :param vm_disk: The disk size of the virtual machine.
    :param vm_vcpus: Number of CPU's required by the virtual machine.
    """
    logging.basicConfig( filename='openstack.log', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    try:
        cloud_adapter = Cloud_Attributes()
        logging.info('Establishing Connection.')
        # Connections with keystoneClient(identity API) --------------------
        creds = get_keystone_creds()
        self.keystone = ksclient.Client(**creds)
        # Connections with novaClient(compute API) --------------------
        creds = get_nova_creds()
        self.nova = nvclient.Client(**creds)
    except:
        logging.error('Invalid Credentials.')
        logging.critical('Connection Aborted.')
        raise
    # Creating IMAGE instance --------------------
    try:
        logging.info('Creating Image Instance.')
        image = Image_Adapter(image_name, image_path)
    except:
        logging.error('Image not Created.')
        raise
    # Creating VM instance --------------------
    try:
        logging.info('Creating Virtual Machine.')
        # Look up the freshly-registered image's ID by name before booting.
        image_list = cloud_adapter.get_os_specific_image_list(image_name)
        image_details = image_list.get(image_name)
        image_id = image_details.get('Image_ID')
        instance = VM_Adapter(vm_name, image_id, vm_ram, vm_disk, vm_vcpus)
    except:
        logging.error('Virtual Machine not Created.')
        raise
def spindown(self):
    """Delete the server instance named ``self.name``.

    :returns: True if an instance was found and deleted, False when no
        instance with that name exists.
    """
    # novaclient is already a dependency of this module (nvclient alias).
    from novaclient import exceptions as nova_exc
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    try:
        instance = nova.servers.find(name=self.name)
    except nova_exc.NotFound:
        # BUG FIX: servers.find() raises NotFound instead of returning a
        # falsy value, so the original ``if instance: ... else: return
        # False`` could never take the else branch and the exception
        # escaped to the caller. Report False as intended.
        return False
    instance.delete()
    return True
def __init__(self):
    """Authenticate a Nova client from environment creds; exit(1) on failure.

    Python 2 code; failure details are printed before the process exits.
    """
    _creds = get_nova_creds()
    try:
        self.my_nova = nvclient.Client(**_creds)
    except Exception as e:
        print "Nova could not auth with vars: ",_creds
        print "client error:" ,e
        sys.exit(1)
    print 'NovaMgr created'
def get_openstack_clients(username="******", password="******", auth_url="http://<your_openstack_url>:35357/v2.0", tenant="your_openstack_tenantname"):
    """Register OpenStack credentials and return a Nova v1.1 client.

    The default argument values are redacted placeholders; callers must
    supply real credentials and endpoint.

    :returns: an authenticated novaclient instance.
    """
    set_creds(username=username, password=password, auth_url=auth_url, tenant=tenant, current_os=current)
    nova_creds = get_nova_creds(current_os=current)
    nova = novaclient.Client("1.1", **nova_creds)
    return nova
def __init__(self): self.creds = get_nova_creds() self.nova = nvclient.Client(**self.creds) print "Checking for keypair and importing if not found" if not self.nova.keypairs.findall(name="mykey"): with open(os.path.expanduser('~/.ssh/id_rsa.pub')) as fpubkey: self.nova.keypairs.create(name="mykey", public_key=fpubkey.read()) self.image = self.nova.images.find(name="futuregrid/ubuntu-12.04-03-\ Mar-2014") self.flavor = self.nova.flavors.find(name="m1.tiny")
def __init__(self): self.creds = get_nova_creds() self.nova = nvclient.Client(**self.creds) print "Checking for keypair and importing if not found" if not self.nova.keypairs.findall(name="mykey"): with open(os.path.expanduser('~/.ssh/id_rsa.pub')) as fpubkey: self.nova.keypairs.create(name="mykey", public_key=fpubkey.read()) self.image = self.nova.images.find(name="futuregrid/ubuntu-12.04\ -03-Mar-2014") self.flavor = self.nova.flavors.find(name="m1.tiny")
def find_by_id(self, server_id):
    """Return the Nova server whose id equals ``server_id``, or None.

    :param server_id: the server id to look for.
    """
    creds = get_nova_creds()
    nova = client.Client(2.0,
                         username=creds.get('username'),
                         password=self.utils.rc_pwd,
                         project_name=creds.get('project_id'),
                         auth_url=creds.get('auth_url'))
    # Linear scan over the tenant's servers; first match wins.
    for srv in nova.servers.list():
        if srv.id == server_id:
            return srv
    return None
def shrink(nodeCount):
    """Remove the highest-numbered ``nodeCount`` compute nodes from the cluster.

    Python 2 code. Prompts for confirmation, deletes each node's instance,
    scrubs its iptables / NFS export / hosts entries, updates the node DB,
    then restarts NFS.
    """
    print("Reading Config File (%s)...") % (CLUSTER_CFG)
    parse_conf(CLUSTER_CFG)
    print("This operation will delete %s nodes") % (nodeCount)
    strKey = "Are you sure?"
    ret = query_yes_no(strKey, "no")
    if ret == False:
        sys.exit(0)
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    verify_cluster(nova)
    # Start from the highest-numbered node and work backwards.
    nextNode = int(next_node(INSTANCE_NAME))
    db = CLUSTER_NAME + ".db"
    conn = sqlite3.connect(db)
    for iCount in xrange(nodeCount):
        sSQL = "Select * from nodes where name = '%s'" % (INSTANCE_NAME + str(nextNode))
        ret = conn.execute(sSQL)
        #print ret
        #print nextNode
        for server in ret:
            print "Terminating instance %s (id: %s) " % (server[1], server[0])
            try:
                ret = nova.servers.delete(str(server[0]))
            except:
                print "Error terminating the instance..."
                e = sys.exc_info()[1]
                print "Error: %s " % e
                sys.exit(0)
            try:
                print str(server[2])
                remove_line("/etc/sysconfig/iptables", str(server[2]))
                fileSystems = ['/apps', '/home', '/opt', '/system', '/short']
                toRemove = server[2] + "(rw,sync)"
                # NOTE(review): loop variable ``fs`` is never used — the same
                # replacement runs once per filesystem. Probably intended to
                # be filesystem-specific; confirm before changing.
                for fs in fileSystems:
                    replace_string_line("/etc/exports", toRemove)
                remove_line("/etc/hosts", str(server[2]))
            except:
                print "Error removing the iptables or nfs exports entry for %s... please remove manually" % ( str(server[2]))
                e = sys.exc_info()[1]
                print "Error: %s " % e
        remove_node_db(INSTANCE_NAME + str(nextNode))
        nextNode = nextNode - 1
    conn.close()
    proc = subprocess.Popen(["service", "nfs", "restart"], stdout=subprocess.PIPE, shell=False)
    proc.wait()
def __init__(self, virtapi, read_only=False):
    """EC2-backed Nova virt driver constructor.

    Wires up the Nova client, the EC2 and CloudWatch connections, and the
    security-group rule-refresher pipeline.

    :param virtapi: Nova virt API handle passed to the base driver.
    :param read_only: accepted for driver-interface compatibility; not
        referenced in this constructor.
    """
    super(EC2Driver, self).__init__(virtapi)
    # Static host capability report advertised to the scheduler.
    self.host_status_base = {
        'vcpus': VCPUS,
        'memory_mb': MEMORY_IN_MBS,
        'local_gb': DISK_IN_GB,
        'vcpus_used': 0,
        'memory_mb_used': 0,
        'local_gb_used': 100000000000,
        'hypervisor_type': 'EC2',
        'hypervisor_version': '1.0',
        'hypervisor_hostname': CONF.host,
        'cpu_info': {},
        'disk_available_least': 500000000000,
    }
    self._mounts = {}
    self._interfaces = {}
    self.creds = get_nova_creds()
    self.nova = client.Client(**self.creds)
    region = RegionInfo(name=aws_region, endpoint=aws_endpoint)
    self.ec2_conn = ec2.EC2Connection(aws_access_key_id=CONF.ec2driver.ec2_access_key_id, aws_secret_access_key=CONF.ec2driver.ec2_secret_access_key, host=host, port=port, region=region, is_secure=secure)
    self.cloudwatch_conn = ec2.cloudwatch.connect_to_region( aws_region, aws_access_key_id=CONF.ec2driver.ec2_access_key_id, aws_secret_access_key=CONF.ec2driver.ec2_secret_access_key)
    # Serialises security-group mutations.
    self.security_group_lock = Lock()
    # Refresher keeps OpenStack and EC2 security-group rules in sync.
    self.instance_rule_refresher = InstanceRuleRefresher(
        GroupRuleRefresher(
            ec2_connection=self.ec2_conn,
            openstack_rule_service=OpenstackRuleService(
                group_service=OpenstackGroupService(self.nova.security_groups),
                openstack_rule_transformer=OpenstackRuleTransformer()
            ),
            ec2_rule_service=EC2RuleService(
                ec2_connection=self.ec2_conn,
                ec2_rule_transformer=EC2RuleTransformer(self.ec2_conn)
            )
        )
    )
    # Register this host as a node exactly once per process.
    if not '_EC2_NODES' in globals():
        set_nodes([CONF.host])
def create_topology():
    """Create the network, subnet, security groups/rules and load balancers.

    Uses Keystone creds for the Quantum/Neutron client and Nova creds for
    the compute client; shared ids/mappings come from the ``mysets`` module.
    """
    quantum_creds= get_keystone_creds()
    quantum = client.Client(**quantum_creds)
    nova_creds = get_nova_creds()
    nova = nvclient.Client(**nova_creds)
    network_object=network_subnet(quantum)
    network_object.Create_network()
    network_object.Create_subnet()
    #enter the nova code to boot instances
    security_grp_object=security_groups(quantum)
    security_grp_object.create_security_groups()
    security_grp_object.create_security_group_rules()
    load_balancer=Load_Balancer_floating_ip(mysets.network_id,mysets.subnet_mapping, mysets.ip_mapping, mysets.sec_id,quantum);
    load_balancer.Create_Loadbalancers()
def block_until_running(self, instances, target_status='ACTIVE'): creds = get_nova_creds() # nova = client.Client(2.0, creds.get('username'), creds.get('api_key'), creds.get('project_id'), creds.get('auth_url')) nova = client.Client(2.0, username=creds.get('username'), password=self.utils.rc_pwd, project_name=creds.get('project_id'), auth_url=creds.get('auth_url')) # # Run describe instances until everyone is running tmpinstances = instances.copy() instances = [] members = ("id", "networks", "flavor", "image", "status", "key_name", "name", "created") while len(tmpinstances) > 0: sys.stdout.flush() all_running_instances = nova.servers.list( search_opts={'status': target_status}) # print("all_running_instances:\t" + str(all_running_instances)) # print("length_all = " + str(len(all_running_instances))) for i in range(0, len(all_running_instances)): # print("length_temp = " + str(len(tmpinstances))) for j in range(0, len(tmpinstances)): ping = subprocess.getoutput("/bin/ping -q -c 1 " + str( all_running_instances[i].networks['private-net'][0])) nc = subprocess.getoutput("nc -z -v " + str( all_running_instances[i].networks['private-net'][0]) + " 22") if (all_running_instances[i].id == tmpinstances[j].id) \ and ping.count('1 received') > 0 and nc.count("succeeded") > 0: tmpinstances.pop(j) # # get instance details details = {} for member in members: val = getattr(all_running_instances[i], member, "") # product_codes is a list if hasattr(val, '__iter__') and not ( (type(val) is str) or (type(val) is list)): v = val.get('id') if v == None: v = val.get('private-net')[0] val = v # print (val) details[member] = val _instance = Instance(details) instances.append(_instance) # instances.append(all_running_instances[i]) break # self.describe_instances() # Don't know why this is called. Maybe I will search it one day! print("Running-Instances: (by block_until_running): " + str(instances)) return instances
def createinstance():
    """Boot a cirros VM named argv[1] on the network named argv[2].

    Python 2 code; exits if the network does not exist, then polls until the
    instance leaves BUILD state.
    """
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    #if not nova.keypairs.findall(name="mykey"):
    #    with open(os.path.expanduser(''))
    # a key pair named "mykey" is already created.
    neutron = client.Client(username='******', password='******', tenant_name='demo', auth_url=os.environ['OS_AUTH_URL'])
    neutron.format= 'json'
    netname = str(argv[2])
    print netname
    if not neutron.list_networks(name=netname)['networks']:
        print "network does not exist"
        exit(0)
    else:
        netid = neutron.list_networks(name=netname)["networks"][0]["id"]
    # prepare all paremeters image, flavor, instancename, networkinfo
    image = nova.images.find(name="cirros-0.3.1-x86_64-uec")
    flavor = nova.flavors.find(name="m1.tiny")
    instancename = str(argv[1])
    networkinfo =[{'net-id':netid}]
    print "new instance named", instancename, "will be created"
    #networkinfo = {'uuid':'56b68aef-8080-45de-9206-152b8d0229b6'}
    instance = nova.servers.create(name=instancename, image=image, flavor=flavor, key_name="mykey", nics=networkinfo)
    # Poll at 5 second intervals until the build finishes.
    status = instance.status
    while status =='BUILD':
        time.sleep(5)
        instance = nova.servers.get(instance.id)
        status = instance.status
    print "status: %s" % status
def spinup(self):
    """Boot a minion VM, wait until it leaves BUILD, attach a new floating IP.

    The floating IP is allocated before booting so quota problems fail fast;
    if attaching it fails, the half-built instance is deleted and the error
    propagates.

    :returns: the floating IP address assigned to the instance.
    """
    # initialise novaclient instance
    creds = get_nova_creds()
    nova = client.Client('2', **creds)
    # ensure jenkins' pubkey is loaded
    if not nova.keypairs.findall(name=self.sshkeyname):
        with open(os.path.expanduser(SSH_PUBKEY)) as fpubkey:
            nova.keypairs.create( name=self.sshkeyname, public_key=fpubkey.read())
    image = nova.images.find(name=self.imagename)
    if not image:
        raise Exception('Could not find image...')
    flavor = nova.flavors.find(name=self.flavorname)
    if not flavor:
        raise Exception('Could not find flavor...')
    # Get a floating IP early to fail faster if we are over quota
    floating_ip = nova.floating_ips.create()
    # spin up a cloud instance!!
    # TODO: figure out how to determine net-id
    self.instance = nova.servers.create( name=self.name, image=image, flavor=flavor, key_name=self.sshkeyname, nics=[{'net-id': self.netid}])
    # Poll at 5 second intervals, until the status is no longer 'BUILD'
    status = self.instance.status
    while status == 'BUILD':
        print('Building minion %s' % self.name)
        time.sleep(5)
        # Retrieve the instance again so the status field updates
        self.instance = nova.servers.get(self.instance.id)
        status = self.instance.status
    # assign floating ip
    try:
        self.instance.add_floating_ip(floating_ip)
        self.ip = floating_ip.ip
    except:
        # Clean up the half-built instance, then propagate the error.
        self.instance.delete()
        raise
    return self.ip
def spinup(self):
    """Boot a 'jenkins' image VM, wait for it, then grab a free floating IP.

    Python 2 code. :returns: the floating IP assigned to the new instance.
    :raises Exception: if image/flavor are missing or no floating IP is free.
    """
    # initialise novaclient instance
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    # ensure jenkins' pubkey is loaded
    if not nova.keypairs.findall(name="jenkins"):
        with open(os.path.expanduser('/var/lib/jenkins/.ssh/id_rsa.pub')) as fpubkey:
            nova.keypairs.create(name="jenkins", public_key=fpubkey.read())
    image = nova.images.find(name="jenkins")
    if not image:
        raise Exception('Could not find image...')
    flavor = nova.flavors.find(name="c1.c4r4")
    if not flavor:
        raise Exception('Could not find flavor...')
    # spin up a cloud instance!!
    self.instance = nova.servers.create(name=self.name, image=image, flavor=flavor, key_name="jenkins", nics=[{'net-id':'eadc7a9b-2ced-4b75-9915-552e6d09da3f'}])
    #TODO: figure out how to determine net-id
    # Poll at 5 second intervals, until the status is no longer 'BUILD'
    status = self.instance.status
    while status == 'BUILD':
        print 'Building minion %s' % self.name
        time.sleep(5)
        # Retrieve the instance again so the status field updates
        self.instance = nova.servers.get(self.instance.id)
        status = self.instance.status
    # assign floating ip
    floating_ips = nova.floating_ips.list()
    if not floating_ips:
        self.instance.delete()
        raise Exception('No floating ips in pool :(')  # todo: try creating some?
    for fip in floating_ips:
        if fip.instance_id is None:
            # not assigned to another instance
            self.ip = fip.ip
            self.instance.add_floating_ip(self.ip)
            break
    # If no free floating IP was found, self.ip was never set.
    try:
        self.ip
    except:
        self.instance.delete()
        raise Exception('No available floating ips :(')  # todo: try creating some?
    #time.sleep(10) # sleep a bit, while the floating ip gets sorted
    return self.ip
def describe_flavors(self):
    """Return a mapping of flavor name -> flavor object for all Nova flavors."""
    creds = get_nova_creds()
    nova = client.Client(2.0,
                         username=creds.get('username'),
                         password=self.utils.rc_pwd,
                         project_name=creds.get('project_id'),
                         auth_url=creds.get('auth_url'))
    # Build the name->flavor map in one pass.
    return {flavor.name: flavor for flavor in nova.flavors.list()}
def destroy():
    """Terminate every cluster compute node and tear down head-node state.

    Python 2 code. Prompts for confirmation; per node it deletes the Nova
    instance and its iptables entry, then clears NFS exports, stops/restarts
    services and removes the cluster DB file.
    """
    strKey = "This will terminate your cluster compute nodes... Are you sure?"
    ret = query_yes_no(strKey, "no")
    if ret == True:
        print("Reading Config File (%s)...") % (CLUSTER_CFG)
        parse_conf(CLUSTER_CFG)
        db = CLUSTER_NAME + ".db"
        conn = sqlite3.connect(db)
        sSQL = "SELECT ID, NAME, IP_ADDR_EXT FROM NODES"
        result = conn.execute(sSQL)
        creds = get_nova_creds()
        nova = nvclient.Client(**creds)
        for row in result:
            print "Terminating instance (%s)- %s with IP %s " % (str( row[0]), str(row[1]), str(row[2]))
            try:
                ret = nova.servers.delete(str(row[0]))
            except:
                # Best-effort: report and continue with the next node.
                print "Error terminating the instance..."
                e = sys.exc_info()[1]
                print "Error: %s " % e
                #sys.exit(0)
            try:
                print str(row[2])
                remove_line("/etc/sysconfig/iptables", str(row[2]))
            except:
                print "Error removing the iptables entry for %s... please remove manually" % ( str(row[2]))
                e = sys.exc_info()[1]
                print "Error: %s " % e
        empty_file("/etc/exports")
        print "NFS Exports file entries have been removed..."
        proc = subprocess.Popen(["service", "nfs", "stop"], stdout=subprocess.PIPE, shell=False)
        #print 'poll =', proc.poll(), '("None" means process not terminated yet)'
        proc.wait()
        proc = subprocess.Popen(["service", "iptables", "restart"], stdout=subprocess.PIPE, shell=False)
        proc.wait()
        conn.close()
        os.remove(db)
    else:
        print "Exiting on user request"
        sys.exit(0)
def shrink(nodeCount):
    """Delete the top ``nodeCount`` compute nodes and their head-node entries.

    Python 2 code. Confirms with the user, then for each node (highest
    number first): deletes the instance, removes its iptables / NFS export /
    hosts entries and its DB row; finally restarts NFS.
    """
    print ("Reading Config File (%s)...") %(CLUSTER_CFG)
    parse_conf(CLUSTER_CFG)
    print ("This operation will delete %s nodes") %(nodeCount)
    strKey="Are you sure?"
    ret = query_yes_no(strKey, "no")
    if ret == False:
        sys.exit(0)
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    verify_cluster(nova)
    # Work backwards from the highest-numbered node.
    nextNode=int(next_node(INSTANCE_NAME))
    db = CLUSTER_NAME + ".db"
    conn = sqlite3.connect(db)
    for iCount in xrange(nodeCount):
        sSQL="Select * from nodes where name = '%s'" %(INSTANCE_NAME+str(nextNode))
        ret=conn.execute(sSQL)
        #print ret
        #print nextNode
        for server in ret:
            print "Terminating instance %s (id: %s) " %(server[1],server[0])
            try:
                ret = nova.servers.delete(str(server[0]))
            except:
                print "Error terminating the instance..."
                e = sys.exc_info()[1]
                print "Error: %s " % e
                sys.exit(0)
            try:
                print str(server[2])
                remove_line("/etc/sysconfig/iptables",str(server[2]))
                fileSystems=['/apps','/home', '/opt','/system','/short']
                toRemove=server[2] + "(rw,sync)"
                # NOTE(review): ``fs`` is unused — the same replacement runs
                # once per filesystem. Confirm intent before changing.
                for fs in fileSystems:
                    replace_string_line("/etc/exports",toRemove)
                remove_line("/etc/hosts",str(server[2]))
            except:
                print "Error removing the iptables or nfs exports entry for %s... please remove manually" %(str(server[2]))
                e = sys.exc_info()[1]
                print "Error: %s " % e
        remove_node_db(INSTANCE_NAME+str(nextNode))
        nextNode = nextNode - 1
    conn.close()
    proc = subprocess.Popen(["service", "nfs", "restart"], stdout=subprocess.PIPE, shell=False)
    proc.wait()
def main():
    """CLI entry point: evacuate a hypervisor (--migrate) or undo it (--recover).

    Exactly one of the two modes must be selected; ``--migrate`` additionally
    requires both ``--source`` and ``--dest`` hypervisors.
    """
    args = parse_args()
    # setup_logging(args)
    # (A surrounding try/except that only re-raised was removed — it was a no-op.)
    nova = client.Client(**get_nova_creds())
    timeout = 60
    if ((args.migrate and args.recover)
            or (args.migrate is False and args.recover is False)):
        print("Please either migrate, or recover, but not both")
        # BUG FIX: execution previously fell through after this message and
        # could still run a migration; bail out instead.
        raise SystemExit
    if args.migrate:
        if (not args.source) or (not args.dest):
            print("Must supply both source and destination hypervisors")
            raise SystemExit
        migrate_away(args, nova, timeout)
    if args.recover:
        recover(args, nova, timeout)
def __init__(self):
    """
    Constructor to create a Connection.

    Configures file logging, then authenticates a Nova client from
    environment credentials; any failure is logged and re-raised.
    """
    logging.basicConfig( filename='openstack.log', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    try:
        # Connections with novaClient(compute API) --------------------
        logging.info('Establishing Connection.')
        creds = get_nova_creds()
        self.nova = nvclient.Client(**creds)
    except:
        # Credential/endpoint problems surface here; log and propagate.
        logging.error('Connection not Establised')
        raise
def extend(addNodeCount):
    """Add ``addNodeCount`` new compute nodes to the running cluster.

    Python 2 code. Confirms with the user, verifies the cluster, then boots
    each new node (numbered after the current highest) with the compute
    cloud-init payload.
    """
    print ("Reading Config File (%s)...") %(CLUSTER_CFG)
    parse_conf(CLUSTER_CFG)
    print ("Extending the cluster by adding %s nodes") %(addNodeCount)
    strKey="Are you sure?"
    ret = query_yes_no(strKey, "no")
    if ret == False:
        sys.exit(0)
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    verify_cluster(nova)
    nextNode=int(next_node(INSTANCE_NAME))
    #print nextNode
    strCloudInit=compute_cloud_init()
    for iCount in xrange(addNodeCount):
        nextNode = int(nextNode) + 1
        print "Node %s being added to the cluster..." %(INSTANCE_NAME+str(nextNode))
        add_node(nova,INSTANCE_NAME+str(nextNode),strCloudInit)
    print ("Compute nodes added to the cluster.. \nPlease run './readyCluster.py -s' to restart services on the entire cluster or individually start services using './readyCluster.py -n NODE' and restart slurm on the headnode.")
def destroy():
    """Terminate all cluster compute nodes and clean up head-node state.

    Python 2 code. Confirms with the user, deletes each instance and its
    iptables entry, empties the NFS exports file, bounces services and
    removes the cluster database.
    """
    strKey="This will terminate your cluster compute nodes... Are you sure?"
    ret = query_yes_no(strKey, "no")
    if ret == True:
        print ("Reading Config File (%s)...") %(CLUSTER_CFG)
        parse_conf(CLUSTER_CFG)
        db = CLUSTER_NAME + ".db"
        conn = sqlite3.connect(db)
        sSQL = "SELECT ID, NAME, IP_ADDR_EXT FROM NODES"
        result = conn.execute(sSQL)
        creds = get_nova_creds()
        nova = nvclient.Client(**creds)
        for row in result:
            print "Terminating instance (%s)- %s with IP %s " %(str(row[0]), str(row[1]), str(row[2]))
            try:
                ret = nova.servers.delete(str(row[0]))
            except:
                # Best-effort: report and keep going with the next node.
                print "Error terminating the instance..."
                e = sys.exc_info()[1]
                print "Error: %s " % e
                #sys.exit(0)
            try:
                print str(row[2])
                remove_line("/etc/sysconfig/iptables",str(row[2]))
            except:
                print "Error removing the iptables entry for %s... please remove manually" %(str(row[2]))
                e = sys.exc_info()[1]
                print "Error: %s " % e
        empty_file("/etc/exports")
        print "NFS Exports file entries have been removed..."
        proc = subprocess.Popen(["service", "nfs", "stop"], stdout=subprocess.PIPE, shell=False)
        #print 'poll =', proc.poll(), '("None" means process not terminated yet)'
        proc.wait()
        proc = subprocess.Popen(["service", "iptables", "restart"], stdout=subprocess.PIPE, shell=False)
        proc.wait()
        conn.close()
        os.remove(db)
    else:
        print "Exiting on user request"
        sys.exit(0)
def main():
    """Boot a worker VM from a volume snapshot and write its metadata onto it.

    Python 2 code. Reads flavor/snapshot/network from a hard-coded config
    file, clones a 40 GB boot volume from the snapshot, boots from it, waits
    for ACTIVE, then ssh-es in to record instance and volume ids (used later
    when terminating the node).
    """
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    config = ConfigParser.RawConfigParser()
    config.read('/home/chris/src/rabbitmq/chris-moc.cfg')
    flavor_name = config.get('Openstack', 'flavor')
    volume_snapshot_id = config.get('Openstack', 'volume-snapshot-id')
    network_name = config.get('Openstack', 'network')
    flavor = nova.flavors.find(name=flavor_name)
    vlsnapshot = nova.volume_snapshots.find(id=volume_snapshot_id)
    # 40 GB boot volume cloned from the snapshot.
    volume = nova.volumes.create(40, snapshot_id=vlsnapshot.id)
    time.sleep(10)  # crude wait for the volume; no status polling is done
    block_dev_mapping = {'vda': volume.id}
    instance = nova.servers.create(name="chris-worker", image=None, flavor=flavor, block_device_mapping=block_dev_mapping)
    status = instance.status
    while status == 'BUILD':
        time.sleep(5)
        instance = nova.servers.get(instance.id)
        status = instance.status
    print instance.status
    instance_ip = instance.networks[network_name][0]
    instance_id = instance.id
    volume_id = volume.id
    time.sleep(120)  # give the guest time to finish booting before ssh
    #Writes node metadata on node and used when terminating it
    # NOTE(review): command built by string interpolation; values are Nova
    # ids/IPs (not user input), but quoting is fragile — confirm.
    cmd = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no chris@%s \"echo '%s' > /home/chris/node-metadata.dat && echo '%s' >> /home/chris/node-metadata.dat\"" % ( instance_ip, instance_id, volume_id)
    os.system(cmd)
def build():
    """Provision the cluster: head-node setup, then launch all compute nodes.

    Python 2 code; exits the process when finished.
    """
    print("Reading Config File (%s)...") % (CLUSTER_CFG)
    parse_conf(CLUSTER_CFG)
    print("Initializing Cloud (%s)...") % (CLUSTER_NAME)
    create_cluster_db()
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    # Head-node prerequisites: keypair, security groups, munge, slurm, NFS.
    check_keypair(nova)
    create_secgroup(nova)
    headnode_secgroup(nova)
    check_munge()
    setup_slurmhead()
    create_hosts_file()
    prep_nfs_exports()
    print("Launching total of %s Compute Nodes...\n") % ( str(CLUSTER_COMPUTE_SIZE))
    strCloudInit = compute_cloud_init()
    for iCount in range(CLUSTER_RANGE_START, CLUSTER_COMPUTE_SIZE + 1):
        print("Launching %s") % (str(INSTANCE_NAME + str(iCount)))
        instance_id, instance_name, instance_ip = launch_instance( nova, INSTANCE_NAME + str(iCount), strCloudInit)
        update_db(instance_id, instance_name, instance_ip)
        #We copy the hosts file and configure clustersh. This ensures that in case of a failure to
        #launch a compute node, user still gets a working cluster. E.g. you requested 10 nodes
        #but only got 8 due to capacity or quota issues.
        # NOTE(review): per the comment above, this copy/config pair is assumed
        # to run once per launched node (inside the loop) — confirm intent.
        copyfile("/etc/hosts", CLUSTER_OPT + "/etc/hosts")
        config_clustersh()
    copyfile("/etc/hosts", CLUSTER_OPT + "/etc/hosts")
    #Not dealing with volumes automatically. Let user decide what is best.
    #c_creds = get_cinder_creds()
    #c_conn = ciclient.Client(**c_creds)
    #cinder_volume_create(c_conn)
    #print(c_conn.volumes.list())
    config_clustersh()
    print( "Cluster launched.... please wait for sometime and then run readyCluster -s to start the daemons\n" )
    sys.exit(0)
def createinstance():
    """Boot a VM from image argv[1], named argv[2], on network argv[3].

    Python 2 code; exits if the network does not exist, then polls until the
    instance leaves BUILD state.
    """
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    neutron = client.Client(username='******', password='******', tenant_name='admin', auth_url=os.environ['OS_AUTH_URL'])
    neutron.format= 'json'
    netname = str(argv[3])
    print netname
    if not neutron.list_networks(name=netname)['networks']:
        print "network does not exist"
        exit(0)
    else:
        netid = neutron.list_networks(name=netname)["networks"][0]["id"]
    imagename = str(argv[1])
    image = nova.images.find(name=imagename)
    flavor = nova.flavors.find(name="m1.ubuntu")
    instancename = str(argv[2])
    networkinfo =[{'net-id':netid}]
    print "new instance named", instancename, "will be created"
    #networkinfo = {'uuid':'56b68aef-8080-45de-9206-152b8d0229b6'}
    instance = nova.servers.create(name=instancename, image=image, flavor=flavor, nics=networkinfo)
    # Poll at 5 second intervals until the build finishes.
    status = instance.status
    while status =='BUILD':
        time.sleep(5)
        instance = nova.servers.get(instance.id)
        status = instance.status
    print "status: %s" % status
def extend(addNodeCount):
    """Add ``addNodeCount`` fresh compute nodes to an existing cluster.

    Python 2 code. Confirms with the user, verifies the cluster, then boots
    each new node (numbered after the current highest) with the compute
    cloud-init payload.
    """
    print("Reading Config File (%s)...") % (CLUSTER_CFG)
    parse_conf(CLUSTER_CFG)
    print("Extending the cluster by adding %s nodes") % (addNodeCount)
    strKey = "Are you sure?"
    ret = query_yes_no(strKey, "no")
    if ret == False:
        sys.exit(0)
    creds = get_nova_creds()
    nova = nvclient.Client(**creds)
    verify_cluster(nova)
    nextNode = int(next_node(INSTANCE_NAME))
    #print nextNode
    strCloudInit = compute_cloud_init()
    for iCount in xrange(addNodeCount):
        nextNode = int(nextNode) + 1
        print "Node %s being added to the cluster..." % (INSTANCE_NAME + str(nextNode))
        add_node(nova, INSTANCE_NAME + str(nextNode), strCloudInit)
    print( "Compute nodes added to the cluster.. \nPlease run './readyCluster.py -s' to restart services on the entire cluster or individually start services using './readyCluster.py -n NODE' and restart slurm on the headnode." )
def spindown(self):
    """Find this node's server by name and delete it.

    Returns True once the server has been deleted, False if it was not
    found or the Nova lookup kept failing.  Transient client errors are
    retried every 5 seconds until the retry budget is exhausted.
    """
    nova = nvclient.Client(**get_nova_creds())
    maxtries = 10
    attempts = 0
    while True:
        try:
            server = nova.servers.find(name=self.name)
            if not server:
                return False
            server.delete()
            return True
        except novaclient.exceptions.ClientException:
            print('Problem retrieving server instance...')
            if attempts > maxtries:
                return False
            print('retrying...')
            time.sleep(5)  # wait a bit and try again
            attempts += 1
def main(vm_name):
    """Boot an Ubuntu 14.04 m1.small server named `vm_name` and attach a
    floating IP once it reaches ACTIVE.

    All output is redirected into the `create_log` file.

    :param vm_name: name for the new Nova server.
    """
    # NOTE(review): sys.stdout is replaced for the rest of the process and
    # closed at the end -- anything printed after main() returns will fail.
    sys.stdout = open(create_log, 'w')
    creds = get_nova_creds()
    novac = client.Client(**creds)
    image = novac.images.find(name="Ubuntu 14.04")
    flavor = novac.flavors.find(name="m1.small")
    instance = novac.servers.create(name=vm_name,
                                    image=image,
                                    flavor=flavor,
                                    key_name="rsp-dev")
    # Poll at 5 second intervals, until the status is no longer 'BUILD'
    status = instance.status
    while status == 'BUILD':
        time.sleep(5)
        # Retrieve the instance again so the status field updates
        instance = novac.servers.get(instance.id)
        status = instance.status
    print("Your new instance is : %s" % status)
    if status == 'ACTIVE':
        instance = novac.servers.find(name=vm_name)
        instance.add_floating_ip(new_ip)
        # Bug fix: the message was passed as two print arguments
        # ("...%s", new_ip), which emits a tuple instead of the
        # formatted string; now %-formatted as intended.
        print("IP Address for the instance is : %s" % new_ip)
    sys.stdout.flush()
    sys.stdout.close()
def __init__(self, image_name, image_path):
    """
    Constructor to create a Connection and an Image Instance.

    :param image_name: The name of the image to be created.
    :param image_path: The path of the image for creation.
    :raises: re-raises any connection or image-creation failure after
             logging it.
    """
    logging.basicConfig(filename='openstack.log',
                        level=logging.DEBUG,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    logging.info('Establishing Connection.')
    # Establishing Connections --------------------
    try:
        # Connections with keystoneClient(identity API) --------------------
        creds = get_keystone_creds()
        self.keystone = ksclient.Client(**creds)
        # Connections with novaClient(compute API) --------------------
        creds = get_nova_creds()
        self.nova = nvclient.Client(**creds)
    except Exception:
        # Bug fix: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt propagate normally; the failure is still
        # logged and re-raised.
        logging.error('Connection not Established.')
        raise
    # Creating IMAGE instance --------------------
    try:
        logging.info('Creating Image.')
        self.is_image_present = False
        image = self.create(image_name, image_path)
        self.image = image
    except Exception:
        logging.error('Image not Created.')
        # NOTE(review): `main` is a name defined elsewhere in the module;
        # confirm this cleanup target -- `self.delete()` may be intended.
        main.delete()
        raise
def get_nova_client():
    """Return a Nova API client (version 1.1) built from stored credentials."""
    from novaclient import client as novaclient
    return novaclient.Client('1.1', **get_nova_creds())
def get_nova(who):
    """Build a Nova client for `who`, skipping TLS certificate verification."""
    credentials = get_nova_creds(who)
    return nvclient.Client(insecure=True, **credentials)
def describe_instances(self, state=None, pattern=None):
    """List cluster instances, either live from Nova or from the local DB.

    :param state: None (usual case) -> query Nova and refresh the local
                  instance DB; "pollDB" -> read instances from the local
                  database instead; any other value -> query Nova and keep
                  only instances whose status equals `state` (no refresh).
    :param pattern: optional substring; when given, only instances whose
                    name or id contains it are returned.
    :returns: list of Instance objects, or None when `pattern` matches
              nothing.
    """
    instances = []
    # state is almost always None, so the Nova branch normally runs.
    if state != "pollDB":
        creds = get_nova_creds()
        # client.Client(2, username, api_key, project_id, auth_url) is
        # DEPRECATED and not working -- keyword arguments are required.
        nova = client.Client(2.0,
                             username=creds.get('username'),
                             password=self.utils.rc_pwd,
                             project_name=creds.get('project_id'),
                             auth_url=creds.get('auth_url'))
        servers = nova.servers.list()
        members = ("id", "networks", "flavor", "image", "status", "key_name",
                   "name", "created")
        for server in servers:
            print(
                "OpenStackCluster.describe_instances will get info from server:\t"
                + str(server))
            details = {}
            for member in members:
                val = getattr(server, member, "")
                # dict-like attributes (flavor/image/networks) are reduced
                # to their 'id', falling back to the first 'private-net'
                # address when no id key exists.
                if hasattr(val, '__iter__') and not ((type(val) is str) or
                                                     (type(val) is list)):
                    v = val.get('id')
                    if v is None:
                        v = val.get('private-net')[0]
                    details[member] = v
                else:
                    details[member] = val
            instance = Instance(details)
            if state:
                if state == instance.status:
                    instances.append(instance)
            else:
                instances.append(instance)
        print("OpenStackCluster, state = " + str(state))
        if not state:
            # Very important: loads all the user's instances into the
            # `instances` table; the `state` branches skip this refresh.
            self.utils.refresh_instance_db(instances)
    else:
        # Read the instances from the local database.
        con = create_engine(self.utils.db_file)
        cur = con.connect()
        instancesfromDB = []
        try:
            instancesfromDB = cur.execute(
                'select * from instances').fetchall()
        except exc.DatabaseError:
            con.rollback()
        cur.close()
        for instancefromDB in instancesfromDB:
            instances.append(
                self.utils.return_instance_from_tuple(instancefromDB))
    # pattern is almost always None, so the else branch normally runs.
    if pattern:
        # Bug fix: the original ran two separate loops (name matches, then
        # id matches), so an instance matching on both fields was returned
        # twice; a single pass with `or` keeps each match exactly once.
        matched_instances = [
            inst for inst in instances
            if inst.name.find(pattern) != -1 or inst.id.find(pattern) != -1
        ]
        if len(matched_instances) > 0:
            return matched_instances
        else:
            return None
    else:
        return instances
def getNova():
    """Return a v2 Nova client authenticated with the stored credentials."""
    nova_credentials = get_nova_creds()
    return novaclient.Client("2", **nova_credentials)
def nova_connect(self):
    """Open and return a Nova connection using the configured credentials."""
    return nvclient.Client(**get_nova_creds())
import keystoneclient.v2_0.client as keystone
import glanceclient as glance
import novaclient.v1_1.client as nova
import os
import time
from credentials import get_keystone_creds
from credentials import get_nova_creds

if __name__ == '__main__':
    # Authenticate against Keystone and Nova with the stored credentials.
    keystoneInfo = get_keystone_creds()
    novaInfo = get_nova_creds()
    keystoneclient = keystone.Client(**keystoneInfo)
    novaclient = nova.Client(**novaInfo)
    # Resolve the Glance endpoint from the service catalog and connect
    # with the v1 image API, reusing the Keystone token.
    endPoint = keystoneclient.service_catalog.get_urls(service_type = 'image')[0]
    glanceclient = glance.Client('1', endPoint, token = keystoneclient.auth_token)
    images = glanceclient.images.list()
    # NOTE(review): image_create is assigned but never used below.
    image_create = None
    # Print id and name of every image whose name contains 'ubuntu'.
    for image in images:
        if image.name.find('ubuntu') > -1:
            print image.id, '\t', image.name
    # NOTE(review): `image` below is whatever the loop iterated last, which
    # is not necessarily an ubuntu match -- confirm this is intended.
    flavor = novaclient.flavors.find(name="m1.micro")
    instance = novaclient.servers.create(name=image.id, image=image, flavor=flavor)
    # check whether the instance has been successfully started
    status = instance.status
    while status == 'BUILD':
        time.sleep(5)
        instance = novaclient.servers.get(instance.id)
def run_instances(self, image=None, flavor=None, mincount=1, maxcount=1, keypair_name=None): creds = get_nova_creds() # nova = client.Client(2.0, creds.get('username'), creds.get('api_key'), creds.get('project_id'), creds.get('auth_url')) nova = client.Client(2.0, username=creds.get('username'), password=self.utils.rc_pwd, project_name=creds.get('project_id'), auth_url=creds.get('auth_url')) _flavor = nova.flavors.find(name=flavor) lock = threading.Lock() reservation = [] def create(): _name = self.utils.cluster_name + '-' + (str)(uuid.uuid1()) instance = nova.servers.create(name=_name, image=image, flavor=_flavor, min_count=1, max_count=1, key_name=keypair_name) status = instance.status while status == 'BUILD': time.sleep(5) # Retrieve the instance again so the status field updates instance = nova.servers.get(instance.id) status = instance.status with lock: reservation.append(instance) t = [] for i in range(0, int(float(maxcount))): t.append(threading.Thread(target=create)) t[i].daemon = True t[i].start() for j in range(0, int(float(maxcount))): t[j].join() instances = [] # # add the newly run instances to the database members = ("id", "networks", "flavor", "image", "status", "key_name", "name", "created") for instance in reservation: # # get instance details details = {} for member in members: val = getattr(instance, member, "") # product_codes is a list if hasattr(val, '__iter__') and not ((type(val) is str) or (type(val) is list)): v = val.get('id') if v == None: v = val.get('private-net')[0] val = v # print (val) details[member] = val _instance = Instance(details) instances.append(_instance) self.utils.add_to_instance_db(instances) return instances
# -*- coding: utf-8 -*- from os import environ as env import keystoneclient.v2_0.client as ksclient import glanceclient.v2.client as glclient import novaclient.client from credentials import get_creds, get_nova_creds keystone = ksclient.Client(**get_creds()) glance_endpoint = keystone.service_catalog.url_for(service_type='image') glance = glclient.Client(glance_endpoint, token=keystone.auth_token) # instanciranje nova objekta, argument "2" se odnosi na verziju klase # koja ce biti korištena za stvaranje objekta nova = novaclient.client.Client("2", **get_nova_creds()) # ispis teksta na terminal print "List of all images by name and size:" # dohvacanje liste koja opisuje sve pohranjene slike unutar glance usluge images = glance.images.list() # for petlja koja se izvodi za svaki element (image) unutar liste (images) for image in images: # Ispis atributa imena i velicine pojedine pohranjene OS slike print("\n%s\n%s" % (image[u'name'], image[u'size'])) # traženje unosa preko terminala od strane korisnika name = raw_input('\nSearch for image by name: ')
default_name = "ubuntu"
# NOTE(review): default_flavor is defined but the literal "m1.micro" is
# used again below -- confirm which should be authoritative.
default_flavor = "m1.micro"
keystone_creds = credentials.get_keystone_creds()
# Get images containing Ubuntu
keystone = ksclient.Client(**keystone_creds)
glance_endpoint = keystone.service_catalog.url_for(
    service_type='image', endpoint_type='publicURL')
glance = glclient.Client(glance_endpoint, token=keystone.auth_token)
ubuntu_images = [
    img for img in glance.images.list() if default_name in img["name"]]
# Create one m1.micro server per matching image
nova_creds = credentials.get_nova_creds()
nova = nvclient.Client(**nova_creds)
flavor = nova.flavors.find(name="m1.micro")
created_instances = [
    nova.servers.create(name=img["name"]+"_vm",
                        image=img["id"],
                        flavor=flavor)
    for img in ubuntu_images
]
# Check for instance creation success: poll each server until it leaves
# the BUILD state (5 second intervals).
for instance in created_instances:
    while nova.servers.get(instance.id).status == "BUILD":
        time.sleep(5)
    status = nova.servers.get(instance.id).status
    # NOTE(review): the body of this branch is truncated in this chunk.
    if(status == "ERROR"):
#!/usr/bin/env python # # https://www.ibm.com/developerworks/cloud/library/cl-openstack-pythonapis/ import os import time import novaclient.client from credentials import get_nova_creds creds = get_nova_creds() client_cls = novaclient.client.Client nova = client_cls(2, creds['username'], creds['api_key'], creds['project_id'], auth_url=creds['auth_url'], insecure=creds['insecure']) nova.authenticate() if not nova.keypairs.findall(name="mykey"): with open(os.path.expanduser('~/.ssh/id_rsa.pub')) as fpubkey: nova.keypairs.create(name="mykey", public_key=fpubkey.read()) image = nova.images.find(name="cirros") flavor = nova.flavors.find(name="m1.tiny") instance = nova.servers.create(name="test_halted", image=image, flavor=flavor, key_name="mykey") # Poll at 5 second intervals, until the status is no longer 'BUILD' status = instance.status while status == 'BUILD': time.sleep(5)
import math
from novaclient import extension
from novaclient.v1_1 import client
from novaclient.v1_1 import services
from novaclient import utils
from novaclient.v1_1.contrib import list_extensions
from novaclient.v1_1.contrib import openclcontexts
from novaclient.v1_1.contrib import opencldevices
from novaclient.v1_1.contrib import openclprograms
from novaclient.v1_1.contrib import openclbuffers
from novaclient.v1_1.contrib import openclkernels
from credentials import get_nova_creds

creds = get_nova_creds()
# Register the OpenCL contrib extensions (contexts, devices, programs,
# buffers, kernels) under their module basenames, so the client exposes
# them as manager attributes (e.g. cl.openclcontexts).
extensions = [
    extension.Extension(openclcontexts.__name__.split(".")[-1], openclcontexts),
    extension.Extension(opencldevices.__name__.split(".")[-1], opencldevices),
    extension.Extension(openclprograms.__name__.split(".")[-1], openclprograms),
    extension.Extension(openclbuffers.__name__.split(".")[-1], openclbuffers),
    extension.Extension(openclkernels.__name__.split(".")[-1], openclkernels),
]
cl = client.Client(http_log_debug = True, extensions=extensions, **creds)
print "Create a Context: "
# Create an OpenCL context on device 0 with no extra properties; the call
# returns the new context handle and an error indicator.
devices = [0, ]
properties = []
context, retErr = cl.openclcontexts.create(devices, properties)
# NOTE(review): under the Python 2 print statement the two calls below
# print tuples, e.g. ('Context ID:', ...), not space-separated values.
print("Context ID:", context)
print("retErr : ", retErr)
#!/usr/bin/env python from os import environ as env import novaclient.client from neutronclient.v2_0 import client as neutronclient from credentials import get_creds, get_nova_creds try: nova = novaclient.client.Client("2", **get_nova_creds()) neutron = neutronclient.Client(**get_creds()) network_name = 'ssegota_net' except: print "Greska kod autentifikacije" try: #net body_net = {'network': {'name': network_name, 'admin_state_up': True}} netw = neutron.create_network(body=body_net) net_dict = netw['network'] network_id = net_dict['id'] print('Network %s created' % network_id) #subnet body_subnet = { 'subnets': [{ 'name': 'ssegota_subnet1', 'cidr': '10.20.1.0/24', 'ip_version': 4, 'dns_nameservers': ['8.8.4.4', '8.8.8.8'], 'network_id': network_id }] }