def __init__(self):
    """Load API credentials from 'client_info.conf' and build the SoftLayer client.

    Sets up: the parsed config, username/api_key read from its DEFAULT
    section, a SoftLayer.Client, and a VSManager bound to that client.
    """
    parser = ConfigParser.ConfigParser()
    parser.read('client_info.conf')
    self.conf = parser
    # Credentials come from the DEFAULT section of the config file.
    self.username = self.get_config_val('DEFAULT', 'username')
    self.api_key = self.get_config_val('DEFAULT', 'api_key')
    self.client = SoftLayer.Client(username=self.username,
                                   api_key=self.api_key)
    self.virtual_server_manager = VSManager(self.client)
def create_guest(self, capacity_id, test, guest_object):
    """Turns an empty Reserve Capacity into a real Virtual Guest

    :param int capacity_id: ID of the RESERVED_CAPACITY_GROUP to create this guest into
    :param bool test: True will use verifyOrder, False will use placeOrder
    :param dictionary guest_object: Below is the minimum info you need to send in

        guest_object = {
            'domain': 'test.com',
            'hostname': 'A1538172419',
            'os_code': 'UBUNTU_LATEST_64',
            'primary_disk': '25',
        }
    """
    mask = "mask[instances[id, billingItem[id, item[id,keyName]]], backendRouter[id, datacenter[name]]]"
    capacity = self.get_object(capacity_id, mask=mask)

    # The capacity's billing item key name encodes the flavor this group was
    # reserved for; the guest must be ordered with a matching flavor string.
    try:
        flavor_key = capacity['instances'][0]['billingItem']['item']['keyName']
        flavor = _flavor_string(flavor_key, guest_object['primary_disk'])
    except KeyError as ex:
        raise SoftLayerError("Unable to find capacity Flavor.") from ex

    guest_object['flavor'] = flavor
    guest_object['datacenter'] = capacity['backendRouter']['datacenter']['name']
    # Reserved capacity only supports SAN as of 20181008
    guest_object['local_disk'] = False
    # Reserved capacity only supports monthly ordering via Virtual_Guest::generateOrderTemplate
    # Hourly ordering would require building out the order manually.
    guest_object['hourly'] = False

    template = VSManager(self.client).verify_create_instance(**guest_object)
    template['reservedCapacityId'] = capacity_id

    if guest_object.get('ipv6'):
        ipv6_price = self.ordering_manager.get_price_id_list(
            'PUBLIC_CLOUD_SERVER', ['1_IPV6_ADDRESS'])
        template['prices'].append({'id': ipv6_price[0]})

    order_method = 'verifyOrder' if test else 'placeOrder'
    return self.client.call('Product_Order', order_method, template)
def cli(env, identifier, purge):
    """Delete a placement group.

    Placement Group MUST be empty before you can delete it.
    IDENTIFIER can be either the Name or Id of the placement group you want to delete
    """
    manager = PlacementManager(env.client)
    group_id = helpers.resolve_id(manager.resolve_ids, identifier, 'placement_group')
    if purge:
        # --purge: cancel every guest in the group instead of the group itself.
        placement_group = manager.get_object(group_id)
        guest_list = ', '.join([
            guest['fullyQualifiedDomainName'] for guest in placement_group['guests']
        ])
        if len(placement_group['guests']) < 1:
            raise exceptions.CLIAbort(
                'No virtual servers were found in placement group %s' % identifier)

        click.secho("You are about to delete the following guests!\n%s" % guest_list, fg='red')
        if not (env.skip_confirmations or formatting.confirm(
                "This action will cancel all guests! Continue?")):
            raise exceptions.CLIAbort('Aborting virtual server order.')
        vm_manager = VSManager(env.client)
        for guest in placement_group['guests']:
            click.secho("Deleting %s..." % guest['fullyQualifiedDomainName'])
            vm_manager.cancel_instance(guest['id'])
        return

    click.secho("You are about to delete the following placement group! %s" % identifier, fg='red')
    if not (env.skip_confirmations or formatting.confirm(
            "This action will cancel the placement group! Continue?")):
        raise exceptions.CLIAbort('Aborting virtual server order.')

    cancel_result = manager.delete(group_id)
    if cancel_result:
        # Fixed user-facing typo: "canceld" -> "canceled".
        click.secho("Placement Group %s has been canceled." % identifier, fg='green')
def create_guest(self, capacity_id, test, guest_object):
    """Turns an empty Reserve Capacity into a real Virtual Guest

    :param int capacity_id: ID of the RESERVED_CAPACITY_GROUP to create this guest into
    :param bool test: True will use verifyOrder, False will use placeOrder
    :param dictionary guest_object: Below is the minimum info you need to send in

        guest_object = {
            'domain': 'test.com',
            'hostname': 'A1538172419',
            'os_code': 'UBUNTU_LATEST_64',
            'primary_disk': '25',
        }
    """
    vs_manager = VSManager(self.client)
    mask = "mask[instances[id, billingItem[id, item[id,keyName]]], backendRouter[id, datacenter[name]]]"
    capacity = self.get_object(capacity_id, mask=mask)
    try:
        capacity_flavor = capacity['instances'][0]['billingItem']['item']['keyName']
        flavor = _flavor_string(capacity_flavor, guest_object['primary_disk'])
    except KeyError as ex:
        # Chain the original KeyError so the missing-key traceback is preserved
        # (PEP 3134); previously the cause was silently discarded.
        raise SoftLayer.SoftLayerError("Unable to find capacity Flavor.") from ex

    guest_object['flavor'] = flavor
    guest_object['datacenter'] = capacity['backendRouter']['datacenter']['name']

    # Reserved capacity only supports SAN as of 20181008
    guest_object['local_disk'] = False

    template = vs_manager.verify_create_instance(**guest_object)
    template['reservedCapacityId'] = capacity_id

    if guest_object.get('ipv6'):
        ipv6_price = self.ordering_manager.get_price_id_list('PUBLIC_CLOUD_SERVER', ['1_IPV6_ADDRESS'])
        template['prices'].append({'id': ipv6_price[0]})

    if test:
        result = self.client.call('Product_Order', 'verifyOrder', template)
    else:
        result = self.client.call('Product_Order', 'placeOrder', template)
    return result
def cli(env, identifier, tags):
    """Tags all guests in an autoscale group.

    --tags "Use, quotes, if you, want whitespace"

    --tags Otherwise,Just,commas
    """
    autoscale_manager = AutoScaleManager(env.client)
    vs_manager = VSManager(env.client)
    # Pull each group member together with its underlying virtual guest.
    member_mask = "mask[id,virtualGuestId,virtualGuest[tagReferences,id,hostname]]"
    members = autoscale_manager.get_virtual_guests(identifier, mask=member_mask)
    click.echo("New Tags: {}".format(tags))
    for member in members:
        guest = member.get('virtualGuest')
        click.echo("Setting tags for {}".format(guest.get('hostname')))
        vs_manager.set_tags(tags, guest.get('id'))
    click.echo("Done")
class VirtualServer:
    """Small convenience wrapper around the SoftLayer virtual-server API.

    Credentials are read from 'client_info.conf' (DEFAULT section) and a
    VSManager is kept on the instance for listing virtual servers.
    """

    def __init__(self):
        self.conf = ConfigParser.ConfigParser()
        self.conf.read('client_info.conf')
        self.username = self.get_config_val('DEFAULT', 'username')
        self.api_key = self.get_config_val('DEFAULT', 'api_key')
        self.client = SoftLayer.Client(username=self.username,
                                       api_key=self.api_key)
        self.virtual_server_manager = VSManager(self.client)

    def get_config_val(self, section, field):
        """Return the value of `field` from `section` of the loaded config."""
        return self.conf.get(section, field)

    def list_virtual_server(self):
        """Return the account's virtual servers (id and hostname only).

        VSManager.list_instances accepts many filters (hourly, monthly,
        tags, cpus, memory, hostname, domain, datacenter, ...) plus
        **kwargs such as mask/limit; only a minimal mask is used here.

        :return: list of virtual servers
        """
        return self.virtual_server_manager.list_instances(mask='id,hostname')
def vs_manager(self):
    """Return a VSManager bound to this object's SoftLayer client."""
    manager = VSManager(client=self.softlayer_client)
    return manager
def __init__(self, cpus: int, mem: int, hostname: str, datacenter: str, count: int, transient=False):
    """Provision `count` worker VMs on the IBM Cloud and set them up in the background.

    Side effects: starts a local redis container, builds+saves the worker
    docker image locally, orders any missing VMs, then kicks off per-VM
    setup on a thread pool (results in self._setup_results).

    :param cpus: vCPUs per worker VM
    :param mem: memory per worker; multiplied by 1024 for the API, so
                presumably GB in, MB out -- TODO confirm
    :param hostname: base name; workers are <hostname>-0 .. <hostname>-(count-1)
    :param datacenter: datacenter code used for every requested VM
    :param count: number of worker VMs desired
    :param transient: stored on the instance; not read in this method
    """
    # Instantiate IBM cloud API object
    self._sl_client = SoftLayer.create_client_from_env()
    self.cloud_mgr = VSManager(self._sl_client)
    self._cpus = cpus
    self._mem = mem
    self._hostname = hostname
    self._count = count
    self._transient = transient

    # Load redis password
    self.redis_pw = os.environ['REDIS_PW']

    # Workers connect back to this machine; assumes eth0 is the right
    # interface -- TODO confirm on the deployment host.
    self.own_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
    print("Determined that own IP is", self.own_ip)

    # Restart redis server
    print("Starting local redis server")
    start_redis_script = """
        if [ "$(docker ps -aq -f name=redis)" ]; then \
            docker rm -f redis ; \
        fi ; \
        docker run -d -p 6379:6379 --name redis redis --requirepass {0}
    """.format(self.redis_pw)
    result = self._shell_run_script(spur.LocalShell(), start_redis_script)
    if result.return_code != 0:
        print("Error while starting local redis server")
        print(result.stderr_output.decode('utf-8'))
        exit(-1)

    # Build and save worker docker image
    print("Building worker docker image")
    build_docker_image = """
        docker build . -t invaders && \
        docker save invaders | pigz > invaders.tar.gz
    """
    result = self._shell_run_script(spur.LocalShell(), build_docker_image)
    if result.return_code != 0:
        print("Error while building worker docker image")
        print(result.stderr_output.decode('utf-8'))
        exit(-1)

    # Create all the hostnames
    hostnames = [self._hostname + '-' + str(i) for i in range(self._count)]

    # Keep aside only the ones that are not already instantiated on IBM Cloud
    instances_list = self.cloud_mgr.list_instances()
    hostnames_list = [e['hostname'] for e in instances_list]

    # List of the VMs to instantiate
    hostnames_nonexistant = [h for h in hostnames if h not in hostnames_list]

    # One datacenter entry per VM still to create (all identical).
    datacenters = []
    for _ in range(len(hostnames_nonexistant)):
        datacenters.append(datacenter)

    if len(hostnames_nonexistant) > 0:
        print("Requesting the VMs...")
        vm_settings = [{
            'hostname': h,
            'domain': 'IBM.cloud',
            'datacenter': d,
            'dedicated': False,
            'private': True,
            'cpus': self._cpus,
            'os_code': 'CENTOS_7_64',
            'local_disk': False,
            'memory': self._mem * 1024,
            'tags': 'worker, ga',
            'nic_speed': 100
        } for h, d in zip(hostnames_nonexistant, datacenters)]

        # Request the machines 10 at a time
        vm_settings = [vm_settings[x:x + 10] for x in range(0, len(vm_settings), 10)]
        for s in vm_settings:
            self.cloud_mgr.create_instances(config_list=s)

    # Get the IDs of the VMs we'll use
    self._vm_ids = [e['id'] for e in self.cloud_mgr.list_instances() if e['hostname'] in hostnames]

    print("Waiting for the VMs to be available + set-up (in background thread)")
    self.pool = ThreadPool(processes=10)  # Limit to 10 to avoid hitting the API calls limit
    self._setup_results = self.pool.map_async(self._setup_vm, self._vm_ids)
class Cloud:
    """
    This class contains the methods needed to provision the worker VMs on the IBM Cloud.
    """

    def __init__(self, cpus: int, mem: int, hostname: str, datacenter: str, count: int, transient=False):
        """Provision `count` worker VMs and start their setup in a background pool.

        :param cpus: vCPUs per worker VM
        :param mem: memory per worker; multiplied by 1024 for the API, so
                    presumably GB in, MB out -- TODO confirm
        :param hostname: base name; workers are <hostname>-0 .. <hostname>-(count-1)
        :param datacenter: datacenter code used for every requested VM
        :param count: number of worker VMs desired
        :param transient: stored on the instance; not read anywhere in this class
        """
        # Instantiate IBM cloud API object
        self._sl_client = SoftLayer.create_client_from_env()
        self.cloud_mgr = VSManager(self._sl_client)
        self._cpus = cpus
        self._mem = mem
        self._hostname = hostname
        self._count = count
        self._transient = transient

        # Load redis password
        self.redis_pw = os.environ['REDIS_PW']

        # Workers connect back to this machine; assumes eth0 is the right
        # interface -- TODO confirm on the deployment host.
        self.own_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
        print("Determined that own IP is", self.own_ip)

        # Restart redis server
        print("Starting local redis server")
        start_redis_script = """
            if [ "$(docker ps -aq -f name=redis)" ]; then \
                docker rm -f redis ; \
            fi ; \
            docker run -d -p 6379:6379 --name redis redis --requirepass {0}
        """.format(self.redis_pw)
        result = self._shell_run_script(spur.LocalShell(), start_redis_script)
        if result.return_code != 0:
            print("Error while starting local redis server")
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Build and save worker docker image
        print("Building worker docker image")
        build_docker_image = """
            docker build . -t invaders && \
            docker save invaders | pigz > invaders.tar.gz
        """
        result = self._shell_run_script(spur.LocalShell(), build_docker_image)
        if result.return_code != 0:
            print("Error while building worker docker image")
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Create all the hostnames
        hostnames = [self._hostname + '-' + str(i) for i in range(self._count)]

        # Keep aside only the ones that are not already instantiated on IBM Cloud
        instances_list = self.cloud_mgr.list_instances()
        hostnames_list = [e['hostname'] for e in instances_list]

        # List of the VMs to instantiate
        hostnames_nonexistant = [h for h in hostnames if h not in hostnames_list]

        # One datacenter entry per VM still to create (all identical).
        datacenters = []
        for _ in range(len(hostnames_nonexistant)):
            datacenters.append(datacenter)

        if len(hostnames_nonexistant) > 0:
            print("Requesting the VMs...")
            vm_settings = [{
                'hostname': h,
                'domain': 'IBM.cloud',
                'datacenter': d,
                'dedicated': False,
                'private': True,
                'cpus': self._cpus,
                'os_code': 'CENTOS_7_64',
                'local_disk': False,
                'memory': self._mem * 1024,
                'tags': 'worker, ga',
                'nic_speed': 100
            } for h, d in zip(hostnames_nonexistant, datacenters)]

            # Request the machines 10 at a time
            vm_settings = [vm_settings[x:x + 10] for x in range(0, len(vm_settings), 10)]
            for s in vm_settings:
                self.cloud_mgr.create_instances(config_list=s)

        # Get the IDs of the VMs we'll use
        self._vm_ids = [e['id'] for e in self.cloud_mgr.list_instances() if e['hostname'] in hostnames]

        print("Waiting for the VMs to be available + set-up (in background thread)")
        self.pool = ThreadPool(processes=10)  # Limit to 10 to avoid hitting the API calls limit
        self._setup_results = self.pool.map_async(self._setup_vm, self._vm_ids)

    def _setup_vm(self, id_):
        """Wait for VM `id_`, configure it over SSH, and deploy the worker image.

        Returns (ip, pw) of the configured VM. Calls exit(-1) on any
        script failure, which also kills the calling pool worker.
        """
        self.cloud_mgr.wait_for_ready(id_)
        ip = None
        pw = None
        # Sometimes the OS password is not ready on time, so retry 30 times
        for i in range(30):
            try:
                vm_info = self.cloud_mgr.get_instance(id_)
                ip = vm_info['primaryBackendIpAddress']
                pw = vm_info['operatingSystem']['passwords'][0]['password']
            except KeyError:
                sleep(10)
            else:
                break
        assert ip is not None, "Could not retrieve IP address for " + str(id_)
        assert pw is not None, "Could not retrieve password for " + str(id_)

        local_shell = spur.LocalShell()
        shell = spur.SshShell(hostname=ip,
                              username='******',
                              password=pw,
                              missing_host_key=spur.ssh.MissingHostKey.accept,
                              load_system_host_keys=False)

        # Configure the VM
        vm_config_script = """
            ip route replace default via {0} ; \
            yum install -y epel-release && \
            yum install -y wget pxz lbzip2 pigz rsync && \
            wget -q https://get.docker.com/ -O docker_install.sh && \
            sh docker_install.sh && \
            sysctl -w net.ipv4.ip_forward=1 && \
            systemctl restart network && \
            systemctl enable docker && \
            systemctl restart docker
        """.format(self.own_ip)
        result = self._shell_run_script(shell, vm_config_script)
        if result.return_code != 0:
            print("Error while setting up the VM", id_)
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Uploading the docker image on the VMs
        docker_copy_script = """
            /usr/bin/rsync --verbose --inplace -r \
            --rsh="/usr/bin/sshpass -p {0} \
            ssh -Tx -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Compression=no -l root" \
            invaders.tar.gz \
            {1}:/root/ga/
        """.format(pw, ip)
        result = self._shell_run_script(local_shell, docker_copy_script)
        if result.return_code != 0:
            print("Error while uploading docker image on VM", id_)
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        # Decompressing the docker image, loading it
        # + changing sshd settings to allow for simultaneous connections
        docker_load_script = """
            cd /root/ga ; \
            docker rm -f invaders ; \
            docker rm -f /invaders ; \
            cat invaders.tar.gz | pigz -d | docker load ; \
            docker run -d -p 6379:6379 -e \"REDIS_PW={0}\" -e \"REDIS_IP={1}\" --name invaders invaders
        """.format(self.redis_pw, self.own_ip)
        result = self._shell_run_script(shell, docker_load_script)
        if result.return_code != 0:
            print("Error while loading docker image on VM", id_)
            print(result.stderr_output.decode('utf-8'))
            exit(-1)

        return ip, pw

    def cancel_all(self):
        """Cancel every managed VM, printing i/total progress as it goes."""
        print("Deleting instances...")
        for i, id_ in enumerate(self._vm_ids):
            print('\r' + str(i) + '/' + str(len(self._vm_ids)), end='')
            self.cloud_mgr.cancel_instance(id_)
        print('\r' + str(len(self._vm_ids)) + '/' + str(len(self._vm_ids)))

    @staticmethod
    def _shell_run_script(shell, script, allow_error=True):
        # Run the script via `bash -c` on the given spur shell; with
        # allow_error=True failures are returned in the result, not raised.
        return shell.run(["/bin/bash", "-c", script], allow_error=allow_error)