def create_host(self, infra, offering, name, team_name, zone=None, database_name=''):
    """Request a new VM from the host-provider service and persist it as a Host.

    Raises HostProviderCreateVMException when the provider answers with
    anything other than HTTP 201.
    """
    endpoint = "{}/{}/{}/host/new".format(
        self.credential.endpoint, self.provider, self.environment
    )
    payload = {
        "engine": self.engine,
        "name": name,
        "cpu": offering.cpus,
        "memory": offering.memory_size_mb,
        "group": infra.name,
        "team_name": team_name,
        "database_name": database_name,
    }
    if zone:
        payload['zone'] = zone

    resp = self._request(post, endpoint, json=payload, timeout=600)
    if resp.status_code != 201:
        raise HostProviderCreateVMException(resp.content, resp)

    vm_info = resp.json()
    new_host = Host()
    new_host.address = vm_info["address"]
    new_host.hostname = new_host.address
    new_host.user = self.vm_credential.user
    new_host.password = self.vm_credential.password
    new_host.provider = self.provider
    new_host.identifier = vm_info["id"]
    new_host.offering = offering
    new_host.save()
    return new_host
def do(self):
    """Register a placeholder Host for this k8s instance and attach it.

    Kubernetes pods have no managed SSH credentials, so user/password are
    stored as masked placeholders.
    """
    new_host = Host()
    new_host.address = self.instance.vm_name
    new_host.hostname = self.instance.vm_name
    new_host.user = '******'
    new_host.password = '******'
    new_host.provider = 'k8s'
    new_host.offering = None
    new_host.save()
    # Link the freshly created host back onto the instance.
    self.instance.hostname = new_host
    self.instance.save()
def restore_dst_cluster(dump_path, cluster_info, redis_time_out):
    """Restore a Redis dump onto every node of the target cluster.

    Stops all instances first, then for each node uploads *dump_path* over
    SFTP to the node's remote path, temporarily disables appendonly in
    /data/redis.conf, restarts the database and re-enables appendonly.

    Returns True on success, False on any failure.
    """
    from physical.models import Host
    # Stop every node before touching its data files.
    for instance_info in cluster_info:
        sys_user = instance_info['sys_user']
        sys_pass = instance_info['sys_pass']
        host = instance_info['host']
        click.echo("Restoring target database...")
        Host.run_script(address=host.address,
                        username=sys_user,
                        password=sys_pass,
                        script=host.commands.database(action='stop'))
    for instance_info in cluster_info:
        sys_user = instance_info['sys_user']
        sys_pass = instance_info['sys_pass']
        remote_path = instance_info['remote_path']
        redis_pass = instance_info['redis_pass']
        redis_port = instance_info['redis_port']
        host = instance_info['host']
        try:
            # BUG FIX: paramiko needs the address string, not the Host
            # object (every Host.run_script call here uses host.address).
            transport = paramiko.Transport((host.address, 22))
            transport.connect(username=sys_user, password=sys_pass)
            sftp = paramiko.SFTPClient.from_transport(transport)
            sftp.put(dump_path, remote_path)
            sftp.close()
            transport.close()
        except Exception as e:
            click.echo('ERROR while transporting dump file: {}'.format(e))
            return False
        # appendonly must be commented out while the dump is loaded,
        # otherwise redis would replay the (empty) AOF instead.
        Host.run_script(
            address=host.address,
            username=sys_user,
            password=sys_pass,
            script="sed -i 's/appendonly/#appendonly/g' /data/redis.conf")
        Host.run_script(address=host.address,
                        username=sys_user,
                        password=sys_pass,
                        script=host.commands.database(action='start'))
        Host.run_script(
            address=host.address,
            username=sys_user,
            password=sys_pass,
            script="sed -i 's/#appendonly/appendonly/g' /data/redis.conf")
        driver = RedisDriver(host, redis_port, redis_pass, redis_time_out)
        with driver.redis() as client:
            try:
                client.config_set("appendonly", "yes")
            except Exception as e:
                click.echo("Error while requesting dump: {}".format(e))
                return False
    # BUG FIX: report success explicitly; failure paths return False, so
    # callers can rely on a truthy result.
    return True
def create_host(self, address):
    """Persist and return a Host at *address* using the stored VM credentials."""
    from physical.models import Host
    new_host = Host()
    new_host.address = address
    new_host.hostname = address
    new_host.user = self.vm_credentials.user
    new_host.password = self.vm_credentials.password
    new_host.offering = self.cs_offering
    new_host.save()
    return new_host
def restore_dst_database(dump_path, host, redis_port, redis_pass,
                         sys_user, sys_pass, remote_path, redis_time_out):
    """Stop the target redis on *host* and upload *dump_path* to *remote_path*.

    Returns False when the SFTP transfer fails; otherwise falls through
    (implicit None) after the upload — callers continue the restore
    sequence separately.
    """
    from physical.models import Host
    click.echo("Restoring target database...")
    # Stop the database before replacing its data files.
    Host.run_script(address=host.address,
                    username=sys_user,
                    password=sys_pass,
                    script=host.commands.database(action='stop'))
    try:
        # BUG FIX: paramiko needs the address string, not the Host object
        # (the run_script call above already uses host.address).
        transport = paramiko.Transport((host.address, 22))
        transport.connect(username=sys_user, password=sys_pass)
        sftp = paramiko.SFTPClient.from_transport(transport)
        sftp.put(dump_path, remote_path)
        sftp.close()
        transport.close()
    except Exception as e:
        click.echo('ERROR while transporting dump file: {}'.format(e))
        return False
def create_host(self, address):
    """Create and save a minimal Host record for *address*."""
    from physical.models import Host
    new_host = Host()
    new_host.address = address
    new_host.hostname = address
    new_host.save()
    return new_host
def do(self, workflow_dict):
    # Provision one CloudStack VM per name in workflow_dict['names']['vms']
    # for a MySQL infra, registering a Host, HostAttr and Instance for each.
    # Returns True on success, False on failure (errors are recorded in
    # workflow_dict['exceptions']).
    try:
        if 'environment' not in workflow_dict or 'plan' not in workflow_dict:
            return False
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])
        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []
        bundles = list(cs_plan_attrs.bundle.all())
        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            offering = cs_plan_attrs.get_stronger_offering()
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                # First VM asks for the infra's next bundle; later VMs
                # rotate from the previously chosen bundle.
                if index == 0:
                    bundle = LastUsedBundle.get_next_infra_bundle(
                        plan=workflow_dict['plan'], bundles=bundles)
                else:
                    bundle = LastUsedBundle.get_next_bundle(
                        bundle=bundle, bundles=bundles)
            # Create the DatabaseInfraOffering only on first use (EAFP).
            try:
                DatabaseInfraOffering.objects.get(
                    databaseinfra=workflow_dict['databaseinfra'])
            except ObjectDoesNotExist:
                LOG.info("Creating databaseInfra Offering...")
                dbinfra_offering = DatabaseInfraOffering()
                dbinfra_offering.offering = offering
                dbinfra_offering.databaseinfra = workflow_dict[
                    'databaseinfra']
                dbinfra_offering.save()
            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s" %
                (bundle, offering))
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name(
                    'affinity_group_id'),
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            LOG.debug("New virtualmachine: %s" % vm)
            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")
            workflow_dict['hosts'].append(host)
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            instance = Instance()
            instance.address = host.address
            instance.port = 3306
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.instance_type = Instance.MYSQL
            instance.save()
            LOG.info("Instance created!")
            workflow_dict['instances'].append(instance)
            if workflow_dict['qt'] == 1:
                # Single-instance infra: the infra endpoint is that instance.
                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = instance.address + \
                    ":%i" % (instance.port)
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra
                return True
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
# NOTE(review): orphan fragment — this duplicates the per-host restore steps
# of restore_dst_cluster/restore_dst_database; `host`, `sys_user`, `sys_pass`,
# `dump_path`, `remote_path`, `redis_port`, `redis_pass` and `redis_time_out`
# must be bound by an enclosing scope not visible in this chunk, and the
# trailing `with` block is truncated here.
try:
    # NOTE(review): sibling code passes host.address to run_script, so
    # `(host, 22)` (a Host object) looks wrong — confirm against callers.
    transport = paramiko.Transport((host, 22))
    transport.connect(username=sys_user, password=sys_pass)
    sftp = paramiko.SFTPClient.from_transport(transport)
    sftp.put(dump_path, remote_path)
    sftp.close()
    transport.close()
except Exception, e:
    click.echo('ERROR while transporting dump file: {}'.format(e))
    return False
# appendonly is commented out of redis.conf while the database restarts so
# the RDB dump is loaded, then re-enabled afterwards.
Host.run_script(
    address=host.address,
    username=sys_user,
    password=sys_pass,
    script="sed -i 's/appendonly/#appendonly/g' /data/redis.conf")
Host.run_script(address=host.address,
                username=sys_user,
                password=sys_pass,
                script=host.commands.database(action='start'))
Host.run_script(
    address=host.address,
    username=sys_user,
    password=sys_pass,
    script="sed -i 's/#appendonly/appendonly/g' /data/redis.conf")
driver = RedisDriver(host, redis_port, redis_pass, redis_time_out)
with driver.redis() as client:
def do(self, workflow_dict):
    # Provision CloudStack VMs for a Redis + Sentinel infra: VMs 0 and 1
    # each get a Redis instance (port 6379); the third VM gets a Sentinel
    # instance (port 26379) on a weaker offering.
    try:
        # NOTE(review): `and` only bails out when BOTH keys are missing;
        # the sibling MySQL step uses `or` — confirm which is intended.
        if 'environment' not in workflow_dict and 'plan' not in workflow_dict:
            return False
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])
        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []
        bundles = list(cs_plan_attrs.bundle.all())
        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['plan'], bundle=bundles)
            # The sentinel-only third VM runs on a weaker offering.
            if index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = cs_plan_attrs.get_stronger_offering()
            # Create the DatabaseInfraOffering only on first use (EAFP).
            try:
                DatabaseInfraOffering.objects.get(
                    databaseinfra=workflow_dict['databaseinfra'])
            except ObjectDoesNotExist:
                LOG.info("Creating databaseInfra Offering...")
                dbinfra_offering = DatabaseInfraOffering()
                dbinfra_offering.offering = offering
                dbinfra_offering.databaseinfra = workflow_dict[
                    'databaseinfra']
                dbinfra_offering.save()
            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s"
                % (bundle, offering))
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name(
                    'affinity_group_id'),
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            LOG.debug("New virtualmachine: %s" % vm)
            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")
            workflow_dict['hosts'].append(host)
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            if index in (0, 1):
                # Data-bearing Redis instance.
                instance = Instance()
                instance.address = host.address
                instance.port = 6379
                instance.is_active = True
                instance.hostname = host
                instance.databaseinfra = workflow_dict['databaseinfra']
                instance.instance_type = Instance.REDIS
                instance.save()
                LOG.info("Instance created!")
                workflow_dict['instances'].append(instance)
                if workflow_dict['qt'] == 1:
                    # Single-instance infra: endpoint is that instance.
                    LOG.info("Updating databaseinfra endpoint...")
                    databaseinfra = workflow_dict['databaseinfra']
                    databaseinfra.endpoint = instance.address + \
                        ":%i" % (instance.port)
                    databaseinfra.save()
                    workflow_dict['databaseinfra'] = databaseinfra
            else:
                # Third VM hosts the Sentinel instance only.
                instance = Instance()
                instance.address = host.address
                instance.port = 26379
                instance.is_active = True
                instance.hostname = host
                instance.databaseinfra = workflow_dict['databaseinfra']
                instance.instance_type = Instance.REDIS_SENTINEL
                instance.save()
                LOG.info("Instance created!")
                workflow_dict['instances'].append(instance)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def create_host(self, infra, offering, name, team_name, zone=None,
                database_name='', host_obj=None, port=None,
                volume_name=None, init_user=None, init_password=None,
                static_ip=None):
    """Ask the host provider for a new VM and persist it as a Host.

    When *host_obj* is given it is updated in place (recreate flow);
    otherwise a fresh Host is created and its hostname set from the
    provider response. Raises HostProviderCreateVMException on any
    non-201 response.
    """
    endpoint = "{}/{}/{}/host/new".format(
        self.credential.endpoint, self.provider, self.environment)
    payload = {
        "engine": self.engine,
        "name": name,
        "cpu": offering.cpus,
        "memory": offering.memory_size_mb,
        "group": infra.name,
        "team_name": team_name,
        "database_name": database_name,
        "static_ip_id": static_ip and static_ip.identifier,
    }
    # Optional knobs are only sent when they carry a truthy value.
    for key, value in (("zone", zone),
                       ("port", port),
                       ("volume_name", volume_name),
                       ("init_user", init_user),
                       ("init_password", init_password)):
        if value:
            payload[key] = value

    resp = self._request(post, endpoint, json=payload, timeout=600)
    if resp.status_code != 201:
        raise HostProviderCreateVMException(resp.content, resp)

    content = resp.json()
    if host_obj is None:
        host = Host()
        host.hostname = content["address"]
    else:
        host = host_obj
    host.address = content["address"]
    host.user = self.vm_credential.user
    host.password = self.vm_credential.password
    host.provider = self.provider
    host.identifier = content["id"]
    host.offering = offering
    host.save()
    return host
def create_host(self, infra, offering, name):
    """Create a VM through the legacy host-provider endpoint and save a Host.

    Raises IndexError (legacy behaviour kept for existing callers) when
    the provider does not answer HTTP 201.
    """
    endpoint = "{}/{}/{}/host/new".format(
        self.credential.endpoint, self.provider, self.environment)
    payload = {
        "engine": self.engine,
        "name": name,
        "cpu": offering.cpus,
        "memory": offering.memory_size_mb,
        "group": infra.name,
    }
    resp = post(endpoint, json=payload)
    if resp.status_code != 201:
        raise IndexError(resp.content, resp)
    content = resp.json()
    new_host = Host()
    new_host.address = content["address"]
    new_host.hostname = new_host.address
    new_host.user = self.vm_credential.user
    new_host.password = self.vm_credential.password
    new_host.provider = self.provider
    new_host.identifier = content["id"]
    new_host.offering = offering
    new_host.save()
    return new_host
def do(self, workflow_dict):
    # Clone a Redis + Sentinel infra into the target environment: for each
    # source host deploy a target VM, then mirror its Redis instance
    # (hosts 0-1 only) and its Sentinel instance, wiring the
    # future_host/future_instance links used by the migration.
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        target_offering = workflow_dict['target_offering']
        cs_plan_attrs = PlanAttr.objects.get(
            plan=workflow_dict['target_plan'])
        bundles = list(cs_plan_attrs.bundle.all())
        workflow_dict['target_hosts'] = []
        workflow_dict['target_instances'] = []
        for index, source_host in enumerate(workflow_dict['source_hosts']):
            sentinel_source_instance = Instance.objects.filter(
                hostname=source_host,
                instance_type=Instance.REDIS_SENTINEL)[0]
            # Only the first two hosts run a data-bearing Redis instance.
            if index < 2:
                redis_source_instance = Instance.objects.filter(
                    hostname=source_host,
                    instance_type=Instance.REDIS)[0]
            vm_name = source_host.hostname.split('.')[0]
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['target_plan'], bundle=bundles)
            # The sentinel-only third host gets a weaker offering.
            if index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = target_offering
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name(
                    'affinity_group_id'),
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.save()
            workflow_dict['target_hosts'].append(host)
            source_host.future_host = host
            source_host.save()
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            if index < 2:
                # Mirror the source Redis instance onto the new host.
                redis_instance = Instance()
                redis_instance.address = host.address
                redis_instance.dns = host.address
                redis_instance.port = redis_source_instance.port
                redis_instance.is_active = redis_source_instance.is_active
                redis_instance.is_arbiter = redis_source_instance.is_arbiter
                redis_instance.instance_type = redis_source_instance.instance_type
                redis_instance.hostname = host
                redis_instance.databaseinfra = workflow_dict[
                    'databaseinfra']
                redis_instance.save()
                LOG.info("Instance created!")
                redis_source_instance.future_instance = redis_instance
                redis_source_instance.save()
                workflow_dict['target_instances'].append(redis_instance)
            # Every host gets a mirrored Sentinel instance.
            sentinel_instance = Instance()
            sentinel_instance.address = host.address
            sentinel_instance.dns = host.address
            sentinel_instance.port = sentinel_source_instance.port
            sentinel_instance.is_active = sentinel_source_instance.is_active
            sentinel_instance.is_arbiter = sentinel_source_instance.is_arbiter
            sentinel_instance.instance_type = sentinel_source_instance.instance_type
            sentinel_instance.hostname = host
            sentinel_instance.databaseinfra = workflow_dict[
                'databaseinfra']
            sentinel_instance.save()
            LOG.info("Instance created!")
            sentinel_source_instance.future_instance = sentinel_instance
            sentinel_source_instance.save()
            workflow_dict['target_instances'].append(sentinel_instance)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    # Clone an infra's instances into the target environment: one new VM
    # per source instance, mirroring the instance's settings and wiring
    # future_host/future_instance links for the migration.
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        target_offering = workflow_dict['target_offering']
        cs_plan_attrs = PlanAttr.objects.get(
            plan=workflow_dict['target_plan'])
        bundles = list(cs_plan_attrs.bundle.all())
        workflow_dict['target_hosts'] = []
        workflow_dict['target_instances'] = []
        for index, source_instance in enumerate(
                workflow_dict['source_instances']):
            # source_hosts is parallel to source_instances.
            source_host = workflow_dict['source_hosts'][index]
            vm_name = source_host.hostname.split('.')[0]
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['target_plan'], bundle=bundles)
            # The third VM gets a weaker offering (arbiter/sentinel role).
            if index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = target_offering
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name(
                    'affinity_group_id'),
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.save()
            workflow_dict['target_hosts'].append(host)
            source_host.future_host = host
            source_host.save()
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            # Mirror the source instance's settings onto the new host.
            instance = Instance()
            instance.address = host.address
            instance.dns = host.address
            instance.port = source_instance.port
            instance.is_active = source_instance.is_active
            instance.is_arbiter = source_instance.is_arbiter
            instance.instance_type = source_instance.instance_type
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")
            source_instance.future_instance = instance
            source_instance.save()
            workflow_dict['target_instances'].append(instance)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    # Deploy one VM per requested name and register Host/HostAttr/Instance,
    # choosing the port by engine (MySQL 3306 / MongoDB 27017 / Redis 6379).
    # MongoDB infras put an arbiter on the third VM with a weaker offering.
    # NOTE(review): the except clause of this try appears truncated in this
    # chunk of the file — confirm against the full source.
    try:
        # NOTE(review): `and` only bails when BOTH keys are missing; the
        # sibling MySQL step uses `or` — confirm which is intended.
        if not 'environment' in workflow_dict and not 'plan' in workflow_dict:
            return False
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])
        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []
        bundles = list(cs_plan_attrs.bundle.all())
        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            if bundles.__len__() == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['plan'], bundle=bundles)
            # MongoDB's third VM (the arbiter) runs on a weaker offering.
            if workflow_dict['enginecod'] == workflow_dict[
                    'MONGODB'] and index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = cs_plan_attrs.get_stronger_offering()
            # Create the DatabaseInfraOffering only on first use (EAFP).
            try:
                DatabaseInfraOffering.objects.get(
                    databaseinfra=workflow_dict['databaseinfra'])
            except ObjectDoesNotExist, e:
                LOG.info("Creating databaseInfra Offering...")
                dbinfra_offering = DatabaseInfraOffering()
                dbinfra_offering.offering = offering
                dbinfra_offering.databaseinfra = workflow_dict[
                    'databaseinfra']
                dbinfra_offering.save()
            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s" % (
                    bundle, offering))
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            LOG.debug("New virtualmachine: %s" % vm)
            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")
            workflow_dict['hosts'].append(host)
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            instance = Instance()
            instance.address = host.address
            # Port is engine-specific.
            if workflow_dict['enginecod'] == workflow_dict['MYSQL']:
                instance.port = 3306
            elif workflow_dict['enginecod'] == workflow_dict['MONGODB']:
                instance.port = 27017
            elif workflow_dict['enginecod'] == workflow_dict['REDIS']:
                instance.port = 6379
            instance.is_active = True
            # MongoDB's third instance is the replica-set arbiter.
            if workflow_dict['enginecod'] == workflow_dict[
                    'MONGODB'] and index == 2:
                instance.is_arbiter = True
            else:
                instance.is_arbiter = False
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")
            workflow_dict['instances'].append(instance)
            if workflow_dict['qt'] == 1:
                # Single-instance infra: the infra endpoint is that instance.
                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = instance.address + ":%i" % (
                    instance.port)
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra
                return True
        return True
def do(self, workflow_dict):
    # Clone an infra's instances onto new VMs, making sure each target VM
    # lands on a different network than its source (bundle is re-rolled
    # when its networkid matches the source VM's network).
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        offering = workflow_dict['offering']
        cs_plan_attrs = PlanAttr.objects.get(
            plan=workflow_dict['target_plan'])
        bundles = list(cs_plan_attrs.bundle.all())
        workflow_dict['target_hosts'] = []
        workflow_dict['target_instances'] = []
        for index, source_instance in enumerate(
                workflow_dict['source_instances']):
            # source_hosts is parallel to source_instances.
            source_host = workflow_dict['source_hosts'][index]
            vm_name = source_host.hostname.split('.')[0]
            source_host_attr = HostAttr.objects.get(host=source_host)
            source_network_id = cs_provider.get_vm_network_id(
                vm_id=source_host_attr.vm_id,
                project_id=cs_credentials.project)
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                # First VM asks for the infra's next bundle; later VMs
                # rotate from the previously chosen bundle.
                if index == 0:
                    bundle = LastUsedBundle.get_next_infra_bundle(
                        plan=workflow_dict['target_plan'], bundles=bundles)
                else:
                    bundle = LastUsedBundle.get_next_bundle(
                        bundle=bundle, bundles=bundles)
                # Avoid placing the target on the source VM's network.
                if bundle.networkid == source_network_id:
                    bundle = LastUsedBundle.get_next_bundle(
                        bundle=bundle, bundles=bundles)
            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s" %
                (bundle, offering))
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name(
                    'affinity_group_id'),
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.save()
            workflow_dict['target_hosts'].append(host)
            source_host.future_host = host
            source_host.save()
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            # Mirror the source instance's settings onto the new host.
            instance = Instance()
            instance.address = host.address
            instance.dns = host.address
            instance.port = source_instance.port
            instance.is_active = source_instance.is_active
            instance.is_arbiter = source_instance.is_arbiter
            instance.instance_type = source_instance.instance_type
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")
            source_instance.future_instance = instance
            source_instance.save()
            workflow_dict['target_instances'].append(instance)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def create_host(self, infra, offering, name, team_name, zone=None, database_name=''):
    """Provision a VM via the host-provider service and save it as a Host.

    Raises HostProviderCreateVMException unless the provider answers 201.
    """
    request_url = "{}/{}/{}/host/new".format(
        self.credential.endpoint, self.provider, self.environment)
    body = dict(
        engine=self.engine,
        name=name,
        cpu=offering.cpus,
        memory=offering.memory_size_mb,
        group=infra.name,
        team_name=team_name,
        database_name=database_name,
    )
    if zone:
        body['zone'] = zone
    result = self._request(post, request_url, json=body, timeout=600)
    if result.status_code != 201:
        # Anything but 201 means the provider refused or failed to create the VM.
        raise HostProviderCreateVMException(result.content, result)
    payload = result.json()
    created = Host()
    created.address = payload["address"]
    created.hostname = created.address
    created.user = self.vm_credential.user
    created.password = self.vm_credential.password
    created.provider = self.provider
    created.identifier = payload["id"]
    created.offering = offering
    created.save()
    return created
def do(self, workflow_dict):
    # Oldest multi-engine deploy step: one VM per requested name, port by
    # engine (MySQL 3306 / MongoDB 27017), MongoDB arbiter on the third VM.
    try:
        # NOTE(review): `and` only bails when BOTH keys are missing; the
        # sibling MySQL step uses `or` — confirm which is intended.
        if not 'environment' in workflow_dict and not 'plan' in workflow_dict:
            return False
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])
        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []
        bundles = list(cs_plan_attrs.bundle.all())
        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            if bundles.__len__() == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['plan'], bundle=bundles)
            # MongoDB's third VM (the arbiter) runs on a weaker offering.
            if workflow_dict['enginecod'] == workflow_dict['MONGODB'] and index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = cs_plan_attrs.get_stronger_offering()
            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s" %
                (bundle, offering))
            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
            )
            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")
            LOG.debug("New virtualmachine: %s" % vm)
            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])
            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")
            workflow_dict['hosts'].append(host)
            # Store the VM id and SSH credentials alongside the host.
            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")
            instance = Instance()
            instance.address = host.address
            # Port is engine-specific.
            if workflow_dict['enginecod'] == workflow_dict['MYSQL']:
                instance.port = 3306
            elif workflow_dict['enginecod'] == workflow_dict['MONGODB']:
                instance.port = 27017
            instance.is_active = True
            # MongoDB's third instance is the replica-set arbiter.
            if workflow_dict['enginecod'] == workflow_dict['MONGODB'] and index == 2:
                instance.is_arbiter = True
            else:
                instance.is_arbiter = False
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")
            workflow_dict['instances'].append(instance)
            if workflow_dict['qt'] == 1:
                # Single-instance infra: the infra endpoint is that instance.
                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = instance.address + ":%i" % (instance.port)
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra
                return True
        return True
    except Exception as e:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False