def undo(self, workflow_dict):
    try:
        original_cloudstackpack = workflow_dict['original_cloudstackpack']
        environment = workflow_dict['environment']

        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        original_serviceofferingid = original_cloudstackpack.offering.serviceofferingid

        if workflow_dict['offering_changed']:
            host = workflow_dict['host']
            host_csattr = HostAttr.objects.get(host=host)
            offering_changed = cs_provider.change_service_for_vm(
                vm_id=host_csattr.vm_id,
                serviceofferingid=original_serviceofferingid)
            if not offering_changed:
                raise Exception("Could not change offering for Host {}".format(host))
        else:
            LOG.info('No resize to instance {}'.format(workflow_dict['instance']))

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def stop_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']

        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances_detail = workflow_dict['instances_detail']
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            stopped = cs_provider.stop_virtual_machine(vm_id=host_csattr.vm_id)
            if not stopped:
                raise Exception("Could not stop host {}".format(host))

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances = workflow_dict['databaseinfra'].instances.all()

        if not instances:
            for vm_id in workflow_dict['vms_id']:
                cs_provider.destroy_virtual_machine(
                    project_id=cs_credentials.project,
                    environment=workflow_dict['environment'],
                    vm_id=vm_id)

            for host in workflow_dict['hosts']:
                host_attr = HostAttr.objects.filter(host=host)
                host.delete()
                LOG.info("Host deleted!")
                if host_attr:
                    host_attr[0].delete()
                    LOG.info("HostAttr deleted!")

        for instance in instances:
            host = instance.hostname
            host_attr = HostAttr.objects.get(host=host)

            LOG.info("Destroying virtualmachine %s" % host_attr.vm_id)
            cs_provider.destroy_virtual_machine(
                project_id=cs_credentials.project,
                environment=workflow_dict['environment'],
                vm_id=host_attr.vm_id)

            host_attr.delete()
            LOG.info("HostAttr deleted!")

            instance.delete()
            LOG.info("Instance deleted")

            host.delete()
            LOG.info("Host deleted!")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def update_bundle(self):
    from dbaas_cloudstack.provider import CloudStackProvider
    from dbaas_cloudstack.util import get_cs_credential

    databaseinfra = self.host.instances.all()[0].databaseinfra
    environment = databaseinfra.environment
    engine = databaseinfra.engine

    cs_credentials = get_cs_credential(environment)
    cs_provider = CloudStackProvider(credentials=cs_credentials)

    networkid = cs_provider.get_vm_network_id(
        vm_id=self.vm_id, project_id=cs_credentials.project)
    zoneid = cs_provider.get_vm_zone_id(
        vm_id=self.vm_id, project_id=cs_credentials.project)

    bundles = CloudStackBundle.objects.filter(
        networkid=networkid,
        zoneid=zoneid,
        engine=engine,
        region__environment=environment)
    if len(bundles) > 0:
        self.bundle = bundles[0]
        self.save()
def do(self, workflow_dict):
    try:
        database = workflow_dict['database']
        cloudstackpack = workflow_dict['cloudstackpack']
        instances_detail = workflow_dict['instances_detail']
        environment = workflow_dict['environment']

        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        serviceofferingid = cloudstackpack.offering.serviceofferingid

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            offering_changed = cs_provider.change_service_for_vm(
                vm_id=host_csattr.vm_id,
                serviceofferingid=serviceofferingid)
            if not offering_changed:
                raise Exception("Could not change offering for Host {}".format(host))

            instance_detail['offering_changed'] = True

        LOG.info('Updating offering DatabaseInfra.')
        databaseinfraoffering = DatabaseInfraOffering.objects.get(
            databaseinfra=database.databaseinfra)
        databaseinfraoffering.offering = cloudstackpack.offering
        databaseinfraoffering.save()

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def start_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']

        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances_detail = workflow_dict['instances_detail']
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            started = cs_provider.start_virtual_machine(vm_id=host_csattr.vm_id)
            if not started:
                raise Exception("Could not start host {}".format(host))

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                error = "Host %s is not ready..." % host
                LOG.warn(error)
                raise Exception(error)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        for source_host in workflow_dict['source_hosts']:
            host_attr = HostAttr.objects.get(host=source_host)

            LOG.info("Destroying virtualmachine %s" % host_attr.vm_id)
            cs_provider.destroy_virtual_machine(
                project_id=cs_credentials.project,
                environment=workflow_dict['source_environment'],
                vm_id=host_attr.vm_id)

            host_attr.delete()
            LOG.info("HostAttr deleted!")

            source_host.delete()
            LOG.info("Source host deleted")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        if 'databaseinfra' not in workflow_dict:
            LOG.info("We could not find a databaseinfra inside the workflow_dict")
            return False

        source_secondary_ip_ids = [
            secondary_ip.id
            for secondary_ip in workflow_dict['source_secondary_ips']]

        databaseinfraattr = DatabaseInfraAttr.objects.filter(
            databaseinfra=workflow_dict['databaseinfra'],
            equivalent_dbinfraattr=None).exclude(id__in=source_secondary_ip_ids)

        LOG.info("databaseinfraattr: {}".format(databaseinfraattr))
        LOG.info("old infra ip: {}".format(workflow_dict['source_secondary_ips']))

        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        for infra_attr in databaseinfraattr:
            networkapi_equipment_id = infra_attr.networkapi_equipment_id
            networkapi_ip_id = infra_attr.networkapi_ip_id

            if networkapi_ip_id:
                LOG.info("Removing network api IP for %s" % networkapi_ip_id)
                if not cs_provider.remove_networkapi_ip(
                        equipment_id=networkapi_equipment_id,
                        ip_id=networkapi_ip_id):
                    return False

            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False

            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        for source_instance in workflow_dict['source_instances']:
            source_instance.future_instance = None
            source_instance.save()
            LOG.info("Source instance updated")

        for target_instance in workflow_dict['target_instances']:
            target_instance.delete()
            LOG.info("Target instance deleted")

        for source_host in workflow_dict['source_hosts']:
            source_host.future_host = None
            source_host.save()
            LOG.info("Source host updated")

        for target_host in workflow_dict['target_hosts']:
            host_attr = HostAttr.objects.get(host=target_host)

            LOG.info("Destroying virtualmachine %s" % host_attr.vm_id)
            cs_provider.destroy_virtual_machine(
                project_id=cs_credentials.project,
                environment=workflow_dict['target_environment'],
                vm_id=host_attr.vm_id)

            host_attr.delete()
            LOG.info("HostAttr deleted!")

            target_host.delete()
            LOG.info("Target host deleted")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        for infra_attr in workflow_dict['source_secondary_ips']:
            networkapi_equipment_id = infra_attr.networkapi_equipment_id
            networkapi_ip_id = infra_attr.networkapi_ip_id

            if networkapi_ip_id:
                LOG.info("Removing network api IP for %s" % networkapi_ip_id)
                ip_removed = cs_provider.remove_networkapi_ip(
                    equipment_id=networkapi_equipment_id,
                    ip_id=networkapi_ip_id)
                if not ip_removed:
                    return False

            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False

            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        if 'databaseinfra' not in workflow_dict and 'hosts' not in workflow_dict:
            LOG.info("We could not find a databaseinfra inside the workflow_dict")
            return False

        if len(workflow_dict['hosts']) == 1:
            return True

        databaseinfraattr = DatabaseInfraAttr.objects.filter(
            databaseinfra=workflow_dict['databaseinfra'])

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        for infra_attr in databaseinfraattr:
            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False

            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        if 'databaseinfra' not in workflow_dict and 'hosts' not in workflow_dict:
            LOG.info("We could not find a databaseinfra inside the workflow_dict")
            return False

        if len(workflow_dict['hosts']) == 1:
            return True

        databaseinfraattr = DatabaseInfraAttr.objects.filter(
            databaseinfra=workflow_dict['databaseinfra'])

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        networkapi_equipment_id = workflow_dict.get('networkapi_equipment_id')

        for infra_attr in databaseinfraattr:
            networkapi_equipment_id = infra_attr.networkapi_equipment_id
            networkapi_ip_id = infra_attr.networkapi_ip_id

            if networkapi_ip_id:
                LOG.info("Removing network api IP for %s" % networkapi_ip_id)
                if not cs_provider.remove_networkapi_ip(
                        equipment_id=networkapi_equipment_id,
                        ip_id=networkapi_ip_id):
                    return False

            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False

            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        if networkapi_equipment_id:
            cs_provider.remove_networkapi_equipment(
                equipment_id=networkapi_equipment_id)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def __init__(self, instance):
    super(VmStep, self).__init__(instance)

    integration = CredentialType.objects.get(type=CredentialType.CLOUDSTACK)
    environment = self.instance.databaseinfra.environment
    credentials = Credential.get_credentials(environment, integration)
    self.provider = CloudStackProvider(credentials=credentials)

    self.host = self.instance.hostname
    self.host_cs = HostAttr.objects.get(host=self.host)

    new_plan = self.instance.databaseinfra.plan.engine_equivalent_plan
    cs_plan = PlanAttr.objects.get(plan=new_plan)
    self.bundle = cs_plan.bundle.first()
def stop_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']

        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances_detail = workflow_dict['instances_detail']
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            stopped = cs_provider.stop_virtual_machine(vm_id=host_csattr.vm_id)
            if not stopped:
                raise Exception("Could not stop host {}".format(host))

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        LOG.info("Getting cloudstack credentials...")
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        for index, hosts in enumerate(permutations(workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=hosts[0])

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=hosts[0].address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % hosts[0])
                return False

            host_nfsattr = HostAttr.objects.get(host=hosts[0])
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'STATSD_HOST': statsd_host,
                'STATSD_PORT': statsd_port,
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % hosts[0])
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'IPWRITE': workflow_dict['databaseinfraattr'][0].ip,
                    'IPREAD': workflow_dict['databaseinfraattr'][1].ip,
                    'MASTERPAIRNAME': workflow_dict['databaseinfra'].name,
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'INSTANCE01': workflow_dict['instances'][0],
                    'INSTANCE02': workflow_dict['instances'][1],
                    'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh',
                })

            LOG.info("Updating userdata for %s" % hosts[0])
            cs_provider.update_userdata(
                vm_id=host_csattr.vm_id, contextdict=contextdict,
                userdata=planattr.userdata)

            LOG.info("Executing script on %s" % hosts[0])
            return_code = exec_remote_command(
                server=hosts[0].address,
                username=host_csattr.vm_user,
                password=host_csattr.vm_password,
                command='/opt/dbaas/scripts/dbaas_userdata_script.sh')
            if return_code != 0:
                return False

        if len(workflow_dict['hosts']) > 1:
            for host in workflow_dict['hosts']:
                LOG.info("Executing script on %s" % host)
                return_code = exec_remote_command(
                    server=host.address,
                    username=host_csattr.vm_user,
                    password=host_csattr.vm_password,
                    command=contextdict['SECOND_SCRIPT_FILE'])
                if return_code != 0:
                    return False

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'environment' not in workflow_dict or 'plan' not in workflow_dict:
            return False

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)
        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])

        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []

        bundles = list(cs_plan_attrs.bundle.all())

        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            offering = cs_plan_attrs.get_stronger_offering()

            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                if index == 0:
                    bundle = LastUsedBundle.get_next_infra_bundle(
                        plan=workflow_dict['plan'], bundles=bundles)
                else:
                    bundle = LastUsedBundle.get_next_bundle(
                        bundle=bundle, bundles=bundles)

            try:
                DatabaseInfraOffering.objects.get(
                    databaseinfra=workflow_dict['databaseinfra'])
            except ObjectDoesNotExist:
                LOG.info("Creating databaseInfra Offering...")
                dbinfra_offering = DatabaseInfraOffering()
                dbinfra_offering.offering = offering
                dbinfra_offering.databaseinfra = workflow_dict['databaseinfra']
                dbinfra_offering.save()

            LOG.debug("Deploying new vm on cs with bundle %s and offering %s" % (bundle, offering))

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name('affinity_group_id'),
            )

            if not vm:
                raise Exception("CloudStack could not create the virtualmachine")

            LOG.debug("New virtualmachine: %s" % vm)
            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")
            workflow_dict['hosts'].append(host)

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            instance = Instance()
            instance.address = host.address
            instance.port = 3306
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.instance_type = Instance.MYSQL
            instance.save()
            LOG.info("Instance created!")
            workflow_dict['instances'].append(instance)

            if workflow_dict['qt'] == 1:
                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = instance.address + ":%i" % (instance.port)
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra
                return True

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def cs_provider(self):
    if not self.provider:
        self.provider = CloudStackProvider(credentials=self.cs_credentials)
    return self.provider
def do(self, workflow_dict):
    try:
        LOG.info("Getting cloudstack credentials...")
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        mongodbkey = ''.join(
            random.choice(string.hexdigits) for i in range(50))

        workflow_dict['replicasetname'] = 'RepicaSet_' + workflow_dict['databaseinfra'].name

        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=host)

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            if instance.is_arbiter:
                contextdict = {
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'STATSD_HOST': statsd_host,
                    'STATSD_PORT': statsd_port,
                }
                databaserule = 'ARBITER'
            else:
                host_nfsattr = HostAttr.objects.get(host=host)
                contextdict = {
                    'EXPORTPATH': host_nfsattr.nfsaas_path,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DBPASSWORD': get_credentials_for(
                        environment=workflow_dict['environment'],
                        credential_type=CredentialType.MONGODB).password,
                    'STATSD_HOST': statsd_host,
                    'STATSD_PORT': statsd_port,
                }
                if index == 0:
                    databaserule = 'PRIMARY'
                else:
                    databaserule = 'SECONDARY'

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % host)
                contextdict.update({
                    'REPLICASETNAME': workflow_dict['replicasetname'],
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'HOST03': workflow_dict['hosts'][2],
                    'MONGODBKEY': mongodbkey,
                    'DATABASERULE': databaserule,
                    'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh',
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                })

            LOG.info("Updating userdata for %s" % host)
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])
            cs_provider.update_userdata(
                vm_id=host_csattr.vm_id, contextdict=contextdict,
                userdata=planattr.userdata)

            LOG.info("Executing script on %s" % host)
            return_code = exec_remote_command(
                server=host.address,
                username=host_csattr.vm_user,
                password=host_csattr.vm_password,
                command='/opt/dbaas/scripts/dbaas_userdata_script.sh')
            if return_code != 0:
                return False

        if len(workflow_dict['hosts']) > 1:
            for host in workflow_dict['hosts']:
                LOG.info("Executing script on %s" % host)
                return_code = exec_remote_command(
                    server=host.address,
                    username=host_csattr.vm_user,
                    password=host_csattr.vm_password,
                    command=contextdict['SECOND_SCRIPT_FILE'])
                if return_code != 0:
                    return False

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'hosts' not in workflow_dict:
            return False

        if len(workflow_dict['hosts']) == 1:
            return True

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        workflow_dict['databaseinfraattr'] = []

        for host in workflow_dict['hosts']:
            LOG.info("Creating Secondary ips...")

            host_attr = HostAttr.objects.get(host=host)
            reserved_ip = cs_provider.reserve_ip(
                project_id=cs_credentials.project,
                vm_id=host_attr.vm_id)
            if not reserved_ip:
                return False

            total = DatabaseInfraAttr.objects.filter(
                databaseinfra=workflow_dict['databaseinfra']).count()

            databaseinfraattr = DatabaseInfraAttr()
            databaseinfraattr.ip = reserved_ip['secondary_ip']

            if total == 0:
                databaseinfraattr.is_write = True

                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = databaseinfraattr.ip + ":%i" % 3306
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra
            else:
                databaseinfraattr.is_write = False

            databaseinfraattr.cs_ip_id = reserved_ip['cs_ip_id']
            databaseinfraattr.databaseinfra = workflow_dict['databaseinfra']
            databaseinfraattr.save()

            workflow_dict['databaseinfraattr'].append(databaseinfraattr)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'target_hosts' not in workflow_dict:
            return False

        if len(workflow_dict['target_hosts']) == 1:
            return True

        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)

        LOG.info("Get credential for network api...")
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)
        if not cs_provider:
            raise Exception("Could not create CloudStackProvider object")

        workflow_dict['target_secondary_ips'] = []

        networkapi_equipment_id = workflow_dict['source_secondary_ips'][0].networkapi_equipment_id
        if not networkapi_equipment_id:
            raise Exception("Could not register networkapi equipment")

        for index, host in enumerate(workflow_dict['target_hosts']):
            LOG.info("Creating Secondary ips...")

            host_attr = HostAttr.objects.get(host=host)
            reserved_ip = cs_provider.reserve_ip(
                project_id=cs_credentials.project,
                vm_id=host_attr.vm_id)
            if not reserved_ip:
                return False

            databaseinfraattr = DatabaseInfraAttr()
            databaseinfraattr.ip = reserved_ip['secondary_ip']

            if index == 0:
                databaseinfraattr.is_write = True
                ip_desc = 'Write IP'
            else:
                databaseinfraattr.is_write = False
                ip_desc = 'Read IP'

            networkapi_ip_id = cs_provider.register_networkapi_ip(
                equipment_id=networkapi_equipment_id,
                ip=reserved_ip['secondary_ip'],
                ip_desc=ip_desc)

            databaseinfraattr.cs_ip_id = reserved_ip['cs_ip_id']
            databaseinfraattr.networkapi_equipment_id = networkapi_equipment_id
            databaseinfraattr.networkapi_ip_id = networkapi_ip_id
            databaseinfraattr.databaseinfra = workflow_dict['databaseinfra']
            databaseinfraattr.save()

            old_ip = workflow_dict['source_secondary_ips'][index]
            old_ip.equivalent_dbinfraattr = databaseinfraattr
            old_ip.save()

            workflow_dict['target_secondary_ips'].append(databaseinfraattr)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        LOG.info("Getting cloudstack credentials...")
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=host)

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            host_nfsattr = HostAttr.objects.get(host=host)
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': workflow_dict['databaseinfra'].password,
                'HOSTADDRESS': instance.address,
                'PORT': instance.port,
                'ENGINE': 'redis',
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'STATSD_HOST': statsd_host,
                'STATSD_PORT': statsd_port,
            }
            LOG.info(contextdict)

            LOG.info("Updating userdata for %s" % host)
            script = build_context_script(contextdict, planattr.userdata)
            # cs_provider.update_userdata(
            #     vm_id=host_csattr.vm_id, contextdict=contextdict,
            #     userdata=planattr.userdata)

            LOG.info("Executing script on %s" % host)
            LOG.info(script)
            output = {}
            return_code = exec_remote_command(
                server=host.address,
                username=host_csattr.vm_user,
                password=host_csattr.vm_password,
                command=script, output=output)
            LOG.info(output)
            if return_code != 0:
                return False

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0016)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'environment' not in workflow_dict and 'plan' not in workflow_dict:
            return False

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)
        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])

        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []

        bundles = list(cs_plan_attrs.bundle.all())

        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['plan'], bundle=bundles)

            if workflow_dict['enginecod'] == workflow_dict['MONGODB'] and index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = cs_plan_attrs.get_stronger_offering()

            LOG.debug("Deploying new vm on cs with bundle %s and offering %s" % (bundle, offering))

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
            )

            if not vm:
                raise Exception("CloudStack could not create the virtualmachine")

            LOG.debug("New virtualmachine: %s" % vm)
            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")
            workflow_dict['hosts'].append(host)

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            instance = Instance()
            instance.address = host.address

            if workflow_dict['enginecod'] == workflow_dict['MYSQL']:
                instance.port = 3306
            elif workflow_dict['enginecod'] == workflow_dict['MONGODB']:
                instance.port = 27017

            instance.is_active = True

            if workflow_dict['enginecod'] == workflow_dict['MONGODB'] and index == 2:
                instance.is_arbiter = True
            else:
                instance.is_arbiter = False

            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")
            workflow_dict['instances'].append(instance)

            if workflow_dict['qt'] == 1:
                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = instance.address + ":%i" % (instance.port)
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra
                return True

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def resize_database(self, database, cloudstackpack, task_history=None, user=None):
    AuditRequest.new_request("resize_database", user, "localhost")
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(
            request=self.request, task_history=task_history,
            user=user, worker_name=worker_name)

        from util.providers import resize_database_instance
        from util.providers import undo_resize_database_instance
        from util import get_credentials_for
        from dbaas_cloudstack.provider import CloudStackProvider
        from dbaas_credentials.models import CredentialType

        cs_credentials = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        databaseinfra = database.databaseinfra
        driver = databaseinfra.get_driver()
        instances = driver.get_database_instances()

        resized_instances = []
        for instance in instances:
            host = instance.hostname
            host_attr = host.cs_host_attributes.get()
            offering = cs_provider.get_vm_offering_id(
                vm_id=host_attr.vm_id, project_id=cs_credentials.project)

            if offering == cloudstackpack.offering:
                LOG.info("Instance offering: {}".format(offering))
                continue

            if databaseinfra.plan.is_ha:
                LOG.info("Waiting 60s to check continue...")
                sleep(60)
                driver.check_replication_and_switch(instance)
                LOG.info("Waiting 60s to check continue...")
                sleep(60)

            result = resize_database_instance(
                database=database, cloudstackpack=cloudstackpack,
                instance=instance, task=task_history)

            if not result['created']:
                if 'exceptions' in result:
                    error = "\n".join(
                        ": ".join(err) for err in result['exceptions']['error_codes'])
                    traceback = "\nException Traceback\n".join(
                        result['exceptions']['traceback'])
                    error = "{}\n{}\n{}".format(error, traceback, error)
                else:
                    error = "Something went wrong."
                break
            else:
                resized_instances.append(instance)

        if len(instances) == len(resized_instances):
            from dbaas_cloudstack.models import DatabaseInfraOffering
            LOG.info('Updating offering DatabaseInfra.')

            databaseinfraoffering = DatabaseInfraOffering.objects.get(
                databaseinfra=databaseinfra)
            databaseinfraoffering.offering = cloudstackpack.offering
            databaseinfraoffering.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS, details='Resize successfully done.')
            return

        # Roll back the instances that were already resized before failing the task.
        for instance in resized_instances:
            if databaseinfra.plan.is_ha:
                if driver.check_instance_is_master(instance):
                    LOG.info("Waiting 60s to check continue...")
                    sleep(60)
                    driver.check_replication_and_switch(instance, attempts=60)
                    LOG.info("Waiting 60s to check continue...")
                    sleep(60)

            undo_resize_database_instance(
                database=database, cloudstackpack=cloudstackpack,
                instance=instance, task=task_history)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
        return
    except Exception as e:
        error = "Resize Database ERROR: {}".format(e)
        LOG.error(error)
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
def resize_database(self, database, cloudstackpack, task_history=None, user=None):
    AuditRequest.new_request("resize_database", user, "localhost")
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(
            request=self.request, task_history=task_history,
            user=user, worker_name=worker_name)

        from util.providers import resize_database_instances
        from util import get_credentials_for
        from dbaas_cloudstack.provider import CloudStackProvider
        from dbaas_credentials.models import CredentialType

        cs_credentials = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        databaseinfra = database.databaseinfra
        driver = databaseinfra.get_driver()
        instances = driver.get_slave_instances()
        instances.append(driver.get_master_instance())

        instances_to_resize = []
        resized_instances = []

        disable_zabbix_alarms(database)

        for instance in instances:
            host = instance.hostname
            host_attr = host.cs_host_attributes.get()
            offering_id = cs_provider.get_vm_offering_id(
                vm_id=host_attr.vm_id, project_id=cs_credentials.project)

            if offering_id == cloudstackpack.offering.serviceofferingid:
                LOG.info("Instance offering: {}".format(offering_id))
                resized_instances.append(instance)
            else:
                instances_to_resize.append(instance)

        result = resize_database_instances(
            database=database, cloudstackpack=cloudstackpack,
            instances=instances_to_resize, task=task_history)

        if result['created']:
            resized_instances += result['completed_instances']
        else:
            if 'exceptions' not in result:
                error = "Something went wrong."
            else:
                error = "\n".join(
                    ": ".join(err) for err in result['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    result['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)

        if databaseinfra.plan.is_ha:
            LOG.info("Waiting 60s to check continue...")
            sleep(60)
            instance = driver.get_slave_instances()[0]
            driver.check_replication_and_switch(instance)

        if len(instances) == len(resized_instances):
            from dbaas_cloudstack.models import DatabaseInfraOffering
            LOG.info('Updating offering DatabaseInfra.')

            databaseinfraoffering = DatabaseInfraOffering.objects.get(
                databaseinfra=databaseinfra)
            databaseinfraoffering.offering = cloudstackpack.offering
            databaseinfraoffering.save()

            if databaseinfra.engine.engine_type.name == 'redis':
                new_max_memory = databaseinfraoffering.offering.memory_size_mb
                resize_factor = 0.5
                if new_max_memory > 1024:
                    resize_factor = 0.75

                new_max_memory *= resize_factor
                databaseinfra.per_database_size_mbytes = int(new_max_memory)
                databaseinfra.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS, details='Resize successfully done.')
            return

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
        return
    except Exception as e:
        error = "Resize Database ERROR: {}".format(e)
        LOG.error(error)
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
    finally:
        enable_zabbix_alarms(database)
        AuditRequest.cleanup_request()
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)

        target_offering = workflow_dict['target_offering']

        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['target_plan'])
        bundles = list(cs_plan_attrs.bundle.all())

        workflow_dict['target_hosts'] = []
        workflow_dict['target_instances'] = []

        for index, source_host in enumerate(workflow_dict['source_hosts']):
            sentinel_source_instance = Instance.objects.filter(
                hostname=source_host,
                instance_type=Instance.REDIS_SENTINEL)[0]
            if index < 2:
                redis_source_instance = Instance.objects.filter(
                    hostname=source_host,
                    instance_type=Instance.REDIS)[0]

            vm_name = source_host.hostname.split('.')[0]

            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['target_plan'], bundle=bundles)

            if index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = target_offering

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name('affinity_group_id'),
            )

            if not vm:
                raise Exception("CloudStack could not create the virtualmachine")

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.save()
            workflow_dict['target_hosts'].append(host)

            source_host.future_host = host
            source_host.save()

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            if index < 2:
                redis_instance = Instance()
                redis_instance.address = host.address
                redis_instance.dns = host.address
                redis_instance.port = redis_source_instance.port
                redis_instance.is_active = redis_source_instance.is_active
                redis_instance.is_arbiter = redis_source_instance.is_arbiter
                redis_instance.instance_type = redis_source_instance.instance_type
                redis_instance.hostname = host
                redis_instance.databaseinfra = workflow_dict['databaseinfra']
                redis_instance.save()
                LOG.info("Instance created!")

                redis_source_instance.future_instance = redis_instance
                redis_source_instance.save()
                workflow_dict['target_instances'].append(redis_instance)

            sentinel_instance = Instance()
            sentinel_instance.address = host.address
            sentinel_instance.dns = host.address
            sentinel_instance.port = sentinel_source_instance.port
            sentinel_instance.is_active = sentinel_source_instance.is_active
            sentinel_instance.is_arbiter = sentinel_source_instance.is_arbiter
            sentinel_instance.instance_type = sentinel_source_instance.instance_type
            sentinel_instance.hostname = host
            sentinel_instance.databaseinfra = workflow_dict['databaseinfra']
            sentinel_instance.save()
            LOG.info("Instance created!")

            sentinel_source_instance.future_instance = sentinel_instance
            sentinel_source_instance.save()
            workflow_dict['target_instances'].append(sentinel_instance)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)

        target_offering = workflow_dict['target_offering']

        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['target_plan'])
        bundles = list(cs_plan_attrs.bundle.all())

        workflow_dict['target_hosts'] = []
        workflow_dict['target_instances'] = []

        for index, source_instance in enumerate(workflow_dict['source_instances']):
            source_host = workflow_dict['source_hosts'][index]
            vm_name = source_host.hostname.split('.')[0]

            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['target_plan'], bundle=bundles)

            if index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = target_offering

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name('affinity_group_id'),
            )

            if not vm:
                raise Exception("CloudStack could not create the virtualmachine")

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.save()
            workflow_dict['target_hosts'].append(host)

            source_host.future_host = host
            source_host.save()

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            instance = Instance()
            instance.address = host.address
            instance.dns = host.address
            instance.port = source_instance.port
            instance.is_active = source_instance.is_active
            instance.is_arbiter = source_instance.is_arbiter
            instance.instance_type = source_instance.instance_type
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")

            source_instance.future_instance = instance
            source_instance.save()
            workflow_dict['target_instances'].append(instance)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        LOG.info("Getting cloudstack credentials...")
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        mongodbkey = ''.join(random.choice(string.hexdigits) for i in range(50))

        workflow_dict['replicasetname'] = 'RepicaSet_' + workflow_dict['databaseinfra'].name

        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=host)

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            if instance.is_arbiter:
                contextdict = {
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'STATSD_HOST': statsd_host,
                    'STATSD_PORT': statsd_port,
                }
                databaserule = 'ARBITER'
            else:
                host_nfsattr = HostAttr.objects.get(host=host)
                contextdict = {
                    'EXPORTPATH': host_nfsattr.nfsaas_path,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DBPASSWORD': get_credentials_for(
                        environment=workflow_dict['environment'],
                        credential_type=CredentialType.MONGODB).password,
                    'STATSD_HOST': statsd_host,
                    'STATSD_PORT': statsd_port,
                }
                if index == 0:
                    databaserule = 'PRIMARY'
                else:
                    databaserule = 'SECONDARY'

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % host)
                contextdict.update({
                    'REPLICASETNAME': workflow_dict['replicasetname'],
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'HOST03': workflow_dict['hosts'][2],
                    'MONGODBKEY': mongodbkey,
                    'DATABASERULE': databaserule,
                    'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh',
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                })

            LOG.info("Updating userdata for %s" % host)
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])
            cs_provider.update_userdata(
                vm_id=host_csattr.vm_id, contextdict=contextdict,
                userdata=planattr.userdata)

            LOG.info("Executing script on %s" % host)
            return_code = exec_remote_command(
                server=host.address,
                username=host_csattr.vm_user,
                password=host_csattr.vm_password,
                command='/opt/dbaas/scripts/dbaas_userdata_script.sh')
            if return_code != 0:
                return False

        if len(workflow_dict['hosts']) > 1:
            for host in workflow_dict['hosts']:
                LOG.info("Executing script on %s" % host)
                return_code = exec_remote_command(
                    server=host.address,
                    username=host_csattr.vm_user,
                    password=host_csattr.vm_password,
                    command=contextdict['SECOND_SCRIPT_FILE'])
                if return_code != 0:
                    return False

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        LOG.info("Getting cloudstack credentials...")
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        for index, hosts in enumerate(permutations(workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=hosts[0])

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=hosts[0].address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % hosts[0])
                return False

            host_nfsattr = HostAttr.objects.get(host=hosts[0])
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'STATSD_HOST': statsd_host,
                'STATSD_PORT': statsd_port,
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % hosts[0])
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'IPWRITE': workflow_dict['databaseinfraattr'][0].ip,
                    'IPREAD': workflow_dict['databaseinfraattr'][1].ip,
                    'MASTERPAIRNAME': workflow_dict['databaseinfra'].name,
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'INSTANCE01': workflow_dict['instances'][0],
                    'INSTANCE02': workflow_dict['instances'][1],
                    'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh',
                })

            LOG.info("Updating userdata for %s" % hosts[0])
            cs_provider.update_userdata(
                vm_id=host_csattr.vm_id, contextdict=contextdict,
                userdata=planattr.userdata)

            LOG.info("Executing script on %s" % hosts[0])
            return_code = exec_remote_command(
                server=hosts[0].address,
                username=host_csattr.vm_user,
                password=host_csattr.vm_password,
                command='/opt/dbaas/scripts/dbaas_userdata_script.sh')
            if return_code != 0:
                return False

        if len(workflow_dict['hosts']) > 1:
            for host in workflow_dict['hosts']:
                LOG.info("Executing script on %s" % host)
                return_code = exec_remote_command(
                    server=host.address,
                    username=host_csattr.vm_user,
                    password=host_csattr.vm_password,
                    command=contextdict['SECOND_SCRIPT_FILE'])
                if return_code != 0:
                    return False

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)

        offering = workflow_dict['offering']

        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['target_plan'])
        bundles = list(cs_plan_attrs.bundle.all())

        workflow_dict['target_hosts'] = []
        workflow_dict['target_instances'] = []

        for index, source_instance in enumerate(workflow_dict['source_instances']):
            source_host = workflow_dict['source_hosts'][index]
            vm_name = source_host.hostname.split('.')[0]

            source_host_attr = HostAttr.objects.get(host=source_host)
            source_network_id = cs_provider.get_vm_network_id(
                vm_id=source_host_attr.vm_id,
                project_id=cs_credentials.project)

            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                if index == 0:
                    bundle = LastUsedBundle.get_next_infra_bundle(
                        plan=workflow_dict['target_plan'], bundles=bundles)
                else:
                    bundle = LastUsedBundle.get_next_bundle(
                        bundle=bundle, bundles=bundles)

                if bundle.networkid == source_network_id:
                    bundle = LastUsedBundle.get_next_bundle(
                        bundle=bundle, bundles=bundles)

            LOG.debug("Deploying new vm on cs with bundle %s and offering %s" % (bundle, offering))

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name('affinity_group_id'),
            )

            if not vm:
                raise Exception("CloudStack could not create the virtualmachine")

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.save()
            workflow_dict['target_hosts'].append(host)

            source_host.future_host = host
            source_host.save()

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            instance = Instance()
            instance.address = host.address
            instance.dns = host.address
            instance.port = source_instance.port
            instance.is_active = source_instance.is_active
            instance.is_arbiter = source_instance.is_arbiter
            instance.instance_type = source_instance.instance_type
            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")

            source_instance.future_instance = instance
            source_instance.save()
            workflow_dict['target_instances'].append(instance)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances = workflow_dict['databaseinfra'].instances.all()

        if not instances:
            for vm_id in workflow_dict['vms_id']:
                cs_provider.destroy_virtual_machine(
                    project_id=cs_credentials.project,
                    environment=workflow_dict['environment'],
                    vm_id=vm_id)

            for host in workflow_dict['hosts']:
                host_attr = HostAttr.objects.filter(host=host)
                host.delete()
                LOG.info("Host deleted!")
                if host_attr:
                    host_attr[0].delete()
                    LOG.info("HostAttr deleted!")

        for instance in instances:
            if len(Instance.objects.filter(hostname=instance.hostname)) > 1:
                instance.delete()
                LOG.info("Instance deleted")
                continue

            host = instance.hostname
            host_attr = HostAttr.objects.get(host=host)

            LOG.info("Destroying virtualmachine %s" % host_attr.vm_id)
            cs_provider.destroy_virtual_machine(
                project_id=cs_credentials.project,
                environment=workflow_dict['environment'],
                vm_id=host_attr.vm_id)

            host_attr.delete()
            LOG.info("HostAttr deleted!")

            instance.delete()
            LOG.info("Instance deleted")

            host.delete()
            LOG.info("Host deleted!")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'hosts' not in workflow_dict:
            return False

        if len(workflow_dict['hosts']) == 1:
            return True

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)

        LOG.info("Get credential for network api...")
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)
        if not cs_provider:
            raise Exception("Could not create CloudStackProvider object")

        workflow_dict['databaseinfraattr'] = []

        networkapi_equipment_id = cs_provider.register_networkapi_equipment(
            equipment_name=workflow_dict['names']['infra'])
        if not networkapi_equipment_id:
            raise Exception("Could not register networkapi equipment")

        workflow_dict['networkapi_equipment_id'] = networkapi_equipment_id

        for host in workflow_dict['hosts']:
            LOG.info("Creating Secondary ips...")

            host_attr = HostAttr.objects.get(host=host)
            reserved_ip = cs_provider.reserve_ip(
                project_id=cs_credentials.project,
                vm_id=host_attr.vm_id)
            if not reserved_ip:
                return False

            total = DatabaseInfraAttr.objects.filter(
                databaseinfra=workflow_dict['databaseinfra']).count()

            databaseinfraattr = DatabaseInfraAttr()
            databaseinfraattr.ip = reserved_ip['secondary_ip']

            if total == 0:
                databaseinfraattr.is_write = True

                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = databaseinfraattr.ip + ":%i" % 3306
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra

                ip_desc = 'Write IP'
            else:
                databaseinfraattr.is_write = False
                ip_desc = 'Read IP'

            networkapi_ip_id = cs_provider.register_networkapi_ip(
                equipment_id=networkapi_equipment_id,
                ip=reserved_ip['secondary_ip'],
                ip_desc=ip_desc)

            databaseinfraattr.cs_ip_id = reserved_ip['cs_ip_id']
            databaseinfraattr.networkapi_equipment_id = networkapi_equipment_id
            databaseinfraattr.networkapi_ip_id = networkapi_ip_id
            databaseinfraattr.databaseinfra = workflow_dict['databaseinfra']
            databaseinfraattr.save()

            workflow_dict['databaseinfraattr'].append(databaseinfraattr)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'environment' not in workflow_dict and 'plan' not in workflow_dict:
            return False

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)

        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])

        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []

        bundles = list(cs_plan_attrs.bundle.all())

        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['plan'], bundle=bundles)

            if index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = cs_plan_attrs.get_stronger_offering()

            try:
                DatabaseInfraOffering.objects.get(
                    databaseinfra=workflow_dict['databaseinfra'])
            except ObjectDoesNotExist:
                LOG.info("Creating databaseInfra Offering...")
                dbinfra_offering = DatabaseInfraOffering()
                dbinfra_offering.offering = offering
                dbinfra_offering.databaseinfra = workflow_dict['databaseinfra']
                dbinfra_offering.save()

            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s" %
                (bundle, offering))

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
                affinity_group_id=cs_credentials.get_parameter_by_name(
                    'affinity_group_id'),
            )

            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")

            LOG.debug("New virtualmachine: %s" % vm)

            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")

            workflow_dict['hosts'].append(host)

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            if index in (0, 1):
                instance = Instance()
                instance.address = host.address
                instance.port = 6379
                instance.is_active = True
                instance.hostname = host
                instance.databaseinfra = workflow_dict['databaseinfra']
                instance.instance_type = Instance.REDIS
                instance.save()
                LOG.info("Instance created!")

                workflow_dict['instances'].append(instance)

                if workflow_dict['qt'] == 1:
                    LOG.info("Updating databaseinfra endpoint...")
                    databaseinfra = workflow_dict['databaseinfra']
                    databaseinfra.endpoint = instance.address + ":%i" % instance.port
                    databaseinfra.save()
                    workflow_dict['databaseinfra'] = databaseinfra
            else:
                instance = Instance()
                instance.address = host.address
                instance.port = 26379
                instance.is_active = True
                instance.hostname = host
                instance.databaseinfra = workflow_dict['databaseinfra']
                instance.instance_type = Instance.REDIS_SENTINEL
                instance.save()
                LOG.info("Instance created!")

                workflow_dict['instances'].append(instance)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
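# Hedged sketch of the workflow_dict shape the Redis deploy step above appears to
# expect. The key names are taken from the function itself; the helper below and
# its placeholder values are hypothetical stand-ins for the real Django model
# instances, not part of the project.
def build_example_workflow_dict(environment, plan, databaseinfra):
    return {
        'environment': environment,      # Environment tied to the CloudStack/VM credentials
        'plan': plan,                     # Plan whose PlanAttr carries bundles and offerings
        'databaseinfra': databaseinfra,   # DatabaseInfra that will own the new instances
        'qt': 1,                          # with a single instance the endpoint is updated inline
        # index 2 is treated as the sentinel-only VM and receives the weaker offering
        'names': {'vms': ['redis-vm-01', 'redis-vm-02', 'redis-sentinel-03']},
        'exceptions': {'error_codes': [], 'traceback': []},
    }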
def undo(self, workflow_dict):
    LOG.info("Running undo...")
    try:
        if 'databaseinfra' not in workflow_dict:
            LOG.info(
                "We could not find a databaseinfra inside the workflow_dict")
            return False

        source_secondary_ip_ids = [
            secondary_ip.id
            for secondary_ip in workflow_dict['source_secondary_ips']
        ]

        databaseinfraattr = DatabaseInfraAttr.objects.filter(
            databaseinfra=workflow_dict['databaseinfra'],
            equivalent_dbinfraattr=None).exclude(
                id__in=source_secondary_ip_ids)

        LOG.info("databaseinfraattr: {}".format(databaseinfraattr))
        LOG.info("old infra ip: {}".format(
            workflow_dict['source_secondary_ips']))

        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        for infra_attr in databaseinfraattr:
            networkapi_equipment_id = infra_attr.networkapi_equipment_id
            networkapi_ip_id = infra_attr.networkapi_ip_id

            if networkapi_ip_id:
                LOG.info("Removing network api IP for %s" % networkapi_ip_id)
                if not cs_provider.remove_networkapi_ip(
                        equipment_id=networkapi_equipment_id,
                        ip_id=networkapi_ip_id):
                    return False

            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False

            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'environment' not in workflow_dict and 'plan' not in workflow_dict:
            return False

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)
        vm_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.VM)

        cs_provider = CloudStackProvider(credentials=cs_credentials)

        cs_plan_attrs = PlanAttr.objects.get(plan=workflow_dict['plan'])

        workflow_dict['hosts'] = []
        workflow_dict['instances'] = []
        workflow_dict['databaseinfraattr'] = []
        workflow_dict['vms_id'] = []

        bundles = list(cs_plan_attrs.bundle.all())

        for index, vm_name in enumerate(workflow_dict['names']['vms']):
            if len(bundles) == 1:
                bundle = bundles[0]
            else:
                bundle = LastUsedBundle.get_next_bundle(
                    plan=workflow_dict['plan'], bundle=bundles)

            if workflow_dict['enginecod'] == workflow_dict['MONGODB'] and index == 2:
                offering = cs_plan_attrs.get_weaker_offering()
            else:
                offering = cs_plan_attrs.get_stronger_offering()

            try:
                DatabaseInfraOffering.objects.get(
                    databaseinfra=workflow_dict['databaseinfra'])
            except ObjectDoesNotExist:
                LOG.info("Creating databaseInfra Offering...")
                dbinfra_offering = DatabaseInfraOffering()
                dbinfra_offering.offering = offering
                dbinfra_offering.databaseinfra = workflow_dict['databaseinfra']
                dbinfra_offering.save()

            LOG.debug(
                "Deploying new vm on cs with bundle %s and offering %s" %
                (bundle, offering))

            vm = cs_provider.deploy_virtual_machine(
                offering=offering.serviceofferingid,
                bundle=bundle,
                project_id=cs_credentials.project,
                vmname=vm_name,
            )

            if not vm:
                raise Exception(
                    "CloudStack could not create the virtualmachine")

            LOG.debug("New virtualmachine: %s" % vm)

            workflow_dict['vms_id'].append(vm['virtualmachine'][0]['id'])

            host = Host()
            host.address = vm['virtualmachine'][0]['nic'][0]['ipaddress']
            host.hostname = host.address
            host.cloud_portal_host = True
            host.save()
            LOG.info("Host created!")

            workflow_dict['hosts'].append(host)

            host_attr = HostAttr()
            host_attr.vm_id = vm['virtualmachine'][0]['id']
            host_attr.vm_user = vm_credentials.user
            host_attr.vm_password = vm_credentials.password
            host_attr.host = host
            host_attr.save()
            LOG.info("Host attrs custom attributes created!")

            instance = Instance()
            instance.address = host.address

            if workflow_dict['enginecod'] == workflow_dict['MYSQL']:
                instance.port = 3306
            elif workflow_dict['enginecod'] == workflow_dict['MONGODB']:
                instance.port = 27017
            elif workflow_dict['enginecod'] == workflow_dict['REDIS']:
                instance.port = 6379

            instance.is_active = True

            if workflow_dict['enginecod'] == workflow_dict['MONGODB'] and index == 2:
                instance.is_arbiter = True
            else:
                instance.is_arbiter = False

            instance.hostname = host
            instance.databaseinfra = workflow_dict['databaseinfra']
            instance.save()
            LOG.info("Instance created!")

            workflow_dict['instances'].append(instance)

            if workflow_dict['qt'] == 1:
                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = instance.address + ":%i" % instance.port
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0011)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
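# The engine-to-port selection above is an if/elif chain keyed on 'enginecod'. A
# dictionary lookup is an equivalent, slightly more compact alternative; this is
# only a sketch, and the string keys below are hypothetical placeholders for the
# real numeric codes stored in workflow_dict['MYSQL'], ['MONGODB'] and ['REDIS'].
DEFAULT_PORTS = {
    'mysql': 3306,
    'mongodb': 27017,
    'redis': 6379,
}

def port_for_engine(enginecod, ports=DEFAULT_PORTS):
    # Raises KeyError for an unknown engine instead of silently leaving the port unset.
    return ports[enginecod]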
def do(self, workflow_dict):
    try:
        if 'target_hosts' not in workflow_dict:
            return False

        if len(workflow_dict['target_hosts']) == 1:
            return True

        cs_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.CLOUDSTACK)

        LOG.info("Get credential for network api...")
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['target_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        if not cs_provider:
            raise Exception("Could not create CloudStackProvider object")

        workflow_dict['target_secondary_ips'] = []

        networkapi_equipment_id = workflow_dict[
            'source_secondary_ips'][0].networkapi_equipment_id

        if not networkapi_equipment_id:
            raise Exception("Could not register networkapi equipment")

        for index, host in enumerate(workflow_dict['target_hosts']):
            LOG.info("Creating Secondary ips...")

            host_attr = HostAttr.objects.get(host=host)

            reserved_ip = cs_provider.reserve_ip(
                project_id=cs_credentials.project,
                vm_id=host_attr.vm_id)

            if not reserved_ip:
                return False

            databaseinfraattr = DatabaseInfraAttr()
            databaseinfraattr.ip = reserved_ip['secondary_ip']

            if index == 0:
                databaseinfraattr.is_write = True
                ip_desc = 'Write IP'
            else:
                databaseinfraattr.is_write = False
                ip_desc = 'Read IP'

            networkapi_ip_id = cs_provider.register_networkapi_ip(
                equipment_id=networkapi_equipment_id,
                ip=reserved_ip['secondary_ip'],
                ip_desc=ip_desc)

            databaseinfraattr.cs_ip_id = reserved_ip['cs_ip_id']
            databaseinfraattr.networkapi_equipment_id = networkapi_equipment_id
            databaseinfraattr.networkapi_ip_id = networkapi_ip_id
            databaseinfraattr.databaseinfra = workflow_dict['databaseinfra']
            databaseinfraattr.save()

            old_ip = workflow_dict['source_secondary_ips'][index]
            old_ip.equivalent_dbinfraattr = databaseinfraattr
            old_ip.save()

            workflow_dict['target_secondary_ips'].append(databaseinfraattr)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
def do(self, workflow_dict):
    try:
        if 'hosts' not in workflow_dict:
            return False

        if len(workflow_dict['hosts']) == 1:
            return True

        cs_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.CLOUDSTACK)

        LOG.info("Get credential for network api...")
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        if not cs_provider:
            raise Exception("Could not create CloudStackProvider object")

        workflow_dict['databaseinfraattr'] = []

        networkapi_equipment_id = cs_provider.register_networkapi_equipment(
            equipment_name=workflow_dict['names']['infra'])

        if not networkapi_equipment_id:
            raise Exception("Could not register networkapi equipment")

        for host in workflow_dict['hosts']:
            LOG.info("Creating Secondary ips...")

            host_attr = HostAttr.objects.get(host=host)

            reserved_ip = cs_provider.reserve_ip(
                project_id=cs_credentials.project,
                vm_id=host_attr.vm_id)

            if not reserved_ip:
                return False

            total = DatabaseInfraAttr.objects.filter(
                databaseinfra=workflow_dict['databaseinfra']).count()

            databaseinfraattr = DatabaseInfraAttr()
            databaseinfraattr.ip = reserved_ip['secondary_ip']

            if total == 0:
                databaseinfraattr.is_write = True

                LOG.info("Updating databaseinfra endpoint...")
                databaseinfra = workflow_dict['databaseinfra']
                databaseinfra.endpoint = databaseinfraattr.ip + ":%i" % 3306
                databaseinfra.save()
                workflow_dict['databaseinfra'] = databaseinfra

                ip_desc = 'Write IP'
            else:
                databaseinfraattr.is_write = False
                ip_desc = 'Read IP'

            networkapi_ip_id = cs_provider.register_networkapi_ip(
                equipment_id=networkapi_equipment_id,
                ip=reserved_ip['secondary_ip'],
                ip_desc=ip_desc)

            databaseinfraattr.cs_ip_id = reserved_ip['cs_ip_id']
            databaseinfraattr.networkapi_equipment_id = networkapi_equipment_id
            databaseinfraattr.networkapi_ip_id = networkapi_ip_id
            databaseinfraattr.databaseinfra = workflow_dict['databaseinfra']
            databaseinfraattr.save()

            workflow_dict['databaseinfraattr'].append(databaseinfraattr)

        return True
    except Exception:
        traceback = full_stack()

        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)

        return False
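# Hedged sketch of how steps like the ones above are typically exercised: the caller
# seeds workflow_dict['exceptions'], runs do(), and falls back to undo() on failure.
# "step" stands for an instance of one of these workflow step classes, and this
# driver is illustrative only, not the project's actual workflow engine; LOG is the
# module-level logger already used throughout these steps.
def run_step(step, workflow_dict):
    workflow_dict.setdefault('exceptions', {'error_codes': [], 'traceback': []})

    if step.do(workflow_dict):
        return True

    # On failure the error code and full traceback were appended by do(); roll back.
    LOG.warning("Step failed with codes %s, rolling back",
                workflow_dict['exceptions']['error_codes'])
    step.undo(workflow_dict)
    return False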