def start_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        instances_detail = workflow_dict['instances_detail']

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            started = cs_provider.start_virtual_machine(
                vm_id=host_csattr.vm_id)
            if not started:
                raise Exception("Could not start host {}".format(host))

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_ready = check_ssh(host, wait=5, interval=10)
            if not host_ready:
                error = "Host %s is not ready..." % host
                LOG.warn(error)
                raise Exception(error)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def start_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        host = workflow_dict['host']
        host_csattr = HostAttr.objects.get(host=host)

        started = cs_provider.start_virtual_machine(vm_id=host_csattr.vm_id)
        if not started:
            raise Exception("Could not start host {}".format(host))

        host_ready = check_ssh(
            server=host.address, username=host_csattr.vm_user,
            password=host_csattr.vm_password, retries=50, wait=20,
            interval=30)
        if not host_ready:
            error = "Host %s is not ready..." % host
            LOG.warn(error)
            raise Exception(error)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self):
    host_ready = check_ssh(
        self.host.address, self.host_cs.vm_user, self.host_cs.vm_password,
        wait=5, interval=10
    )
    if not host_ready:
        raise EnvironmentError('VM is not ready')
def start_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        instances_detail = workflow_dict['instances_detail']

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            started = cs_provider.start_virtual_machine(
                vm_id=host_csattr.vm_id)
            if not started:
                raise Exception("Could not start host {}".format(host))

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                error = "Host %s is not ready..." % host
                LOG.warn(error)
                raise Exception(error)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def vm_is_up(self, attempts=2, wait=5, interval=10):
    return check_ssh(
        self.host, retries=attempts, wait=wait, interval=interval
    )
def do(self, workflow_dict):
    try:
        for index, instance in enumerate(workflow_dict['target_instances']):
            if instance.instance_type == instance.MONGODB_ARBITER:
                continue
            if instance.instance_type == instance.REDIS_SENTINEL:
                continue

            host = instance.hostname
            LOG.info("Mounting disks on host {}".format(host))

            cs_host_attr = CS_HostAttr.objects.get(host=host)
            nfs_host_attr = NFS_HostAttr.objects.get(host=host)

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=host.address, username=cs_host_attr.vm_user,
                password=cs_host_attr.vm_password, wait=5, interval=10)
            if not host_ready:
                raise Exception(str("Host %s is not ready..." % host))

            context_dict = {
                'EXPORTPATH': nfs_host_attr.nfsaas_path,
            }

            script = test_bash_script_error()
            script += build_mount_disk_script()
            script = build_context_script(context_dict, script)
            LOG.info(script)

            output = {}
            return_code = exec_remote_command(
                server=host.address, username=cs_host_attr.vm_user,
                password=cs_host_attr.vm_password, command=script,
                output=output)
            LOG.info(output)
            if return_code != 0:
                raise Exception(str(output))

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def is_up(host_obj, attempts=2, wait=5, interval=10):
    return check_ssh(host_obj, retries=attempts, wait=wait, interval=interval)
def do(self):
    host_ready = check_ssh(self.host, wait=5, interval=10)
    if not host_ready:
        raise EnvironmentError('VM is not ready')
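# Note: check_ssh() itself is not defined in any of the snippets above. The
# sketch below is a hypothetical stand-in that only illustrates the retry
# contract implied by the call sites (retries/wait/interval keyword
# arguments); it is not the real dbaas util implementation.
import socket
import time


def check_ssh_sketch(server, port=22, retries=6, wait=5, interval=10):
    # Sleep `wait` seconds before the first attempt, then probe the SSH port
    # until it accepts a TCP connection or the retry budget is exhausted.
    time.sleep(wait)
    for _ in range(retries):
        try:
            socket.create_connection((server, port), timeout=5).close()
            return True
        except socket.error:
            time.sleep(interval)
    return False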
def do(self, workflow_dict):
    try:
        mongodbkey = ''.join(
            random.choice(string.hexdigits) for i in range(50))

        workflow_dict['replicasetname'] = 'ReplicaSet_' + \
            workflow_dict['databaseinfra'].name

        mongodb_password = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.MONGODB).password

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=host)

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            host.update_os_description()

            if instance.is_arbiter:
                contextdict = {
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'IS_HA': workflow_dict['databaseinfra'].plan.is_ha
                }
                databaserule = 'ARBITER'
            else:
                host_nfsattr = HostAttr.objects.get(host=host)
                contextdict = {
                    'EXPORTPATH': host_nfsattr.nfsaas_path,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DBPASSWORD': mongodb_password,
                    'IS_HA': workflow_dict['databaseinfra'].plan.is_ha
                }
                if index == 0:
                    databaserule = 'PRIMARY'
                else:
                    databaserule = 'SECONDARY'

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % host)
                contextdict.update({
                    'REPLICASETNAME': workflow_dict['replicasetname'],
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'HOST03': workflow_dict['hosts'][2],
                    'MONGODBKEY': mongodbkey,
                    'DATABASERULE': databaserule,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                })
            else:
                contextdict.update({'DATABASERULE': databaserule})

            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])
            scripts = (planattr.initialization_script,
                       planattr.configuration_script,
                       planattr.start_database_script)

            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script)
                if return_code != 0:
                    return False

        if len(workflow_dict['hosts']) > 1:
            scripts_to_run = planattr.start_replication_script
            contextdict.update({
                'DBPASSWORD': mongodb_password,
                'DATABASERULE': 'PRIMARY'
            })
            scripts_to_run = build_context_script(contextdict, scripts_to_run)
            host = workflow_dict['hosts'][0]
            host_csattr = CsHostAttr.objects.get(host=host)
            return_code = exec_remote_command(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, command=scripts_to_run)
            if return_code != 0:
                return False

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) mongodbkey = ''.join(random.choice(string.hexdigits) for i in range(50)) workflow_dict['replicasetname'] = 'RepicaSet_' + workflow_dict['databaseinfra'].name statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, instance in enumerate(workflow_dict['instances']): host = instance.hostname LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=host) LOG.info("Cheking host ssh...") host_ready = check_ssh( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % host) return False if instance.is_arbiter: contextdict = { 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'DATABASENAME': workflow_dict['name'], 'ENGINE': 'mongodb', 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } databaserule = 'ARBITER' else: host_nfsattr = HostAttr.objects.get(host=host) contextdict = { 'EXPORTPATH': host_nfsattr.nfsaas_path, 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'DATABASENAME': workflow_dict['name'], 'ENGINE': 'mongodb', 'DBPASSWORD': get_credentials_for(environment=workflow_dict['environment'], credential_type=CredentialType.MONGODB).password, 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } if index == 0: databaserule = 'PRIMARY' else: databaserule = 'SECONDARY' if len(workflow_dict['hosts']) > 1: LOG.info("Updating contexdict for %s" % host) contextdict.update({ 'REPLICASETNAME': workflow_dict['replicasetname'], 'HOST01': workflow_dict['hosts'][0], 'HOST02': workflow_dict['hosts'][1], 'HOST03': workflow_dict['hosts'][2], 'MONGODBKEY': mongodbkey, 'DATABASERULE': databaserule, 'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh', 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], }) LOG.info("Updating userdata for %s" % host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) cs_provider.update_userdata( vm_id=host_csattr.vm_id, contextdict=contextdict, userdata=planattr.userdata) LOG.info("Executing script on %s" % host) return_code = exec_remote_command(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command='/opt/dbaas/scripts/dbaas_userdata_script.sh') if return_code != 0: return False if len(workflow_dict['hosts']) > 1: for host in workflow_dict['hosts']: LOG.info("Executing script on %s" % host) return_code = exec_remote_command(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=contextdict['SECOND_SCRIPT_FILE']) if return_code != 0: return False return True except Exception, e: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0014) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict):
    try:
        for index, hosts in enumerate(permutations(workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=hosts[0])

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=hosts[0].address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % hosts[0])
                return False

            host_nfsattr = HostAttr.objects.get(host=hosts[0])
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % hosts[0])
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'IPWRITE': workflow_dict['databaseinfraattr'][0].ip,
                    'IPREAD': workflow_dict['databaseinfraattr'][1].ip,
                    'MASTERPAIRNAME': workflow_dict['databaseinfra'].name,
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'INSTANCE01': workflow_dict['instances'][0],
                    'INSTANCE02': workflow_dict['instances'][1],
                    'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh'
                })

            scripts = (planattr.initialization_script,
                       planattr.configuration_script,
                       planattr.start_database_script)

            host = hosts[0]
            host.update_os_description()
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script)
                if return_code != 0:
                    return False

        if len(workflow_dict['hosts']) > 1:
            for hosts in permutations(workflow_dict['hosts']):
                script = planattr.start_replication_script
                host = hosts[0]
                contextdict.update({'IPMASTER': hosts[1].address})
                script = build_context_script(contextdict, script)
                host_csattr = CsHostAttr.objects.get(host=host)
                LOG.info("Executing script on %s" % host)
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script)
                if return_code != 0:
                    return False

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") statsd_credentials = get_credentials_for( environment=workflow_dict['target_environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') databaseinfra = workflow_dict['databaseinfra'] sentinel = databaseinfra.get_driver().get_sentinel_client() master = sentinel.discover_master(databaseinfra.name) master_host = master[0] master_port = master[1] for index, source_host in enumerate(workflow_dict['source_hosts']): target_host = source_host.future_host LOG.info(target_host) target_cs_host_attr = CS_HostAttr.objects.get(host=target_host) if index == 2: LOG.info("Cheking host ssh...") host_ready = check_ssh(server=target_host.address, username=target_cs_host_attr.vm_user, password=target_cs_host_attr.vm_password, wait=5, interval=10) if not host_ready: raise Exception( str("Host %s is not ready..." % target_host)) script = test_bash_script_error() script += build_permission_script() script = build_context_script({}, script) output = {} LOG.info(script) return_code = exec_remote_command(server=target_host.address, username=target_cs_host_attr.vm_user, password=target_cs_host_attr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: raise Exception(str(output)) instances_redis = Instance.objects.filter(hostname=target_host, instance_type=Instance.REDIS) instances_sentinel = Instance.objects.filter(hostname=target_host, instance_type=Instance.REDIS_SENTINEL) if instances_redis: only_sentinel = False instance_redis_address = instances_redis[0].address instance_redis_port = instances_redis[0].port else: only_sentinel = True instance_redis_address = '' instance_redis_port = '' if instances_sentinel: instance_sentinel_address = instances_sentinel[0].address instance_sentinel_port = instances_sentinel[0].port else: instance_sentinel_address = '' instance_sentinel_port = '' contextdict = { 'DATABASENAME': workflow_dict['database'].name, 'DBPASSWORD': databaseinfra.password, 'HOSTADDRESS': instance_redis_address, 'PORT': instance_redis_port, 'ENGINE': 'redis', 'HOST': source_host.hostname.split('.')[0], 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, 'IS_HA': databaseinfra.plan.is_ha, 'SENTINELMASTER': master_host, 'SENTINELMASTERPORT': master_port, 'SENTINELADDRESS': instance_sentinel_address, 'SENTINELPORT': instance_sentinel_port, 'MASTERNAME': databaseinfra.name, 'ONLY_SENTINEL': only_sentinel, } planattr = PlanAttr.objects.get( plan=workflow_dict['source_plan']) script = build_context_script( contextdict, planattr.configuration_script) output = {} LOG.info(script) return_code = exec_remote_command(server=target_host.address, username=target_cs_host_attr.vm_user, password=target_cs_host_attr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: raise Exception(str(output)) if index < 2: change_slave_priority_file(host=target_host, original_value=100, final_value=0) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0020) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) mongodbkey = ''.join( random.choice(string.hexdigits) for i in range(50)) workflow_dict['replicasetname'] = 'RepicaSet_' + workflow_dict[ 'databaseinfra'].name statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, instance in enumerate(workflow_dict['instances']): host = instance.hostname LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=host) LOG.info("Cheking host ssh...") host_ready = check_ssh(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % host) return False if instance.is_arbiter: contextdict = { 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'DATABASENAME': workflow_dict['name'], 'ENGINE': 'mongodb', 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } databaserule = 'ARBITER' else: host_nfsattr = HostAttr.objects.get(host=host) contextdict = { 'EXPORTPATH': host_nfsattr.nfsaas_path, 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'DATABASENAME': workflow_dict['name'], 'ENGINE': 'mongodb', 'DBPASSWORD': get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.MONGODB).password, 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } if index == 0: databaserule = 'PRIMARY' else: databaserule = 'SECONDARY' if len(workflow_dict['hosts']) > 1: LOG.info("Updating contexdict for %s" % host) contextdict.update({ 'REPLICASETNAME': workflow_dict['replicasetname'], 'HOST01': workflow_dict['hosts'][0], 'HOST02': workflow_dict['hosts'][1], 'HOST03': workflow_dict['hosts'][2], 'MONGODBKEY': mongodbkey, 'DATABASERULE': databaserule, 'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh', 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], }) LOG.info("Updating userdata for %s" % host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) cs_provider.update_userdata(vm_id=host_csattr.vm_id, contextdict=contextdict, userdata=planattr.userdata) LOG.info("Executing script on %s" % host) return_code = exec_remote_command( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command='/opt/dbaas/scripts/dbaas_userdata_script.sh') if return_code != 0: return False if len(workflow_dict['hosts']) > 1: for host in workflow_dict['hosts']: LOG.info("Executing script on %s" % host) return_code = exec_remote_command( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=contextdict['SECOND_SCRIPT_FILE']) if return_code != 0: return False return True except Exception, e: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0014) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict):
    try:
        mongodbkey = ''.join(
            random.choice(string.hexdigits) for i in range(50))

        workflow_dict['replicasetname'] = 'RepicaSet_' + \
            workflow_dict['databaseinfra'].name

        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        mongodb_password = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.MONGODB).password

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=host)

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            if instance.is_arbiter:
                contextdict = {
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'STATSD_HOST': statsd_host,
                    'STATSD_PORT': statsd_port,
                    'IS_HA': workflow_dict['databaseinfra'].plan.is_ha
                }
                databaserule = 'ARBITER'
            else:
                host_nfsattr = HostAttr.objects.get(host=host)
                contextdict = {
                    'EXPORTPATH': host_nfsattr.nfsaas_path,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DBPASSWORD': mongodb_password,
                    'STATSD_HOST': statsd_host,
                    'STATSD_PORT': statsd_port,
                    'IS_HA': workflow_dict['databaseinfra'].plan.is_ha
                }
                if index == 0:
                    databaserule = 'PRIMARY'
                else:
                    databaserule = 'SECONDARY'

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % host)
                contextdict.update({
                    'REPLICASETNAME': workflow_dict['replicasetname'],
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'HOST03': workflow_dict['hosts'][2],
                    'MONGODBKEY': mongodbkey,
                    'DATABASERULE': databaserule,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                })
            else:
                contextdict.update({'DATABASERULE': databaserule})

            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])
            scripts = (planattr.initialization_script,
                       planattr.configuration_script,
                       planattr.start_database_script)

            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script)
                if return_code != 0:
                    return False

        if len(workflow_dict['hosts']) > 1:
            scripts_to_run = planattr.start_replication_script
            contextdict.update({'DBPASSWORD': mongodb_password,
                                'DATABASERULE': 'PRIMARY'})
            scripts_to_run = build_context_script(contextdict, scripts_to_run)
            host = workflow_dict['hosts'][0]
            host_csattr = CsHostAttr.objects.get(host=host)
            return_code = exec_remote_command(
                server=host.address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, command=scripts_to_run)
            if return_code != 0:
                return False

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    try:
        offering = workflow_dict['plan'].stronger_offering
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        plan = workflow_dict['plan']

        for index, host in enumerate(workflow_dict['hosts']):
            LOG.info("Getting vm credentials...")

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(host, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            host.update_os_description()

            instances_redis = Instance.objects.filter(
                hostname=host, instance_type=Instance.REDIS)
            instances_sentinel = Instance.objects.filter(
                hostname=host, instance_type=Instance.REDIS_SENTINEL)

            if instances_redis:
                host_nfsattr = HostAttr.objects.get(host=host)
                nfsaas_path = host_nfsattr.nfsaas_path
                only_sentinel = False
                instance_redis_address = instances_redis[0].address
                instance_redis_port = instances_redis[0].port
            else:
                nfsaas_path = ""
                only_sentinel = True
                instance_redis_address = ''
                instance_redis_port = ''

            if instances_sentinel:
                instance_sentinel_address = instances_sentinel[0].address
                instance_sentinel_port = instances_sentinel[0].port
            else:
                instance_sentinel_address = ''
                instance_sentinel_port = ''

            if index == 0:
                master_host = instance_redis_address
                master_port = instance_redis_port

            contextdict = {
                'EXPORTPATH': nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': workflow_dict['databaseinfra'].password,
                'HOSTADDRESS': instance_redis_address,
                'PORT': instance_redis_port,
                'ENGINE': 'redis',
                'HOST': host.hostname.split('.')[0],
                'DRIVER_NAME': workflow_dict['databaseinfra'].get_driver().topology_name(),
                'SENTINELMASTER': master_host,
                'SENTINELMASTERPORT': master_port,
                'SENTINELADDRESS': instance_sentinel_address,
                'SENTINELPORT': instance_sentinel_port,
                'MASTERNAME': workflow_dict['databaseinfra'].name,
                'ONLY_SENTINEL': only_sentinel,
                'HAS_PERSISTENCE': workflow_dict['plan'].has_persistence,
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'configuration': configuration,
                'GRAYLOG_ENDPOINT': graylog_endpoint,
            }
            LOG.info(contextdict)

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template,
                       plan.script.start_replication_template)

            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command_host(host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

            if index > 0 and instances_redis:
                client = instances_redis[0].databaseinfra.get_driver(
                ).get_client(instances_redis[0])
                client.slaveof(master_host, master_port)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0016)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    try:
        statsd_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.STATSD)
        statsd_host, statsd_port = statsd_credentials.endpoint.split(':')

        for index, hosts in enumerate(permutations(workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=hosts[0])

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=hosts[0].address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, retries=60, wait=30,
                interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % hosts[0])
                return False

            host_nfsattr = HostAttr.objects.get(host=hosts[0])
            planattr = PlanAttr.objects.get(plan=workflow_dict['plan'])

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'STATSD_HOST': statsd_host,
                'STATSD_PORT': statsd_port,
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % hosts[0])
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                })

            scripts = (planattr.initialization_script,
                       planattr.configuration_script,
                       planattr.start_database_script)

            host = hosts[0]
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script)
                if return_code != 0:
                    return False

        if len(workflow_dict['hosts']) > 1:
            for hosts in permutations(workflow_dict['hosts']):
                script = planattr.start_replication_script
                host = hosts[0]
                contextdict.update({'IPMASTER': hosts[1].address})
                script = build_context_script(contextdict, script)
                host_csattr = CsHostAttr.objects.get(host=host)
                LOG.info("Executing script on %s" % host)
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script)
                if return_code != 0:
                    return False

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") statsd_credentials = get_credentials_for( environment=workflow_dict['target_environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') databaseinfra = workflow_dict['databaseinfra'] sentinel = databaseinfra.get_driver().get_sentinel_client() master = sentinel.discover_master(databaseinfra.name) master_host = master[0] master_port = master[1] for index, source_host in enumerate(workflow_dict['source_hosts']): target_host = source_host.future_host LOG.info(target_host) target_cs_host_attr = CS_HostAttr.objects.get(host=target_host) if index == 2: LOG.info("Cheking host ssh...") host_ready = check_ssh( server=target_host.address, username=target_cs_host_attr.vm_user, password=target_cs_host_attr.vm_password, wait=5, interval=10) if not host_ready: raise Exception( str("Host %s is not ready..." % target_host)) script = test_bash_script_error() script += build_permission_script() script = build_context_script({}, script) output = {} LOG.info(script) return_code = exec_remote_command( server=target_host.address, username=target_cs_host_attr.vm_user, password=target_cs_host_attr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: raise Exception(str(output)) instances_redis = Instance.objects.filter( hostname=target_host, instance_type=Instance.REDIS) instances_sentinel = Instance.objects.filter( hostname=target_host, instance_type=Instance.REDIS_SENTINEL) if instances_redis: only_sentinel = False instance_redis_address = instances_redis[0].address instance_redis_port = instances_redis[0].port else: only_sentinel = True instance_redis_address = '' instance_redis_port = '' if instances_sentinel: instance_sentinel_address = instances_sentinel[0].address instance_sentinel_port = instances_sentinel[0].port else: instance_sentinel_address = '' instance_sentinel_port = '' contextdict = { 'DATABASENAME': workflow_dict['database'].name, 'DBPASSWORD': databaseinfra.password, 'HOSTADDRESS': instance_redis_address, 'PORT': instance_redis_port, 'ENGINE': 'redis', 'HOST': source_host.hostname.split('.')[0], 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, 'IS_HA': databaseinfra.plan.is_ha, 'SENTINELMASTER': master_host, 'SENTINELMASTERPORT': master_port, 'SENTINELADDRESS': instance_sentinel_address, 'SENTINELPORT': instance_sentinel_port, 'MASTERNAME': databaseinfra.name, 'ONLY_SENTINEL': only_sentinel, } planattr = PlanAttr.objects.get( plan=workflow_dict['source_plan']) script = build_context_script(contextdict, planattr.configuration_script) output = {} LOG.info(script) return_code = exec_remote_command( server=target_host.address, username=target_cs_host_attr.vm_user, password=target_cs_host_attr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: raise Exception(str(output)) if index < 2: change_slave_priority_file(host=target_host, original_value=100, final_value=0) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0020) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, host in enumerate(workflow_dict['hosts']): LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=host) LOG.info("Cheking host ssh...") host_ready = check_ssh( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % host) return False instances_redis = Instance.objects.filter(hostname=host, instance_type=Instance.REDIS) instances_sentinel = Instance.objects.filter(hostname=host, instance_type=Instance.REDIS_SENTINEL) if instances_redis: host_nfsattr = HostAttr.objects.get(host=host) nfsaas_path = host_nfsattr.nfsaas_path only_sentinel = False instance_redis_address = instances_redis[0].address instance_redis_port = instances_redis[0].port else: nfsaas_path = "" only_sentinel = True instance_redis_address = '' instance_redis_port = '' if instances_sentinel: instance_sentinel_address = instances_sentinel[0].address instance_sentinel_port = instances_sentinel[0].port else: instance_sentinel_address = '' instance_sentinel_port = '' if index == 0: master_host = instance_redis_address master_port = instance_redis_port contextdict = { 'EXPORTPATH': nfsaas_path, 'DATABASENAME': workflow_dict['name'], 'DBPASSWORD': workflow_dict['databaseinfra'].password, 'HOSTADDRESS': instance_redis_address, 'PORT': instance_redis_port, 'ENGINE': 'redis', 'HOST': host.hostname.split('.')[0], 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, 'IS_HA': workflow_dict['databaseinfra'].plan.is_ha, 'SENTINELMASTER': master_host, 'SENTINELMASTERPORT': master_port, 'SENTINELADDRESS': instance_sentinel_address, 'SENTINELPORT': instance_sentinel_port, 'MASTERNAME': workflow_dict['databaseinfra'].name, 'ONLY_SENTINEL': only_sentinel, } LOG.info(contextdict) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) scripts = (planattr.initialization_script, planattr.configuration_script, planattr.start_database_script, planattr.start_replication_script) for script in scripts: LOG.info("Executing script on %s" % host) script = build_context_script(contextdict, script) return_code = exec_remote_command(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=script) if return_code != 0: return False if index > 0 and instances_redis: client = instances_redis[ 0].databaseinfra.get_driver().get_client(instances_redis[0]) client.slaveof(master_host, master_port) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0016) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict):
    try:
        cloud_stack = workflow_dict['plan'].cs_plan_attributes.first()
        offering = cloud_stack.get_stronger_offering()
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        mongodbkey = ''.join(
            random.choice(string.hexdigits) for i in range(50))

        infra = workflow_dict['databaseinfra']
        if infra.plan.is_ha:
            infra.database_key = mongodbkey
            infra.save()

        workflow_dict['replicasetname'] = infra.get_driver().replica_set_name

        mongodb_password = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.MONGODB).password

        disk_offering = workflow_dict['plan'].disk_offering

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        plan = workflow_dict['plan']

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(host, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            host.update_os_description()

            if instance.instance_type == instance.MONGODB_ARBITER:
                contextdict = {
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DRIVER_NAME': infra.get_driver().topology_name(),
                    'configuration': configuration,
                }
                databaserule = 'ARBITER'
            else:
                host_nfsattr = HostAttr.objects.get(host=host)
                contextdict = {
                    'EXPORTPATH': host_nfsattr.nfsaas_path,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DBPASSWORD': mongodb_password,
                    'DRIVER_NAME': infra.get_driver().topology_name(),
                    'configuration': configuration,
                }
                if index == 0:
                    databaserule = 'PRIMARY'
                else:
                    databaserule = 'SECONDARY'

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % host)
                contextdict.update({
                    'REPLICASETNAME': workflow_dict['replicasetname'],
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'HOST03': workflow_dict['hosts'][2],
                    'MONGODBKEY': mongodbkey,
                    'DATABASERULE': databaserule,
                    'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                })
            else:
                contextdict.update({'DATABASERULE': databaserule})

            contextdict.update({
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'DISK_SIZE_IN_GB': disk_offering.size_gb(),
                'GRAYLOG_ENDPOINT': graylog_endpoint
            })

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template)

            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command_host(host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        if len(workflow_dict['hosts']) > 1:
            scripts_to_run = plan.script.start_replication_template
            contextdict.update({
                'DBPASSWORD': mongodb_password,
                'DATABASERULE': 'PRIMARY'
            })
            scripts_to_run = build_context_script(contextdict, scripts_to_run)
            host = workflow_dict['hosts'][0]
            output = {}
            return_code = exec_remote_command_host(host, scripts_to_run,
                                                   output)
            if return_code != 0:
                error_msg = "Error executing script. Stdout: {} - " \
                            "stderr: {}".format(output['stdout'],
                                                output['stderr'])
                raise Exception(error_msg)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
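# Note: build_context_script() is used by most steps above but never shown.
# The stand-in below is only a sketch of the idea (render a script template
# against a context dict); the placeholder syntax and the helper name are
# assumptions, not the real dbaas implementation, which may use Django
# templating instead.
from string import Template


def build_context_script_sketch(context_dict, script_template):
    # Replaces ${EXPORTPATH}, ${DATABASENAME}, ... markers with values from
    # context_dict; unknown markers are left untouched by safe_substitute().
    return Template(script_template).safe_substitute(context_dict)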
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, hosts in enumerate(permutations(workflow_dict['hosts'])): LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=hosts[0]) LOG.info("Cheking host ssh...") host_ready = check_ssh( server=hosts[0].address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % hosts[0]) return False host_nfsattr = HostAttr.objects.get(host=hosts[0]) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) contextdict = { 'EXPORTPATH': host_nfsattr.nfsaas_path, 'DATABASENAME': workflow_dict['name'], 'DBPASSWORD': get_credentials_for(environment=workflow_dict['environment'], credential_type=CredentialType.MYSQL).password, 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'ENGINE': 'mysql', 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } if len(workflow_dict['hosts']) > 1: LOG.info("Updating contexdict for %s" % hosts[0]) contextdict.update({ 'SERVERID': index + 1, 'IPMASTER': hosts[1].address, 'IPWRITE': workflow_dict['databaseinfraattr'][0].ip, 'IPREAD': workflow_dict['databaseinfraattr'][1].ip, 'MASTERPAIRNAME': workflow_dict['databaseinfra'].name, 'HOST01': workflow_dict['hosts'][0], 'HOST02': workflow_dict['hosts'][1], 'INSTANCE01': workflow_dict['instances'][0], 'INSTANCE02': workflow_dict['instances'][1], 'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh' }) LOG.info("Updating userdata for %s" % hosts[0]) cs_provider.update_userdata( vm_id=host_csattr.vm_id, contextdict=contextdict, userdata=planattr.userdata) LOG.info("Executing script on %s" % hosts[0]) return_code = exec_remote_command(server=hosts[0].address, username=host_csattr.vm_user, password=host_csattr.vm_password, command='/opt/dbaas/scripts/dbaas_userdata_script.sh') if return_code != 0: return False if len(workflow_dict['hosts']) > 1: for host in workflow_dict['hosts']: LOG.info("Executing script on %s" % hosts[0]) return_code = exec_remote_command(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=contextdict['SECOND_SCRIPT_FILE']) if return_code != 0: return False return True except Exception, e: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0013) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, instance in enumerate(workflow_dict['instances']): host = instance.hostname LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=host) LOG.info("Cheking host ssh...") host_ready = check_ssh( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % host) return False host_nfsattr = HostAttr.objects.get(host=host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) contextdict = { 'EXPORTPATH': host_nfsattr.nfsaas_path, 'DATABASENAME': workflow_dict['name'], 'DBPASSWORD': workflow_dict['databaseinfra'].password, 'HOSTADDRESS': instance.address, 'PORT': instance.port, 'ENGINE': 'redis', 'DATABASENAME': workflow_dict['name'], 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } LOG.info(contextdict) LOG.info("Updating userdata for %s" % host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) script = build_context_script(contextdict, planattr.userdata) #cs_provider.update_userdata( # vm_id=host_csattr.vm_id, contextdict=contextdict, userdata=planattr.userdata) LOG.info("Executing script on %s" % host) LOG.info(script) output ={} return_code = exec_remote_command(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: return False return True except Exception, e: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0016) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict):
    try:
        region_migration_dir = Configuration.get_by_name(
            'region_migration_dir')
        if not region_migration_dir:
            region_migration_dir = '/tmp'

        workflow_dict['region_migration_dir_infra_name'] = "{}/{}".format(
            region_migration_dir, workflow_dict['databaseinfra'].name)

        for index, source_instance in enumerate(
                workflow_dict['source_instances']):
            source_host = source_instance.hostname
            source_cs_host_attr = CS_HostAttr.objects.get(host=source_host)

            hostname = source_host.hostname.split('.')[0]
            localpath = "{}/{}".format(
                workflow_dict['region_migration_dir_infra_name'], hostname)
            os.makedirs(localpath)

            LOG.info('Get source host files to {}'.format(localpath))
            if not scp_get_file(server=source_host.address,
                                username=source_cs_host_attr.vm_user,
                                password=source_cs_host_attr.vm_password,
                                localpath="{}/mongodb.key".format(localpath),
                                remotepath="/data/mongodb.key"):
                raise Exception("FTP Error")

            if not scp_get_file(server=source_host.address,
                                username=source_cs_host_attr.vm_user,
                                password=source_cs_host_attr.vm_password,
                                localpath="{}/mongodb.conf".format(localpath),
                                remotepath="/data/mongodb.conf"):
                raise Exception("FTP Error")

            if not scp_get_file(server=source_host.address,
                                username=source_cs_host_attr.vm_user,
                                password=source_cs_host_attr.vm_password,
                                localpath="{}/td-agent.conf".format(localpath),
                                remotepath="/etc/td-agent/td-agent.conf"):
                raise Exception("FTP Error")

            target_host = source_host.future_host
            LOG.info(target_host)
            target_cs_host_attr = CS_HostAttr.objects.get(host=target_host)

            target_instance = source_instance.future_instance
            if target_instance.instance_type == target_instance.MONGODB_ARBITER:
                LOG.info("Checking host ssh...")
                host_ready = check_ssh(
                    server=target_host.address,
                    username=target_cs_host_attr.vm_user,
                    password=target_cs_host_attr.vm_password,
                    wait=5, interval=10)
                if not host_ready:
                    raise Exception(
                        str("Host %s is not ready..." % target_host))

            if not scp_put_file(server=target_host.address,
                                username=target_cs_host_attr.vm_user,
                                password=target_cs_host_attr.vm_password,
                                localpath="{}/mongodb.key".format(localpath),
                                remotepath="/data/mongodb.key"):
                raise Exception("FTP Error")

            if not scp_put_file(server=target_host.address,
                                username=target_cs_host_attr.vm_user,
                                password=target_cs_host_attr.vm_password,
                                localpath="{}/mongodb.conf".format(localpath),
                                remotepath="/data/mongodb.conf"):
                raise Exception("FTP Error")

            if not scp_put_file(server=target_host.address,
                                username=target_cs_host_attr.vm_user,
                                password=target_cs_host_attr.vm_password,
                                localpath="{}/td-agent.conf".format(localpath),
                                remotepath="/etc/td-agent/td-agent.conf"):
                raise Exception("FTP Error")

            script = test_bash_script_error()
            script += build_permission_script()
            script += build_start_database_script()
            script = build_context_script({}, script)
            output = {}
            LOG.info(script)
            return_code = exec_remote_command(
                server=target_host.address,
                username=target_cs_host_attr.vm_user,
                password=target_cs_host_attr.vm_password,
                command=script, output=output)
            LOG.info(output)
            if return_code != 0:
                raise Exception(str(output))

        shutil.rmtree(workflow_dict['region_migration_dir_infra_name'])

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, hosts in enumerate(permutations( workflow_dict['hosts'])): LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=hosts[0]) LOG.info("Cheking host ssh...") host_ready = check_ssh(server=hosts[0].address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % hosts[0]) return False host_nfsattr = HostAttr.objects.get(host=hosts[0]) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) contextdict = { 'EXPORTPATH': host_nfsattr.nfsaas_path, 'DATABASENAME': workflow_dict['name'], 'DBPASSWORD': get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.MYSQL).password, 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'ENGINE': 'mysql', 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } if len(workflow_dict['hosts']) > 1: LOG.info("Updating contexdict for %s" % hosts[0]) contextdict.update({ 'SERVERID': index + 1, 'IPMASTER': hosts[1].address, 'IPWRITE': workflow_dict['databaseinfraattr'][0].ip, 'IPREAD': workflow_dict['databaseinfraattr'][1].ip, 'MASTERPAIRNAME': workflow_dict['databaseinfra'].name, 'HOST01': workflow_dict['hosts'][0], 'HOST02': workflow_dict['hosts'][1], 'INSTANCE01': workflow_dict['instances'][0], 'INSTANCE02': workflow_dict['instances'][1], 'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh' }) LOG.info("Updating userdata for %s" % hosts[0]) cs_provider.update_userdata(vm_id=host_csattr.vm_id, contextdict=contextdict, userdata=planattr.userdata) LOG.info("Executing script on %s" % hosts[0]) return_code = exec_remote_command( server=hosts[0].address, username=host_csattr.vm_user, password=host_csattr.vm_password, command='/opt/dbaas/scripts/dbaas_userdata_script.sh') if return_code != 0: return False if len(workflow_dict['hosts']) > 1: for host in workflow_dict['hosts']: LOG.info("Executing script on %s" % hosts[0]) return_code = exec_remote_command( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=contextdict['SECOND_SCRIPT_FILE']) if return_code != 0: return False return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0013) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, instance in enumerate(workflow_dict['instances']): host = instance.hostname LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=host) LOG.info("Cheking host ssh...") host_ready = check_ssh(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % host) return False host_nfsattr = HostAttr.objects.get(host=host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) contextdict = { 'EXPORTPATH': host_nfsattr.nfsaas_path, 'DATABASENAME': workflow_dict['name'], 'DBPASSWORD': workflow_dict['databaseinfra'].password, 'HOSTADDRESS': instance.address, 'PORT': instance.port, 'ENGINE': 'redis', 'DATABASENAME': workflow_dict['name'], 'HOST': workflow_dict['hosts'][index].hostname.split('.')[0], 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, } LOG.info(contextdict) LOG.info("Updating userdata for %s" % host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) script = build_context_script(contextdict, planattr.userdata) #cs_provider.update_userdata( # vm_id=host_csattr.vm_id, contextdict=contextdict, userdata=planattr.userdata) LOG.info("Executing script on %s" % host) LOG.info(script) output = {} return_code = exec_remote_command( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: return False return True except Exception, e: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0016) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict): try: LOG.info("Getting cloudstack credentials...") statsd_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.STATSD) statsd_host, statsd_port = statsd_credentials.endpoint.split(':') for index, host in enumerate(workflow_dict['hosts']): LOG.info("Getting vm credentials...") host_csattr = CsHostAttr.objects.get(host=host) LOG.info("Cheking host ssh...") host_ready = check_ssh(server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, wait=5, interval=10) if not host_ready: LOG.warn("Host %s is not ready..." % host) return False instances_redis = Instance.objects.filter( hostname=host, instance_type=Instance.REDIS) instances_sentinel = Instance.objects.filter( hostname=host, instance_type=Instance.REDIS_SENTINEL) if instances_redis: host_nfsattr = HostAttr.objects.get(host=host) nfsaas_path = host_nfsattr.nfsaas_path only_sentinel = False instance_redis_address = instances_redis[0].address instance_redis_port = instances_redis[0].port else: nfsaas_path = "" only_sentinel = True instance_redis_address = '' instance_redis_port = '' if instances_sentinel: instance_sentinel_address = instances_sentinel[0].address instance_sentinel_port = instances_sentinel[0].port else: instance_sentinel_address = '' instance_sentinel_port = '' if index == 0: master_host = instance_redis_address master_port = instance_redis_port contextdict = { 'EXPORTPATH': nfsaas_path, 'DATABASENAME': workflow_dict['name'], 'DBPASSWORD': workflow_dict['databaseinfra'].password, 'HOSTADDRESS': instance_redis_address, 'PORT': instance_redis_port, 'ENGINE': 'redis', 'DATABASENAME': workflow_dict['name'], 'HOST': host.hostname.split('.')[0], 'STATSD_HOST': statsd_host, 'STATSD_PORT': statsd_port, 'IS_HA': workflow_dict['databaseinfra'].plan.is_ha, 'SENTINELMASTER': master_host, 'SENTINELMASTERPORT': master_port, 'SENTINELADDRESS': instance_sentinel_address, 'SENTINELPORT': instance_sentinel_port, 'MASTERNAME': workflow_dict['databaseinfra'].name, 'ONLY_SENTINEL': only_sentinel, } LOG.info(contextdict) LOG.info("Updating userdata for %s" % host) planattr = PlanAttr.objects.get(plan=workflow_dict['plan']) script = build_context_script(contextdict, planattr.userdata) LOG.info("Executing script on %s" % host) LOG.info(script) output = {} return_code = exec_remote_command( server=host.address, username=host_csattr.vm_user, password=host_csattr.vm_password, command=script, output=output) LOG.info(output) if return_code != 0: return False if index > 0 and instances_redis: client = instances_redis[0].databaseinfra.get_driver( ).get_client(instances_redis[0]) client.slaveof(master_host, master_port) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0016) workflow_dict['exceptions']['traceback'].append(traceback) return False
def do(self, workflow_dict):
    try:
        cloud_stack = workflow_dict['plan'].cs_plan_attributes.first()
        offering = cloud_stack.get_stronger_offering()
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        plan = workflow_dict['plan']

        for index, hosts in enumerate(permutations(workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=hosts[0])

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(
                server=hosts[0].address, username=host_csattr.vm_user,
                password=host_csattr.vm_password, wait=5, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % hosts[0])
                return False

            host_nfsattr = HostAttr.objects.get(host=hosts[0])

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'configuration': configuration,
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'GRAYLOG_ENDPOINT': graylog_endpoint
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % hosts[0])
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'IPWRITE': workflow_dict['databaseinfraattr'][0].ip,
                    'IPREAD': workflow_dict['databaseinfraattr'][1].ip,
                    'MASTERPAIRNAME': workflow_dict['databaseinfra'].name,
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'INSTANCE01': workflow_dict['instances'][0],
                    'INSTANCE02': workflow_dict['instances'][1],
                    'SECOND_SCRIPT_FILE': '/opt/dbaas/scripts/dbaas_second_script.sh',
                })

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template)

            host = hosts[0]
            host.update_os_description()
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script,
                    output=output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        if len(workflow_dict['hosts']) > 1:
            for hosts in permutations(workflow_dict['hosts']):
                script = plan.script.start_replication_template
                host = hosts[0]
                contextdict.update({'IPMASTER': hosts[1].address})
                script = build_context_script(contextdict, script)
                host_csattr = CsHostAttr.objects.get(host=host)
                LOG.info("Executing script on %s" % host)
                output = {}
                return_code = exec_remote_command(
                    server=host.address, username=host_csattr.vm_user,
                    password=host_csattr.vm_password, command=script,
                    output=output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    try:
        offering = workflow_dict['plan'].stronger_offering
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        replica_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.MYSQL_REPLICA)

        plan = workflow_dict['plan']

        for index, hosts in enumerate(permutations(workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host = hosts[0]

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(host, retries=60, wait=30, interval=10)
            if not host_ready:
                LOG.warn("Host %s is not ready..." % host)
                return False

            host_nfsattr = HostAttr.objects.get(host=host)

            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST': workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'configuration': configuration,
                'GRAYLOG_ENDPOINT': graylog_endpoint,
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contextdict for %s" % host)
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'REPLICA_USER': replica_credential.user,
                    'REPLICA_PASSWORD': replica_credential.password,
                })

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template)

            host.update_os_description()
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command_host(host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        if len(workflow_dict['hosts']) > 1:
            for hosts in permutations(workflow_dict['hosts']):
                script = plan.script.start_replication_template
                host = hosts[0]
                contextdict.update({'IPMASTER': hosts[1].address})
                script = build_context_script(contextdict, script)
                LOG.info("Executing script on %s" % host)
                output = {}
                return_code = exec_remote_command_host(host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. output: {}".format(
                        output)
                    raise Exception(error_msg)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
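# Note: the steps above repeat the same "render script, run it remotely,
# raise with stdout/stderr on a non-zero exit" sequence. The helper below is
# a hypothetical refactoring sketch of that convention only; it assumes the
# exec_remote_command_host() helper and the output-dict layout seen in the
# snippets above are importable, and is not part of the original codebase.
def run_script_or_raise(host, script):
    # Execute the rendered script on the host and fail loudly on error,
    # mirroring the error-message format used by the steps above.
    output = {}
    return_code = exec_remote_command_host(host, script, output)
    if return_code != 0:
        raise Exception(
            "Error executing script. Stdout: {} - stderr: {}".format(
                output.get('stdout'), output.get('stderr')))
    return output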