def get_dbaas_parameter_default_value(self, parameter_name):
    """Return the default value for *parameter_name* on this infra.

    Dashes in the name are normalized to underscores so the name can be
    looked up as an attribute on the configuration object built for this
    infra's current offering memory size.
    """
    from physical.configurations import configuration_factory

    attr_name = parameter_name.replace('-', '_')
    memory_mb = self.cs_dbinfra_offering.get().offering.memory_size_mb
    configuration = configuration_factory(self, memory_mb)
    return getattr(configuration, attr_name).default
def get_configuration(self):
    """Build the configuration object for this object's infra.

    NOTE(review): assigns ``self.plan`` onto the infra before calling the
    factory — presumably ``configuration_factory`` reads ``infra.plan``;
    confirm against the factory's implementation.
    """
    infra = self.infra
    infra.plan = self.plan
    return configuration_factory(infra, self.offering.memory_size_mb)
def get_configuration(self):
    """Return the configuration for this infra's offering.

    Returns ``None`` when no configuration is implemented for the engine
    (the factory raises ``NotImplementedError``).
    """
    try:
        return configuration_factory(
            self.infra, self.offering.memory_size_mb)
    except NotImplementedError:
        return None
def get_dbaas_parameter_default_value(self, parameter_name):
    """Return the default value for *parameter_name* on this object.

    Dashes in the name are normalized to underscores so the name can be
    looked up as an attribute on the configuration object built for this
    object's offering memory size.
    """
    from physical.configurations import configuration_factory

    parameter_name = parameter_name.replace('-', '_')
    # Dead commented-out code (old cs_dbinfra_offering lookup) removed.
    configuration = configuration_factory(
        self, self.offering.memory_size_mb)
    return getattr(configuration, parameter_name).default
def get_configuration(self):
    """Return the configuration for this infra.

    While a resize is in progress the resize's target offering is used;
    otherwise the plan's strongest offering. Returns ``None`` when no
    configuration is implemented for the engine.
    """
    resize = self.database.resizes.last()
    if resize and resize.is_running:
        offering = resize.target_offer.offering
    else:
        offering = self.cs_plan.get_stronger_offering()
    try:
        return configuration_factory(self.infra, offering.memory_size_mb)
    except NotImplementedError:
        return None
def do(self, workflow_dict):
    """Initialize MySQL on the provisioned hosts (CloudStack variant).

    Runs the plan's initialization, configuration and start scripts on
    each host over SSH and, when more than one host exists, runs the
    replication-start script for every host pair.

    Returns True on success; False on failure (the traceback and error
    code are appended to ``workflow_dict['exceptions']``).

    Fixes vs. previous revision: deprecated ``Logger.warn`` replaced with
    ``Logger.warning``; "Cheking" log typo corrected.
    """
    try:
        cloud_stack = workflow_dict['plan'].cs_plan_attributes.first()
        offering = cloud_stack.get_stronger_offering()
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        plan = workflow_dict['plan']

        # NOTE(review): permutations() makes each host see the next one
        # as its master — this pairing only makes sense for 1 or 2 hosts.
        for index, hosts in enumerate(permutations(
                workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host_csattr = CsHostAttr.objects.get(host=hosts[0])

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(server=hosts[0].address,
                                   username=host_csattr.vm_user,
                                   password=host_csattr.vm_password,
                                   wait=5, interval=10)
            if not host_ready:
                LOG.warning("Host %s is not ready..." % hosts[0])
                return False

            host_nfsattr = HostAttr.objects.get(host=hosts[0])
            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST':
                    workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'configuration': configuration,
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'GRAYLOG_ENDPOINT': graylog_endpoint
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contexdict for %s" % hosts[0])
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'IPWRITE': workflow_dict['databaseinfraattr'][0].ip,
                    'IPREAD': workflow_dict['databaseinfraattr'][1].ip,
                    'MASTERPAIRNAME': workflow_dict['databaseinfra'].name,
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'INSTANCE01': workflow_dict['instances'][0],
                    'INSTANCE02': workflow_dict['instances'][1],
                    'SECOND_SCRIPT_FILE':
                        '/opt/dbaas/scripts/dbaas_second_script.sh',
                })

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template)

            host = hosts[0]
            host.update_os_description()
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command(
                    server=host.address,
                    username=host_csattr.vm_user,
                    password=host_csattr.vm_password,
                    command=script,
                    output=output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        # Replication is wired only for HA setups (more than one host).
        if len(workflow_dict['hosts']) > 1:
            for hosts in permutations(workflow_dict['hosts']):
                script = plan.script.start_replication_template
                host = hosts[0]
                contextdict.update({'IPMASTER': hosts[1].address})
                script = build_context_script(contextdict, script)
                host_csattr = CsHostAttr.objects.get(host=host)
                LOG.info("Executing script on %s" % host)
                output = {}
                return_code = exec_remote_command(
                    server=host.address,
                    username=host_csattr.vm_user,
                    password=host_csattr.vm_password,
                    command=script,
                    output=output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)
        return True
    except Exception:
        # Workflow boundary: record the failure, never propagate.
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    """Initialize MongoDB on the provisioned instances.

    Generates a replica-set key for HA plans, runs the plan's
    initialization/configuration/start scripts on each instance's host,
    then starts replication from the first host for HA setups.

    Returns True on success; False on failure (the traceback and error
    code are appended to ``workflow_dict['exceptions']``).

    Fixes vs. previous revision: deprecated ``Logger.warn`` replaced with
    ``Logger.warning``; unused loop variable renamed to ``_``; "Cheking"
    log typo corrected.
    """
    try:
        cloud_stack = workflow_dict['plan'].cs_plan_attributes.first()
        offering = cloud_stack.get_stronger_offering()
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        # NOTE(review): `random` is not cryptographically secure; if this
        # key is security-sensitive, `secrets`/`os.urandom` would be the
        # proper source. Kept as-is to preserve behavior.
        mongodbkey = ''.join(
            random.choice(string.hexdigits) for _ in range(50))

        infra = workflow_dict['databaseinfra']
        if infra.plan.is_ha:
            infra.database_key = mongodbkey
            infra.save()
            workflow_dict['replicasetname'] = infra.get_driver(
            ).replica_set_name

        mongodb_password = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.MONGODB).password

        disk_offering = workflow_dict['plan'].disk_offering

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        plan = workflow_dict['plan']

        for index, instance in enumerate(workflow_dict['instances']):
            host = instance.hostname

            LOG.info("Getting vm credentials...")
            LOG.info("Checking host ssh...")
            host_ready = check_ssh(host, wait=5, interval=10)
            if not host_ready:
                LOG.warning("Host %s is not ready..." % host)
                return False

            host.update_os_description()

            # Arbiters carry no data, so no export path or password.
            if instance.instance_type == instance.MONGODB_ARBITER:
                contextdict = {
                    'HOST': workflow_dict['hosts'][
                        index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DRIVER_NAME': infra.get_driver().topology_name(),
                    'configuration': configuration,
                }
                databaserule = 'ARBITER'
            else:
                host_nfsattr = HostAttr.objects.get(host=host)
                contextdict = {
                    'EXPORTPATH': host_nfsattr.nfsaas_path,
                    'HOST': workflow_dict['hosts'][
                        index].hostname.split('.')[0],
                    'DATABASENAME': workflow_dict['name'],
                    'ENGINE': 'mongodb',
                    'DBPASSWORD': mongodb_password,
                    'DRIVER_NAME': infra.get_driver().topology_name(),
                    'configuration': configuration,
                }
                if index == 0:
                    databaserule = 'PRIMARY'
                else:
                    databaserule = 'SECONDARY'

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contexdict for %s" % host)
                contextdict.update({
                    'REPLICASETNAME': workflow_dict['replicasetname'],
                    'HOST01': workflow_dict['hosts'][0],
                    'HOST02': workflow_dict['hosts'][1],
                    'HOST03': workflow_dict['hosts'][2],
                    'MONGODBKEY': mongodbkey,
                    'DATABASERULE': databaserule,
                    'HOST': workflow_dict['hosts'][
                        index].hostname.split('.')[0],
                })
            else:
                contextdict.update({'DATABASERULE': databaserule})

            contextdict.update({
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'DISK_SIZE_IN_GB': disk_offering.size_gb(),
                'GRAYLOG_ENDPOINT': graylog_endpoint
            })

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template)
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command_host(
                    host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        # HA only: initiate the replica set from the first host.
        if len(workflow_dict['hosts']) > 1:
            scripts_to_run = plan.script.start_replication_template
            contextdict.update({
                'DBPASSWORD': mongodb_password,
                'DATABASERULE': 'PRIMARY'
            })
            scripts_to_run = build_context_script(contextdict,
                                                  scripts_to_run)
            host = workflow_dict['hosts'][0]
            output = {}
            return_code = exec_remote_command_host(host, scripts_to_run,
                                                   output)
            if return_code != 0:
                error_msg = "Error executing script. Stdout: {} - " \
                            "stderr: {}".format(output['stdout'],
                                                output['stderr'])
                raise Exception(error_msg)
        return True
    except Exception:
        # Workflow boundary: record the failure, never propagate.
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    """Initialize MySQL on the provisioned hosts (host-based variant).

    Runs the plan's initialization/configuration/start scripts on each
    host and, for HA setups, runs the replication-start script for every
    host pair using dedicated replica credentials.

    Returns True on success; False on failure (the traceback and error
    code are appended to ``workflow_dict['exceptions']``).

    Fixes vs. previous revision: deprecated ``Logger.warn`` replaced with
    ``Logger.warning``; "Cheking" log typo corrected.
    """
    try:
        offering = workflow_dict['plan'].stronger_offering
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        replica_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.MYSQL_REPLICA)

        plan = workflow_dict['plan']

        # NOTE(review): permutations() makes each host see the next one
        # as its master — this pairing only makes sense for 1 or 2 hosts.
        for index, hosts in enumerate(permutations(
                workflow_dict['hosts'])):
            LOG.info("Getting vm credentials...")
            host = hosts[0]

            LOG.info("Checking host ssh...")
            host_ready = check_ssh(host, retries=60, wait=30,
                                   interval=10)
            if not host_ready:
                LOG.warning("Host %s is not ready..." % host)
                return False

            host_nfsattr = HostAttr.objects.get(host=host)
            contextdict = {
                'EXPORTPATH': host_nfsattr.nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': get_credentials_for(
                    environment=workflow_dict['environment'],
                    credential_type=CredentialType.MYSQL).password,
                'HOST':
                    workflow_dict['hosts'][index].hostname.split('.')[0],
                'ENGINE': 'mysql',
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'configuration': configuration,
                'GRAYLOG_ENDPOINT': graylog_endpoint,
            }

            if len(workflow_dict['hosts']) > 1:
                LOG.info("Updating contexdict for %s" % host)
                contextdict.update({
                    'SERVERID': index + 1,
                    'IPMASTER': hosts[1].address,
                    'REPLICA_USER': replica_credential.user,
                    'REPLICA_PASSWORD': replica_credential.password,
                })

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template)

            host.update_os_description()
            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command_host(
                    host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

        # Replication is wired only for HA setups (more than one host).
        if len(workflow_dict['hosts']) > 1:
            for hosts in permutations(workflow_dict['hosts']):
                script = plan.script.start_replication_template
                host = hosts[0]
                contextdict.update({'IPMASTER': hosts[1].address})
                script = build_context_script(contextdict, script)
                LOG.info("Executing script on %s" % host)
                output = {}
                return_code = exec_remote_command_host(
                    host, script, output)
                if return_code != 0:
                    error_msg = ("Error executing script. "
                                 "output: {}".format(output))
                    raise Exception(error_msg)
        return True
    except Exception:
        # Workflow boundary: record the failure, never propagate.
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    """Initialize Redis (and Sentinel) on the provisioned hosts.

    For each host, discovers its Redis and Sentinel instances, builds
    the template context, runs the plan's initialization, configuration,
    start-database and start-replication scripts, and points slaves at
    the master elected at index 0.

    Returns True on success; False on failure (the traceback and error
    code are appended to ``workflow_dict['exceptions']``).

    Fixes vs. previous revision: deprecated ``Logger.warn`` replaced with
    ``Logger.warning``; "Cheking" log typo corrected.
    """
    try:
        offering = workflow_dict['plan'].stronger_offering
        configuration = configuration_factory(
            workflow_dict['databaseinfra'], offering.memory_size_mb)

        graylog_credential = get_credentials_for(
            environment=workflow_dict['databaseinfra'].environment,
            credential_type=CredentialType.GRAYLOG)
        graylog_endpoint = graylog_credential.get_parameter_by_name(
            'endpoint_log')

        plan = workflow_dict['plan']

        for index, host in enumerate(workflow_dict['hosts']):
            LOG.info("Getting vm credentials...")
            LOG.info("Checking host ssh...")
            host_ready = check_ssh(host, wait=5, interval=10)
            if not host_ready:
                LOG.warning("Host %s is not ready..." % host)
                return False

            host.update_os_description()

            instances_redis = Instance.objects.filter(
                hostname=host, instance_type=Instance.REDIS)
            instances_sentinel = Instance.objects.filter(
                hostname=host, instance_type=Instance.REDIS_SENTINEL)

            # A host with no Redis instance is a sentinel-only host.
            if instances_redis:
                host_nfsattr = HostAttr.objects.get(host=host)
                nfsaas_path = host_nfsattr.nfsaas_path
                only_sentinel = False
                instance_redis_address = instances_redis[0].address
                instance_redis_port = instances_redis[0].port
            else:
                nfsaas_path = ""
                only_sentinel = True
                instance_redis_address = ''
                instance_redis_port = ''

            if instances_sentinel:
                instance_sentinel_address = instances_sentinel[0].address
                instance_sentinel_port = instances_sentinel[0].port
            else:
                instance_sentinel_address = ''
                instance_sentinel_port = ''

            # The first host's Redis instance is the master; later
            # iterations reuse these values when pointing slaves at it.
            # NOTE(review): master_host/master_port are only bound at
            # index 0 — assumes hosts are iterated master-first; confirm.
            if index == 0:
                master_host = instance_redis_address
                master_port = instance_redis_port

            contextdict = {
                'EXPORTPATH': nfsaas_path,
                'DATABASENAME': workflow_dict['name'],
                'DBPASSWORD': workflow_dict['databaseinfra'].password,
                'HOSTADDRESS': instance_redis_address,
                'PORT': instance_redis_port,
                'ENGINE': 'redis',
                'HOST': host.hostname.split('.')[0],
                'DRIVER_NAME': workflow_dict[
                    'databaseinfra'].get_driver().topology_name(),
                'SENTINELMASTER': master_host,
                'SENTINELMASTERPORT': master_port,
                'SENTINELADDRESS': instance_sentinel_address,
                'SENTINELPORT': instance_sentinel_port,
                'MASTERNAME': workflow_dict['databaseinfra'].name,
                'ONLY_SENTINEL': only_sentinel,
                'HAS_PERSISTENCE': workflow_dict['plan'].has_persistence,
                'ENVIRONMENT': workflow_dict['databaseinfra'].environment,
                'configuration': configuration,
                'GRAYLOG_ENDPOINT': graylog_endpoint,
            }

            LOG.info(contextdict)

            scripts = (plan.script.initialization_template,
                       plan.script.configuration_template,
                       plan.script.start_database_template,
                       plan.script.start_replication_template)

            for script in scripts:
                LOG.info("Executing script on %s" % host)
                script = build_context_script(contextdict, script)
                output = {}
                return_code = exec_remote_command_host(
                    host, script, output)
                if return_code != 0:
                    error_msg = "Error executing script. Stdout: {} - " \
                                "stderr: {}".format(output['stdout'],
                                                    output['stderr'])
                    raise Exception(error_msg)

            # Point every non-master Redis instance at the master.
            if index > 0 and instances_redis:
                client = instances_redis[0].databaseinfra.get_driver(
                ).get_client(instances_redis[0])
                client.slaveof(master_host, master_port)
        return True
    except Exception:
        # Workflow boundary: record the failure, never propagate.
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0016)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False