def undo(self, workflow_dict): LOG.info("Running undo...") try: if 'databaseinfra' not in workflow_dict and 'hosts' not in workflow_dict: LOG.info("We could not find a databaseinfra inside the workflow_dict") return False if len(workflow_dict['hosts']) == 1: return True databaseinfraattr = DatabaseInfraAttr.objects.filter( databaseinfra=workflow_dict['databaseinfra']) cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) networkapi_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.NETWORKAPI) cs_provider = CloudStackProvider(credentials=cs_credentials, networkapi_credentials=networkapi_credentials) networkapi_equipment_id = workflow_dict.get('networkapi_equipment_id') for infra_attr in databaseinfraattr: networkapi_equipment_id = infra_attr.networkapi_equipment_id networkapi_ip_id = infra_attr.networkapi_ip_id if networkapi_ip_id: LOG.info("Removing network api IP for %s" % networkapi_ip_id) if not cs_provider.remove_networkapi_ip(equipment_id=networkapi_equipment_id, ip_id=networkapi_ip_id): return False LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id) if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id): return False LOG.info("Secondary ip deleted!") infra_attr.delete() LOG.info("Databaseinfraattr deleted!") if networkapi_equipment_id: cs_provider.remove_networkapi_equipment(equipment_id=networkapi_equipment_id) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0010) workflow_dict['exceptions']['traceback'].append(traceback) return False
def undo(self, workflow_dict): LOG.info("Running undo...") try: if 'databaseinfra' not in workflow_dict: LOG.info("We could not find a databaseinfra inside the workflow_dict") return False source_secondary_ip_ids = [secondary_ip.id for secondary_ip in workflow_dict['source_secondary_ips']] databaseinfraattr = DatabaseInfraAttr.objects.filter( databaseinfra=workflow_dict['databaseinfra'], equivalent_dbinfraattr=None).exclude(id__in=source_secondary_ip_ids) LOG.info("databaseinfraattr: {}".format(databaseinfraattr)) LOG.info("old infra ip: {}".format(workflow_dict['source_secondary_ips'])) cs_credentials = get_credentials_for( environment=workflow_dict['target_environment'], credential_type=CredentialType.CLOUDSTACK) networkapi_credentials = get_credentials_for( environment=workflow_dict['target_environment'], credential_type=CredentialType.NETWORKAPI) cs_provider = CloudStackProvider(credentials=cs_credentials, networkapi_credentials=networkapi_credentials) for infra_attr in databaseinfraattr: networkapi_equipment_id = infra_attr.networkapi_equipment_id networkapi_ip_id = infra_attr.networkapi_ip_id if networkapi_ip_id: LOG.info("Removing network api IP for %s" % networkapi_ip_id) if not cs_provider.remove_networkapi_ip(equipment_id=networkapi_equipment_id, ip_id=networkapi_ip_id): return False LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id) if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id): return False LOG.info("Secondary ip deleted!") infra_attr.delete() LOG.info("Databaseinfraattr deleted!") return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0010) workflow_dict['exceptions']['traceback'].append(traceback) return False
def __init__(self, instance):
    super(FoxHA, self).__init__(instance)
    self.mysql_fox_credentials = get_credentials_for(
        self.environment, CredentialType.MYSQL_FOXHA
    )
    self.mysql_replica_credentials = get_credentials_for(
        self.environment, CredentialType.MYSQL_REPLICA
    )
    self.foxha_credentials = get_credentials_for(
        self.environment, CredentialType.FOXHA
    )
    self.dbaas_api = DatabaseAsAServiceApi(
        self.infra, self.foxha_credentials
    )
    self.provider = FoxHAProvider(self.dbaas_api)
def stop_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances_detail = workflow_dict['instances_detail']
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            stopped = cs_provider.stop_virtual_machine(vm_id=host_csattr.vm_id)
            if not stopped:
                raise Exception("Could not stop host {}".format(host))

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def credential(self):
    # TODO Remove hard coded "Cloudstack"
    if not self._credential:
        self._credential = get_credentials_for(
            self.environment, CredentialType.HOST_PROVIDER)
    return self._credential
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        for source_host in workflow_dict['source_hosts']:
            host_attr = HostAttr.objects.get(host=source_host)

            LOG.info("Destroying virtualmachine %s" % host_attr.vm_id)
            cs_provider.destroy_virtual_machine(
                project_id=cs_credentials.project,
                environment=workflow_dict['source_environment'],
                vm_id=host_attr.vm_id)

            host_attr.delete()
            LOG.info("HostAttr deleted!")

            source_host.delete()
            LOG.info("Source host deleted")

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def undo(self, workflow_dict):
    try:
        original_cloudstackpack = workflow_dict['original_cloudstackpack']
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        original_serviceofferingid = original_cloudstackpack.offering.serviceofferingid

        if workflow_dict['offering_changed']:
            host = workflow_dict['host']
            host_csattr = HostAttr.objects.get(host=host)
            offering_changed = cs_provider.change_service_for_vm(
                vm_id=host_csattr.vm_id,
                serviceofferingid=original_serviceofferingid)
            if not offering_changed:
                raise Exception(
                    "Could not change offering for Host {}".format(host))
        else:
            LOG.info('No resize to instance {}'.format(
                workflow_dict['instance']))

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def start_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances_detail = workflow_dict['instances_detail']
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            started = cs_provider.start_virtual_machine(vm_id=host_csattr.vm_id)
            if not started:
                raise Exception("Could not start host {}".format(host))

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            host_ready = check_ssh(
                server=host.address,
                username=host_csattr.vm_user,
                password=host_csattr.vm_password,
                wait=5, interval=10)
            if not host_ready:
                error = "Host %s is not ready..." % host
                LOG.warn(error)
                raise Exception(error)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def get_network_from_ip(ip, database_environment):
    net_api_credentials = get_credentials_for(
        environment=database_environment,
        credential_type=CredentialType.NETWORKAPI
    )

    ip_client = Ip.Ip(
        net_api_credentials.endpoint,
        net_api_credentials.user,
        net_api_credentials.password
    )

    ips = ip_client.get_ipv4_or_ipv6(ip)
    ips = ips['ips']
    if type(ips) != list:
        ips = [ips]
    net_ip = ips[0]

    network_client = Network.Network(
        net_api_credentials.endpoint,
        net_api_credentials.user,
        net_api_credentials.password
    )

    network = network_client.get_network_ipv4(net_ip['networkipv4'])
    network = network['network']

    return '{}.{}.{}.{}/{}'.format(
        network['oct1'], network['oct2'], network['oct3'],
        network['oct4'], network['block']
    )
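# A minimal usage sketch for get_network_from_ip (the environment name and
# address below are hypothetical, not taken from the original code): given an
# instance address and its environment, the function resolves the owning
# network through the Network API and returns it in CIDR notation.
#
#   env = Environment.objects.get(name='dev')          # hypothetical name
#   get_network_from_ip('10.0.1.15', env)               # e.g. '10.0.1.0/24'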
def start_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        host = workflow_dict['host']
        host_csattr = HostAttr.objects.get(host=host)
        started = cs_provider.start_virtual_machine(vm_id=host_csattr.vm_id)
        if not started:
            raise Exception("Could not start host {}".format(host))

        host_ready = check_ssh(
            server=host.address,
            username=host_csattr.vm_user,
            password=host_csattr.vm_password,
            retries=50, wait=20, interval=30)
        if not host_ready:
            error = "Host %s is not ready..." % host
            LOG.warn(error)
            raise Exception(error)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    try:
        if 'databaseinfra' not in workflow_dict:
            return False

        dns_credentials = get_credentials_for(
            environment=workflow_dict['environment'],
            credential_type=CredentialType.DNSAPI)

        dns_list = DatabaseInfraDNSList.objects.filter(
            databaseinfra=workflow_dict['databaseinfra'].id)

        for dns in dns_list:
            LOG.info("Checking dns %s on %s" % (dns.dns, dns_credentials.project))
            check_dns(dns.dns, dns_credentials.project)

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0005)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def stop_vm(workflow_dict):
    try:
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        instances_detail = workflow_dict['instances_detail']
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            stopped = cs_provider.stop_virtual_machine(vm_id=host_csattr.vm_id)
            if not stopped:
                raise Exception("Could not stop host {}".format(host))

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def undo(self, workflow_dict):
    try:
        if 'databaseinfra' not in workflow_dict:
            return False

        database = workflow_dict['databaseinfra'].databases.get()
        databaseinfra = database.databaseinfra
        acl_credential = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.ACLAPI)
        acl_client = AclClient(
            acl_credential.endpoint, acl_credential.user,
            acl_credential.password, database.environment)

        for database_bind in database.acl_binds.all():
            infra_instances_binds = DatabaseInfraInstanceBind.objects.filter(
                databaseinfra=databaseinfra,
                bind_address=database_bind.bind_address)
            try:
                helpers.unbind_address(
                    database_bind, acl_client, infra_instances_binds, True)
            except Exception as e:
                LOG.warn(e)
                continue

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0019)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def database_host_metrics_view(self, request, database, hostname):
    from util.metrics.metrics import get_metric_datapoints_for

    URL = get_credentials_for(
        environment=database.environment,
        credential_type=CredentialType.GRAPHITE).endpoint
    title = "{} Metrics".format(database.name)

    if request.method == 'GET':
        engine = database.infra.engine_name
        db_name = database.name
        hosts = []
        for host in Host.objects.filter(
                instance__databaseinfra=database.infra).distinct():
            hosts.append(host.hostname.split('.')[0])

        graph_data = get_metric_datapoints_for(
            engine, db_name, hostname,
            url=URL, granurality='10seconds', from_option='2hours')

    return render_to_response(
        "logical/database/metrics/metrics.html",
        locals(),
        context_instance=RequestContext(request))
def check_acl_service_and_get_unit_network(database, data,
                                           ignore_ip_error=False):
    try:
        acl_credential = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.ACLAPI
        )
    except IndexError:
        error = 'The {} does not have integration with ACLAPI'.format(
            database.environment
        )
        return log_and_response(
            msg=None, e=error, http_status=status.HTTP_201_CREATED
        )

    health_check_info = acl_credential.get_parameters_by_group('hc')
    try:
        health_check_url = (acl_credential.endpoint
                            + health_check_info['health_check_url'])
        simple_hc = simple_health_check.SimpleHealthCheck(
            health_check_url=health_check_url,
            service_key=health_check_info['key_name'],
            redis_client=REDIS_CLIENT,
            http_client=requests,
            http_request_exceptions=(Exception,),
            verify_ssl=False,
            health_check_request_timeout=int(health_check_info['timeout'])
        )
    except KeyError as e:
        msg = "AclApi Credential configured improperly."
        return log_and_response(
            msg=msg, e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
        )

    try:
        simple_hc.check_service()
    except simple_health_check.HealthCheckError as e:
        LOG.warn(e)
        msg = ("We are experiencing errors with the acl api, please try again "
               "later.")
        return log_and_response(
            msg=msg, e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
    except Exception as e:
        LOG.warn(e)

    try:
        return get_network_from_ip(
            data.get('unit-host'), database.environment
        )
    except Exception as e:
        LOG.warn(e)
        msg = ("We are experiencing errors with the network api, please try "
               "to get the network again later")
        if not ignore_ip_error:
            return log_and_response(
                msg=msg, e=e,
                http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )
def vm_credential(self):
    if not self._vm_credential:
        self._vm_credential = get_credentials_for(
            self.environment, CredentialType.VM,
        )
    return self._vm_credential
def credential(self):
    if not self._credential:
        self._credential = get_credentials_for(
            self.environment, CredentialType.HOST_PROVIDER
        )
    return self._credential
def undo(self, workflow_dict):
    try:
        if 'databaseinfra' not in workflow_dict:
            return False

        action = 'deny'
        database = workflow_dict['databaseinfra'].databases.get()

        for database_bind in database.acl_binds.all():
            acl_environment, acl_vlan = database_bind.bind_address.split('/')
            data = {"kind": "object#acl", "rules": []}
            default_options = {
                "protocol": "tcp",
                "source": "",
                "destination": "",
                "description": "{} access for database {} in {}".format(
                    database_bind.bind_address, database.name,
                    database.environment.name),
                "action": action,
                "l4-options": {"dest-port-start": "", "dest-port-op": "eq"}
            }
            LOG.info("Default options: {}".format(default_options))

            databaseinfra = database.infra
            infra_instances_binds = DatabaseInfraInstanceBind.objects.filter(
                databaseinfra=databaseinfra,
                bind_address=database_bind.bind_address)

            for infra_instance_bind in infra_instances_binds:
                custom_options = copy.deepcopy(default_options)
                custom_options['source'] = database_bind.bind_address
                custom_options['destination'] = \
                    infra_instance_bind.instance + '/32'
                custom_options['l4-options']['dest-port-start'] = \
                    infra_instance_bind.instance_port
                data['rules'].append(custom_options)

            acl_credential = get_credentials_for(
                environment=database.environment,
                credential_type=CredentialType.ACLAPI)
            acl_client = AclClient(
                acl_credential.endpoint, acl_credential.user,
                acl_credential.password)

            LOG.info("Data used on payload: {}".format(data))
            acl_client.revoke_acl_for(
                environment=acl_environment, vlan=acl_vlan, payload=data)

            infra_instances_binds.delete()
            database_bind.delete()

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0019)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def undo(self, workflow_dict): LOG.info("Running undo...") try: cs_credentials = get_credentials_for( environment=workflow_dict['environment'], credential_type=CredentialType.CLOUDSTACK) cs_provider = CloudStackProvider(credentials=cs_credentials) instances = workflow_dict['databaseinfra'].instances.all() if not instances: for vm_id in workflow_dict['vms_id']: cs_provider.destroy_virtual_machine( project_id=cs_credentials.project, environment=workflow_dict['environment'], vm_id=vm_id) for host in workflow_dict['hosts']: host_attr = HostAttr.objects.filter(host=host) host.delete() LOG.info("Host deleted!") if host_attr: host_attr[0].delete() LOG.info("HostAttr deleted!") for instance in instances: host = instance.hostname host_attr = HostAttr.objects.get(host=host) LOG.info("Destroying virtualmachine %s" % host_attr.vm_id) cs_provider.destroy_virtual_machine( project_id=cs_credentials.project, environment=workflow_dict['environment'], vm_id=host_attr.vm_id) host_attr.delete() LOG.info("HostAttr deleted!") instance.delete() LOG.info("Instance deleted") host.delete() LOG.info("Host deleted!") return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0011) workflow_dict['exceptions']['traceback'].append(traceback) return False
def __init__(self, instance):
    super(MetricsCollector, self).__init__(instance)
    self.credential = get_credentials_for(
        self.environment, CredentialType.TELEGRAF)
    self.collector_allowed = self.credential.get_parameter_by_name(
        'collector_allowed')
    self.kafka_topic = self.credential.get_parameter_by_name('kafka_topic')
def undo(self, workflow_dict):
    try:
        original_cloudstackpack = workflow_dict['original_cloudstackpack']
        environment = workflow_dict['environment']
        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        original_serviceofferingid = original_cloudstackpack.offering.serviceofferingid

        if workflow_dict['offering_changed']:
            host = workflow_dict['host']
            host_csattr = HostAttr.objects.get(host=host)
            offering_changed = cs_provider.change_service_for_vm(
                vm_id=host_csattr.vm_id,
                serviceofferingid=original_serviceofferingid)
            if not offering_changed:
                raise Exception("Could not change offering for Host {}".format(host))
        else:
            LOG.info('No resize to instance {}'.format(workflow_dict['instance']))

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def credential(self):
    if not self._credential:
        self._credential = get_credentials_for(
            self.environment, CredentialType.VIP_PROVIDER
        )
    return self._credential
def credential(self):
    if not self._credential:
        self._credential = get_credentials_for(
            Environment.objects.get(name='prod'),
            CredentialType.VIP_PROVIDER
        )
    return self._credential
def undo(self, workflow_dict):
    try:
        if 'databaseinfra' not in workflow_dict:
            return False

        database = workflow_dict['databaseinfra'].databases.get()
        databaseinfra = database.databaseinfra
        acl_credential = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.ACLAPI)
        acl_client = AclClient(
            acl_credential.endpoint,
            acl_credential.user,
            acl_credential.password,
            database.environment)

        for database_bind in database.acl_binds.all():
            infra_instances_binds = DatabaseInfraInstanceBind.objects.filter(
                databaseinfra=databaseinfra,
                bind_address=database_bind.bind_address)
            try:
                helpers.unbind_address(
                    database_bind, acl_client, infra_instances_binds, True)
            except Exception as e:
                LOG.warn(e)
                continue

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0019)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    try:
        database = workflow_dict['database']
        cloudstackpack = workflow_dict['cloudstackpack']
        instances_detail = workflow_dict['instances_detail']
        environment = workflow_dict['environment']

        cs_credentials = get_credentials_for(
            environment=environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)
        serviceofferingid = cloudstackpack.offering.serviceofferingid

        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            offering_changed = cs_provider.change_service_for_vm(
                vm_id=host_csattr.vm_id,
                serviceofferingid=serviceofferingid)
            if not offering_changed:
                raise Exception(
                    "Could not change offering for Host {}".format(host))
            instance_detail['offering_changed'] = True

        LOG.info('Updating offering DatabaseInfra.')
        databaseinfraoffering = DatabaseInfraOffering.objects.get(
            databaseinfra=database.databaseinfra)
        databaseinfraoffering.offering = cloudstackpack.offering
        databaseinfraoffering.save()

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def get_network_from_ip(ip, database_environment):
    net_api_credentials = get_credentials_for(
        environment=database_environment,
        credential_type=CredentialType.NETWORKAPI
    )

    ip_client = Ip.Ip(
        net_api_credentials.endpoint,
        net_api_credentials.user,
        net_api_credentials.password
    )

    ips = ip_client.get_ipv4_or_ipv6(ip)
    ips = ips['ips']
    if type(ips) != list:
        ips = [ips]
    net_ip = ips[0]

    network_client = Network.Network(
        net_api_credentials.endpoint,
        net_api_credentials.user,
        net_api_credentials.password
    )

    network = network_client.get_network_ipv4(net_ip['networkipv4'])
    network = network['network']

    return (network['oct1'] + '.' + network['oct2'] + '.' +
            network['oct3'] + '.' + network['oct4'] + '/' + network['block'])
def metricdetail_view(self, request, database_id):
    from util.metrics.metrics import get_metric_datapoints_for

    hostname = request.GET.get('hostname')
    metricname = request.GET.get('metricname')

    database = Database.objects.get(id=database_id)
    engine = database.infra.engine_name
    db_name = database.name
    URL = get_credentials_for(
        environment=database.environment,
        credential_type=CredentialType.GRAPHITE).endpoint

    from_option = request.POST.get('change_from') or '2hours'
    granurality = self.get_granurality(from_option) or '20minutes'
    from_options = self.build_select_options(
        from_option, self.get_from_options())

    graph_data = get_metric_datapoints_for(
        engine, db_name, hostname,
        url=URL, metric_name=metricname,
        granurality=granurality, from_option=from_option)

    title = "{} {} Metric".format(
        database.name, graph_data[0]["graph_name"])

    show_filters = Configuration.get_by_name_as_int('metric_filters')
    if graph_data[0]['normalize_series'] == True:
        show_filters = False

    return render_to_response(
        "logical/database/metrics/metricdetail.html",
        locals(),
        context_instance=RequestContext(request))
def __init__(self, instance):
    super(MetricsCollector, self).__init__(instance)
    self.credential = get_credentials_for(
        self.environment, CredentialType.TELEGRAF)
    self.collector_allowed = self.credential.get_parameter_by_name(
        'collector_allowed')
    self.kafka_topic = self.credential.get_parameter_by_name(
        'kafka_topic')
def _get_fox_provider(self, driver):
    databaseinfra = driver.databaseinfra

    foxha_credentials = get_credentials_for(
        environment=databaseinfra.environment,
        credential_type=CredentialType.FOXHA)
    dbaas_api = DatabaseAsAServiceApi(databaseinfra, foxha_credentials)
    return FoxHAProvider(dbaas_api)
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        for infra_attr in workflow_dict['source_secondary_ips']:
            networkapi_equipment_id = infra_attr.networkapi_equipment_id
            networkapi_ip_id = infra_attr.networkapi_ip_id

            if networkapi_ip_id:
                LOG.info("Removing network api IP for %s" % networkapi_ip_id)
                ip_removed = cs_provider.remove_networkapi_ip(
                    equipment_id=networkapi_equipment_id,
                    ip_id=networkapi_ip_id)
                if not ip_removed:
                    return False

            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False
            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def get_project_id(self, host):
    env = self.get_env(host)
    try:
        credential = get_credentials_for(env, CredentialType.CLOUDSTACK)
    except IndexError:
        return None
    return credential and credential.project
def get_log_url(self):
    from util import get_credentials_for
    from util.laas import get_group_name
    from dbaas_credentials.models import CredentialType

    credential = get_credentials_for(
        environment=self.environment,
        credential_type=CredentialType.LOGNIT)
    # print credential.endpoint, get_group_name(self)
    url = "%s%s" % (credential.endpoint, get_group_name(self))
    return "<a href='%s' target='_blank'>%s</a>" % (url, url)
def _get_fox_provider(self, driver):
    databaseinfra = driver.databaseinfra

    foxha_credentials = get_credentials_for(
        environment=databaseinfra.environment,
        credential_type=CredentialType.FOXHA
    )
    dbaas_api = DatabaseAsAServiceApi(databaseinfra, foxha_credentials)
    return FoxHAProvider(dbaas_api)
def database_dex_analyze_view(self, request, database_id):
    import json
    import random
    from dbaas_laas.provider import LaaSProvider
    from util import get_credentials_for
    from util.laas import get_group_name
    from dbaas_credentials.models import CredentialType
    import os
    import string
    from datetime import datetime, timedelta

    def generate_random_string(length,
                               stringset=string.ascii_letters + string.digits):
        return ''.join([stringset[i % len(stringset)]
                        for i in [ord(x) for x in os.urandom(length)]])

    database = Database.objects.get(id=database_id)

    if database.status != Database.ALIVE or not database.database_status.is_alive:
        self.message_user(
            request,
            "Database is not alive and cannot be analyzed",
            level=messages.ERROR)
        url = reverse('admin:logical_database_changelist')
        return HttpResponseRedirect(url)

    if database.is_beeing_used_elsewhere():
        self.message_user(
            request,
            "Database cannot be analyzed because it is in use by another task.",
            level=messages.ERROR)
        url = reverse('admin:logical_database_changelist')
        return HttpResponseRedirect(url)

    credential = get_credentials_for(
        environment=database.environment,
        credential_type=CredentialType.LAAS)

    db_name = database.name
    environment = database.environment
    endpoint = credential.endpoint
    username = credential.user
    password = credential.password
    lognit_environment = credential.get_parameter_by_name('lognit_environment')

    provider = LaaSProvider()
    group_name = get_group_name(database)

    today = (datetime.now()).strftime('%Y%m%d')
    yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y%m%d')
    uri = "group:{} text:query date:[{} TO {}] time:[000000 TO 235959]".format(
        group_name, yesterday, today)

    parsed_logs = ''
    database_logs = provider.get_logs_for_group(
        environment, lognit_environment, uri)
    try:
        database_logs = json.loads(database_logs)
    except Exception:
        pass
def undo(self, workflow_dict):
    try:
        if 'databaseinfra' not in workflow_dict:
            return False

        action = 'deny'
        database = workflow_dict['databaseinfra'].databases.get()

        for database_bind in database.acl_binds.all():
            acl_environment, acl_vlan = database_bind.bind_address.split('/')
            data = {"kind": "object#acl", "rules": []}
            default_options = {
                "protocol": "tcp",
                "source": "",
                "destination": "",
                "description": "{} access for database {} in {}".format(
                    database_bind.bind_address, database.name,
                    database.environment.name),
                "action": action,
                "l4-options": {"dest-port-start": "", "dest-port-op": "eq"}
            }
            LOG.info("Default options: {}".format(default_options))

            databaseinfra = database.infra
            infra_instances_binds = DatabaseInfraInstanceBind.objects.filter(
                databaseinfra=databaseinfra,
                bind_address=database_bind.bind_address)

            for infra_instance_bind in infra_instances_binds:
                custom_options = copy.deepcopy(default_options)
                custom_options['source'] = database_bind.bind_address
                custom_options['destination'] = \
                    infra_instance_bind.instance + '/32'
                custom_options['l4-options']['dest-port-start'] = \
                    infra_instance_bind.instance_port
                data['rules'].append(custom_options)

            acl_credential = get_credentials_for(
                environment=database.environment,
                credential_type=CredentialType.ACLAPI)
            acl_client = AclClient(
                acl_credential.endpoint, acl_credential.user,
                acl_credential.password)

            LOG.info("Data used on payload: {}".format(data))
            acl_client.revoke_acl_for(
                environment=acl_environment, vlan=acl_vlan, payload=data)

            infra_instances_binds.delete()
            database_bind.delete()

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0019)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    try:
        cs_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.CLOUDSTACK)
        networkapi_credentials = get_credentials_for(
            environment=workflow_dict['source_environment'],
            credential_type=CredentialType.NETWORKAPI)

        cs_provider = CloudStackProvider(
            credentials=cs_credentials,
            networkapi_credentials=networkapi_credentials)

        for infra_attr in workflow_dict['source_secondary_ips']:
            networkapi_equipment_id = infra_attr.networkapi_equipment_id
            networkapi_ip_id = infra_attr.networkapi_ip_id

            if networkapi_ip_id:
                LOG.info("Removing network api IP for %s" % networkapi_ip_id)
                ip_removed = cs_provider.remove_networkapi_ip(
                    equipment_id=networkapi_equipment_id,
                    ip_id=networkapi_ip_id)
                if not ip_removed:
                    return False

            LOG.info("Removing secondary_ip for %s" % infra_attr.cs_ip_id)
            if not cs_provider.remove_secondary_ips(infra_attr.cs_ip_id):
                return False
            LOG.info("Secondary ip deleted!")

            infra_attr.delete()
            LOG.info("Databaseinfraattr deleted!")

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0010)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def vm_credential(self):
    # TODO Remember to set the project when there is a different provider
    if not self._vm_credential:
        self._vm_credential = get_credentials_for(
            self.environment, CredentialType.VM,
        )
    return self._vm_credential
def set_replication_user_not_require_ssl(self, instance=None):
    LOG.info("Setting replication user to NOT require SSL")
    replica_credential = get_credentials_for(
        self.databaseinfra.environment, CredentialType.MYSQL_REPLICA)
    query = "GRANT USAGE ON *.* TO '{}'@'%' REQUIRE NONE".format(
        replica_credential.user)
    self.query(query, instance)
def get_initial_infra_credentials(self):
    credential = get_credentials_for(
        environment=self.databaseinfra.environment,
        credential_type=self.credential_type)
    initusers = credential.get_parameters_by_group('inituser')
    if len(initusers) == 0:
        raise Exception('There is no initial user in database credentials')

    for init_user, init_password in initusers.items():
        break

    return init_user, init_password
def get_engine_credentials(engine, environment):
    engine = engine.lower()
    if re.match(r'^mongo.*', engine):
        credential_type = CredentialType.MONGODB
    elif re.match(r'^mysql.*', engine):
        credential_type = CredentialType.MYSQL

    return get_credentials_for(
        environment=environment,
        credential_type=credential_type)
def get_project_id(self, host):
    env = self.get_env(host)
    try:
        credential = get_credentials_for(
            env, CredentialType.CLOUDSTACK)
    except IndexError:
        return None
    return credential and credential.project
def get_engine_credentials(engine, environment):
    engine = engine.lower()
    if engine.startswith('mongo'):
        credential_type = CredentialType.MONGODB
    elif engine.startswith(('mysql', 'redis')):
        credential_type = CredentialType.MYSQL

    return get_credentials_for(
        environment=environment, credential_type=credential_type)
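# The get_engine_credentials variants above leave credential_type unbound when
# the engine name matches none of the prefixes, so an unknown engine surfaces
# as an UnboundLocalError instead of a clear message. A minimal defensive
# sketch; the explicit ValueError branch is an addition for illustration, not
# part of the original code.
def get_engine_credentials(engine, environment):
    engine = engine.lower()
    if engine.startswith('mongo'):
        credential_type = CredentialType.MONGODB
    elif engine.startswith(('mysql', 'redis')):
        credential_type = CredentialType.MYSQL
    else:
        # Hypothetical guard: fail fast with a descriptive error.
        raise ValueError("Unsupported engine: {}".format(engine))

    return get_credentials_for(
        environment=environment, credential_type=credential_type)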
def __init__(self, instance):
    super(SSL, self).__init__(instance)
    self.credential = get_credentials_for(
        self.environment, CredentialType.PKI)
    self.certificate_allowed = self.credential.get_parameter_by_name(
        'certificate_allowed')
    self.master_ssl_ca = self.credential.get_parameter_by_name(
        'master_ssl_ca')
    self.certificate_type = self.credential.get_parameter_by_name(
        'certificate_type')
    self.ssl_files = SSLFiles()
def __init__(self, instance):
    super(VmStep, self).__init__(instance)
    self.driver = self.infra.get_driver()
    self.cs_credentials = get_credentials_for(
        environment=self.environment,
        credential_type=CredentialType.CLOUDSTACK
    )
    self.cs_provider = CloudStackProvider(credentials=self.cs_credentials)
def credential(self):
    if not self._credential:
        try:
            self._credential = get_credentials_for(
                self.environment, CredentialType.ACLFROMHELL)
        except IndexError:
            raise Exception(
                "Credential ACLFROMHELL for env {} not found".format(
                    self.environment.name))
    return self._credential
def set_replication_user_not_require_ssl(self, instance=None):
    LOG.info("Setting replication user to NOT require SSL")
    replica_credential = get_credentials_for(
        self.databaseinfra.environment, CredentialType.MYSQL_REPLICA
    )
    query = "GRANT USAGE ON *.* TO '{}'@'%' REQUIRE NONE".format(
        replica_credential.user)
    self.query(query, instance)
def __laas_log_url(self):
    if self.databaseinfra.plan.is_pre_provisioned:
        return ""

    from util import get_credentials_for
    from util.laas import get_group_name
    from dbaas_credentials.models import CredentialType

    credential = get_credentials_for(
        environment=self.environment,
        credential_type=CredentialType.LOGNIT)
    return credential.endpoint + get_group_name(self)
def __init__(self, instance):
    super(SSL, self).__init__(instance)
    self.credential = get_credentials_for(
        self.environment, CredentialType.PKI)
    self.certificate_allowed = self.credential.get_parameter_by_name(
        'certificate_allowed')
    self.master_ssl_ca = self.credential.get_parameter_by_name(
        'master_ssl_ca')
    self.certificate_type = self.credential.get_parameter_by_name(
        'certificate_type')
    self.ssl_files = SSLFiles()
def create_host_attr(self, host, vm_id, bundle):
    from dbaas_cloudstack.models import HostAttr

    vm_credentials = get_credentials_for(
        environment=self.environment,
        credential_type=CredentialType.VM)

    host_attr = HostAttr()
    host_attr.vm_id = vm_id
    host_attr.host = host
    host_attr.vm_user = vm_credentials.user
    host_attr.vm_password = vm_credentials.password
    host_attr.bundle = bundle
    host_attr.save()
def credential_parameter_by_name(request, env_id, param_name):
    try:
        env = Environment.objects.get(id=env_id)
        credential = get_credentials_for(env, CredentialType.HOST_PROVIDER)
    except (IndexError, Environment.DoesNotExist):
        msg = ''
    else:
        msg = credential.get_parameter_by_name(param_name)

    output = json.dumps({'msg': msg or ''})
    return HttpResponse(output, content_type="application/json")
def get_engine_credentials(engine, environment):
    engine = engine.lower()
    if re.match(r'^mongo.*', engine):
        credential_type = CredentialType.MONGODB
    elif re.match(r'^mysql.*', engine):
        credential_type = CredentialType.MYSQL
    elif re.match(r'^redis.*', engine):
        credential_type = CredentialType.MYSQL

    return get_credentials_for(
        environment=environment, credential_type=credential_type)
def undo(self, workflow_dict): LOG.info("Running undo...") try: database = workflow_dict['database'] databaseinfra = workflow_dict['databaseinfra'] acl_credential = get_credentials_for( environment=database.environment, credential_type=CredentialType.ACLAPI) acl_client = AclClient(acl_credential.endpoint, acl_credential.user, acl_credential.password, database.environment) instances = databaseinfra.instances.filter( future_instance__isnull=False) databaseinfraattr_instances = DatabaseInfraAttr.objects.filter( databaseinfra=databaseinfra, equivalent_dbinfraattr__isnull=False) instance_address_list = [] for instance in instances: instance_address_list.append(instance.address) for instance in databaseinfraattr_instances: instance_address_list.append(instance.ip) for database_bind in database.acl_binds.all(): if helpers.bind_address( database_bind, acl_client, instances=instances, infra_attr_instances=databaseinfraattr_instances): continue else: LOG.error("The AclApi is not working properly.") database_bind.bind_status = ERROR database_bind.save() DatabaseInfraInstanceBind.objects.filter( databaseinfra=databaseinfra, bind_address=database_bind.bind_address, instance__in=instance_address_list).update( bind_status=ERROR) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0020) workflow_dict['exceptions']['traceback'].append(traceback) return False
def start_replication_parameters(self, instance):
    base = self.initialization_parameters(instance)

    replica_credential = get_credentials_for(
        self.databaseinfra.environment, CredentialType.MYSQL_REPLICA)
    base['REPLICA_USER'] = replica_credential.user
    base['REPLICA_PASSWORD'] = replica_credential.password

    hosts = set(self.databaseinfra.hosts)
    hosts.discard(instance.hostname)
    base['IPMASTER'] = hosts.pop().address

    return base
def do(self, workflow_dict):
    try:
        flipper_credentials = get_credentials_for(
            workflow_dict['source_environment'], CredentialType.FLIPPER)
        flipper_vip = flipper_credentials.get_parameter_by_name('vip')

        for host in workflow_dict['target_hosts']:
            cs_host_attr = CS_HostAttr.objects.get(host=host)
            source_host = workflow_dict['source_hosts'][0]
            nf_host_attr = NF_HostAttr.objects.get(host=source_host)

            script = test_bash_script_error()
            script += build_mount_snapshot_volume_script()
            script += build_remove_deprecated_files_script()
            script += build_permission_script()
            script += build_start_database_script()
            script += build_flipper_script()

            context_dict = {
                'EXPORT_PATH': nf_host_attr.nfsaas_path,
                'SNAPSHOPT_NAME': workflow_dict['snapshot_name'],
                'VIP_FLIPPER': flipper_vip,
                'IPWRITE': workflow_dict['target_secondary_ips'][0].ip,
                'HOST01': workflow_dict['target_hosts'][0],
                'HOST02': workflow_dict['target_hosts'][1]
            }
            script = build_context_script(context_dict, script)

            output = {}
            LOG.info(script)
            return_code = exec_remote_command(
                server=host.address,
                username=cs_host_attr.vm_user,
                password=cs_host_attr.vm_password,
                command=script,
                output=output)
            LOG.info(output)
            if return_code != 0:
                raise Exception(str(output))

        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def credential(self):
    if not self._credential:
        try:
            self._credential = get_credentials_for(
                self.environment, CredentialType.ACLFROMHELL
            )
        except IndexError:
            raise Exception(
                "Credential ACLFROMHELL for env {} not found".format(
                    self.environment.name
                )
            )
    return self._credential
def start_replication_parameters(self, instance):
    base = self.initialization_parameters(instance)

    replica_credential = get_credentials_for(
        self.databaseinfra.environment, CredentialType.MYSQL_REPLICA
    )
    base['REPLICA_USER'] = replica_credential.user
    base['REPLICA_PASSWORD'] = replica_credential.password

    hosts = set(self.databaseinfra.hosts)
    hosts.discard(instance.hostname)
    base['IPMASTER'] = hosts.pop().address

    return base
def credential_parameter_by_name(request, env_id, param_name):
    try:
        env = Environment.objects.get(id=env_id)
        credential = get_credentials_for(
            env, CredentialType.HOST_PROVIDER
        )
    except (IndexError, Environment.DoesNotExist):
        msg = ''
    else:
        msg = credential.get_parameter_by_name(param_name)

    output = json.dumps({'msg': msg or ''})
    return HttpResponse(output, content_type="application/json")
def __init__(self, instance):
    super(ACLStep, self).__init__(instance)
    try:
        acl_credential = get_credentials_for(
            environment=self.environment,
            credential_type=CredentialType.ACLAPI)
    except IndexError:
        self.acl_client = None
    else:
        self.acl_client = AclClient(
            acl_credential.endpoint,
            acl_credential.user,
            acl_credential.password,
            self.environment)
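# As seen in get_project_id, credential_parameter_by_name, and the ACLStep
# constructor above, get_credentials_for raises IndexError when no credential
# of the requested type is registered for an environment. A minimal sketch of
# that pattern; the helper name below is hypothetical, not part of the
# original code.
def has_credential(environment, credential_type):
    try:
        get_credentials_for(environment, credential_type)
    except IndexError:
        # No credential of this type registered for the environment.
        return False
    return True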