コード例 #1
0
    def __mongo_client__(self, instance):
        """Return a pymongo client connected as the infra admin user.

        Arbiter instances are never authenticated (arbiters hold no data).

        Raises:
            AuthenticationError: when pymongo rejects the connection
                address with a TypeError.
        """
        connection_address = self.__get_admin_connection(instance)
        if not self.databaseinfra and instance:
            self.databaseinfra = instance.databaseinfra
        try:
            # mongo uses timeouts in milliseconds; configuration stores seconds
            connection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_connect_timeout',
                default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000

            # renamed from *_in_seconds: the value is in milliseconds
            server_selection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_server_selection_timeout',
                default=MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT) * 1000

            socket_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_socket_timeout', default=MONGO_SOCKET_TIMEOUT) * 1000

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds,
                serverSelectionTimeoutMS=server_selection_timeout_in_miliseconds,
                socketTimeoutMS=socket_timeout_in_miliseconds)
            if not instance or instance.instance_type != instance.MONGODB_ARBITER:
                if self.databaseinfra.user and self.databaseinfra.password:
                    LOG.debug('Authenticating databaseinfra %s',
                              self.databaseinfra)
                    client.admin.authenticate(self.databaseinfra.user,
                                              self.databaseinfra.password)
            return client
        except TypeError:
            # BUGFIX: the message previously used '%' with no %s placeholder,
            # which raised a second TypeError while reporting the first.
            raise AuthenticationError(
                message='Invalid address: %s' % connection_address)
コード例 #2
0
    def __mongo_client__(self, instance):
        """Build an admin-authenticated pymongo client for this infra.

        Arbiter instances are never authenticated because they hold no
        user data.

        Raises:
            AuthenticationError: when pymongo rejects the connection
                address with a TypeError.
        """
        connection_address = self.__get_admin_connection(instance)
        if not self.databaseinfra and instance:
            self.databaseinfra = instance.databaseinfra
        try:
            # mongo uses timeouts in milliseconds; configuration stores seconds
            connection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_connect_timeout',
                default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000

            # renamed from *_in_seconds: this value is in milliseconds
            server_selection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_server_selection_timeout',
                default=MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT) * 1000

            socket_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_socket_timeout', default=MONGO_SOCKET_TIMEOUT) * 1000

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds,
                serverSelectionTimeoutMS=server_selection_timeout_in_miliseconds,
                socketTimeoutMS=socket_timeout_in_miliseconds)
            if not instance or instance.instance_type != instance.MONGODB_ARBITER:
                if self.databaseinfra.user and self.databaseinfra.password:
                    LOG.debug('Authenticating databaseinfra %s',
                              self.databaseinfra)
                    client.admin.authenticate(self.databaseinfra.user,
                                              self.databaseinfra.password)
            return client
        except TypeError:
            # BUGFIX: '...' % addr had no %s placeholder and itself raised
            # TypeError while building the error message.
            raise AuthenticationError(
                message='Invalid address: %s' % connection_address)
コード例 #3
0
    def __mongo_client__(self, instance, default_timeout=False):
        """Return a TLS-aware, admin-authenticated pymongo client.

        Args:
            instance: target instance; arbiters are never authenticated.
            default_timeout: when True, skip Configuration lookups and use
                the compiled-in default timeouts.

        Raises:
            AuthenticationError: when pymongo rejects the connection
                address with a TypeError.
        """
        connection_address = self.__get_admin_connection(instance)
        if not self.databaseinfra and instance:
            self.databaseinfra = instance.databaseinfra
        try:
            # mongo uses timeouts in milliseconds; stored values are seconds
            if default_timeout:
                connection_timeout_in_miliseconds = (
                    MONGO_CONNECTION_DEFAULT_TIMEOUT * 1000
                )
                # renamed from *_in_seconds: the value is in milliseconds
                server_selection_timeout_in_miliseconds = (
                    MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT * 1000
                )
                socket_timeout_in_miliseconds = MONGO_SOCKET_TIMEOUT * 1000
            else:
                connection_timeout_in_miliseconds = (
                    Configuration.get_by_name_as_int(
                        'mongo_connect_timeout',
                        default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000
                )
                server_selection_timeout_in_miliseconds = (
                    Configuration.get_by_name_as_int(
                        'mongo_server_selection_timeout',
                        default=MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT) * 1000
                )
                socket_timeout_in_miliseconds = (
                    Configuration.get_by_name_as_int(
                        'mongo_socket_timeout',
                        default=MONGO_SOCKET_TIMEOUT) * 1000
                )

            # TLS only when the infra has a certificate configured and at
            # least PREFERTLS mode enabled.
            if self.databaseinfra.ssl_configured and \
               self.databaseinfra.ssl_mode >= self.databaseinfra.PREFERTLS:
                tls = True
                tls_ca_file = Configuration.get_by_name('root_cert_file')
            else:
                tls = False
                tls_ca_file = None

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds,
                serverSelectionTimeoutMS=server_selection_timeout_in_miliseconds,
                socketTimeoutMS=socket_timeout_in_miliseconds,
                tls=tls,
                tlsCAFile=tls_ca_file
            )
            # arbiters hold no user data, so they are never authenticated
            if not instance or instance.instance_type != instance.MONGODB_ARBITER:
                if self.databaseinfra.user and self.databaseinfra.password:
                    LOG.debug('Authenticating databaseinfra %s',
                              self.databaseinfra)
                    client.admin.authenticate(self.databaseinfra.user,
                                              self.databaseinfra.password)
            return client
        except TypeError:
            # BUGFIX: the message used '%' with no placeholder, raising a
            # second TypeError while reporting the first.
            raise AuthenticationError(
                message='Invalid address: %s' % connection_address)
コード例 #4
0
 def get_log_endpoint(self):
     """Return the log endpoint URL for whichever log backend is enabled.

     Graylog takes precedence over Kibana; an empty string is returned
     when neither integration is switched on.
     """
     if Configuration.get_by_name_as_int('graylog_integration') == 1:
         credential_type = CredentialType.GRAYLOG
     elif Configuration.get_by_name_as_int('kibana_integration') == 1:
         credential_type = CredentialType.KIBANA_LOG
     else:
         return ""
     credential = get_credentials_for(environment=self.environment,
                                      credential_type=credential_type)
     return credential.get_parameter_by_name('endpoint_log')
コード例 #5
0
    def __redis_client__(self, instance):
        """Return a StrictRedis client for *instance* (or the infra master).

        A direct connection is used for plain Redis instances and for
        non-HA plans; otherwise the current master is resolved through
        sentinel.
        """
        try:
            LOG.debug('Connecting to redis databaseinfra %s',
                      self.databaseinfra)
            # redis uses timeout in seconds
            connection_timeout_in_seconds = Configuration.get_by_name_as_int(
                'redis_connect_timeout',
                default=REDIS_CONNECTION_DEFAULT_TIMEOUT)

            if (instance and instance.instance_type == Instance.REDIS) or (
                    not self.databaseinfra.plan.is_ha and not instance):
                connection_address, connection_port = \
                    self.__get_admin_single_connection(instance)
                client = redis.StrictRedis(
                    host=connection_address,
                    port=int(connection_port),
                    password=self.databaseinfra.password,
                    socket_timeout=connection_timeout_in_seconds)
            else:
                # HA plan: ask sentinel for the current master
                sentinel = self.get_sentinel_client(instance)
                client = sentinel.master_for(
                    self.databaseinfra.name,
                    socket_timeout=connection_timeout_in_seconds,
                    password=self.databaseinfra.password)

            LOG.debug('Successfully connected to redis databaseinfra %s',
                      self.databaseinfra)
            return client
        except Exception:
            # BUGFIX: bare `raise` preserves the original traceback;
            # `raise e` discarded it on Python 2.
            raise
コード例 #6
0
    def __mysql_client__(self,
                         instance,
                         database='mysql',
                         default_timeout=False):
        """Return a MySQLdb connection authenticated as the infra admin.

        Args:
            instance: instance whose address/port should be used.
            database: schema to connect to (defaults to 'mysql').
            default_timeout: when True, use the compiled-in default
                timeout instead of the Configuration value.
        """
        connection_address, connection_port = self.__get_admin_connection(
            instance)
        try:
            LOG.debug('Connecting to mysql databaseinfra %s',
                      self.databaseinfra)
            # mysql uses timeout in seconds
            if default_timeout:
                connection_timeout_in_seconds = MYSQL_CONNECTION_DEFAULT_TIMEOUT
            else:
                connection_timeout_in_seconds = Configuration.get_by_name_as_int(
                    'mysql_connect_timeout',
                    default=MYSQL_CONNECTION_DEFAULT_TIMEOUT)

            client = mysqldb.connect(
                host=connection_address,
                port=int(connection_port),
                user=self.databaseinfra.user,
                passwd=self.databaseinfra.password,
                db=database,
                connect_timeout=connection_timeout_in_seconds)
            LOG.debug('Successfully connected to mysql databaseinfra %s',
                      self.databaseinfra)
            return client
        except Exception:
            # BUGFIX: bare `raise` preserves the original traceback;
            # `raise e` discarded it on Python 2.
            raise
コード例 #7
0
def remove_database_old_backups(self):
    """Purge snapshots older than the configured retention window.

    Registers a TaskHistory entry, records one message per snapshot
    (removed or failed) and finishes with STATUS_ERROR if any removal
    failed.
    """
    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        worker_name=worker_name,
                                        user=None)

    backup_retention_days = Configuration.get_by_name_as_int(
        'backup_retention_days')

    LOG.info("Removing backups older than %s days", backup_retention_days)

    backup_time_dt = date.today() - timedelta(days=backup_retention_days)
    snapshots = Snapshot.objects.filter(start_at__lte=backup_time_dt,
                                        purge_at__isnull=True,
                                        instance__isnull=False,
                                        snapshopt_id__isnull=False)
    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    if not snapshots:
        msgs.append("There is no snapshot to purge")
    for snapshot in snapshots:
        try:
            remove_snapshot_backup(snapshot=snapshot)
            msg = "Backup %s removed" % (snapshot)
            LOG.info(msg)
        except Exception:
            # BUGFIX: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; LOG.exception keeps the traceback.
            msg = "Error removing backup %s" % (snapshot)
            status = TaskHistory.STATUS_ERROR
            LOG.exception(msg)
        msgs.append(msg)

    task_history.update_status_for(status, details="\n".join(msgs))
コード例 #8
0
def purge_quarantine(self,):
    """Destroy databases whose quarantine retention period has expired.

    Cloudstack-provided databases get their whole infra destroyed; other
    databases are simply deleted. Audit bookkeeping is always cleaned up.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    task_history = None
    try:
        task_history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs,
            str(self.request.args)))
        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)

        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt)

        for database in databases:
            if database.plan.provider == database.plan.CLOUDSTACK:
                # cloudstack infra must be torn down, not just deleted
                destroy_infra(databaseinfra=database.databaseinfra,
                              task=task_history)
            else:
                database.delete()

            LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
                database.name, quarantine_time))

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details='Databases destroyed successfully')
    except Exception:
        # BUGFIX: task_history could be unbound here when register() itself
        # failed, so the original raised NameError and masked the real error.
        LOG.exception("purge_quarantine failed")
        if task_history is not None:
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details="Error")
    finally:
        AuditRequest.cleanup_request()
コード例 #9
0
 def get_sentinel_client(self, instance=None):
     """Return a Sentinel handle bound to this infra's sentinel hosts.

     The socket timeout comes from the 'redis_connect_timeout'
     configuration value (seconds).
     """
     timeout_seconds = Configuration.get_by_name_as_int(
         'redis_connect_timeout', default=REDIS_CONNECTION_DEFAULT_TIMEOUT)
     sentinel_hosts = self.__get_admin_sentinel_connection(instance)
     return Sentinel(sentinel_hosts, socket_timeout=timeout_seconds)
コード例 #10
0
    def metricdetail_view(self, request, database_id):
        """Render the detail page for one database metric.

        Reads 'hostname' and 'metricname' from the query string and the
        optional 'change_from' window from POST, then fetches graphite
        datapoints for the selected metric.
        """
        from util.metrics.metrics import get_metric_datapoints_for

        hostname = request.GET.get('hostname')
        metricname = request.GET.get('metricname')

        database = Database.objects.get(id=database_id)
        engine = database.infra.engine_name
        db_name = database.name
        URL = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.GRAPHITE).endpoint

        from_option = request.POST.get('change_from') or '2hours'
        granurality = self.get_granurality(from_option) or '20minutes'

        from_options = self.build_select_options(
            from_option, self.get_from_options())

        graph_data = get_metric_datapoints_for(engine, db_name, hostname,
                                               url=URL, metric_name=metricname,
                                               granurality=granurality,
                                               from_option=from_option)

        title = "{} {} Metric".format(
            database.name, graph_data[0]["graph_name"])

        show_filters = Configuration.get_by_name_as_int('metric_filters')
        # BUGFIX idiom: truthiness instead of `== True` comparison
        if graph_data[0]['normalize_series']:
            show_filters = False

        # NOTE(review): rendering with locals() exposes every local to the
        # template; kept as-is because the template relies on these names.
        return render_to_response(
            "logical/database/metrics/metricdetail.html", locals(),
            context_instance=RequestContext(request))
コード例 #11
0
 def get_sentinel_client(self, instance=None):
     """Build a redis Sentinel client for this databaseinfra."""
     # timeout is configured in seconds
     connect_timeout = Configuration.get_by_name_as_int(
         'redis_connect_timeout', default=REDIS_CONNECTION_DEFAULT_TIMEOUT)
     return Sentinel(self.__get_admin_sentinel_connection(instance),
                     socket_timeout=connect_timeout)
コード例 #12
0
def remove_database_old_backups(self):
    """Purge snapshots older than the configured retention window.

    One message per snapshot is collected; the task ends with
    STATUS_ERROR if any removal failed.
    """
    task_history = TaskHistory.register(request=self.request, user=None)

    backup_retention_days = Configuration.get_by_name_as_int(
        'backup_retention_days')

    LOG.info("Removing backups older than %s days", backup_retention_days)

    backup_time_dt = date.today() - timedelta(days=backup_retention_days)
    snapshots = Snapshot.objects.filter(start_at__lte=backup_time_dt,
                                        purge_at__isnull=True,
                                        instance__isnull=False,
                                        snapshopt_id__isnull=False)
    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    if not snapshots:
        msgs.append("There is no snapshot to purge")
    for snapshot in snapshots:
        try:
            remove_snapshot_backup(snapshot=snapshot)
            msg = "Backup %s removed" % (snapshot)
            LOG.info(msg)
        except Exception:
            # BUGFIX: bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; LOG.exception keeps the traceback.
            msg = "Error removing backup %s" % (snapshot)
            status = TaskHistory.STATUS_ERROR
            LOG.exception(msg)
        msgs.append(msg)

    task_history.update_status_for(status, details="\n".join(msgs))
コード例 #13
0
ファイル: tasks.py プロジェクト: mbergo/database-as-a-service
def purge_task_history(self):
    """Delete old, finished periodic-task history rows.

    Only housekeeping task names in terminal status (SUCCESS/ERROR) older
    than 'task_history_retention_days' are purged.
    """
    task_history = None
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, user=None,
                                            worker_name=worker_name)

        now = datetime.datetime.now()
        retention_days = Configuration.get_by_name_as_int(
            'task_history_retention_days')

        n_days_before = now - datetime.timedelta(days=retention_days)

        tasks_to_purge = TaskHistory.objects.filter(
            task_name__in=[
                'notification.tasks.database_notification',
                'notification.tasks.database_notification_for_team',
                'notification.tasks.update_database_status',
                'notification.tasks.update_database_used_size',
                'notification.tasks.update_instances_status',
                'system.tasks.set_celery_healthcheck_last_update'],
            ended_at__lt=n_days_before,
            task_status__in=["SUCCESS", "ERROR"])

        tasks_to_purge.delete()

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details='Purge succesfully done!')
    except Exception as e:
        # BUGFIX: `except Exception, e` is Python-2-only syntax; also guard
        # against register() having failed before task_history was bound.
        if task_history is not None:
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=e)
コード例 #14
0
def user_m2m_changed(sender, **kwargs):
    """m2m_changed handler: register a team in LaaS after members are added."""
    if kwargs.get('action') != 'post_add':
        return
    from util.laas import register_team_laas
    # only act when the LaaS integration flag is switched on
    if Configuration.get_by_name_as_int('laas_integration') == 1:
        register_team_laas(kwargs.get('instance'))
コード例 #15
0
def _add_read_only_instances(request, database):
    """Validate and enqueue the creation of read-only instances.

    Adds an error message to *request* and returns early when the database
    is dead/disabled, the topology cannot scale horizontally, the quantity
    field is missing, or the read-host limit would be exceeded.
    """
    try:
        check_is_database_dead(database.id, 'Add read-only instances')
        check_is_database_enabled(database.id, 'Add read-only instances')
    except DisabledDatabase as err:
        messages.add_message(request, messages.ERROR, err.message)
        return

    if not database.plan.replication_topology.has_horizontal_scalability:
        messages.add_message(
            request, messages.ERROR,
            'Database topology do not have horizontal scalability')
        return

    if 'add_read_qtd' not in request.POST:
        messages.add_message(request, messages.ERROR, 'Quantity is required')
        return

    max_read_hosts = Configuration.get_by_name_as_int('max_read_hosts', 5)
    qtd_new_hosts = int(request.POST['add_read_qtd'])
    # PERF: count() issues SELECT COUNT(*) instead of fetching every row
    # just to take len() of the result.
    current_read_nodes = database.infra.instances.filter(
        read_only=True).count()
    total_read_hosts = qtd_new_hosts + current_read_nodes
    if total_read_hosts > max_read_hosts:
        messages.add_message(
            request, messages.ERROR,
            'Current limit of read only hosts is {} and you are trying to setup {}'
            .format(max_read_hosts, total_read_hosts))
        return

    TaskRegister.database_add_instances(database=database,
                                        user=request.user,
                                        number_of_instances=qtd_new_hosts)
コード例 #16
0
def databaseinfra_notification(self, user=None):
	"""Email dbaas admins when an infra group's occupation crosses the threshold.

	Groups DatabaseInfra rows by plan/environment/engine, computes the
	percentage of capacity in use and sends a notification for non-cloudstack
	plans above 'threshold_infra_notification'.

	NOTE(review): update_status_for is executed inside the for loop, so the
	task status is rewritten once per infra group — looks like it was meant
	to run once after the loop; confirm before moving it.
	"""
	task_history = TaskHistory.register(request=self.request, user=user)
	threshold_infra_notification = Configuration.get_by_name_as_int("threshold_infra_notification", default=0)
	if threshold_infra_notification <= 0:
		LOG.warning("database infra notification is disabled")
		return

	# Sum capacity per databseinfra with parameter plan, environment and engine
	infras = DatabaseInfra.objects.values('plan__name', 'environment__name', 'engine__engine_type__name',
	                                      'plan__provider').annotate(capacity=Sum('capacity'))
	for infra in infras:
		# total database created in databaseinfra per plan, environment and engine
		used = DatabaseInfra.objects.filter(plan__name=infra['plan__name'],
		                                    environment__name=infra['environment__name'],
		                                    engine__engine_type__name=infra['engine__engine_type__name']).aggregate(
			used=Count('databases'))
		# calculate the percentage
		percent = int(used['used'] * 100 / infra['capacity'])
		if percent >= threshold_infra_notification and infra['plan__provider'] != Plan.CLOUDSTACK:
			LOG.info('Plan %s in environment %s with %s%% occupied' % (
				infra['plan__name'], infra['environment__name'], percent))
			LOG.info("Sending database infra notification...")
			context = {}
			context['plan'] = infra['plan__name']
			context['environment'] = infra['environment__name']
			context['used'] = used['used']
			context['capacity'] = infra['capacity']
			context['percent'] = percent
			email_notifications.databaseinfra_ending(context=context)

		task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
		                               details='Databaseinfra Notification successfully sent to dbaas admins!')
	return
コード例 #17
0
    def delete_view(self, request, object_id, extra_context=None):
        """Block deletion of databases that are dead, busy or migrating.

        On any blocking condition an error message is flashed and the user
        is redirected back to the changelist; otherwise the standard admin
        delete view runs with the quarantine length in its context.
        """
        database = Database.objects.get(id=object_id)
        extra_context = extra_context or {}

        error = None
        if database.status != Database.ALIVE or \
                not database.database_status.is_alive:
            error = "Database {} is not alive and cannot be deleted".format(
                database.name)
        elif database.is_beeing_used_elsewhere():
            error = ("Database {} cannot be deleted because it is in use by "
                     "another task.".format(database.name))
        elif database.has_migration_started():
            error = ("Database {} cannot be deleted because it is beeing "
                     "migrated.".format(database.name))

        if error:
            self.message_user(request, error, level=messages.ERROR)
            return HttpResponseRedirect(
                reverse('admin:logical_database_changelist'))

        if not database.is_in_quarantine:
            # tell the template how long the database will sit in quarantine
            extra_context['quarantine_days'] = Configuration.get_by_name_as_int(
                'quarantine_retention_days')
        return super(DatabaseAdmin, self).delete_view(
            request, object_id, extra_context=extra_context)
コード例 #18
0
    def __redis_client__(self, instance):
        """Return a Redis client for *instance* (or the sentinel master).

        A direct connection is used for plain Redis instances and non-HA
        plans; otherwise sentinel resolves the current master.
        """
        try:
            LOG.debug(
                'Connecting to redis databaseinfra %s', self.databaseinfra)
            # redis uses timeout in seconds
            connection_timeout_in_seconds = Configuration.get_by_name_as_int(
                'redis_connect_timeout',
                default=REDIS_CONNECTION_DEFAULT_TIMEOUT)

            if (instance and instance.instance_type == Instance.REDIS) or (
                    not self.databaseinfra.plan.is_ha and not instance):
                connection_address, connection_port = \
                    self.__get_admin_single_connection(instance)
                client = redis.Redis(
                    host=connection_address,
                    port=int(connection_port),
                    password=self.databaseinfra.password,
                    socket_connect_timeout=connection_timeout_in_seconds)
            else:
                # HA plan: ask sentinel for the current master
                sentinel = self.get_sentinel_client(instance)
                client = sentinel.master_for(
                    self.databaseinfra.name,
                    socket_timeout=connection_timeout_in_seconds,
                    password=self.databaseinfra.password)

            LOG.debug('Successfully connected to redis databaseinfra %s',
                      self.databaseinfra)
            return client
        except Exception:
            # BUGFIX: `except Exception, e` is Python-2-only syntax and
            # `raise e` discarded the traceback; a bare raise keeps it.
            raise
コード例 #19
0
def purge_task_history(self):
    """Delete finished periodic-task history rows older than retention.

    Only housekeeping task names with terminal status (SUCCESS, ERROR,
    WARNING) older than 'task_history_retention_days' are purged.
    """
    task_history = None
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request,
                                            user=None,
                                            worker_name=worker_name)

        now = datetime.datetime.now()
        retention_days = Configuration.get_by_name_as_int(
            'task_history_retention_days')

        n_days_before = now - datetime.timedelta(days=retention_days)

        tasks_to_purge = TaskHistory.objects.filter(
            task_name__in=[
                'notification.tasks.database_notification',
                'notification.tasks.database_notification_for_team',
                'notification.tasks.update_database_used_size',
                'notification.tasks.update_disk_used_size',
                'notification.tasks.update_database_status',
                'notification.tasks.update_instances_status',
                'sync_celery_tasks',
                'system.tasks.set_celery_healthcheck_last_update'],
            ended_at__lt=n_days_before,
            task_status__in=["SUCCESS", "ERROR", "WARNING"])

        tasks_to_purge.delete()

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details='Purge succesfully done!')
    except Exception as e:
        # BUGFIX: guard against register() failing before task_history was
        # bound — previously a NameError masked the real error.
        if task_history is not None:
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=e)
コード例 #20
0
def user_m2m_changed(sender, **kwargs):
    """Signal handler: push a team to LaaS once members have been added."""
    team, action = kwargs.get('instance'), kwargs.get('action')
    if action == 'post_add':
        from util.laas import register_team_laas
        laas_on = Configuration.get_by_name_as_int('laas_integration') == 1
        if laas_on:
            register_team_laas(team)
コード例 #21
0
	def purge_quarantine(self):
		"""Delete every database whose quarantine retention period has expired."""
		quarantine_time = Configuration.get_by_name_as_int('quarantine_retention_days')
		# databases quarantined on or before this date are eligible for deletion
		quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
		databases = Database.objects.filter(is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt)
		for database in databases:
			database.delete()
			LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
				database.name, quarantine_time))
コード例 #22
0
    def last_offering_available_for_auto_resize(cls):
        """Return the biggest disk offering allowed for auto-resize.

        Raises:
            NoDiskOfferingLesserError: when no offering fits under the
                configured 'auto_resize_max_size_in_gb' cap.
        """
        max_size_kb = cls.converter_gb_to_kb(
            Configuration.get_by_name_as_int(
                name='auto_resize_max_size_in_gb', default=100))

        candidates = DiskOffering.objects.filter(
            size_kb__lte=max_size_kb).order_by('-size_kb')
        if not candidates:
            raise NoDiskOfferingLesserError(max_size_kb)
        return candidates[0]
コード例 #23
0
    def get_dex_url(self):
        """Return 1 when Dex query analysis applies, otherwise ''."""
        analyze_enabled = Configuration.get_by_name_as_int('dex_analyze') == 1
        if not analyze_enabled:
            return ""
        # only mongodb databases on non-preprovisioned plans qualify
        plan = self.databaseinfra.plan
        if plan.provider == Plan.PREPROVISIONED or self.engine_type != 'mongodb':
            return ""
        return 1
コード例 #24
0
 def purge_quarantine(self):
     """Delete every database whose quarantine window has expired."""
     retention_days = Configuration.get_by_name_as_int(
         'quarantine_retention_days')
     cutoff = date.today() - timedelta(days=retention_days)
     expired = Database.objects.filter(is_in_quarantine=True,
                                       quarantine_dt__lte=cutoff)
     for database in expired:
         database.delete()
         LOG.info(
             "The database %s was deleted, because it was set to quarentine %d days ago"
             % (database.name, retention_days))
コード例 #25
0
    def get_dex_url(self):
        """Return 1 when Dex analysis applies to this database, else ''."""
        if Configuration.get_by_name_as_int('dex_analyze') != 1:
            return ""
        # mongodb only, and never for pre-provisioned plans
        applicable = (not self.databaseinfra.plan.is_pre_provisioned
                      and self.engine_type == 'mongodb')
        return 1 if applicable else ""
コード例 #26
0
def _check_snapshot_limit(instances, task):
    """Log, per instance, the snapshot retention limit vs. snapshots in use.

    Args:
        instances: iterable of instances to inspect.
        task: task object receiving add_detail() progress lines.
    """
    # PERF: the configuration value is loop-invariant — fetch it once.
    limit = Configuration.get_by_name_as_int('backup_retention_days')
    for instance in instances:
        task.add_detail('\nChecking older backups for {}...'.format(instance))
        # order_by dropped: ordering is irrelevant to COUNT(*)
        snapshots_count = Snapshot.objects.filter(
            purge_at__isnull=True,
            instance=instance,
            snapshopt_id__isnull=False).count()
        task.add_detail('Current snapshot limit {}, used {}'.format(
            limit, snapshots_count),
                        level=1)
コード例 #27
0
    def get_dex_url(self):
        """Return 1 if Dex query analysis is available for this database.

        An empty string means the feature is disabled, the plan is
        pre-provisioned, or the engine is not mongodb.
        """
        dex_on = Configuration.get_by_name_as_int('dex_analyze') == 1
        if not dex_on:
            return ""
        if self.databaseinfra.plan.is_pre_provisioned:
            return ""
        return 1 if self.engine_type == 'mongodb' else ""
コード例 #28
0
    def get_dex_url(self):
        """Return 1 when Dex analysis can run here, otherwise ''."""
        if Configuration.get_by_name_as_int('dex_analyze') != 1:
            return ""
        # pre-provisioned plans and non-mongodb engines are excluded
        excluded = (self.databaseinfra.plan.provider == Plan.PREPROVISIONED
                    or self.engine_type != 'mongodb')
        return "" if excluded else 1
コード例 #29
0
    def changelist_view(self, request, extra_context=None):
        """Show the snapshot changelist with a backup-availability flag."""
        extra_context = extra_context or {}
        # template expects a plain boolean under the (sic) 'backup_avaliable' key
        extra_context['backup_avaliable'] = bool(
            Configuration.get_by_name_as_int('backup_avaliable'))
        return super(SnapshotAdmin, self).changelist_view(
            request, extra_context=extra_context)
コード例 #30
0
def databaseinfra_notification(self, user=None):
    """Email dbaas admins when an infra group's occupation crosses the threshold.

    NOTE(review): `infras` is a values() queryset, so each `infra` is a dict —
    the call `infra.databases.get()` below would raise AttributeError; it looks
    like it was meant to operate on DatabaseInfra objects. Confirm with the
    original repository before relying on this task.
    NOTE(review): only Database.MultipleObjectsReturned is caught; a group with
    zero databases would raise Database.DoesNotExist. The final
    update_status_for also sits inside the loop — likely meant to run once
    after it.
    """
    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        user=user,
                                        worker_name=worker_name)
    # disabled entirely when the threshold is unset or non-positive
    threshold_infra_notification = Configuration.get_by_name_as_int(
        "threshold_infra_notification", default=0)
    if threshold_infra_notification <= 0:
        LOG.warning("database infra notification is disabled")
        return

    # Sum capacity per databseinfra with parameter plan, environment and engine
    infras = DatabaseInfra.objects.values(
        'plan__name', 'environment__name', 'engine__engine_type__name',
        'plan__provider').annotate(capacity=Sum('capacity'))
    for infra in infras:
        try:
            database = infra.databases.get()
        except Database.MultipleObjectsReturned:
            pass
        else:
            # quarantined and unsubscribed databases never trigger emails
            if database.is_in_quarantine:
                continue
            if not database.subscribe_to_email_events:
                continue

        used = DatabaseInfra.objects.filter(
            plan__name=infra['plan__name'],
            environment__name=infra['environment__name'],
            engine__engine_type__name=infra['engine__engine_type__name']
        ).aggregate(used=Count('databases'))
        # calculate the percentage

        percent = int(used['used'] * 100 / infra['capacity'])
        if percent >= threshold_infra_notification and infra[
                'plan__provider'] != Plan.CLOUDSTACK:
            LOG.info(
                'Plan %s in environment %s with %s%% occupied' %
                (infra['plan__name'], infra['environment__name'], percent))
            LOG.info("Sending database infra notification...")
            context = {}
            context['plan'] = infra['plan__name']
            context['environment'] = infra['environment__name']
            context['used'] = used['used']
            context['capacity'] = infra['capacity']
            context['percent'] = percent
            email_notifications.databaseinfra_ending(context=context)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details=
            'Databaseinfra Notification successfully sent to dbaas admins!')
    return
コード例 #31
0
    def changelist_view(self, request, extra_context=None):
        """Render the snapshot changelist, flagging backup availability."""
        extra_context = extra_context or {}
        backup_avaliable = Configuration.get_by_name_as_int('backup_avaliable')
        # normalize the configuration integer to a strict boolean for the
        # template (the (sic) 'backup_avaliable' key name is expected there)
        extra_context['backup_avaliable'] = True if backup_avaliable else False
        return super(SnapshotAdmin, self).changelist_view(
            request, extra_context=extra_context)
コード例 #32
0
def user_post_save_wrapper(kwargs=None):
    """Activate a freshly created user and optionally notify admins.

    Expects the keyword dict of a Django ``post_save`` signal: reads
    ``instance`` (the user) and ``created``. On creation the user is
    activated, marked as staff and saved; a notification e-mail is sent
    unless the 'new_user_send_mail' configuration entry disables it.

    BUG FIX: the original used a mutable default argument (``kwargs={}``),
    which is shared across calls; replaced with ``None`` + fallback.
    """
    kwargs = kwargs or {}
    user = kwargs.get('instance')
    created = kwargs.get('created')
    if not created:
        return
    LOG.debug("new user %s created" % user)
    user.is_active = True
    user.is_staff = True
    user.save()
    # notify about the new user unless explicitly disabled (default: send)
    must_send_mail = Configuration.get_by_name_as_int('new_user_send_mail', 1)
    if must_send_mail:
        notify_new_user_creation(user)
コード例 #33
0
    def __mongo_client__(self, instance):
        """Return an authenticated ``pymongo.MongoClient`` for *instance*.

        The connect timeout comes from the 'mongo_connect_timeout'
        configuration entry (seconds; converted to milliseconds here, as
        pymongo expects).

        Raises:
            AuthenticationError: when the connection address is invalid.
        """
        connection_address = self.__get_admin_connection(instance)
        try:
            # mongo uses the timeout in milliseconds
            connection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_connect_timeout',
                default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds)
            if self.databaseinfra.user and self.databaseinfra.password:
                LOG.debug('Authenticating databaseinfra %s', self.databaseinfra)
                client.admin.authenticate(self.databaseinfra.user,
                                          self.databaseinfra.password)
            return client
        except TypeError:
            # BUG FIX: the original format string had no placeholder
            # ('Invalid address: ' % ...), so the '%' itself raised
            # TypeError instead of the intended AuthenticationError.
            raise AuthenticationError(
                message='Invalid address: %s' % connection_address)
コード例 #34
0
    def delete_view(self, request, object_id, extra_context=None):
        """Show the delete confirmation page for a database.

        Refuses deletion (error message + redirect back to the database
        page) when the database reports it cannot be deleted; otherwise
        exposes the quarantine retention period to the template for
        databases not yet in quarantine.
        """
        database = Database.objects.get(id=object_id)
        can_be_deleted, error = database.can_be_deleted()
        if not can_be_deleted:
            self.message_user(request, error, level=messages.ERROR)
            return HttpResponseRedirect(
                '/admin/logical/database/{}/'.format(object_id))

        context = extra_context or {}
        if not database.is_in_quarantine:
            context['quarantine_days'] = Configuration.get_by_name_as_int(
                'quarantine_retention_days')

        return super(DatabaseAdmin, self).delete_view(
            request, object_id, extra_context=context)
コード例 #35
0
def databaseinfra_notification():
    """Notify admins about database infras nearing their capacity.

    Groups capacity per (plan, environment, engine type) and e-mails a
    notification whenever occupation reaches the
    'threshold_infra_notification' percentage (default 50%).
    """
    # Sum capacity per databaseinfra grouped by plan, environment and engine
    grouped = DatabaseInfra.objects.values(
        'plan__name', 'environment__name', 'engine__engine_type__name'
    ).annotate(capacity=Sum('capacity'))
    for group in grouped:
        # databases created in this (plan, environment, engine) group
        usage = DatabaseInfra.objects.filter(
            plan__name=group['plan__name'],
            environment__name=group['environment__name'],
            engine__engine_type__name=group['engine__engine_type__name'],
        ).aggregate(used=Count('databases'))
        occupation_percent = int(usage['used'] * 100 / group['capacity'])
        threshold = Configuration.get_by_name_as_int(
            "threshold_infra_notification", default=50)
        if occupation_percent >= threshold:
            LOG.info('Plan %s in environment %s with %s%% occupied' % (
                group['plan__name'], group['environment__name'],
                occupation_percent))
            LOG.info("Sending notification...")
            email_notifications.databaseinfra_ending(
                group['plan__name'], group['environment__name'],
                usage['used'], group['capacity'], occupation_percent)
    return
コード例 #36
0
def _check_snapshot_limit(instances, task):
    for instance in instances:
        task.add_detail('\nChecking older backups for {}...'.format(instance))
        limit = Configuration.get_by_name_as_int('backup_retention_days')
        snapshots_count = Snapshot.objects.filter(
            purge_at__isnull=True, instance=instance,
            snapshopt_id__isnull=False
        ).order_by('start_at').count()
        task.add_detail(
            'Current snapshot limit {}, used {}'.format(
                limit, snapshots_count
            ),
            level=1
        )
コード例 #37
0
    def __mysql_client__(self, instance, database='mysql'):
        """Return a MySQLdb connection to *instance*.

        Connects with the infra's admin credentials to *database*
        (default 'mysql'), using the 'mysql_connect_timeout' configuration
        entry (seconds) as the connection timeout.

        NOTE: the previous ``except Exception, e: raise e`` wrapper was
        removed — it only re-raised the exception and, under Python 2,
        discarded the original traceback.
        """
        connection_address, connection_port = self.__get_admin_connection(instance)
        LOG.debug('Connecting to mysql databaseinfra %s', self.databaseinfra)
        # mysql uses the timeout in seconds
        connection_timeout_in_seconds = Configuration.get_by_name_as_int(
            'mysql_connect_timeout', default=MYSQL_CONNECTION_DEFAULT_TIMEOUT)

        client = mysqldb.connect(host=connection_address,
                                 port=int(connection_port),
                                 user=self.databaseinfra.user,
                                 passwd=self.databaseinfra.password,
                                 db=database,
                                 connect_timeout=connection_timeout_in_seconds)
        LOG.debug('Successfully connected to mysql databaseinfra %s' %
                  (self.databaseinfra))
        return client
コード例 #38
0
    def last_offering_available_for_auto_resize(cls):
        """Return the largest disk offering usable by auto-resize.

        The size ceiling comes from the 'auto_resize_max_size_in_gb'
        configuration entry (default 100 GB).

        Raises:
            NoDiskOfferingLesserError: if no offering fits under the ceiling.
        """
        max_size_gb = Configuration.get_by_name_as_int(
            name='auto_resize_max_size_in_gb', default=100)
        max_size_kb = cls.converter_gb_to_kb(max_size_gb)

        offerings = DiskOffering.objects.filter(
            size_kb__lte=max_size_kb).order_by('-size_kb')
        if not offerings:
            raise NoDiskOfferingLesserError(max_size_kb)
        return offerings[0]
コード例 #39
0
    def get_log_url(self):
        """Return the LaaS log URL for this database, or '' when unavailable.

        An empty string is returned when LaaS integration is disabled or
        the plan is pre-provisioned.
        """
        laas_enabled = Configuration.get_by_name_as_int('laas_integration') == 1
        if not laas_enabled or self.databaseinfra.plan.is_pre_provisioned:
            return ""

        from util import get_credentials_for
        from util.laas import get_group_name
        from dbaas_credentials.models import CredentialType

        credential = get_credentials_for(
            environment=self.environment,
            credential_type=CredentialType.LOGNIT)
        return credential.endpoint + get_group_name(self)
コード例 #40
0
    def do(self, workflow_dict):
        """Register the database in LaaS when integration is enabled.

        Returns True on success; on any failure records the error code and
        traceback in ``workflow_dict['exceptions']`` and returns False.
        """
        try:
            laas_enabled = Configuration.get_by_name_as_int(
                'laas_integration') == 1
            if laas_enabled:
                register_database_laas(workflow_dict['database'])
            return True
        except Exception:
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0018)
            workflow_dict['exceptions']['traceback'].append(full_stack())
            return False
コード例 #41
0
def get_snapshots_by_env(env):
    """Return the purgeable snapshots of *env* older than the retention window.

    The retention window (in days) comes from the volume-provider
    credential's 'retention_days' parameter, falling back to the
    'backup_retention_days' configuration entry.
    """
    credential = get_credentials_for(env, CredentialType.VOLUME_PROVIDER)
    retention = credential.get_parameter_by_name('retention_days')
    retention_days = (int(retention) if retention
                      else Configuration.get_by_name_as_int(
                          'backup_retention_days'))

    cutoff = date.today() - timedelta(days=retention_days)
    return Snapshot.objects.filter(
        start_at__lte=cutoff,
        purge_at__isnull=True,
        instance__isnull=False,
        snapshopt_id__isnull=False,
        instance__databaseinfra__environment=env,
    )
コード例 #42
0
    def get_log_url(self):
        """Return the LaaS log URL for this database, or '' when unavailable.

        An empty string is returned when LaaS integration is disabled or
        the plan is pre-provisioned.
        """
        if Configuration.get_by_name_as_int('laas_integration') != 1:
            return ""
        if self.databaseinfra.plan.provider == Plan.PREPROVISIONED:
            return ""

        from util import get_credentials_for
        from util.laas import get_group_name
        from dbaas_credentials.models import CredentialType

        credential = get_credentials_for(
            environment=self.environment,
            credential_type=CredentialType.LOGNIT)
        return "{}{}".format(credential.endpoint, get_group_name(self))
コード例 #43
0
def database_notification_for_team(team=None):
    """
    Notifies a team about the usage of each of its databases.

    Disabled when 'threshold_database_notification' <= 0. Quarantined
    databases and databases unsubscribed from e-mail events are skipped.
    Returns the list of log messages produced.
    """
    LOG.info("sending database notification for team %s" % team)
    threshold = Configuration.get_by_name_as_int(
        "threshold_database_notification", default=0)
    if threshold <= 0:
        LOG.warning("database notification is disabled")
        return

    databases = Database.objects.filter(team=team,
                                        is_in_quarantine=False,
                                        subscribe_to_email_events=True)
    msgs = []
    for database in databases:
        used = database.used_size_in_mb
        capacity = database.total_size_in_mb
        try:
            percent_usage = 100 * (used / capacity)
        except ZeroDivisionError:
            percent_usage = 0.0  # database reports no total size
        msg = "database %s => usage: %.2f | threshold: %.2f" % (
            database, percent_usage, threshold)
        LOG.info(msg)
        msgs.append(msg)

        if not team.email:
            msgs.append(
                "team %s has no email set and therefore no database usage notification will been sent"
                % team)
        elif percent_usage >= threshold:
            LOG.info("Sending database notification...")
            context = {
                'database': database.name,
                'team': team,
                'measure_unity': "MB",
                'used': used,
                'capacity': capacity,
                'percent': "%.2f" % percent_usage,
                'environment': database.environment.name,
            }
            email_notifications.database_usage(context=context)

    return msgs
コード例 #44
0
    def get_log_url(self):
        """Build the LaaS log URL for this database.

        Returns an empty string when LaaS integration is turned off or the
        plan is pre-provisioned.
        """
        integration_on = Configuration.get_by_name_as_int(
            'laas_integration') == 1
        if not integration_on:
            return ""
        if self.databaseinfra.plan.provider == Plan.PREPROVISIONED:
            return ""

        from util import get_credentials_for
        from util.laas import get_group_name
        from dbaas_credentials.models import CredentialType

        credential = get_credentials_for(
            environment=self.environment,
            credential_type=CredentialType.LOGNIT)
        return "{}{}".format(credential.endpoint, get_group_name(self))
コード例 #45
0
def purge_quarantine(self,):
    """Destroy every database whose quarantine period has expired.

    Runs as a task: registers a TaskHistory entry, computes the cutoff
    from the 'quarantine_retention_days' configuration entry and destroys
    each database quarantined on or before that date.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    try:
        task_history = TaskHistory.register(request=self.request, user=user)
        task_history.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info(
            "id: {} | task: {} | kwargs: {} | args: {}".format(
                self.request.id, self.request.task,
                self.request.kwargs, str(self.request.args)))

        retention_days = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        cutoff = date.today() - timedelta(days=retention_days)
        task_history.add_detail(
            "Quarantine date older than {}".format(cutoff))

        expired = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=cutoff)
        task_history.add_detail(
            "Databases to purge: {}".format(len(expired)))

        for database in expired:
            task_history.add_detail('Deleting {}...'.format(database), level=2)
            database.destroy(user)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.')
        return
    except Exception as e:
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()
コード例 #46
0
def database_notification_for_team(team=None):
    """
    Notifies teams of database usage.
    if threshold_database_notification <= 0, the notification is disabled.
    """
    from logical.models import Database

    LOG.info("sending database notification for team %s" % team)
    threshold = Configuration.get_by_name_as_int(
        "threshold_database_notification", default=0)
    if threshold <= 0:
        LOG.warning("database notification is disabled")
        return

    msgs = []
    for database in Database.objects.filter(team=team):
        used = database.used_size_in_mb
        capacity = database.total_size_in_mb
        try:
            percent_usage = (used / capacity) * 100
        except ZeroDivisionError:
            # database has no total size
            percent_usage = 0.0
        msg = "database %s => usage: %.2f | threshold: %.2f" % (
            database, percent_usage, threshold)
        LOG.info(msg)
        msgs.append(msg)

        if not team.email:
            msgs.append(
                "team %s has no email set and therefore no database usage "
                "notification will been sent" % team)
        elif percent_usage >= threshold:
            LOG.info("Sending database notification...")
            context = {
                'database': database.name,
                'team': team,
                'measure_unity': "MB",
                'used': used,
                'capacity': capacity,
                'percent': "%.2f" % percent_usage,
                'environment': database.environment.name,
            }
            email_notifications.database_usage(context=context)

    return msgs
コード例 #47
0
def get_snapshots_by_env(env):
    """Fetch the snapshots in *env* that are old enough to be purged.

    Retention (days) comes from the volume-provider credential when set,
    otherwise from the 'backup_retention_days' configuration entry.
    """
    credential = get_credentials_for(env, CredentialType.VOLUME_PROVIDER)
    configured = credential.get_parameter_by_name('retention_days')
    if configured:
        days = int(configured)
    else:
        days = Configuration.get_by_name_as_int('backup_retention_days')

    threshold_date = date.today() - timedelta(days=days)
    candidates = Snapshot.objects.filter(
        start_at__lte=threshold_date,
        purge_at__isnull=True,
        instance__isnull=False,
        snapshopt_id__isnull=False,
        instance__databaseinfra__environment=env,
    )
    return candidates
コード例 #48
0
    def execute(self):
        """Validate and enqueue an 'add read-only instances' task.

        Checks, in order: a number of instances was provided, the database
        is alive, the topology supports horizontal scalability and the
        read-only host limit ('max_read_hosts', default 5) would not be
        exceeded. Then registers the task, resuming from the last step on
        retry.
        """
        self.load_number_of_instances()
        if not self.number_of_instances:
            raise exceptions.RequiredNumberOfInstances(
                'Number of instances is required')

        alive, message = self.check_database_status()
        if not alive:
            raise exceptions.DatabaseNotAvailable(message)

        if not self.is_ha():
            raise exceptions.DatabaseIsNotHA(
                'Database topology do not have horizontal scalability')

        limit = Configuration.get_by_name_as_int('max_read_hosts', 5)
        new_hosts = self.number_of_instances
        existing_read_only = len(
            self.database.infra.instances.filter(read_only=True))
        total_after = new_hosts + existing_read_only
        if total_after > limit:
            raise exceptions.ReadOnlyHostsLimit(
                ('Current limit of read only hosts is {} and you are trying '
                 'to setup {}').format(limit, total_after))

        self.task_params = dict(
            database=self.database,
            user=self.request.user,
            number_of_instances=new_hosts,
            number_of_instances_before_task=self.number_of_instances_before)

        if self.retry:
            self.task_params['since_step'] = self.manager.current_step

        TaskRegister.database_add_instances(**self.task_params)
コード例 #49
0
    def delete_view(self, request, object_id, extra_context=None):
        """Show the delete confirmation page for a database.

        Blocks deletion (error message + redirect back to the changelist)
        when the database is not alive, is in use by another task, or is
        being migrated. For databases not yet in quarantine the quarantine
        retention period is exposed to the template.
        """
        database = Database.objects.get(id=object_id)
        extra_context = extra_context or {}

        if database.status != Database.ALIVE or not database.database_status.is_alive:
            self.message_user(
                request,
                "Database {} is not alive and cannot be deleted".format(
                    database.name),
                level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if database.is_beeing_used_elsewhere():
            self.message_user(
                request,
                "Database {} cannot be deleted because it is in use by another task."
                .format(database.name),
                level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if database.has_migration_started():
            # typo fix: user-facing message said "beeing migrated"
            self.message_user(
                request,
                "Database {} cannot be deleted because it is being migrated.".
                format(database.name),
                level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if not database.is_in_quarantine:
            extra_context[
                'quarantine_days'] = Configuration.get_by_name_as_int(
                    'quarantine_retention_days')
        return super(DatabaseAdmin,
                     self).delete_view(request,
                                       object_id,
                                       extra_context=extra_context)
コード例 #50
0
def purge_quarantine(self, ):
    """Destroy databases whose quarantine retention window has elapsed.

    Registers a TaskHistory entry, selects the quarantined databases whose
    quarantine date is at or before the retention cutoff and destroys them
    one by one, recording progress in the task details.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    try:
        task = TaskHistory.register(request=self.request, user=user)
        task.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
            self.request.id, self.request.task, self.request.kwargs,
            str(self.request.args)))

        days = Configuration.get_by_name_as_int('quarantine_retention_days')
        cutoff = date.today() - timedelta(days=days)
        task.add_detail("Quarantine date older than {}".format(cutoff))

        to_purge = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=cutoff)
        task.add_detail("Databases to purge: {}".format(len(to_purge)))

        for database in to_purge:
            task.add_detail('Deleting {}...'.format(database), level=2)
            database.destroy(user)

        task.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.')
        return
    except Exception as e:
        task.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()
コード例 #51
0
    def metricdetail_view(self, request, database_id):
        """Render the detail page for a single metric of a database host.

        Reads ``hostname`` and ``metricname`` from the query string,
        fetches the datapoints from Graphite and renders
        ``logical/database/metrics/metricdetail.html``.

        NOTE(review): the template context is built from ``locals()``, so
        every local variable name below is part of the template contract —
        do not rename any of them without checking the template.
        """
        from util.metrics.metrics import get_metric_datapoints_for

        hostname = request.GET.get('hostname')
        metricname = request.GET.get('metricname')

        database = Database.objects.get(id=database_id)
        engine = database.infra.engine_name
        db_name = database.name
        # Graphite endpoint for the database's environment
        URL = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.GRAPHITE).endpoint

        # 'change_from' arrives via POST when the user changes the period;
        # defaults mirror the initial page load
        from_option = request.POST.get('change_from') or '2hours'
        granurality = self.get_granurality(from_option) or '20minutes'

        from_options = self.build_select_options(from_option,
                                                 self.get_from_options())

        graph_data = get_metric_datapoints_for(engine,
                                               db_name,
                                               hostname,
                                               url=URL,
                                               metric_name=metricname,
                                               granurality=granurality,
                                               from_option=from_option)

        title = "{} {} Metric".format(database.name,
                                      graph_data[0]["graph_name"])

        # normalized series presumably cannot be filtered, so the filter
        # controls are hidden for them — TODO confirm against the template
        show_filters = Configuration.get_by_name_as_int('metric_filters')
        if graph_data[0]['normalize_series'] == True:
            show_filters = False

        return render_to_response("logical/database/metrics/metricdetail.html",
                                  locals(),
                                  context_instance=RequestContext(request))
コード例 #52
0
def _add_read_only_instances(request, database):
    """Validate and enqueue the creation of read-only instances.

    Adds an error message to the request and returns early when the
    database is dead or disabled, the topology cannot scale horizontally,
    the quantity is missing, or the read-only host limit would be exceeded.
    """
    try:
        check_is_database_dead(database.id, 'Add read-only instances')
        check_is_database_enabled(database.id, 'Add read-only instances')
    except DisabledDatabase as err:
        messages.add_message(request, messages.ERROR, err.message)
        return

    topology = database.plan.replication_topology
    if not topology.has_horizontal_scalability:
        messages.add_message(
            request, messages.ERROR,
            'Database topology do not have horizontal scalability')
        return

    if 'add_read_qtd' not in request.POST:
        messages.add_message(request, messages.ERROR, 'Quantity is required')
        return

    limit = Configuration.get_by_name_as_int('max_read_hosts', 5)
    requested = int(request.POST['add_read_qtd'])
    already_read_only = len(database.infra.instances.filter(read_only=True))
    total = requested + already_read_only
    if total > limit:
        messages.add_message(
            request, messages.ERROR,
            'Current limit of read only hosts is {} and you are trying to setup {}'.format(
                limit, total))
        return

    TaskRegister.database_add_instances(
        database=database,
        user=request.user,
        number_of_instances=requested)
コード例 #53
0
def purge_quarantine(self,):
    """Destroy quarantined databases older than the retention window.

    CloudStack-provided databases have their whole infra destroyed;
    any other database is simply deleted.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    try:
        task_history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs,
            str(self.request.args)))

        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        cutoff = date.today() - timedelta(days=quarantine_time)

        expired = Database.objects.filter(is_in_quarantine=True,
                                          quarantine_dt__lte=cutoff)

        for database in expired:
            if database.plan.provider == database.plan.CLOUDSTACK:
                # CloudStack databases own a dedicated infra; tear it down
                destroy_infra(databaseinfra=database.databaseinfra,
                              task=task_history)
            else:
                database.delete()

            LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
                database.name, quarantine_time))

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Databases destroyed successfully')
        return
    except Exception:
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error")
        return
    finally:
        AuditRequest.cleanup_request()
コード例 #54
0
def database_hosts(request, context, database):
    """Render the 'Hosts' tab of a database's detail page.

    Optionally enqueues an 'add read-only instances' request on POST, then
    groups the infra's instances by host and splits them into core and
    read-only host lists for the template.
    """
    if request.method == 'POST':
        if 'add_read_only' in request.POST:
            _add_read_only_instances(request, database)

    hosts = OrderedDict()
    # NOTE(review): instances[0] raises IndexError if the infra has no
    # instances — presumably that cannot happen for a rendered database.
    instances = database.infra.instances.all().order_by('shard', 'id')
    if instances[0].shard:
        # Sharded topology: reorder so each shard's current write instance
        # comes first, immediately followed by the slaves of the previous
        # shard that were buffered while waiting for their master.
        instances_tmp = []
        instances_slaves = []
        last_shard = None
        for instance in instances:
            if instance.is_current_write:
                instances_tmp.append(instance)
                last_shard = instance.shard
                if instances_slaves:
                    instances_tmp += instances_slaves
                    instances_slaves = []
            else:
                if last_shard == instance.shard:
                    instances_tmp.append(instance)
                else:
                    # slave seen before its shard's master; hold it back
                    instances_slaves.append(instance)
        if instances_slaves:
            # flush any slaves still buffered at the end
            instances_tmp += instances_slaves
            instances_slaves = []

        instances = instances_tmp

    # group instances by their host, preserving the order computed above
    for instance in instances:
        if instance.hostname not in hosts:
            hosts[instance.hostname] = []
        hosts[instance.hostname].append(instance)

    context['core_attribute'] = database.engine.write_node_description
    context['read_only_attribute'] = database.engine.read_node_description
    context['last_reinstall_vm'] = database.reinstall_vm.last()

    context['instances_core'] = []
    context['instances_read_only'] = []
    for host, instances in hosts.items():
        attributes = []
        # these reflect the LAST instance of the host after the loop
        is_read_only = False
        status = ''
        switch_database = False
        for instance in instances:
            is_read_only = instance.read_only
            status = instance.status_html()

            if not instance.is_database:
                # non-database instance (e.g. arbiter-like roles); label it
                # with its instance-type display name
                context['non_database_attribute'] = instance.get_instance_type_display()
                attributes.append(context['non_database_attribute'])
            elif instance.is_current_write:
                attributes.append(context['core_attribute'])
                # only HA plans can switch the write node from the UI
                if database.databaseinfra.plan.is_ha:
                    switch_database = True
            else:
                attributes.append(context['read_only_attribute'])

        full_description = host.hostname

        # indent sharded slave hosts in the template
        padding = False
        if not instance.is_current_write:
            if instance.shard:
                padding = True

        # only disambiguate with role attributes when there are several hosts
        if len(hosts) > 1:
            full_description += ' - ' + '/'.join(attributes)

        host_data = {
            'id': host.id, 'status': status, 'description': full_description,
            'switch_database': switch_database, 'padding': padding
        }

        if is_read_only:
            context['instances_read_only'].append(host_data)
        else:
            context['instances_core'].append(host_data)

    # remaining read-only slots the user may still request
    context['max_read_hosts'] = Configuration.get_by_name_as_int('max_read_hosts', 5)
    enable_host = context['max_read_hosts'] - len(context['instances_read_only'])
    context['enable_host'] = range(1, enable_host+1)

    return render_to_response(
        "logical/database/details/hosts_tab.html",
        context, RequestContext(request)
    )
コード例 #55
0
 def delete_view(self, request, object_id, extra_context=None):
     """Expose the quarantine retention period on the delete page.

     Databases not yet in quarantine get 'quarantine_days' added to the
     template context before delegating to the default delete view.
     """
     database = Database.objects.get(id=object_id)
     context = extra_context or {}
     if not database.is_in_quarantine:
         context['quarantine_days'] = Configuration.get_by_name_as_int(
             'quarantine_retention_days')
     return super(DatabaseAdmin, self).delete_view(
         request, object_id, extra_context=context)
コード例 #56
0
    def restore_allowed(self):
        """Tell whether restores are enabled via the 'restore_allowed' flag."""
        return Configuration.get_by_name_as_int('restore_allowed') == 1
コード例 #57
0
def user_m2m_changed(sender, **kwargs):
    """m2m_changed handler: queue LaaS team registration on member adds.

    Fires the async registration task only for the 'post_add' action and
    only when LaaS integration is enabled.
    """
    if kwargs.get('action') != 'post_add':
        return
    if Configuration.get_by_name_as_int('laas_integration') == 1:
        register_team_laas_task.delay(kwargs.get('instance'))
コード例 #58
0
class Database(BaseModel):
    """A logical database hosted on a DatabaseInfra.

    Tracks ownership (team/project), environment, lifecycle (quarantine,
    protection flag), monitoring status and sizing information.
    """

    # Liveness/monitoring states for the ``status`` field below.
    DEAD = 0
    ALIVE = 1
    INITIALIZING = 2
    ALERT = 3

    # Choices tuple pairing each state with its human-readable label.
    DB_STATUS = ((DEAD, 'Dead'), (ALIVE, 'Alive'),
                 (INITIALIZING, 'Initializing'), (ALERT, 'Alert'))

    name = models.CharField(verbose_name=_("Database name"),
                            max_length=100,
                            db_index=True)
    # Infra hosting this database; PROTECT prevents deleting an infra
    # that still has databases attached.
    databaseinfra = models.ForeignKey(DatabaseInfra,
                                      related_name="databases",
                                      on_delete=models.PROTECT)
    project = models.ForeignKey(Project,
                                related_name="databases",
                                on_delete=models.PROTECT,
                                null=True,
                                blank=True)
    team = models.ForeignKey(
        Team,
        related_name="databases",
        null=True,
        blank=True,
        help_text=_("Team that is accountable for the database"))
    # Quarantine: a soft-delete holding area (see quarantine_dt and
    # quarantine_user below).
    is_in_quarantine = models.BooleanField(
        verbose_name=_("Is database in quarantine?"), default=False)
    quarantine_dt = models.DateField(verbose_name=_("Quarantine date"),
                                     null=True,
                                     blank=True,
                                     editable=False)
    description = models.TextField(verbose_name=_("Description"),
                                   null=True,
                                   blank=True)
    # default=2 is INITIALIZING (see the state constants above).
    status = models.IntegerField(choices=DB_STATUS, default=2)
    used_size_in_bytes = models.FloatField(default=0.0)
    environment = models.ForeignKey(Environment,
                                    related_name="databases",
                                    on_delete=models.PROTECT,
                                    db_index=True)
    backup_path = models.CharField(verbose_name=_("Backup path"),
                                   max_length=300,
                                   null=True,
                                   blank=True,
                                   help_text=_("Full path to backup file"))
    subscribe_to_email_events = models.BooleanField(
        verbose_name=_("Subscribe to email events"),
        default=True,
        help_text=_("Check this box if you'd like to receive information "
                    "regarding this database by email."))
    disk_auto_resize = models.BooleanField(
        verbose_name=_("Disk auto resize"),
        default=True,
        help_text=_("When marked, the disk will be resized automatically."))
    is_protected = models.BooleanField(
        verbose_name=_("Protected"),
        default=False,
        help_text=_("When marked, the database can not be deleted."))
    # User who placed the database in quarantine; not editable via admin.
    quarantine_user = models.ForeignKey(User,
                                        related_name='databases_quarantine',
                                        null=True,
                                        blank=True,
                                        editable=False)

    def team_contact(self):
        """Emergency contacts of the owning team, or None when unassigned."""
        return self.team.emergency_contacts if self.team else None

    # Column header used by the Django admin changelist.
    team_contact.short_description = 'Emergency contacts'

    # Default manager plus an alternate one (DatabaseAliveManager is
    # defined elsewhere; presumably it restricts to live databases —
    # confirm against its definition).
    objects = models.Manager()
    alive = DatabaseAliveManager()
    # NOTE(review): evaluated once at class-definition (import) time, so
    # later changes to 'quarantine_retention_days' are not picked up
    # until the process restarts.
    quarantine_time = Configuration.get_by_name_as_int(
        'quarantine_retention_days')

    def __unicode__(self):
        """Display the database by its name."""
        return u"%s" % (self.name,)

    class Meta:
        # Extra authorization permissions beyond Django's default
        # add/change/delete set.
        permissions = (
            ("can_manage_quarantine_databases",
             "Can manage databases in quarantine"),
            ("view_database", "Can view databases"),
            ("upgrade_mongo24_to_30",
             "Can upgrade mongoDB version from 2.4 to 3.0"),
            ("upgrade_database", "Can upgrade databases"),
        )
        # A database name must be unique within a single environment.
        unique_together = (('name', 'environment'), )

        ordering = ('name', )

    @property
    def infra(self):
        """Shorthand alias for ``self.databaseinfra``."""
        return self.databaseinfra

    @property
    def engine_type(self):
        """Engine name of the underlying infra (e.g. its database flavor)."""
        return self.infra.engine_name

    @property
    def engine(self):
        """Engine object of the underlying infra."""
        return self.infra.engine

    @property
    def plan(self):
        """Plan of the underlying infra.

        Preserves the original short-circuit semantics: when the infra
        reference is falsy (e.g. None), that falsy value is returned
        instead of raising.
        """
        infra = self.databaseinfra
        if not infra:
            return infra
        return infra.plan

    def pin_task(self, task):
        """Try to acquire this database's lock for *task*.

        Returns True when the lock row was created, False when saving
        failed (e.g. a lock already exists and the insert violates the
        unique constraint).
        """
        try:
            with transaction.atomic():
                DatabaseLock(database=self, task=task).save()
        except Error:
            return False
        return True

    def update_task(self, task):
        """Re-pin this database's lock onto *task*.

        If no lock exists, simply pin the task. Otherwise, under a row
        lock, allow the update only when the held task has the same
        task_name and is in an error state (i.e. a retry of the same
        operation); any other case returns False.
        """
        lock = self.lock.first()
        if not lock:
            return self.pin_task(task)

        with transaction.atomic():
            lock = DatabaseLock.objects.select_for_update().filter(
                database=self).first()
            if not lock:
                # The lock row was deleted between the unlocked check
                # above and acquiring the row lock; fall back to
                # creating a fresh lock instead of crashing on None.
                return self.pin_task(task)

            if lock.task.task_name != task.task_name or not lock.task.is_status_error:
                return False

            lock.task = task
            lock.save()
            return True

    def unpin_task(self):
        """Release this database's lock by deleting its DatabaseLock rows."""
        DatabaseLock.objects.filter(database=self).delete()

    @property
    def current_locked_task(self):
        """Task currently holding this database's lock, or None."""
        held = self.lock.first()
        return held.task if held else None

    def delete(self, *args, **kwargs):
        if self.is_in_quarantine:
            LOG.warning(
                "Database {} is in quarantine and will be removed".format(
                    self.name))
            for credential in self.credentials.all():
                instance = factory_for(self.databaseinfra)
                instance.remove_user(credential)

            engine = self.databaseinfra.engine
            databaseinfra = self.databaseinfra

            try:
                DatabaseHistory.objects.create(
                    database_id=self.id,
                    name=self.name,
                    description=self.description,
                    engine='{} {}'.format(engine.engine_type.name,
                                          engine.version),
                    project=self.project.name if self.project else '',
                    team=self.team.name if self.team else '',
                    databaseinfra_name=databaseinfra.name,
                    plan=databaseinfra.plan.name,
                    disk_size_kb=databaseinfra.disk_offering.size_kb,
                    has_persistence=databaseinfra.plan.has_persistence,
                    environment=self.environment.name,
                    created_at=self.created_at)
            except Exception, err:
                LOG.error(
                    'Erro ao criar o database history para "o database {}: {}'.
                    format(self.id, err))

            super(Database, self).delete(*args, **kwargs)

        else:
コード例 #59
0
 def is_backup_available(self):
     """Return True when the 'backup_available' feature flag equals 1."""
     return Configuration.get_by_name_as_int('backup_available') == 1