def __mongo_client__(self, instance):
        connection_address = self.__get_admin_connection(instance)
        if not self.databaseinfra and instance:
            self.databaseinfra = instance.databaseinfra
        try:
            # mongo uses timeout in mili seconds
            connection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_connect_timeout', default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000
            
            server_selection_timeout_in_seconds = Configuration.get_by_name_as_int(
                'mongo_server_selection_timeout', default=MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT) * 1000

            socket_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_socket_timeout', default=MONGO_SOCKET_TIMEOUT) * 1000

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds,
                serverSelectionTimeoutMS=server_selection_timeout_in_seconds,
                socketTimeoutMS=socket_timeout_in_miliseconds)
            if (not instance) or (instance and instance.instance_type != instance.MONGODB_ARBITER):
                if self.databaseinfra.user and self.databaseinfra.password:
                    LOG.debug('Authenticating databaseinfra %s',
                              self.databaseinfra)
                    client.admin.authenticate(self.databaseinfra.user,
                                              self.databaseinfra.password)
            return client
        except TypeError:
            raise AuthenticationError(
                message='Invalid address: %s' % connection_address)
def databaseinfra_ending(context={}):
    LOG.info("Notifying DatabaseInfra ending with context %s" % context)
    subject = _("[DBAAS] DatabaseInfra is almost full")
    template = "infra_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    addr_to = Configuration.get_by_name_as_list("new_user_notify_email")

    context['domain'] = get_domain()

    send_mail_template(subject, template, addr_from, addr_to,
                       fail_silently=False, attachments=None, context=context)
def notify_new_user_creation(user=None):
    subject = _("[DBAAS] a new user has just been created: %s" % user.username)
    template = "new_user_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    addr_to = Configuration.get_by_name_as_list("new_user_notify_email")
    context = {}
    context['user'] = user
    domain = get_domain()
    context['url'] = domain + reverse('admin:account_team_changelist')
    LOG.debug("user: %s | addr_from: %s | addr_to: %s" % (user, addr_from, addr_to))
    if user and addr_from and addr_to:
        send_mail_template(subject, template, addr_from, addr_to, fail_silently=False, attachments=None, context=context)
    else:
        LOG.warning("could not send email for new user creation")
Example #4
class MakeDatabaseBackup(TestCase):

    def setUp(self):
        cache.clear()

        self.admin = SnapshotAdmin(Snapshot, admin.sites.AdminSite())
        self.param_backup_available = Configuration(
            name='backup_available', value=1
        )
        self.param_backup_available.save()

    def tearDown(self):
        if self.param_backup_available.id:
            self.param_backup_available.delete()

    def test_is_backup_available(self):
        self.assertTrue(self.admin.is_backup_available)

    def test_is_backup_disable(self):
        self.param_backup_available.value = 0
        self.param_backup_available.save()
        self.assertFalse(self.admin.is_backup_available)

    def test_is_backup_disable_not_configured(self):
        self.param_backup_available.delete()
        self.assertFalse(self.admin.is_backup_available)
def databaseinfra_ending(plan, environment, used, capacity, percent):
    LOG.info("Notifying DatabaseInfra ending")
    subject = _("[DBAAS] DatabaseInfra is almost full")
    template = "infra_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    addr_to = Configuration.get_by_name_as_list("new_user_notify_email")
    context = {}
    context['domain'] = get_domain()
    context['plan'] = plan
    context['environment'] = environment
    context['used'] = used
    context['capacity'] = capacity
    context['percent'] = percent
    send_mail_template(subject, template, addr_from, addr_to, fail_silently=False, attachments=None, context=context)
    
Example #6
def user_m2m_changed(sender, **kwargs):
    team = kwargs.get('instance')
    action = kwargs.get('action')
    if action == 'post_add':
        from util.laas import register_team_laas
        if Configuration.get_by_name_as_int('laas_integration') == 1:
            register_team_laas(team)
    def do(self, workflow_dict):
        try:

            if 'databaseinfra' not in workflow_dict \
                    or 'clone' not in workflow_dict:
                return False

            args = get_clone_args(workflow_dict['clone'], workflow_dict['database'])
            script_name = factory_for(workflow_dict['clone'].databaseinfra).clone()

            python_bin = Configuration.get_by_name('python_venv_bin')

            return_code, output = call_script(
                script_name, working_dir=settings.SCRIPTS_PATH,
                args=args, split_lines=False, python_bin=python_bin)

            LOG.info("Script Output: {}".format(output))
            LOG.info("Return code: {}".format(return_code))

            if return_code != 0:
                workflow_dict['exceptions']['traceback'].append(output)
                return False

            return True
        except Exception:
            traceback = full_stack()

            workflow_dict['exceptions']['error_codes'].append(DBAAS_0017)
            workflow_dict['exceptions']['traceback'].append(traceback)

            return False
Example #8
def remove_database_old_backups(self):

    task_history = TaskHistory.register(request=self.request, user=None)

    backup_retention_days = Configuration.get_by_name_as_int('backup_retention_days')

    LOG.info("Removing backups older than %s days" % (backup_retention_days))

    backup_time_dt = date.today() - timedelta(days=backup_retention_days)
    snapshots = Snapshot.objects.filter(
        start_at__lte=backup_time_dt, purge_at__isnull=True,
        instance__isnull=False, snapshopt_id__isnull=False)
    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    if len(snapshots) == 0:
        msgs.append("There is no snapshot to purge")
    for snapshot in snapshots:
        try:
            remove_snapshot_backup(snapshot=snapshot)
            msg = "Backup %s removed" % (snapshot)
            LOG.info(msg)
        except:
            msg = "Error removing backup %s" % (snapshot)
            status = TaskHistory.STATUS_ERROR
            LOG.error(msg)
        msgs.append(msg)

    task_history.update_status_for(status, details="\n".join(msgs))

    return
Example #9
def purge_task_history(self):
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, user=None, worker_name=worker_name)

        now = datetime.datetime.now()
        retention_days = Configuration.get_by_name_as_int('task_history_retention_days')

        n_days_before = now - datetime.timedelta(days=retention_days)

        tasks_to_purge = TaskHistory.objects.filter(
            task_name__in=['notification.tasks.database_notification',
                           'notification.tasks.database_notification_for_team',
                           'notification.tasks.update_database_status',
                           'notification.tasks.update_database_used_size',
                           'notification.tasks.update_instances_status',
                           'system.tasks.set_celery_healthcheck_last_update'],
            ended_at__lt=n_days_before,
            task_status__in=["SUCCESS", "ERROR"])

        tasks_to_purge.delete()

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
            details='Purge successfully done!')
    except Exception as e:
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=e)
Example #10
def get_clone_args(origin_database, dest_database):

    # origin
    origin_instance = origin_database.databaseinfra.instances.all()[0]

    db_orig = origin_database.name
    user_orig = origin_database.databaseinfra.user
    # pass_orig = "PASSWORD_ORIGIN=%s" % origin_database.databaseinfra.password
    pass_orig = origin_database.databaseinfra.password
    host_orig = origin_instance.address
    port_orig = origin_instance.port

    # destination
    dest_instance = dest_database.databaseinfra.instances.all()[0]

    db_dest = dest_database.name
    user_dest = dest_database.databaseinfra.user
    # pass_dest = "PASSWORD_DEST=%s" % dest_database.databaseinfra.password
    pass_dest = dest_database.databaseinfra.password
    host_dest = dest_instance.address
    port_dest = dest_instance.port

    path_of_dump = Configuration.get_by_name('database_clone_dir')

    args = [db_orig, user_orig, pass_orig, host_orig, str(int(port_orig)),
            db_dest, user_dest, pass_dest, host_dest, str(int(port_dest)),
            path_of_dump]

    return args
Example #11
def databaseinfra_notification(self, user=None):
	task_history = TaskHistory.register(request=self.request, user=user)
	threshold_infra_notification = Configuration.get_by_name_as_int("threshold_infra_notification", default=0)
	if threshold_infra_notification <= 0:
		LOG.warning("database infra notification is disabled")
		return

	# Sum capacity per databaseinfra with parameter plan, environment and engine
	infras = DatabaseInfra.objects.values('plan__name', 'environment__name', 'engine__engine_type__name',
	                                      'plan__provider').annotate(capacity=Sum('capacity'))
	for infra in infras:
		# total database created in databaseinfra per plan, environment and engine
		used = DatabaseInfra.objects.filter(plan__name=infra['plan__name'],
		                                    environment__name=infra['environment__name'],
		                                    engine__engine_type__name=infra['engine__engine_type__name']).aggregate(
			used=Count('databases'))
		# calculate the percentage
		percent = int(used['used'] * 100 / infra['capacity'])
		if percent >= threshold_infra_notification and infra['plan__provider'] != Plan.CLOUDSTACK:
			LOG.info('Plan %s in environment %s with %s%% occupied' % (
				infra['plan__name'], infra['environment__name'], percent))
			LOG.info("Sending database infra notification...")
			context = {}
			context['plan'] = infra['plan__name']
			context['environment'] = infra['environment__name']
			context['used'] = used['used']
			context['capacity'] = infra['capacity']
			context['percent'] = percent
			email_notifications.databaseinfra_ending(context=context)

		task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
		                               details='Databaseinfra Notification successfully sent to dbaas admins!')
	return
Example #12
    def get_sentinel_client(self, instance=None):
        connection_timeout_in_seconds = Configuration.get_by_name_as_int(
            'redis_connect_timeout', default=REDIS_CONNECTION_DEFAULT_TIMEOUT)
        sentinels = self.__get_admin_sentinel_connection(instance)
        sentinel = Sentinel(
            sentinels, socket_timeout=connection_timeout_in_seconds)
        return sentinel
Example #13
    def __redis_client__(self, instance):

        try:
            LOG.debug(
                'Connecting to redis databaseinfra %s', self.databaseinfra)
            # redis uses timeout in seconds
            connection_timeout_in_seconds = Configuration.get_by_name_as_int(
                'redis_connect_timeout', default=REDIS_CONNECTION_DEFAULT_TIMEOUT)

            if (instance and instance.instance_type == Instance.REDIS) or (not self.databaseinfra.plan.is_ha and not instance):
                connection_address, connection_port = self.__get_admin_single_connection(
                    instance)
                client = redis.Redis(host=connection_address,
                                     port=int(connection_port),
                                     password=self.databaseinfra.password,
                                     socket_connect_timeout=connection_timeout_in_seconds)

            else:
                sentinel = self.get_sentinel_client(instance)
                client = sentinel.master_for(self.databaseinfra.name,
                                             socket_timeout=connection_timeout_in_seconds,
                                             password=self.databaseinfra.password)

            LOG.debug(
                'Successfully connected to redis databaseinfra %s' % (self.databaseinfra))
            return client
        except Exception as e:
            raise e
    def revoke_detail(request, id):
        import celery
        from system.models import Configuration
        celery_inspect = celery.current_app.control.inspect()

        celery_workers = Configuration.get_by_name_as_list('celery_workers',)

        try:
            workers = celery_inspect.ping().keys()
        except Exception as e:
            LOG.warn("All celery workers are down! {} :(".format(e))
            messages.add_message(request, messages.ERROR,
                                 "Migration can't be revoked because all celery workers are down!",)
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))

        if workers and workers != celery_workers:
            LOG.warn("At least one celery worker is down! :(")
            messages.add_message(request, messages.ERROR,
                                 "Migration can't be revoked because at least one celery worker is down!",)
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))

        detail = models.DatabaseRegionMigrationDetail.objects.get(id=id)
        if detail.status == detail.WAITING:
            if detail.revoke_maintenance(request):
                messages.add_message(request, messages.SUCCESS,
                                     "Migration revoked!",)
            else:
                messages.add_message(request, messages.ERROR,
                                     "Migration has already started!",)
        else:
            messages.add_message(request, messages.ERROR,
                                 "Migration can't be revoked!",)

        return HttpResponseRedirect(reverse('admin:region_migration_databaseregionmigrationdetail_changelist'))
    def delete_view(self, request, object_id, extra_context=None):
        database = Database.objects.get(id=object_id)
        extra_context = extra_context or {}

        if database.status != Database.ALIVE or not database.database_status.is_alive:
            self.message_user(
                request, "Database {} is not alive and cannot be deleted".format(database.name), level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if database.is_beeing_used_elsewhere():
            self.message_user(
                request, "Database {} cannot be deleted because it is in use by another task.".format(database.name), level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if database.has_migration_started():
            self.message_user(
                request, "Database {} cannot be deleted because it is beeing migrated.".format(database.name), level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if not database.is_in_quarantine:
            extra_context['quarantine_days'] = Configuration.get_by_name_as_int(
                'quarantine_retention_days')
        return super(DatabaseAdmin, self).delete_view(request, object_id, extra_context=extra_context)
def database_name_evironment_constraint(database_name, environment_name):
    from logical.models import Database
    from system.models import Configuration

    databases = Database.objects.filter(name=database_name)
    if not databases:
        return False

    dev_envs = Configuration.get_by_name_as_list('dev_envs')
    if environment_name in dev_envs:
        return False

    prod_envs = Configuration.get_by_name_as_list('prod_envs')
    return any((
        database.environment.name in prod_envs
        for database in databases))
    def metricdetail_view(self, request, database_id):
        from util.metrics.metrics import get_metric_datapoints_for

        hostname = request.GET.get('hostname')
        metricname = request.GET.get('metricname')

        database = Database.objects.get(id=database_id)
        engine = database.infra.engine_name
        db_name = database.name
        URL = get_credentials_for(
            environment=database.environment, credential_type=CredentialType.GRAPHITE).endpoint

        from_option = request.POST.get('change_from') or '2hours'
        granurality = self.get_granurality(from_option) or '20minutes'

        from_options = self.build_select_options(
            from_option, self.get_from_options())

        graph_data = get_metric_datapoints_for(engine, db_name, hostname,
                                               url=URL, metric_name=metricname,
                                               granurality=granurality,
                                               from_option=from_option)

        title = "{} {} Metric".format(
            database.name, graph_data[0]["graph_name"])

        show_filters = Configuration.get_by_name_as_int('metric_filters')
        if graph_data[0]['normalize_series'] == True:
            show_filters = False

        return render_to_response("logical/database/metrics/metricdetail.html", locals(), context_instance=RequestContext(request))
def database_usage(context={}):
    LOG.info("Notifying Database usage with context %s" % context)
    subject = _("[DBAAS] Database is almost full")
    template = "database_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    team = context.get("team")
    if team and team.email:
        addr_to = [
            team.email, Configuration.get_by_name("new_user_notify_email")]
    else:
        addr_to = Configuration.get_by_name("new_user_notify_email")

    context['domain'] = get_domain()

    send_mail_template(subject, template, addr_from, addr_to,
                       fail_silently=False, attachments=None, context=context)
Example #19
    def purge_quarantine(self):
        quarantine_time = Configuration.get_by_name_as_int('quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
        databases = Database.objects.filter(is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt)
        for database in databases:
            database.delete()
            LOG.info("The database %s was deleted, because it was set to quarantine %d days ago" % (
                database.name, quarantine_time))
def database_analyzing(context={}):
    LOG.info("Notifying Database alayzing with context %s" % context)
    subject = _("[DBAAS] Database overestimated")
    template = "analyzing_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    send_email = Configuration.get_by_name("send_analysis_email")
    team = context.get("team")
    if team and team.email and send_email:
        addr_to = [
            team.email, Configuration.get_by_name("new_user_notify_email")]
    else:
        addr_to = Configuration.get_by_name("new_user_notify_email")

    context['domain'] = get_domain()

    send_mail_template(subject, template, addr_from, addr_to,
                       fail_silently=False, attachments=None, context=context)
Example #21
    def setUp(self):
        cache.clear()

        self.admin = SnapshotAdmin(Snapshot, admin.sites.AdminSite())
        self.param_backup_available = Configuration(
            name='backup_available', value=1
        )
        self.param_backup_available.save()
    def setUp(self):
        self.team = TeamFactory()
        self.threshold_database_notification = Configuration(
            name='threshold_database_notification', value=70,
            description='Threshold infra notification'
        )
        self.threshold_database_notification.save()
        self.new_user_notify_email = Configuration(
            name='new_user_notify_email', value='*****@*****.**',
            description='New user notify e-mail'
        )
        self.new_user_notify_email.save()

        self.database_big = DatabaseFactory(
            databaseinfra__engine__engine_type__name=self.engine_name,
        )
        self.database_big.team = self.team
        self.database_big.save()

        self.infra_big = self.database_big.databaseinfra
        self.infra_big.plan.replication_topology.class_path = self.replication_topology_class_path
        self.infra_big.plan.replication_topology.save()
        self.infra_big.save()

        self.database_small = DatabaseFactory(
            databaseinfra__engine__engine_type__name=self.engine_name
        )
        self.database_small.team = self.team
        self.database_small.save()

        self.infra_small = self.database_small.databaseinfra
        self.infra_small.plan.replication_topology.class_path = self.replication_topology_class_path
        self.infra_small.plan.replication_topology.save()
        self.infra_small.save()

        self.instance_helper.create_instances_by_quant(
            qt=self.instance_quantity, infra=self.infra_big,
            total_size_in_bytes=10000, used_size_in_bytes=9900,
            port=self.port, instance_type=self.instance_type
        )
        self.instance_helper.create_instances_by_quant(
            qt=self.instance_quantity, infra=self.infra_small,
            total_size_in_bytes=10000, used_size_in_bytes=1900,
            port=self.port, instance_type=self.instance_type
        )
def has_difference_between(metadata, collected):
    threshold = Configuration.get_by_name_as_float(
        "threshold_disk_size_difference", default=1.0
    )

    difference = (metadata * threshold)/100
    max_value = metadata + difference
    min_value = metadata - difference

    return collected > max_value or collected < min_value
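
For intuition, a worked run of the tolerance check above with made-up numbers, assuming the 'threshold_disk_size_difference' configuration resolves to 1.0 (one percent):

threshold = 1.0                              # assumed 'threshold_disk_size_difference' value (percent)
metadata = 100000                            # disk size recorded in metadata, e.g. KB
difference = (metadata * threshold) / 100    # 1000.0 of allowed deviation
max_value = metadata + difference            # 101000.0
min_value = metadata - difference            # 99000.0

print(100500 > max_value or 100500 < min_value)   # False: within tolerance, no difference
print(102000 > max_value or 102000 < min_value)   # True: outside tolerance, difference reported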
    def get_dex_url(self):
        if Configuration.get_by_name_as_int('dex_analyze') != 1:
            return ""

        if self.databaseinfra.plan.provider == Plan.PREPROVISIONED:
            return ""

        if self.engine_type != 'mongodb':
            return ""

        return 1
    def changelist_view(self, request, extra_context=None):
        extra_context = extra_context or {}

        backup_avaliable = Configuration.get_by_name_as_int(
            'backup_avaliable')

        extra_context['backup_avaliable'] = False
        if backup_avaliable:
            extra_context['backup_avaliable'] = True

        return super(SnapshotAdmin, self).changelist_view(request, extra_context=extra_context)
Example #26
    def get_dex_url(self):
        if Configuration.get_by_name_as_int('dex_analyze') != 1:
            return ""

        if self.databaseinfra.plan.is_pre_provisioned:
            return ""

        if self.engine_type != 'mongodb':
            return ""

        return 1
def get_configuration(context, configuration_name, context_var_name):
    """
    Usage: {% get_configuration config_name context_var %}

    Search config name on system configuration and set context_var on
    page context
    """
    config_val = Configuration.get_by_name(configuration_name) or ''

    context[context_var_name] = config_val

    return ''
Example #28
    def changelist_view(self, request, extra_context=None):
        extra_context = extra_context or {}

        backup_avaliable = Configuration.get_by_name_as_int('backup_avaliable')

        extra_context['backup_avaliable'] = False
        if backup_avaliable:
            extra_context['backup_avaliable'] = True

        return super(SnapshotAdmin,
                     self).changelist_view(request,
                                           extra_context=extra_context)
Example #29
def external_links(request):
    iaas_status = Configuration.get_by_name('iaas_status')
    iaas_quota = Configuration.get_by_name('iaas_quota')

    try:
        credential = get_credentials_for(
            environment=Environment.objects.first(),
            credential_type=CredentialType.GRAFANA
        )

        sofia_dashboard = "{}/{}?var-datasource={}".format(
            credential.endpoint,
            credential.get_parameter_by_name('sofia_dbaas_dashboard'),
            credential.get_parameter_by_name('datasource')
        )
    except IndexError:
        sofia_dashboard = ""

    return {'iaas_status': iaas_status,
            'iaas_quota': iaas_quota,
            'sofia_main_dashboard': sofia_dashboard}
def notify_new_user_creation(user=None):
    subject = _("[DBAAS] a new user has just been created: %s" % user.username)
    template = "new_user_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    addr_to = Configuration.get_by_name_as_list("new_user_notify_email")
    context = {}
    context['user'] = user
    domain = get_domain()
    context['url'] = domain + reverse('admin:account_team_changelist')
    LOG.debug("user: %s | addr_from: %s | addr_to: %s" %
              (user, addr_from, addr_to))
    if user and addr_from and addr_to:
        send_mail_template(subject,
                           template,
                           addr_from,
                           addr_to,
                           fail_silently=False,
                           attachments=None,
                           context=context)
    else:
        LOG.warning("could not send email for new user creation")
    def revoke_detail(request, id):
        import celery
        from system.models import Configuration
        celery_inspect = celery.current_app.control.inspect()

        celery_workers = Configuration.get_by_name_as_list('celery_workers', )

        try:
            workers = celery_inspect.ping().keys()
        except Exception as e:
            LOG.warn("All celery workers are down! {} :(".format(e))
            messages.add_message(
                request,
                messages.ERROR,
                "Migration can't be revoked because all celery workers are down!",
            )
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))

        if workers and workers != celery_workers:
            LOG.warn("At least one celery worker is down! :(")
            messages.add_message(
                request,
                messages.ERROR,
                "Migration can't be revoked because at least one celery worker is down!",
            )
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))

        detail = models.DatabaseRegionMigrationDetail.objects.get(id=id)
        if detail.status == detail.WAITING:
            if detail.revoke_maintenance(request):
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    "Migration revoked!",
                )
            else:
                messages.add_message(
                    request,
                    messages.ERROR,
                    "Migration has already started!",
                )
        else:
            messages.add_message(
                request,
                messages.ERROR,
                "Migration can't be revoked!",
            )

        return HttpResponseRedirect(
            reverse(
                'admin:region_migration_databaseregionmigrationdetail_changelist'
            ))
Example #32
def user_post_save_wrapper(kwargs={}):
    user = kwargs.get('instance')
    created = kwargs.get('created')
    if created:
        LOG.debug("new user %s created" % user)
        user.is_active = True
        user.is_staff = True
        user.save()
        # notify new user create
        must_send_mail = Configuration.get_by_name_as_int(
            'new_user_send_mail', 1)
        if must_send_mail:
            notify_new_user_creation(user)
Example #33
    def setUp(self):
        cache.clear()
        mail.outbox = []

        self.email_from = Configuration(name='email_addr_from',
                                        value='*****@*****.**')
        self.email_from.save()

        self.email_adm = Configuration(name='new_user_notify_email',
                                       value='*****@*****.**')
        self.email_adm.save()
        self.team = mommy.make('Team',
                               name='team_1',
                               email='*****@*****.**',
                               contacts='contact_1',
                               role__name='fake_role',
                               organization__name='fake_organization')
        self.database = DatabaseHelper.create(name='fake_db_name',
                                              team=self.team)
        self.task_schedule = mommy.make('TaskSchedule',
                                        method_path=self.action,
                                        database=self.database)
Example #34
def _check_snapshot_limit(instances, task):
    for instance in instances:
        task.add_detail('\nChecking older backups for {}...'.format(instance))

        backup_limit = Configuration.get_by_name_as_int(
            'backup_retention_days')
        snapshots_count = Snapshot.objects.filter(
            purge_at__isnull=True,
            instance=instance,
            snapshopt_id__isnull=False).order_by('start_at').count()
        task.add_detail('Current snapshot limit {}, used {}'.format(
            backup_limit, snapshots_count),
                        level=1)
    def __mongo_client__(self, instance):
        connection_address = self.__get_admin_connection(instance)
        try:
            # mongo uses timeout in mili seconds
            connection_timeout_in_miliseconds = Configuration.get_by_name_as_int('mongo_connect_timeout', default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000

            client = pymongo.MongoClient(connection_address, connectTimeoutMS=connection_timeout_in_miliseconds)
            if self.databaseinfra.user and self.databaseinfra.password:
                LOG.debug('Authenticating databaseinfra %s', self.databaseinfra)
                client.admin.authenticate(self.databaseinfra.user, self.databaseinfra.password)
            return client
        except TypeError:
            raise AuthenticationError(message='Invalid address: %s' % connection_address)
Example #36
    def purge_quarantine(self):
        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt
        )
        for database in databases:
            database.delete()
            LOG.info(
                ("The database %s was deleted, because it was set to "
                 "quarantine %d days ago") % (database.name, quarantine_time)
            )
    def delete_view(self, request, object_id, extra_context=None):
        database = Database.objects.get(id=object_id)
        can_be_deleted, error = database.can_be_deleted()
        if not can_be_deleted:
            self.message_user(request, error, level=messages.ERROR)
            url = '/admin/logical/database/{}/'.format(object_id)
            return HttpResponseRedirect(url)

        extra_context = extra_context or {}
        if not database.is_in_quarantine:
            extra_context['quarantine_days'] = Configuration.get_by_name_as_int('quarantine_retention_days')

        return super(DatabaseAdmin, self).delete_view(request, object_id, extra_context=extra_context)
Example #38
def databaseinfra_notification():
    # Sum capacity per databaseinfra with parameter plan, environment and engine
    infras = DatabaseInfra.objects.values(
        'plan__name', 'environment__name', 'engine__engine_type__name'
    ).annotate(capacity=Sum('capacity'))
    for infra in infras:
        # total databases created in databaseinfra per plan, environment and engine
        used = DatabaseInfra.objects.filter(
            plan__name=infra['plan__name'],
            environment__name=infra['environment__name'],
            engine__engine_type__name=infra['engine__engine_type__name']
        ).aggregate(used=Count('databases'))
        # calculate the percentage
        percent = int(used['used'] * 100 / infra['capacity'])
        if percent >= Configuration.get_by_name_as_int("threshold_infra_notification", default=50):
            LOG.info('Plan %s in environment %s with %s%% occupied' % (
                infra['plan__name'], infra['environment__name'], percent))
            LOG.info("Sending notification...")
            email_notifications.databaseinfra_ending(
                infra['plan__name'], infra['environment__name'],
                used['used'], infra['capacity'], percent)
    return
Example #39
    def revoke_maintenance(request, id):
        import celery
        from system.models import Configuration
        celery_inspect = celery.current_app.control.inspect()

        celery_workers = Configuration.get_by_name_as_list('celery_workers',)

        try:
            workers = celery_inspect.ping().keys()
        except Exception as e:
            LOG.warn("All celery workers are down! {} :(".format(e))
            messages.add_message(request, messages.ERROR,
                                 "Maintenance can't be revoked because all celery workers are down!",)
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
Example #40
    def last_offering_available_for_auto_resize(cls):
        parameter_in_kb = cls.converter_gb_to_kb(
            Configuration.get_by_name_as_int(
                name='auto_resize_max_size_in_gb', default=100
            )
        )

        disks = DiskOffering.objects.filter(
            size_kb__lte=parameter_in_kb
        ).order_by('-size_kb')

        if not disks:
            raise NoDiskOfferingLesserError(parameter_in_kb)
        return disks[0]
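
A rough sketch of the arithmetic above; the conversion factor is an assumption (converter_gb_to_kb presumably multiplies by 1024 * 1024) and the offering sizes are made up:

auto_resize_max_size_in_gb = 100                              # configuration default
parameter_in_kb = auto_resize_max_size_in_gb * 1024 * 1024    # 104857600 KB (assumed factor)
offering_sizes_kb = [52428800, 104857600, 209715200]          # hypothetical disk offerings
candidates = sorted((s for s in offering_sizes_kb if s <= parameter_in_kb), reverse=True)
# The largest offering at or below the cap is chosen; an empty list would mean
# NoDiskOfferingLesserError in the method above.
print(candidates[0] if candidates else 'NoDiskOfferingLesserError')   # 104857600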
Example #41
def database_usage(context={}):
    LOG.info("Notifying Database usage with context %s" % context)
    subject = _("[DBAAS] Database is almost full")
    template = "database_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    team = context.get("team")
    if team and team.email:
        addr_to = [
            team.email,
            Configuration.get_by_name("new_user_notify_email")
        ]
    else:
        addr_to = Configuration.get_by_name("new_user_notify_email")

    context['domain'] = get_domain()

    send_mail_template(subject,
                       template,
                       addr_from,
                       addr_to,
                       fail_silently=False,
                       attachments=None,
                       context=context)
Example #42
def databaseinfra_notification(self, user=None):
    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, user=user, worker_name=worker_name)
    threshold_infra_notification = Configuration.get_by_name_as_int(
        "threshold_infra_notification", default=0)
    if threshold_infra_notification <= 0:
        LOG.warning("database infra notification is disabled")
        return

    # Sum capacity per databaseinfra with parameter plan, environment and engine
    infras = DatabaseInfra.objects.values(
        'plan__name', 'environment__name', 'engine__engine_type__name',
        'plan__provider'
    ).annotate(capacity=Sum('capacity'))
    for infra in infras:
        try:
            database = infra.databases.get()
        except Database.MultipleObjectsReturned:
            pass
        else:
            if database.is_in_quarantine:
                continue
            if not database.subscribe_to_email_events:
                continue

        used = DatabaseInfra.objects.filter(
            plan__name=infra['plan__name'], environment__name=infra['environment__name'],
            engine__engine_type__name=infra['engine__engine_type__name']
        ).aggregate(used=Count('databases'))
        # calculate the percentage

        percent = int(used['used'] * 100 / infra['capacity'])
        if percent >= threshold_infra_notification and infra['plan__provider'] != Plan.CLOUDSTACK:
            LOG.info('Plan %s in environment %s with %s%% occupied' % (
                infra['plan__name'], infra['environment__name'], percent))
            LOG.info("Sending database infra notification...")
            context = {}
            context['plan'] = infra['plan__name']
            context['environment'] = infra['environment__name']
            context['used'] = used['used']
            context['capacity'] = infra['capacity']
            context['percent'] = percent
            email_notifications.databaseinfra_ending(context=context)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Databaseinfra Notification successfully sent to dbaas admins!'
        )
    return
Example #43
def get_config(conf_name=None):
    if conf_name is None:
        raise Exception("Invalid config name")

    c = Configuration.get_by_name_all_fields(conf_name)
    if not c:
        return None

    return {
        "name": c.name,
        "value": c.value,
        "description": c.description,
        "hash": c.hash
    }
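
A minimal call-site sketch for the helper above; the configuration key is purely illustrative, not taken from the source:

conf = get_config('backup_retention_days')   # hypothetical key, for illustration only
if conf is None:
    print('configuration not found')
else:
    print(conf['name'], conf['value'], conf['description'])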
Example #44
    def get_log_url(self):
        if Configuration.get_by_name_as_int('laas_integration') != 1:
            return ""

        if self.databaseinfra.plan.is_pre_provisioned:
            return ""

        from util import get_credentials_for
        from util.laas import get_group_name
        from dbaas_credentials.models import CredentialType

        credential = get_credentials_for(environment=self.environment,
                                         credential_type=CredentialType.LOGNIT)
        return credential.endpoint + get_group_name(self)
Example #45
    def do(self, workflow_dict):
        try:
            if Configuration.get_by_name_as_int('laas_integration') == 1:
                register_database_laas(workflow_dict['database'])

            return True

        except Exception:
            traceback = full_stack()

            workflow_dict['exceptions']['error_codes'].append(DBAAS_0018)
            workflow_dict['exceptions']['traceback'].append(traceback)

            return False
    def __mongo_client__(self, instance, default_timeout=False):
        connection_address = self.__get_admin_connection(instance)
        if not self.databaseinfra and instance:
            self.databaseinfra = instance.databaseinfra
        try:
            # mongo uses timeout in mili seconds
            if default_timeout:
                connection_timeout_in_miliseconds = MONGO_CONNECTION_DEFAULT_TIMEOUT * 1000
                server_selection_timeout_in_seconds = MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT * 1000
                socket_timeout_in_miliseconds = MONGO_SOCKET_TIMEOUT * 1000
            else:
                connection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                    'mongo_connect_timeout',
                    default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000
                server_selection_timeout_in_seconds = Configuration.get_by_name_as_int(
                    'mongo_server_selection_timeout',
                    default=MONGO_SERVER_SELECTION_DEFAULT_TIMEOUT) * 1000
                socket_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                    'mongo_socket_timeout',
                    default=MONGO_SOCKET_TIMEOUT) * 1000

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds,
                serverSelectionTimeoutMS=server_selection_timeout_in_seconds,
                socketTimeoutMS=socket_timeout_in_miliseconds)
            if (not instance) or (instance and instance.instance_type !=
                                  instance.MONGODB_ARBITER):
                if self.databaseinfra.user and self.databaseinfra.password:
                    LOG.debug('Authenticating databaseinfra %s',
                              self.databaseinfra)
                    client.admin.authenticate(self.databaseinfra.user,
                                              self.databaseinfra.password)
            return client
        except TypeError:
            raise AuthenticationError(message='Invalid address: %s' %
                                      connection_address)
Example #47
class PhysicalErrorsTestCase(TestCase):

    def setUp(self):
        self.auto_resize_max_size_in_gb = Configuration(
            name='auto_resize_max_size_in_gb', value=100
        )
        self.auto_resize_max_size_in_gb.save()

    def tearDown(self):
        self.auto_resize_max_size_in_gb.delete()

    def test_no_disk_offering(self):
        size = 123
        typo = 'testing'
        message = 'No disk offering {} than {}kb'.format(typo, size)
        no_disk_offering = NoDiskOfferingError(typo=typo, size=size)
        self.assertEqual(no_disk_offering.message, message)

    def test_no_disk_offering_lesser(self):
        size = 456
        message = 'No disk offering lesser than {}kb'.format(size)
        no_disk_offering = NoDiskOfferingLesserError(size=size)
        self.assertEqual(no_disk_offering.message, message)

    def test_no_disk_offering_greater(self):
        size = 789
        message = 'No disk offering greater than {}kb'.format(size)
        no_disk_offering = NoDiskOfferingGreaterError(size=size)
        self.assertEqual(no_disk_offering.message, message)

    def test_disk_auto_resize_max_value(self):
        message = 'Disk auto resize can not be greater than {}GB'.format(
            self.auto_resize_max_size_in_gb.value
        )
        no_disk_offering = DiskOfferingMaxAutoResize()
        self.assertEqual(no_disk_offering.message, message)
Example #48
def database_name_evironment_constraint(database_name, environment_name):
    from logical.models import Database
    from system.models import Configuration

    databases = Database.objects.filter(name=database_name)
    if not databases:
        return False

    dev_envs = Configuration.get_by_name_as_list('dev_envs')
    if environment_name in dev_envs:
        return False

    prod_envs = Environment.prod_envs()
    return any(
        (database.environment.name in prod_envs for database in databases))
Example #49
def get_snapshots_by_env(env):
    credential = get_credentials_for(env, CredentialType.VOLUME_PROVIDER)
    retention_days = credential.get_parameter_by_name('retention_days')
    if retention_days:
        retention_days = int(retention_days)
    else:
        retention_days = Configuration.get_by_name_as_int(
            'backup_retention_days')

    backup_time_dt = date.today() - timedelta(days=retention_days)
    return Snapshot.objects.filter(start_at__lte=backup_time_dt,
                                   purge_at__isnull=True,
                                   instance__isnull=False,
                                   snapshopt_id__isnull=False,
                                   instance__databaseinfra__environment=env)
def database_analyzing(context={}):
    LOG.info("Notifying Database alayzing with context %s" % context)
    subject = _("[DBAAS] Database overestimated")
    template = "analyzing_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    send_email = Configuration.get_by_name("send_analysis_email")
    team = context.get("team")
    if team and team.email and send_email:
        addr_to = [
            team.email,
            Configuration.get_by_name("new_user_notify_email")
        ]
    else:
        addr_to = Configuration.get_by_name("new_user_notify_email")

    context['domain'] = get_domain()

    send_mail_template(subject,
                       template,
                       addr_from,
                       addr_to,
                       fail_silently=False,
                       attachments=None,
                       context=context)
Example #51
def database_notification_for_team(team=None):
    """
    Notifies teams of database usage.
    if threshold_database_notification <= 0, the notification is disabled.
    """
    LOG.info("sending database notification for team %s" % team)
    threshold_database_notification = Configuration.get_by_name_as_int(
        "threshold_database_notification", default=0)
    # if threshold_database_notification
    if threshold_database_notification <= 0:
        LOG.warning("database notification is disabled")
        return

    databases = Database.objects.filter(team=team,
                                        is_in_quarantine=False,
                                        subscribe_to_email_events=True)
    msgs = []
    for database in databases:
        used = database.used_size_in_mb
        capacity = database.total_size_in_mb
        try:
            percent_usage = (used / capacity) * 100
        except ZeroDivisionError:
            # database has no total size
            percent_usage = 0.0
        msg = "database %s => usage: %.2f | threshold: %.2f" % (
            database, percent_usage, threshold_database_notification)
        LOG.info(msg)
        msgs.append(msg)

        if not team.email:
            msgs.append(
                "team %s has no email set and therefore no database usage notification will be sent"
                % team)
        else:
            if percent_usage >= threshold_database_notification:
                LOG.info("Sending database notification...")
                context = {}
                context['database'] = database.name
                context['team'] = team
                context['measure_unity'] = "MB"
                context['used'] = used
                context['capacity'] = capacity
                context['percent'] = "%.2f" % percent_usage
                context['environment'] = database.environment.name
                email_notifications.database_usage(context=context)

    return msgs
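
A worked example of the usage check above, with made-up numbers and threshold_database_notification assumed to be 70:

threshold_database_notification = 70
used, capacity = 850.0, 1000.0               # MB
percent_usage = (used / capacity) * 100      # 85.0
print(percent_usage >= threshold_database_notification)   # True: a usage e-mail would be sent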
Example #52
    def get_log_url(self):

        if Configuration.get_by_name_as_int('laas_integration') != 1:
            return ""

        if self.databaseinfra.plan.provider == Plan.PREPROVISIONED:
            return ""

        from util import get_credentials_for
        from util.laas import get_group_name
        from dbaas_credentials.models import CredentialType

        credential = get_credentials_for(environment=self.environment,
                                         credential_type=CredentialType.LOGNIT)
        url = "%s%s" % (credential.endpoint, get_group_name(self))
        return "%s" % (url)
Example #53
    def __mysql_client__(self, instance, database='mysql'):
        connection_address, connection_port = self.__get_admin_connection(
            instance)
        try:
            LOG.debug(
                'Connecting to mysql databaseinfra %s', self.databaseinfra)
            # mysql uses timeout in seconds
            connection_timeout_in_seconds = Configuration.get_by_name_as_int(
                'mysql_connect_timeout', default=MYSQL_CONNECTION_DEFAULT_TIMEOUT)

            client = mysqldb.connect(host=connection_address, port=int(connection_port),
                                     user=self.databaseinfra.user, passwd=self.databaseinfra.password,
                                     db=database, connect_timeout=connection_timeout_in_seconds)
            LOG.debug(
                'Successfully connected to mysql databaseinfra %s' % (self.databaseinfra))
            return client
        except Exception as e:
            raise e
    def execute(self):
        self.load_number_of_instances()

        if not self.number_of_instances:
            raise exceptions.RequiredNumberOfInstances(
                'Number of instances is required'
            )

        status, message = self.check_database_status()
        if not status:
            raise exceptions.DatabaseNotAvailable(message)

        if not self.is_ha():
            raise exceptions.DatabaseIsNotHA(
                'Database topology does not have horizontal scalability'
            )

        max_read_hosts = Configuration.get_by_name_as_int('max_read_hosts', 5)
        qtd_new_hosts = self.number_of_instances
        current_read_nodes = len(self.database.infra.instances.filter(read_only=True))
        total_read_hosts = qtd_new_hosts + current_read_nodes
        if total_read_hosts > max_read_hosts:
            raise exceptions.ReadOnlyHostsLimit(
                ('Current limit of read only hosts is {} and you are trying '
                 'to setup {}').format(
                    max_read_hosts, total_read_hosts
                )
            )

        self.task_params = dict(
            database=self.database,
            user=self.request.user,
            number_of_instances=qtd_new_hosts,
            number_of_instances_before_task=self.number_of_instances_before
        )

        if self.retry:
            since_step = self.manager.current_step
            self.task_params['since_step'] = since_step

        TaskRegister.database_add_instances(**self.task_params)
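
A worked example of the read-only host limit check above; the default of 5 comes from the get_by_name_as_int call, the other numbers are made up:

max_read_hosts = 5          # 'max_read_hosts' configuration, default 5
current_read_nodes = 3      # read_only instances already attached to the infra
qtd_new_hosts = 3           # instances requested in this task
total_read_hosts = qtd_new_hosts + current_read_nodes   # 6
print(total_read_hosts > max_read_hosts)   # True: ReadOnlyHostsLimit would be raised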
def purge_quarantine(self, ):
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    try:

        task_history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" %
                 (self.request.id, self.request.task, self.request.kwargs,
                  str(self.request.args)))
        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)

        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt)

        for database in databases:
            if database.plan.provider == database.plan.CLOUDSTACK:
                databaseinfra = database.databaseinfra

                destroy_infra(databaseinfra=databaseinfra, task=task_history)
            else:
                database.delete()

            LOG.info(
                "The database %s was deleted, because it was set to quarentine %d days ago"
                % (database.name, quarantine_time))

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Databases destroyed successfully')
        return

    except Exception:
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details="Error")
        return
    finally:
        AuditRequest.cleanup_request()
Example #56
def notify_team_change_for(user=None):
    LOG.info("Notifying team change for user %s" % user)
    subject = _("[DBAAS] your team has been updated!")
    template = "team_change_notification"
    addr_from = Configuration.get_by_name("email_addr_from")
    if user.email:
        addr_to = [user.email]
        context = {}
        context['user'] = user
        domain = get_domain()
        context['url'] = domain
        context['teams'] = [team.name for team in user.team_set.all()]
        if user and addr_from and addr_to:
            send_mail_template(
                subject, template, addr_from, addr_to,
                fail_silently=False, attachments=None, context=context
            )
        else:
            LOG.warning("could not send email for team change")
    else:
        LOG.warning(
            "user %s has no email set and therefore cannot be notified!" % user)
Example #57
    def __mongo_client__(self, instance):
        connection_address = self.__get_admin_connection(instance)
        if not self.databaseinfra and instance:
            self.databaseinfra = instance.databaseinfra
        try:
            # mongo uses timeout in mili seconds
            connection_timeout_in_miliseconds = Configuration.get_by_name_as_int(
                'mongo_connect_timeout',
                default=MONGO_CONNECTION_DEFAULT_TIMEOUT) * 1000

            client = pymongo.MongoClient(
                connection_address,
                connectTimeoutMS=connection_timeout_in_miliseconds)
            if self.databaseinfra.user and self.databaseinfra.password:
                LOG.debug('Authenticating databaseinfra %s',
                          self.databaseinfra)
                client.admin.authenticate(self.databaseinfra.user,
                                          self.databaseinfra.password)
            return client
        except TypeError:
            raise AuthenticationError(message='Invalid address: %s' %
                                      connection_address)
Example #58
    def delete_view(self, request, object_id, extra_context=None):
        database = Database.objects.get(id=object_id)
        extra_context = extra_context or {}

        if database.status != Database.ALIVE or not database.database_status.is_alive:
            self.message_user(
                request,
                "Database {} is not alive and cannot be deleted".format(
                    database.name),
                level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if database.is_beeing_used_elsewhere():
            self.message_user(
                request,
                "Database {} cannot be deleted because it is in use by another task."
                .format(database.name),
                level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if database.has_migration_started():
            self.message_user(
                request,
                "Database {} cannot be deleted because it is beeing migrated.".
                format(database.name),
                level=messages.ERROR)
            url = reverse('admin:logical_database_changelist')
            return HttpResponseRedirect(url)

        if not database.is_in_quarantine:
            extra_context[
                'quarantine_days'] = Configuration.get_by_name_as_int(
                    'quarantine_retention_days')
        return super(DatabaseAdmin,
                     self).delete_view(request,
                                       object_id,
                                       extra_context=extra_context)
Example #59
class MakeDatabaseBackup(TestCase):
    def setUp(self):
        cache.clear()

        self.admin = SnapshotAdmin(Snapshot, admin.sites.AdminSite())
        self.param_backup_available = Configuration(name='backup_available',
                                                    value=1)
        self.param_backup_available.save()

    def tearDown(self):
        if self.param_backup_available.id:
            self.param_backup_available.delete()

    def test_is_backup_available(self):
        self.assertTrue(self.admin.is_backup_available)

    def test_is_backup_disable(self):
        self.param_backup_available.value = 0
        self.param_backup_available.save()
        self.assertFalse(self.admin.is_backup_available)

    def test_is_backup_disable_not_configured(self):
        self.param_backup_available.delete()
        self.assertFalse(self.admin.is_backup_available)
Example #60
def purge_quarantine(self, ):
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    try:
        task_history = TaskHistory.register(request=self.request, user=user)
        task_history.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
            self.request.id, self.request.task, self.request.kwargs,
            str(self.request.args)))

        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
        task_history.add_detail(
            "Quarantine date older than {}".format(quarantine_time_dt))

        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt)
        task_history.add_detail("Databases to purge: {}".format(
            len(databases)))

        for database in databases:
            task_history.add_detail('Deleting {}...'.format(database), level=2)
            database.destroy(user)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.')
        return

    except Exception as e:
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()