Example no. 1
def compute_resource_usage(data):
    """ Computes MonitorData.used based on related monitors """
    MonitorData = type(data)
    resource = data.resource
    today = timezone.now()
    result = 0
    has_result = False
    for monitor in resource.monitors:
        # Get related dataset
        resource_model = data.content_type.model_class()
        monitor_model = get_model(ServiceMonitor.get_backend(monitor).model)
        if resource_model == monitor_model:
            dataset = MonitorData.objects.filter(monitor=monitor,
                    content_type=data.content_type_id, object_id=data.object_id)
        else:
            path = get_model_field_path(monitor_model, resource_model)
            fields = '__'.join(path)
            objects = monitor_model.objects.filter(**{fields: data.object_id})
            pks = objects.values_list('id', flat=True)
            ct = ContentType.objects.get_for_model(monitor_model)
            dataset = MonitorData.objects.filter(monitor=monitor,
                    content_type=ct, object_id__in=pks)
        
        # Process dataset according to resource.period
        if resource.period == resource.MONTHLY_AVG:
            try:
                last = dataset.latest()
            except MonitorData.DoesNotExist:
                continue
            has_result = True
            epoch = datetime(year=today.year, month=today.month, day=1,
                             tzinfo=timezone.utc)
            total = (last.date-epoch).total_seconds()
            dataset = dataset.filter(date__year=today.year,
                                     date__month=today.month)
            # Weight each value by the time slot it was current for
            previous = epoch
            for monitor_data in dataset.order_by('date'):
                slot = (monitor_data.date-previous).total_seconds()
                result += monitor_data.value * slot/total
                previous = monitor_data.date
        elif resource.period == resource.MONTHLY_SUM:
            dataset = dataset.filter(date__year=today.year, date__month=today.month)
            # FIXME Aggregation of 0s returns None! django bug?
            # value = dataset.aggregate(models.Sum('value'))['value__sum']
            values = dataset.values_list('value', flat=True)
            if values:
                has_result = True
                result += sum(values)
        elif resource.period == resource.LAST:
            try:
                result += dataset.latest().value
            except MonitorData.DoesNotExist:
                continue
            has_result = True
        else:
            msg = "%s support not implemented" % data.period
            raise NotImplementedError(msg)
        
    return result/resource.scale if has_result else None
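
The MONTHLY_AVG branch above computes a time-weighted average: each stored value is weighted by the length of the time slot it covers, relative to the total time elapsed since the first day of the month. A minimal, framework-free sketch of the same arithmetic (the dates and values are made up for illustration):

from datetime import datetime, timezone

# Hypothetical readings collected during one month: (date, value)
readings = [
    (datetime(2014, 5, 10, tzinfo=timezone.utc), 100),
    (datetime(2014, 5, 20, tzinfo=timezone.utc), 200),
    (datetime(2014, 5, 30, tzinfo=timezone.utc), 300),
]

epoch = datetime(2014, 5, 1, tzinfo=timezone.utc)  # first day of the month
last = readings[-1][0]                             # date of the latest reading
total = (last - epoch).total_seconds()             # span being averaged over

result = 0
previous = epoch
for date, value in readings:
    slot = (date - previous).total_seconds()       # how long this value was current
    result += value * slot / total                 # weight it by its share of the span
    previous = date

print(result)  # ~203.4: time-weighted average of 100, 200 and 300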
Example no. 2
def monitor(resource_id, ids=None):
    with LockFile('/dev/shm/resources.monitor-%i.lock' % resource_id, expire=60*60, unlocked=bool(ids)):
        from .models import ResourceData, Resource
        resource = Resource.objects.get(pk=resource_id)
        resource_model = resource.content_type.model_class()
        logs = []
        # Execute monitors
        for monitor_name in resource.monitors:
            backend = ServiceMonitor.get_backend(monitor_name)
            model = backend.model_class()
            kwargs = {}
            if ids:
                path = get_model_field_path(model, resource_model)
                path = '%s__in' % ('__'.join(path) or 'id')
                kwargs = {
                    path: ids
                }
            # Execute monitor
            monitorings = []
            for obj in model.objects.filter(**kwargs):
                op = Operation(backend, obj, Operation.MONITOR)
                monitorings.append(op)
            logs += Operation.execute(monitorings, async=False)
        
        kwargs = {'id__in': ids} if ids else {}
        # Update used resources and trigger resource exceeded and recovery
        triggers = []
        model = resource.content_type.model_class()
        for obj in model.objects.filter(**kwargs):
            data, __ = ResourceData.get_or_create(obj, resource)
            data.update()
            if not resource.disable_trigger:
                if data.used > (data.allocated or 0):
                    op = Operation(backend, obj, Operation.EXCEEDED)
                    triggers.append(op)
                elif data.used < (data.allocated or 0):
                    op = Operation(backend, obj, Operation.RECOVERY)
                    triggers.append(op)
        Operation.execute(triggers)
        return logs
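
Both Example 1 and the monitor task rely on get_model_field_path(model, resource_model) to obtain the chain of field names joining two related models, which is then '__'-joined into a queryset lookup (falling back to 'id' when both models coincide). The helper itself is not included in these snippets; the following is only a rough sketch of what such a traversal could look like, assuming a breadth-first walk over forward relations (the name and the strategy are assumptions, not the actual implementation):

from collections import deque

def get_model_field_path_sketch(origin, target):
    """ Breadth-first search for a chain of relation field names
        leading from origin to target (sketch, not the real helper) """
    queue = deque([(origin, [])])
    seen = {origin}
    while queue:
        model, path = queue.popleft()
        if model is target:
            # An empty path means origin and target are the same model
            return path
        for field in model._meta.fields:
            related = getattr(field, 'related_model', None)
            if related is not None and related not in seen:
                seen.add(related)
                queue.append((related, path + [field.name]))
    raise LookupError("No relation path between %s and %s" % (origin, target))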
Example no. 3
def monitor(resource_id, ids=None):
    with LockFile('/dev/shm/resources.monitor-%i.lock' % resource_id, expire=60*60, unlocked=bool(ids)):
        from .models import ResourceData, Resource
        resource = Resource.objects.get(pk=resource_id)
        resource_model = resource.content_type.model_class()
        logs = []
        # Execute monitors
        for monitor_name in resource.monitors:
            backend = ServiceMonitor.get_backend(monitor_name)
            model = backend.model_class()
            kwargs = {}
            if ids:
                path = get_model_field_path(model, resource_model)
                path = '%s__in' % ('__'.join(path) or 'id')
                kwargs = {
                    path: ids
                }
            # Execute monitor
            monitorings = []
            for obj in model.objects.filter(**kwargs):
                op = Operation(backend, obj, Operation.MONITOR)
                monitorings.append(op)
            logs += Operation.execute(monitorings, async=False)
        
        kwargs = {'id__in': ids} if ids else {}
        # Update used resources and trigger resource exceeded and recovery
        triggers = []
        model = resource.content_type.model_class()
        for obj in model.objects.filter(**kwargs):
            data, __ = ResourceData.objects.get_or_create(obj, resource)
            data.update()
            if not resource.disable_trigger:
                if data.used > (data.allocated or 0):
                    op = Operation(backend, obj, Operation.EXCEEDED)
                    triggers.append(op)
                elif data.used < (data.allocated or 0):
                    op = Operation(backend, obj, Operation.RECOVERY)
                    triggers.append(op)
        Operation.execute(triggers)
        return logs
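
Examples 2 and 3 serialise the whole run with a LockFile context manager kept under /dev/shm, so at most one monitor pass per resource runs at a time, and apparently skip the lock when explicit ids are passed (unlocked=bool(ids)). The real helper is not part of these snippets; a much simplified stand-in, ignoring the expire handling, could look like this:

import os

class SimpleLockFile(object):
    """ Minimal stand-in for a lock-file context manager: create the file
        exclusively on enter, remove it on exit. The expire logic of the
        real helper is deliberately left out. """

    def __init__(self, path, unlocked=False):
        self.path = path
        self.unlocked = unlocked

    def __enter__(self):
        if not self.unlocked:
            # O_EXCL makes creation fail if another process holds the lock
            fd = os.open(self.path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.unlocked:
            os.remove(self.path)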
Example no. 4

@shared_task(name='resources.Monitor')
def monitor(resource_id, ids=None, async=True):
    from .models import ResourceData, Resource
    
    resource = Resource.objects.get(pk=resource_id)
    resource_model = resource.content_type.model_class()
    logs = []
    # Execute monitors
    for monitor_name in resource.monitors:
        backend = ServiceMonitor.get_backend(monitor_name)
        model = backend.model_class()
        kwargs = {}
        if ids:
            path = get_model_field_path(model, resource_model)
            path = '%s__in' % ('__'.join(path) or 'id')
            kwargs = {
                path: ids
            }
        # Execute monitor
        monitorings = []
        for obj in model.objects.filter(**kwargs):
            op = Operation(backend, obj, Operation.MONITOR)
            monitorings.append(op)
        # TODO async=True only when running with celery
        logs += Operation.execute(monitorings, async=async)
    
    kwargs = {'id__in': ids} if ids else {}
    # Update used resources and trigger resource exceeded and recovery
    triggers = []
    model = resource.content_type.model_class()
    for obj in model.objects.filter(**kwargs):
        data, __ = ResourceData.get_or_create(obj, resource)
        data.update()
        if not resource.disable_trigger:
            if data.used > (data.allocated or 0):
                op = Operation(backend, obj, Operation.EXCEEDED)
                triggers.append(op)
            elif data.used < (data.allocated or 0):
                op = Operation(backend, obj, Operation.RECOVERY)
                triggers.append(op)
    Operation.execute(triggers)
    return logs
Example no. 5
def get_model_path(self, monitor):
    """ Returns a model path between self.content_type and monitor.model """
    resource_model = self.content_type.model_class()
    monitor_model = ServiceMonitor.get_backend(monitor).model_class()
    return get_model_field_path(monitor_model, resource_model)