Example 1
def handle(self, *args, **options):
    # Collect metrics either for the single service passed on the command
    # line or, when none is given, for every registered Service.
    oservice = options['service']
    if not oservice:
        services = Service.objects.all()
    else:
        services = [oservice]
    if options['list_services']:
        # List-only mode: print the known services and exit without collecting.
        print('available services')
        for s in services:
            print('  ', s.name, '(', s.url, ')')
            print('   type', s.service_type.name)
            print('   running on', s.host.name, s.host.ip)
            print('   active:', s.active)
            if s.last_check:
                print('    last check:', s.last_check)
            else:
                print('    not checked yet')
            print(' ')
        return
    # Run a check for every selected service through the collector API.
    c = CollectorAPI()
    for s in services:
        try:
            self.run_check(s,
                           collector=c,
                           since=options['since'],
                           until=options['until'],
                           force_check=options['force_check'],
                           format=options['format'])
        except Exception as err:
            log.error("Cannot collect from %s: %s", s, err, exc_info=err)
            if options['halt_on_errors']:
                raise
    if not options['do_not_clear']:
        log.info("Clearing old data")
        c.clear_old_data()
    if options['emit_notifications']:
        log.info("Processing notifications for %s", options['until'])
        s = Service.objects.first()
        interval = s.check_interval
        now = datetime.utcnow().replace(tzinfo=pytz.utc)
        notifications_check = now - interval
        c.emit_notifications()  # notifications_check
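
Example 1 shows only the handle() body. Below is a minimal sketch of the Django management command class such a method would live in; only the option keys (the dest names) are taken from the code above, while the class layout and flag spellings are illustrative assumptions and may not match GeoNode's actual command definition.

from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = "Collect monitoring metrics (sketch; flag spellings are illustrative)"

    def add_arguments(self, parser):
        # dest names mirror the options[...] keys read by handle() above;
        # the real flag names in GeoNode may differ.
        parser.add_argument('--service', dest='service', default=None)
        parser.add_argument('--list-services', dest='list_services', action='store_true', default=False)
        parser.add_argument('--since', dest='since', default=None)
        parser.add_argument('--until', dest='until', default=None)
        parser.add_argument('--force-check', dest='force_check', action='store_true', default=False)
        parser.add_argument('--format', dest='format', default=None)
        parser.add_argument('--halt-on-errors', dest='halt_on_errors', action='store_true', default=False)
        parser.add_argument('--do-not-clear', dest='do_not_clear', action='store_true', default=False)
        parser.add_argument('--emit-notifications', dest='emit_notifications', action='store_true', default=False)

    def handle(self, *args, **options):
        ...  # body as in Example 1 above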
Example 2
def collect_metric(**options):
    # hashlib.md5 and datetime are used below; `log` and `run_check` are assumed
    # to be defined at module level alongside this function.
    from hashlib import md5
    from datetime import datetime
    from geonode.celery_app import app
    from geonode.tasks.tasks import memcache_lock
    from geonode.monitoring.models import Service
    from geonode.monitoring.collector import CollectorAPI

    _start_time = None
    _end_time = None
    # The cache key consists of the task name and the MD5 digest
    # of the name.
    name = b'collect_metric'
    hexdigest = md5(name).hexdigest()
    lock_id = '{0}-lock-{1}'.format(name, hexdigest)
    _start_time = datetime.utcnow().isoformat()
    log.info('[{}] Collecting Metrics - started @ {}'.format(
        lock_id, _start_time))
    with memcache_lock(lock_id, app.oid) as acquired:
        if acquired:
            oservice = options['service']
            if not oservice:
                services = Service.objects.all()
            else:
                services = [oservice]
            if options['list_services']:
                print('available services')
                for s in services:
                    print('  ', s.name, '(', s.url, ')')
                    print('   type', s.service_type.name)
                    print('   running on', s.host.name, s.host.ip)
                    print('   active:', s.active)
                    if s.last_check:
                        print('    last check:', s.last_check)
                    else:
                        print('    not checked yet')
                    print(' ')
                return
            c = CollectorAPI()
            for s in services:
                try:
                    run_check(s,
                              collector=c,
                              since=options['since'],
                              until=options['until'],
                              force_check=options['force_check'],
                              format=options['format'])
                except Exception as err:
                    log.error("Cannot collect from %s: %s",
                              s,
                              err,
                              exc_info=err)
                    if options['halt_on_errors']:
                        raise
            if not options['do_not_clear']:
                log.debug("Clearing old data")
                c.clear_old_data()
            if options['emit_notifications']:
                log.debug("Processing notifications for %s", options['until'])
                # s = Service.objects.first()
                # interval = s.check_interval
                # now = datetime.utcnow().replace(tzinfo=pytz.utc)
                # notifications_check = now - interval
                c.emit_notifications()  # notifications_check
            _end_time = datetime.utcnow().isoformat()
            log.info('[{}] Collecting Metrics - finished @ {}'.format(
                lock_id, _end_time))
    return (_start_time, _end_time)
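
Examples 2 and 3 wrap the whole run in memcache_lock so that overlapping Celery invocations do not collect metrics concurrently. GeoNode's memcache_lock comes from geonode.tasks.tasks and may be implemented differently, but the `with memcache_lock(lock_id, app.oid) as acquired:` usage in Example 2 matches the cache-based single-instance lock recipe from the Celery documentation; a minimal sketch of that pattern, assuming Django's cache framework is available:

import time
from contextlib import contextmanager

from django.core.cache import cache

LOCK_EXPIRE = 60 * 10  # let the lock expire so a crashed worker cannot hold it forever


@contextmanager
def memcache_lock(lock_id, oid):
    timeout_at = time.monotonic() + LOCK_EXPIRE - 3
    # cache.add is atomic: it succeeds only if the key does not exist yet
    status = cache.add(lock_id, oid, LOCK_EXPIRE)
    try:
        yield status
    finally:
        if status and time.monotonic() < timeout_at:
            # release only if we acquired the lock and it has not expired meanwhile
            cache.delete(lock_id)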
Example 3
def collect_metric(**options):
    from hashlib import md5          # used for the lock id digest below
    from datetime import datetime    # used for the start/end timestamps
    from geonode.tasks.tasks import memcache_lock
    from geonode.monitoring.models import Service
    from geonode.monitoring.collector import CollectorAPI

    _start_time = None
    _end_time = None
    # The cache key consists of the task name and the MD5 digest
    # of the name.
    name = b'collect_metric'
    hexdigest = md5(name).hexdigest()
    lock_id = f'{name.decode()}-lock-{hexdigest}'
    _start_time = _end_time = datetime.utcnow().isoformat()
    log.info('[{}] Collecting Metrics - started @ {}'.format(
        lock_id, _start_time))
    lock = memcache_lock(lock_id)
    if lock.acquire(blocking=False) is True:
        log.info('[{}] Collecting Metrics - [...acquired lock] @ {}'.format(
            lock_id, _start_time))
        try:
            oservice = options['service']
            if not oservice:
                services = Service.objects.all()
            else:
                services = [oservice]
            if options['list_services']:
                print('available services')
                for s in services:
                    print('  ', s.name, '(', s.url, ')')
                    print('   type', s.service_type.name)
                    print('   running on', s.host.name, s.host.ip)
                    print('   active:', s.active)
                    if s.last_check:
                        print('    last check:', s.last_check)
                    else:
                        print('    not checked yet')
                    print(' ')
                return
            c = CollectorAPI()
            for s in services:
                try:
                    run_check(s,
                              collector=c,
                              since=options['since'],
                              until=options['until'],
                              force_check=options['force_check'],
                              format=options['format'])
                except Exception as e:
                    log.warning(e)
            if not options['do_not_clear']:
                log.info("Clearing old data")
                c.clear_old_data()
            if options['emit_notifications']:
                log.info("Processing notifications for %s", options['until'])
                # s = Service.objects.first()
                # interval = s.check_interval
                # now = datetime.utcnow().replace(tzinfo=pytz.utc)
                # notifications_check = now - interval
                c.emit_notifications()  # notifications_check
            _end_time = datetime.utcnow().isoformat()
            log.info('[{}] Collecting Metrics - finished @ {}'.format(
                lock_id, _end_time))
        except Exception as e:
            log.info('[{}] Collecting Metrics - errored @ {}'.format(
                lock_id, _end_time))
            log.exception(e)
        finally:
            lock.release()
    log.info('[{}] Collecting Metrics - exit @ {}'.format(lock_id, _end_time))
    return (_start_time, _end_time)
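
A minimal call sketch for the Example 3 variant, assuming collect_metric is in scope. The option keys are exactly the ones the function reads; the values (a ten-minute window with notifications enabled) are only illustrative, and since Example 2 acquires its lock with app.oid from geonode.celery_app, the function is evidently meant to run under Celery, so a direct call like this is mainly useful for testing.

from datetime import datetime, timedelta

options = {
    'service': None,           # falsy -> collect from every registered Service
    'list_services': False,    # True would only print the services and return
    'since': datetime.utcnow() - timedelta(minutes=10),
    'until': datetime.utcnow(),
    'force_check': False,
    'format': None,
    'do_not_clear': False,     # keep the old-data cleanup step enabled
    'emit_notifications': True,
}
started, finished = collect_metric(**options)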