Example #1
def main():
    counter = 0
    timer = statsd.timer('lisa.process_ip', rate=0.01)  # 1% sample rate

    while True:
        if KILLED:
            log.info('Shutdown successful')
            return 0

        try:
            ip_info = redis.brpop(rkeys.IPLOGS)
        except RedisError as e:
            log.error('Error with Redis: {}'.format(e))
            return 1

        # don't start the timer above the redis call; brpop blocks while waiting
        timer.start()

        log.debug('Got log data: ' + ip_info[1])
        try:
            rtype, ip = ip_info[1].split(',')
        except ValueError:
            continue

        timestamp = get_epoch_minute()

        if rate_limit_ip(ip, timestamp):
            continue

        record = geo.get(ip)
        if record:
            # everything goes for total count and map
            process_map(record, timestamp)
            # only shares get more processing
            if rtype != data_types.DOWNLOAD:
                process_share(record, rtype)

        timer.stop()
        statsd.incr('lisa.process_ip', rate=0.01)  # 1% sample rate

        if args.verbose:
            sys.stdout.write('.')
            sys.stdout.flush()

        # using a counter and if statement here instead of the
        # `rate` param on the gauge to avoid getting the length
        # of the Redis list every time.
        counter += 1
        if counter >= 1000:
            counter = 0
            statsd.gauge('queue.geoip', redis.llen(rkeys.IPLOGS))
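
The comment near the end of the loop explains why the queue-length gauge is driven by a manual counter: statsd's `rate` parameter only samples what gets sent, not what the caller computes. A minimal sketch of the avoided alternative, assuming the same statsd client and names as above:

# Sampled-gauge alternative that the counter approach avoids: with a low
# rate, statsd sends only a fraction of the calls, but redis.llen() would
# still hit Redis on every iteration just to build a value that is
# usually discarded.
statsd.gauge('queue.geoip', redis.llen(rkeys.IPLOGS), rate=0.001)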
Example #2
def main():
    counter = 0

    while True:
        if KILLED:
            log.info('Shutdown successful')
            return 0

        try:
            ip_info = redis.brpop(rkeys.IPLOGS)
        except RedisError as e:
            log.error('Error with Redis: {}'.format(e))
            return 1

        log.debug('Got log data: ' + ip_info[1])
        try:
            rtype, ip = ip_info[1].split(',')
        except ValueError:
            continue
        record = geo.get(ip)
        if record:
            # everything goes for total count and map
            process_map(record)
            # only shares get more processing
            if rtype != data_types.DOWNLOAD:
                process_share(record, rtype)

        if args.verbose:
            sys.stdout.write('.')
            sys.stdout.flush()

        if statsd:
            counter += 1
            if counter >= 1000:
                counter = 0
                statsd.gauge('queue.geoip', redis.llen(rkeys.IPLOGS))
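
Example #2 guards the gauge with `if statsd:`, so the client is evidently optional. A minimal sketch of what that optional setup could look like with the common `statsd` Python package; the environment variable names and the prefix are assumptions, not taken from the original module:

import os
from statsd import StatsClient

statsd = None  # metrics are simply skipped when no host is configured
if os.environ.get('STATSD_HOST'):
    statsd = StatsClient(host=os.environ['STATSD_HOST'],
                         port=int(os.environ.get('STATSD_PORT', 8125)),
                         prefix='lisa')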
Example #3
def main():
    global counter
    timer = statsd.timer('lisa.process_ip', rate=0.01)  # 1% sample rate
    pipe = redis.pipeline()

    while True:
        if KILLED:
            pipe.execute()
            log.info('Shutdown successful')
            return 0

        try:
            if args.benchmark:
                ip_info = redis.rpop(rkeys.IPLOGS)
            else:
                ip_info = redis.brpop(rkeys.IPLOGS)[1]
        except RedisError as e:
            log.error('Error with Redis: {}'.format(e))
            pipe.execute()
            return 1

        if ip_info is None:
            # benchmark run is over
            pipe.execute()
            return 0

        # don't start the timer above the redis call; brpop blocks while waiting
        timer.start()

        log.debug('Got log data: ' + ip_info)
        try:
            rtype, ip = ip_info.split(',')
        except ValueError:
            continue

        timestamp = get_epoch_minute()

        if rate_limit_ip(ip):
            continue

        record = geo.get(ip)
        if record:
            # everything goes for total count and map
            process_map(record, timestamp, pipe)
            # only shares get more processing
            if rtype != data_types.DOWNLOAD:
                process_share(record, rtype, pipe)

        timer.stop()
        statsd.incr('lisa.process_ip', rate=0.01)  # 1% sample rate

        if args.verbose:
            sys.stdout.write('.')
            sys.stdout.flush()

        # using a counter and if statement here instead of the
        # `rate` param on the gauge to avoid getting the length
        # of the Redis list every time.
        counter += 1
        if args.benchmark:
            if not counter % 1000:
                pipe.execute()
        else:
            if counter >= 1000:
                pipe.execute()
                counter = 0
                statsd.gauge('queue.geoip', redis.llen(rkeys.IPLOGS))
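
Example #3 speeds up writes by queueing them on a Redis pipeline and flushing every 1000 records (and again on shutdown or error). A minimal, self-contained sketch of that batching pattern with redis-py, making no assumptions about what process_map/process_share queue on the pipe and using a throwaway demo:counter key:

import redis

r = redis.StrictRedis()
pipe = r.pipeline(transaction=False)  # plain batching, no MULTI/EXEC

for n in range(2500):
    pipe.incr('demo:counter')    # commands queue up client-side
    if len(pipe) >= 1000:        # ...and go to the server in one round trip
        pipe.execute()
pipe.execute()                   # flush whatever is left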
Example #4


if __name__ == '__main__':
    register_signals(handle_signals)
    try:
        geo = maxminddb.Reader(args.file)
    except IOError:
        log.error('ERROR: Can\'t find MaxMind Database file (%s). '
                  'Try setting the --file flag.' % args.file)
        sys.exit(1)

    if args.benchmark:
        bench_count = redis.llen(rkeys.IPLOGS)
        if not bench_count:
            print 'No IPs to process'
            sys.exit(1)
        print 'Starting benchmark of {} records'.format(bench_count)
        bench_start = time.time()

    return_code = main()

    if args.benchmark:
        bench_time = time.time() - bench_start
        print 'Total Processed: {}'.format(counter)
        print 'Total Time:      {}s'.format(bench_time)
        print 'IPs per minute:  {}'.format(counter / (bench_time / 60))

    sys.exit(return_code)
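
Every example polls a module-level KILLED flag, and the entry point wires it up with register_signals(handle_signals), but neither helper is shown. A minimal sketch of what that signal handling might look like; only the names come from the examples, the bodies are assumptions:

import signal

KILLED = False

def handle_signals(signum, frame):
    # Hypothetical handler: flip the flag so main() can finish the record
    # it is processing and then return cleanly.
    global KILLED
    KILLED = True

def register_signals(handler):
    # Hypothetical registration for the usual termination signals.
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)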