Example #1
def main():
    """Start the poor_consumer."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv",
                                   ["help", "nack=", "servers=", "queues="])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit()

    # defaults
    nack = 0.0
    verbose = False
    servers = "localhost:7712,localhost:7711"
    queues = "test"

    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("--nack"):
            nack = float(a)
        elif o in ("--servers"):
            servers = a
        elif o in ("--queues"):
            queues = a
        else:
            assert False, "unhandled option"

    # prepare servers and queues for pydisque
    servers = servers.split(",")
    queues = queues.split(",")

    c = Client(servers)
    c.connect()

    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            rnd = random.random()

            # as this is a test processor, we don't do any validation on
            # the actual job body, so let's just pay attention to IDs

            if rnd >= nack:
                print ">>> received job:", job_id
                c.ack_job(job_id)
            else:
                print ">>> bouncing job:", job_id
                c.nack_job(job_id)
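
Example #1 omits its imports and the usage() helper it calls. A minimal scaffold to run the snippet standalone could look like the sketch below; only the pydisque import is certain, the usage() text and script name are illustrative.

# Hypothetical scaffolding for the consumer above; help text is made up.
import getopt
import random
import sys

from pydisque.client import Client


def usage():
    print("poor_consumer [-h] [-v] [--nack=0.1] "
          "[--servers=host:port,...] [--queues=a,b]")


if __name__ == "__main__":
    main()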
Example #2
def main():
    '''
    Validate arguments; send data to the message bus.
    '''
    message = 'Push data to the message bus.'
    socket_help = ('a list of HOST:PORT pairs to connect to; '
                   'defaults to localhost:7711 (for disque)')
    ttl_help = ('a TTL (in seconds) for the data on Twitter and GitHub; '
                'if not specified, the data will remain forever')

    parser = ArgumentParser(description=message)
    parser.add_argument('-s',
                        '--sockets',
                        help=socket_help,
                        default=['localhost:7711'],
                        dest='sockets',
                        metavar=('HOST:PORT'),
                        nargs='+')
    parser.add_argument('-d',
                        '--debug',
                        help='enable debugging',
                        action='store_true',
                        default=False)
    parser.add_argument('-r',
                        '--recipient',
                        help='keybase-id to send',
                        required=True,
                        metavar=('KEYBASE-ID'))
    parser.add_argument('-t',
                        '--ttl',
                        help=ttl_help,
                        default=0,
                        type=int,
                        metavar=('N'))
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-i', '--in-file', metavar=('FILE'), default=None)
    group.add_argument('-m', '--message', type=str)

    args = vars(parser.parse_args())

    if args['debug']:
        LOGGER.setLevel(DEBUG)
        LOGGER.addHandler(HANDLER)
    else:
        LOGGER.setLevel(INFO)
        LOGGER.addHandler(HANDLER)

    plaintext, queue = None, None

    if args['in_file']:
        name = args['in_file']
        if not re.match(r'^text\/.*', magic.from_file(name, mime=True)):
            LOGGER.error('[file-error] input-file mimetype should be text/.*')
            return
        else:
            with open(name, 'r') as infile:
                plaintext = infile.read()
    else:
        plaintext = args['message']

    try:
        # Instantiate a connection to the queue only if a TTL is specified.
        if args['ttl']:
            queue = Client(args['sockets'])
            queue.connect()
            queue_info = json.dumps(queue.info(), indent=4)
            LOGGER.debug('[queue-init]\n%s', queue_info)

        auth = load_credentials()
        if None in auth:
            LOGGER.error('[load_credentials] unable to load credentials!')
            return

        send(plaintext=plaintext,
             auth=auth,
             recipient=args['recipient'],
             ttl=args['ttl'],
             queue=queue,
             debug=args['debug'])

    except Exception:
        LOGGER.error('[error] unable to connect to the redis-queue (disque)!')
Example #3
 def setUp(self):
     """Setup the tests."""
     self.client = Client(['localhost:7711'])
     self.client.connect()
     self.testID = "%d.%d" % (time.time(), random.randint(1000, 1000000))
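
For context, a companion test for the setUp above might look like the following sketch. It is not taken from the project's test suite; it only uses the pydisque calls already shown in these examples (add_job, get_job, ack_job) and assumes a Disque node listening on localhost:7711.

# Illustrative only: a minimal add/get/ack round trip in the same style as the setUp above.
import json
import random
import time
import unittest

from pydisque.client import Client


class TestDisqueRoundTrip(unittest.TestCase):

    def setUp(self):
        """Set up the tests."""
        self.client = Client(['localhost:7711'])
        self.client.connect()
        self.testID = "%d.%d" % (time.time(), random.randint(1000, 1000000))

    def test_add_get_ack(self):
        """Add a job, fetch it back, and acknowledge it."""
        queue = "test_%s" % self.testID
        self.client.add_job(queue, json.dumps({"id": self.testID}))
        jobs = self.client.get_job([queue])
        self.assertTrue(jobs)
        for queue_name, job_id, job in jobs:
            self.client.ack_job(job_id)


if __name__ == '__main__':
    unittest.main()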
Example #4
File: views.py Project: YunShiTiger/cmdb-2
def add_job(request):
    user = request.user
    ip = request.META['REMOTE_ADDR']
    zone = request.POST['zone']
    queue = request.POST['queue_name']
    # timeout_ms = request.POST['timeout_ms']
    # replicate = request.POST['replicate']
    # retry_sec = request.POST['retry_sec']
    # delay_sec = request.POST['delay_sec']
    # ttl_sec = request.POST['ttl_sec']
    jobs = request.POST.getlist('jobs', [])
    # print user, env, queue, timeout_ms, replicate, retry_sec, delay_sec, ttl_sec
    print user, zone, queue
    print jobs

    if not user.groups.filter(name__in=['admin', 'dba', 'disque']).exists():
        logs(user, ip, 'add job: %s - %s' % (zone, queue), 'permission denied')
        return HttpResponse(json.dumps({'errcode': 403}),
                            content_type=DEFAULT_CONTENT_TYPE)

    try:
        clusterInfo = ClusterInfo.objects.get(name=zone)
        print clusterInfo.addr
    except ClusterInfo.DoesNotExist:
        logs(user, ip, 'add job: %s - %s' % (zone, queue),
             'unknown disque zone: %s' % zone)
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'unknown disque zone:%s' % zone
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    except ClusterInfo.MultipleObjectsReturned:
        logs(user, ip, 'add job: %s - %s' % (zone, queue),
             'multi objects returned for zone: %s' % zone)
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'multi objects returned for zone:%s' % zone
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    except Exception as e:
        print e
        logs(user, ip, 'add job: %s - %s' % (zone, queue), str(e))
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': str(e)
        }),
                            content_type=DEFAULT_CONTENT_TYPE)

    if not queue:
        logs(user, ip, 'add job: %s - %s' % (zone, queue), 'empty queue name')
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'empty queue name'
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    if len(jobs) == 0:
        logs(user, ip, 'add job: %s - %s' % (zone, queue), 'empty jobs')
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'empty jobs'
        }),
                            content_type=DEFAULT_CONTENT_TYPE)

    jobs = map(lambda x: x.encode('utf-8'), jobs)
    jobIds = []
    errJob = []
    addr = clusterInfo.addr.split(',')
    client = Client(addr)
    client.connect()
    for job in jobs:
        try:
            print job
            jobId = client.add_job(queue, job)
            # jobId = client.add_job(queue, job, timeout=timeout_ms, replicate=replicate, delay=delay_sec, retry=retry_sec, ttl=ttl_sec)
            jobIds.append(jobId)
        except Exception as e:
            print e
            errJob.append(job)
    logs(user, ip, 'add job: %s - %s' % (zone, queue), 'success')
    return HttpResponse(json.dumps({
        'errcode': 200,
        'jobIds': jobIds,
        'failJobs': errJob
    }),
                        content_type=DEFAULT_CONTENT_TYPE)
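
The commented-out call in the loop above hints at Disque's optional per-job parameters. If those form fields were re-enabled, a small helper could forward them; the keyword names come from the snippet itself, while the defaults below are made up.

# Sketch only: mirrors the commented-out add_job call above; defaults are illustrative.
def add_job_with_options(client, queue, job, timeout_ms=1000, replicate=1,
                         delay_sec=0, retry_sec=60, ttl_sec=3600):
    return client.add_job(queue, job,
                          timeout=timeout_ms,   # milliseconds to wait for the ADDJOB reply
                          replicate=replicate,  # number of nodes that must store the job
                          delay=delay_sec,      # seconds before the job is queued
                          retry=retry_sec,      # requeue if not acked within this many seconds
                          ttl=ttl_sec)          # drop the job after this many seconds

# e.g. inside the loop above: jobId = add_job_with_options(client, queue, job)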
Example #5
File: views.py Project: YunShiTiger/cmdb-2
def ack_job(request):
    user = request.user
    ip = request.META['REMOTE_ADDR']
    zone = request.POST['zone']
    jobIds = request.POST.getlist('jobIds', [])
    if not user.groups.filter(name__in=['admin', 'dba', 'disque']).exists():
        logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone),
             'permission denied')
        return HttpResponse(json.dumps({'errcode': 403}),
                            content_type=DEFAULT_CONTENT_TYPE)
    try:
        clusterInfo = ClusterInfo.objects.get(name=zone)
        print clusterInfo.addr
    except ClusterInfo.DoesNotExist:
        logs(user, ip, 'ack job: %s' % jobIds,
             'unknown disque zone: %s' % zone)
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'unknown disque zone:%s' % zone
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    except ClusterInfo.MultipleObjectsReturned:
        logs(user, ip, 'ack job: %s' % jobIds,
             'multi objects returned for zone: %s' % zone)
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'multi objects returned for zone:%s' % zone
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    except Exception as e:
        print e
        logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone), str(e))
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': str(e)
        }),
                            content_type=DEFAULT_CONTENT_TYPE)

    if len(jobIds) == 0:
        logs(user, ip, 'ack job: zone-%s' % zone, 'empty jobIds')
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'empty jobIds'
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    jobIds = map(lambda x: x.encode('utf-8'), jobIds)
    print user, zone, jobIds

    try:
        addr = clusterInfo.addr.split(',')
        client = Client(addr)
        client.connect()
        client.ack_job(*jobIds)
    except Exception as e:
        print e
        logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone), str(e))
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': str(e)
        }),
                            content_type=DEFAULT_CONTENT_TYPE)
    logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone), 'success')
    return HttpResponse(json.dumps({'errcode': 200}),
                        content_type=DEFAULT_CONTENT_TYPE)
Example #6
# -*- coding: utf-8 -*-
# pylint: disable=print-statement

from __future__ import unicode_literals

from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from pydisque.client import Client
from mico.settings import disque_aws, disque_qcd
import json
from django.shortcuts import render
from asset.utils import logs, deny_resubmit
# Create your views here.

disqueAWS = Client(disque_aws)
disqueAWS.connect()

disqueQCD = Client(disque_qcd)
disqueQCD.connect()

clientEnvMap = {'aws': disqueAWS, 'qcd': disqueQCD}

default_content_type = 'application/json'


@login_required
@deny_resubmit(page_key='disque_ack_job')
def ackjob_index(request):
    return render(request, 'disque_ack_job.html')

Example #7
def main():
    '''
    Validate arguments, load credentials, and start the daemon.
    '''
    message = 'Listen to tweets; dump them to the queue.'
    socket_help = ('a list of HOST:PORT pairs to connect to; '
                   'defaults to localhost:7711 (for disque)')

    parser = ArgumentParser(description=message)
    parser.add_argument('-s',
                        '--sockets',
                        help=socket_help,
                        default=['localhost:7711'],
                        dest='sockets',
                        metavar=('HOST:PORT'),
                        nargs='+')
    parser.add_argument('-c',
                        '--channels',
                        help='Twitter accounts to follow',
                        dest='channels',
                        metavar=('CHANNEL'),
                        nargs='+',
                        required=True)
    parser.add_argument('-d',
                        '--debug',
                        help='enable debugging',
                        action='store_true',
                        default=False)

    args = vars(parser.parse_args())

    if args['debug']:
        LOGGER.setLevel(DEBUG)
        LOGGER.addHandler(HANDLER)
    else:
        LOGGER.setLevel(INFO)
        LOGGER.addHandler(HANDLER)

    try:
        # Connect to the redis-queue.
        queue = Client(args['sockets'])
        queue.connect()
        LOGGER.info('[start-daemon]')
        queue_info = json.dumps(queue.info(), indent=4)
        LOGGER.debug('[queue-init]\n%s', queue_info)

        # Load credentials, initialize authentication module, listen to tweets.
        api = load_credentials()
        if not api:
            LOGGER.error('[load_credentials] unable to load credentials!')
            return

        listener = StreamDaemon(queue)
        streamer = tweepy.Stream(auth=api.auth, listener=listener)
        args['channels'] = [re.sub('@', '', _) for _ in args['channels']]
        streamer.userstream(track=args['channels'])

    except Exception:
        LOGGER.error('[error] unknown error')
        LOGGER.error('[error] unable to connect to the redis-queue (disque)!')

    except KeyboardInterrupt:
        LOGGER.critical('[stop-daemon]')
    return
Example #8
def main():
    '''
    Initialize authentication and the client connection.
    '''
    message = 'Delete gists, tweets if a TTL is set.'
    socket_help = ('a list of HOST:PORT pairs to connect to; '
                   'defaults to localhost:7711 (for disque)')
    retry_help = 'queue check frequency (in seconds); defaults to 8'

    parser = ArgumentParser(description=message)
    parser.add_argument('-s',
                        '--sockets',
                        help=socket_help,
                        default=['localhost:7711'],
                        dest='sockets',
                        metavar=('HOST:PORT'),
                        nargs='+')
    parser.add_argument('-d',
                        '--debug',
                        help='enable debugging',
                        action='store_true',
                        default=False)
    parser.add_argument('-r',
                        '--retry',
                        help=retry_help,
                        default=8,
                        type=int,
                        metavar=('DELAY'))

    args = vars(parser.parse_args())

    if args['debug']:
        LOGGER.setLevel(DEBUG)
        LOGGER.addHandler(HANDLER)
    else:
        LOGGER.setLevel(INFO)
        LOGGER.addHandler(HANDLER)

    # Load the credentials.
    tokens = load_credentials()

    if None in tokens:
        LOGGER.error('[load_credentials] unable to load credentials!')
        return

    try:
        # Connect to the redis-queue.
        queue = Client(args['sockets'])
        queue.connect()
        LOGGER.info('[start-daemon]')
        queue_info = json.dumps(queue.info(), indent=4)
        LOGGER.debug('[queue-init]\n%s', queue_info)
        listen(queue, tokens, args['debug'], args['retry'])

    except Exception:
        LOGGER.error('[error] unable to connect to the redis-queue (disque)!')

    except KeyboardInterrupt:
        LOGGER.critical('[stop-daemon]')

    return
Example #9
import json
import time
import logging
logging.basicConfig(level=logging.DEBUG)

from pydisque.client import Client

c = Client(['localhost:7712', 'localhost:7711'])
c.connect()

while True:
    t = time.time()
    print "sending job", t
    c.add_job("test", json.dumps(["print", "hello", "world", t]), replicate=1, timeout=100)
    time.sleep(2)
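
This producer pairs with the consumer in Example #1: both default to the same Disque nodes (localhost:7712 and localhost:7711) and the same 'test' queue, so the jobs pushed here are the ones the consumer acks or nacks.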
Example #10
 def __init__(self, conf):
     self.conf = conf
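     # NOTE: self.host and self.port are assumed to be set elsewhere on the class;
     # they are not defined in this snippet.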
     self.client = Client([':'.join([self.host, str(self.port)])])
     self.client.connect()
Example #11
config = {
    #'host': 'd1-redis-addfix.iqi3ba.ng.0001.apse1.cache.amazonaws.com',
    #'host': 'data-prod-redis-002.iqi3ba.0001.apse1.cache.amazonaws.com',
    'host': 'localhost',
    'port': 6379,
    'db': 0,
}

# for redis connection
r = redis.Redis(**config)
# redis-py raises ConnectionError when the server is unreachable, so the
# availability check needs a try/except rather than testing ping()'s return value.
try:
    r.ping()
except redis.ConnectionError as redis_err:
    sentry_client.captureException(
        message="settings.py: Failed to connect to redis",
        extra={"error": str(redis_err)})
    sys.exit()

## for disque connection
client = Client(["127.0.0.1:7711"])
#client = Client(["10.0.4.232:7711"])

try:
    client.connect()
except Exception as disque_err:
    sentry_client.captureException(
        message="settings.py:Failed to connect to disque",
        extra={"error": disque_err})
    sys.exit()

catfight_input = config_parser.get("Queues", "catfight_input")
catfight_output = config_parser.get("Queues", "catfight_output")
Example #12
File: dwq.py Project: cgundogan/dwq
 def connect(servers):
     global disque
     disque = Client(servers)
     disque.connect()
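
After connect() has run, other code is expected to go through the module-level disque client. A rough usage sketch follows, assuming connect() is callable as shown; the queue name and payload are made up.

# Illustrative only: the 'hello' queue and payload are invented for this sketch.
import json

connect(['localhost:7711'])
job_id = disque.add_job('hello', json.dumps({'cmd': 'echo hi'}))
queue_name, got_id, body = disque.get_job(['hello'])[0]
disque.ack_job(got_id)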