Пример #1
0
def publish(subject, text):
    '''
    Publish cumulative post to Livejournal.

    Enqueues one LivejournalPost job per configured LJ target.

    :param subject: post subject line.
    :param text: post body.
    '''
    r = ResQ()
    # Iterating the dict directly yields its keys on both Python 2 and 3;
    # ``iterkeys()`` was removed in Python 3.
    for target in settings.PUBLISHING_LJ_TARGETS:
        r.enqueue(LivejournalPost, target, subject, text)
Пример #2
0
class Jobs(object): 
	'''Thin wrapper around a pyres ResQ connection for the iOS push queue.'''

	def __init__(self,host='localhost',port=6379):
		# NOTE(review): local HOST is assigned but never used — looks like
		# leftover code; confirm before removing.
		HOST = 'localhost:6379'
		self.host=host
		self.port=port
		self.queue='iOSPush'
		self.resq = ResQ('%s:%i'%(self.host,self.port))

	def push(self,queue,item):
		# Push ``item`` onto the named resque queue.
		self.resq.push(queue,item)

	def pop(self,queue):
		# Pop the next item from the named queue.
		return self.resq.pop(queue)

	def add(self):
		pass
	
	def send(self):
		# Send one APNS push notification built from a queue item.
		info=self.pop(self.queue) #will get (None, None) if none
		# NOTE(review): the popped value is immediately overwritten by this
		# hard-coded payload — presumably debug leftover; confirm intent.
		info={'badge':1,'msg':'您有新的车票信息','token':'cb86b176ee99ae5f3387c79f1226d234599e91c7bc300e97afb034cc0009e192'}
		deviceToken = binascii.unhexlify(info['token'])
		message = APNSNotification()
		message.token(deviceToken)
		message.alert(info['msg'])
		message.badge(info['badge'])
		message.sound()
		wrapper = APNSNotificationWrapper('ck.pem', True)
		wrapper.append(message)
		wrapper.notify()
def pyres_web():
    """Entry point for the resweb console: parse CLI options and run itty.

    Options: --host/--port for the web UI binding, --dsn for the Redis
    backend, --auth for Redis credentials, --server for the itty backend.
    """
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("--host",
                      dest="host",
                      default="localhost",
                      metavar="HOST")
    parser.add_option("--port", dest="port", type="int", default=8080)
    parser.add_option("--dsn", dest="dsn", help="Redis server to display")
    parser.add_option("--auth", dest="auth", help="Redis user:pass")
    parser.add_option("--server",
                      dest="server",
                      help="Server for itty to run under.",
                      default='wsgiref')
    (options, args) = parser.parse_args()

    if options.dsn:
        from pyres import ResQ
        if options.auth is not None:
            from redis import Redis
            rhost, rport = options.dsn.split(':')
            ruser, rpass = options.auth.split(':')
            # NOTE(review): the "user" half of --auth is passed as the Redis
            # ``db`` index — looks suspicious; confirm this is intentional.
            redis = Redis(host=rhost,
                          port=int(rport),
                          db=ruser,
                          password=rpass)
            resweb_server.HOST = ResQ(redis)
        else:
            resweb_server.HOST = ResQ(options.dsn)
    run_itty(host=options.host, port=options.port, server=options.server)
Пример #4
0
def publish(text, url=None):
    '''
    Publish a status update to every supported social network.
    '''
    queue = ResQ()
    # One enqueue per network job class.
    for job_class in (TwitterStatus, VkontakteStatus):
        queue.enqueue(job_class, text, url)
Пример #5
0
    def all(cls, host="localhost:6379"):
        """Return every registered Worker known to the given Redis host.

        :param host: a ``"host:port"`` DSN string or an existing ResQ.
        :raises ValueError: if *host* is neither of the accepted types.
        """
        if isinstance(host, string_types):
            resq = ResQ(host)
        elif isinstance(host, ResQ):
            resq = host
        else:
            # Fail loudly instead of the UnboundLocalError the missing
            # branch used to produce.
            raise ValueError("host must be a string or a ResQ instance")

        return [Worker.find(w, resq) for w in resq.workers() or []]
Пример #6
0
    def all(cls, host="localhost:6379"):
        """Return every registered Worker known to the given Redis host.

        :param host: a ``"host:port"`` DSN string or an existing ResQ.
        NOTE(review): there is no ``else`` branch — an unsupported *host*
        type raises UnboundLocalError on ``resq`` below; confirm intended.
        """
        if isinstance(host, string_types):
            resq = ResQ(host)
        elif isinstance(host, ResQ):
            resq = host

        return [Worker.find(w, resq) for w in resq.workers() or []]
Пример #7
0
 def test_push(self):
     """Pushing two items onto a queue stores both, encoded, in order."""
     self.resq.push('pushq', 'content-newqueue')
     self.resq.push('pushq', 'content2-newqueue')
     # Two pushes -> list length 2 under the resque queue key.
     assert self.redis.llen('resque:queue:pushq') == 2
     assert self.redis.lindex('resque:queue:pushq',
                              0) == ResQ.encode('content-newqueue')
     assert self.redis.lindex('resque:queue:pushq',
                              1) == ResQ.encode('content2-newqueue')
Пример #8
0
 def perform(eventid):
     """Enqueue a callQ job for every currently active event.

     :param eventid: id of the event that triggered this job (logged only).
     """
     print(eventid)
     db = sqlitedb()
     eventrec = db.getactiveevent()
     # Create the queue connection once, not once per event.
     r = ResQ(server="%s:%s" % (resqserver, resqport))
     for event in eventrec:
         print(event['eventid'])
         r.enqueue(callQ, event['eventid'])
Пример #9
0
 def __init__(self, queues, host, max_children=10):
     """Set up a manager for the given queues against a Redis host."""
     self.queues = queues
     self._host = host
     self.max_children = max_children
     self._shutdown = False
     self.children = []
     self.resq = ResQ(host)
     # Validate only after the connection exists.
     self.validate_queues()
     self.reports = {}
Пример #10
0
    def trigger(self, timer):
        intent_json = self.db.hget('timer:%s' % timer, 'intent')
        if not intent_json:
            logging.warning("Triggering timer %s, but found no intent" % timer)
            return

        from async .send_intent import SendIntent
        r = ResQ()
        r.enqueue(SendIntent, intent_json)
Пример #11
0
 def test_get_job(self):
     """A worker's current job is visible via redis while working on it,
     and a fresh worker reports an empty job afterwards."""
     worker = Worker(['basic'])
     self.resq.enqueue(Basic,"test1")
     job = Job.reserve('basic', self.resq)
     worker.working_on(job)
     # Worker registration key: hostname:pid:queues.
     name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic')
     assert worker.job() == ResQ.decode(self.redis.get('resque:worker:%s' % name))
     assert worker.processing() == ResQ.decode(self.redis.get('resque:worker:%s' % name))
     worker.done_working()
     w2 = Worker(['basic'])
     print w2.job()
     assert w2.job() == {}
Пример #12
0
 def test_get_job(self):
     """A worker's current job is visible via redis while working on it,
     and a fresh worker reports an empty job afterwards."""
     worker = Worker(['basic'])
     self.resq.enqueue(Basic,"test1")
     job = Job.reserve('basic', self.resq)
     worker.working_on(job)
     # Worker registration key: hostname:pid:queues.
     name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic')
     assert worker.job() == ResQ.decode(self.redis.get('resque:worker:%s' % name))
     assert worker.processing() == ResQ.decode(self.redis.get('resque:worker:%s' % name))
     # This pyres version's done_working takes the finished job.
     worker.done_working(job)
     w2 = Worker(['basic'])
     print w2.job()
     assert w2.job() == {}
Пример #13
0
def do_start_build(job_name):
    """Create and enqueue a new build for *job_name* from the request JSON.

    :param job_name: name of the job definition to build.
    :returns: the created build serialized as a JSON response.
    """
    # Renamed from ``input`` to avoid shadowing the builtin.
    payload = request.json

    job = Job.load(job_name, payload.get('job_ref'))
    build = Build.create(job,
                         parameters=payload.get('parameters', {}),
                         description=payload.get('description', ''))
    # The root session of a build is always number 0.
    session_id = '%s-0' % build.uuid
    set_session_queued(g.db, session_id)
    r = ResQ()
    r.enqueue(DispatchSession, session_id)
    return jsonify(**build.as_dict())
Пример #14
0
 def __init__(self, server="localhost:6379", password=None):
     """
     >>> from pyres.scheduler import Scheduler
     >>> scheduler = Scheduler('localhost:6379')
     """
     self._shutdown = False
     # Accept either a DSN string or a ready-made ResQ connection.
     # NOTE(review): ``basestring`` is Python 2 only.
     if isinstance(server, basestring):
         self.resq = ResQ(server=server, password=password)
     elif isinstance(server, ResQ):
         self.resq = server
     else:
         raise Exception("Bad server argument")
Пример #15
0
 def save(self, resq=None):
     """Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
     resq = resq or ResQ()
     # Unix timestamp (seconds) of when the failure was recorded.
     failed_at = int(time.mktime(datetime.datetime.now().timetuple()))
     record = {
         'failed_at': failed_at,
         'payload': self._payload,
         'error': self._parse_message(self._exception),
         'backtrace': self._parse_traceback(self._traceback),
         'queue': self._queue,
     }
     if self._worker:
         record['worker'] = self._worker
     resq.redis.rpush('resque:failed', ResQ.encode(record))
Пример #16
0
    def retry(self, payload_class, args):
        """Re-enqueue a failed job for a later attempt.

        If *payload_class* defines a ``retry_every`` attribute the job is
        scheduled that many seconds from now, and retries keep being
        scheduled until ``retry_timeout`` seconds have elapsed since the
        first attempt.

        :param payload_class: the :class:`Job`-like class that needs
                              to be retried
        :type payload_class: :class:`Job`-like

        :param args: The args to be passed to the `payload_class.perform`
                     method when it is retried.
        :type args: list

        :returns: True if a retry was scheduled, False otherwise.
        """
        interval = getattr(payload_class, 'retry_every', None)
        if not interval:
            return False

        timeout = getattr(payload_class, 'retry_timeout', 0)
        now = ResQ._current_time()
        first_attempt = self._payload.get("first_attempt", now)
        deadline = first_attempt + timedelta(seconds=timeout)
        when = now + timedelta(seconds=interval)
        if when >= deadline:
            # Past the retry window — give up.
            return False
        self.resq.enqueue_at(when, payload_class, *args,
                             first_attempt=first_attempt)
        return True
Пример #17
0
 def save(self, resq=None):
     """Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
     if not resq:
         resq = ResQ()
     failure_record = {
         # Human-readable timestamp of the failure.
         'failed_at': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
         'payload': self._payload,
         'exception': self._exception.__class__.__name__,
         'error': self._parse_message(self._exception),
         'backtrace': self._parse_traceback(self._traceback),
         'queue': self._queue,
     }
     if self._worker:
         failure_record['worker'] = self._worker
     resq.redis.rpush('resque:failed', ResQ.encode(failure_record))
Пример #18
0
def dispatch():
    """Create a session from the request JSON and enqueue its dispatch.

    :returns: the new session id as a JSON response.
    """
    # Renamed from ``input`` to avoid shadowing the builtin.
    payload = request.json
    session_no = create_session(g.db,
                                payload['build_id'],
                                parent=payload['parent'],
                                labels=payload['labels'],
                                run_info=payload['run_info'],
                                state=SESSION_STATE_TO_BACKEND)
    session_id = '%s-%s' % (payload['build_id'], session_no)
    ri = payload['run_info'] or {}
    args = ", ".join(ri.get('args', []))
    # Log title, e.g. "step_name(arg1, arg2)".
    title = "%s(%s)" % (ri.get('step_name', 'main'), args)
    item = RunAsync(session_no, title)
    add_slog(g.db, payload['parent'], item)
    r = ResQ()
    r.enqueue(DispatchSession, session_id)
    return jsonify(session_id=session_id)
Пример #19
0
def check_in_available(agent_id):
    """Mark a session finished and its agent available again.

    Records the session result, marks the whole build done when this was
    the root session (number 0), logs the event, flips the agent state
    back to AVAIL, and enqueues an AgentAvailable job.
    """
    session_id = request.json['session_id']
    build_id, num = session_id.split('-')
    with g.db.pipeline() as pipe:
        set_session_done(pipe, session_id, request.json['result'],
                         request.json['output'], request.json['log_file'])
        # Session number 0 is the build's root session.
        if int(num) == 0:
            Build.set_done(build_id, request.json['result'], pipe=pipe)

        add_slog(pipe, session_id, SessionDone(request.json['result']))

        pipe.hmset(jdb.KEY_AGENT % agent_id,
                   dict(state=jdb.AGENT_STATE_AVAIL, seen=get_ts()))
        pipe.execute()

    r = ResQ()
    r.enqueue(AgentAvailable, agent_id)
    return jsonify()
Пример #20
0
 def __init__(self, queues, host, max_children=10):
     """Manager for worker children.

     :param queues: queue names to watch.
     :param host: Redis DSN passed straight to ResQ.
     :param max_children: maximum number of concurrent child workers.
     """
     self.queues = queues
     self._host = host
     self.max_children = max_children
     self._shutdown = False
     self.children = []
     self.resq = ResQ(host)
     self.validate_queues()
     self.reports = {}
Пример #21
0
def autodiscover():
    """
    Auto-discover INSTALLED_APPS cron.py modules and fail silently when
    not present. This forces an import on them to register any cron jobs they
    may want.
    """
    import imp
    from django.conf import settings

    for app in settings.INSTALLED_APPS:
        # For each app, look for a tasks module inside the app's package.
        # os.path is unusable here — modules may be imported different ways
        # (think zip files) — so use the app's __path__ instead.

        # Step 1: find the app's __path__. Import errors bubble up; a
        # missing __path__ (legal, but weird) fails silently.
        try:
            app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
        except AttributeError:
            continue

        # Step 2: imp.find_module raises ImportError if the module can't be
        # found without actually importing it — skip apps without tasks.py.
        try:
            fp, pathname, description = imp.find_module('tasks', app_path)
        except ImportError:
            continue

        # Step 3: import the app's task file; errors here should bubble up.
        modulename = imp.load_module('tasks', fp, pathname, description)

        # Step 4: find classes whose names contain 'Periodic' or 'Interval'.
        for item in dir(modulename):
            # BUG FIX: the original test was `'Periodic' or 'Interval' in item`,
            # which is always truthy ('Periodic' is a non-empty string); test
            # both substrings explicitly. The old `hasattr(item, '__class__')`
            # was also dropped — it is always True for a str.
            if 'Periodic' in item or 'Interval' in item:
                pyres_sched = PyresScheduler()
                resque = ResQ()
                pyres_sched.add_resque(resque)
                pyres_sched.start()

                classname = getattr(modulename, item)
                # Skip classes that do not declare a schedule instead of
                # crashing on an unbound ``run_every`` below.
                run_every = getattr(classname, 'run_every', None)
                if not run_every:
                    continue

                if 'Periodic' in item:
                    print('periodic')
                    pyres_sched.add_cron_job(classname, args=None, **run_every)
                elif 'Interval' in item:
                    pyres_sched.add_interval_job(classname,
                                                 args=None,
                                                 **run_every)
Пример #22
0
 def setup_resq(self):
     """Create (or adopt) the ResQ connection described by ``self.server``."""
     if hasattr(self, 'logger'):
         self.logger.info('Connecting to redis server - %s' % self.server)
     # Adopt an existing connection, or build one from a DSN string.
     if isinstance(self.server, ResQ):
         self.resq = self.server
     elif isinstance(self.server, string_types):
         self.resq = ResQ(server=self.server, password=self.password)
     else:
         raise Exception("Bad server argument")
Пример #23
0
    def all(cls, resq, start=0, count=1):
        """Return decoded failure records from the 'resque:failed' list.

        NOTE(review): redis LRANGE treats the end index as inclusive, so
        ``count`` here is really an end index — count=1 yields up to two
        items. Confirm callers expect this before changing it.
        """
        items = resq.redis.lrange('resque:failed', start, count) or []

        ret_list = []
        for i in items:
            failure = ResQ.decode(i)
            # Keep the raw value (base64) so the UI can retry/delete it.
            failure['redis_value'] = b64encode(i)
            ret_list.append(failure)
        return ret_list
Пример #24
0
    def all(cls, resq, start=0, count=1):
        """Return decoded failure records from the 'resque:failed' list."""
        raw_items = resq.redis.lrange('resque:failed', start, count) or []

        decoded = []
        for raw in raw_items:
            record = ResQ.decode(raw)
            # Keep the raw redis value around (base64) so it can be
            # re-submitted or deleted later.
            record['redis_value'] = b64encode(raw)
            decoded.append(record)
        return decoded
Пример #25
0
def queues(request):
    """Render the queues overview page.

    :param request: incoming web request (unused beyond routing).
    :returns: rendered 'queues.html' as a string.
    """
    # BUG FIX: ``resq`` was referenced in the template context without ever
    # being defined, raising NameError on every request. Build the
    # connection once and reuse it for the failure count too.
    resq = ResQ(HOST)
    queues = ResQ.queues(HOST)
    failure_count = Failure.count(resq)
    template = env.get_template('queues.html')
    dic = {
        'queues': queues,
        'failure_count': failure_count,
        'resq': resq
    }
    return str(template.render(dic))
Пример #26
0
def failed_retry(request):
    """Re-enqueue a failed job submitted from the resweb UI, then redirect."""
    try:
        import json
    except ImportError:
        import simplejson as json
    failed_job = request.POST['failed_job']
    # The job payload travels through the form base64-encoded.
    job = b64decode(failed_job)
    decoded = ResQ.decode(job)
    failure.retry(HOST, decoded['queue'], job)
    raise Redirect('/failed/')
Пример #27
0
 def save(self, resq):
     """Serialize this failure and push it onto the 'failed' list."""
     data = {
         'failed_at' : str(datetime.datetime.now()),
         'payload'   : self._payload,
         'error'     : self._parse_message(self._exception),
         'backtrace' : self._parse_traceback(self._traceback),
         'queue'     : self._queue
     }
     data = ResQ.encode(data)
     # NOTE(review): standard redis-py exposes rpush/lpush, not ``push``,
     # and other versions of this code use the 'resque:failed' key —
     # presumably this targets an older/custom client; confirm.
     resq.redis.push('failed', data)
Пример #28
0
def failed_retry(request):
    """Re-enqueue a failed job posted from the web UI, then redirect."""
    try:
        import json
    except ImportError:
        import simplejson as json
    # The job payload arrives base64-encoded in the POST body.
    encoded_job = request.POST['failed_job']
    raw_job = b64decode(encoded_job)
    payload = ResQ.decode(raw_job)
    failure.retry(HOST, payload['queue'], raw_job)
    raise Redirect('/failed/')
Пример #29
0
    def all(cls, host="localhost:6379"):
        """Return a Worker for every member of the 'resque:workers' set.

        :param host: a ``"host:port"`` DSN string or an existing ResQ.
        NOTE(review): ``basestring`` is Python 2 only, and there is no
        ``else`` branch — an unsupported *host* type raises
        UnboundLocalError on ``resq`` below.
        """
        if isinstance(host, basestring):
            resq = ResQ(host)
        elif isinstance(host, ResQ):
            resq = host

        return [
            Worker.find(w, resq)
            for w in resq.redis.smembers('resque:workers') or []
        ]
Пример #30
0
    def __init__(self,
                 queues=(),
                 server="localhost:6379",
                 password=None,
                 timeout=None):
        """Initialise a worker.

        :param queues: queue names this worker will watch.
        :param server: Redis DSN string or an existing ResQ connection.
        :param password: Redis password (unused when *server* is a ResQ).
        :param timeout: optional timeout forwarded to queue polling.
        :raises Exception: when *server* is neither a string nor a ResQ.
        """
        self.queues = queues
        self.validate_queues()
        self._shutdown = False
        self.child = None
        # Identity used to register this worker with Redis.
        self.pid = os.getpid()
        self.hostname = os.uname()[1]
        self.timeout = timeout

        if isinstance(server, string_types):
            self.resq = ResQ(server=server, password=password)
        elif isinstance(server, ResQ):
            self.resq = server
        else:
            raise Exception("Bad server argument")
Пример #31
0
def failed_job(request, retry=False):
    """Handle POST /failed/retry (retry=True) or POST /failed/delete."""
    # The job payload arrives base64-encoded in the POST body.
    raw = b64decode(request.POST['failed_job'])
    if retry:
        # post /failed/retry
        failure.retry(get_pyres(), ResQ.decode(raw)['queue'], raw)
    else:
        # post /failed/delete
        failure.delete(get_pyres(), raw)
    return redirect('resweb-failed')
Пример #32
0
 def __init__(self, server="localhost:6379", password=None):
     """Create a scheduler bound to *server* (DSN string or ResQ).

     >>> from pyres.scheduler import Scheduler
     >>> scheduler = Scheduler('localhost:6379')
     """
     self._shutdown = False
     # Adopt an existing connection, or build one from a DSN string.
     if isinstance(server, ResQ):
         self.resq = server
     elif isinstance(server, basestring):
         self.resq = ResQ(server=server, password=password)
     else:
         raise Exception("Bad server argument")
Пример #33
0
def details():
    """Enqueue a Record job for every DB record missing details.

    Counts the matching rows first so tqdm can show progress, then streams
    the rows through a named (server-side) cursor and enqueues one job per
    record.
    """
    r = ResQ()
    with closing(get_connection()) as connection:
        total = 0
        with closing(connection.cursor()) as cursor:
            query = '''
            SELECT COUNT(id) AS count
            FROM records
            WHERE details IS NULL
            '''
            cursor.execute(query)
            total = cursor.fetchone()['count']
        # Named cursor -> rows are streamed instead of loaded at once.
        with closing(connection.cursor('cursor')) as cursor:
            query = '''
            SELECT *
            FROM records
            WHERE details IS NULL
            '''
            cursor.execute(query)
            for record in tqdm(cursor, total=total):
                r.enqueue(Record, record['id'])
Пример #34
0
def submit_feedback(request):
    """POST-only view: validate the feedback form and enqueue delivery.

    Returns 200 on success, 409 with the re-rendered form partial on
    validation failure. Non-POST requests get a 404.
    """
    if request.method != 'POST':
        raise Http404
    # Optional ?lang= switch, restricted to the allowed languages.
    if 'lang' in request.GET and request.GET['lang'] in ALLOWED_LANGS:
        translation.activate(request.GET['lang'])

    try:
        form = FeedbackForm(request.POST or None)
        if form.is_valid():
            r = ResQ()
            r.enqueue(FeedbackTask, form.cleaned_data['name'],
                      form.cleaned_data['email'], form.cleaned_data['text'])
            return HttpResponse(status=200)

        # form is invalid
        return HttpResponse(
            loader.render_to_string('feedback/feedback_form_partial.html',
                                    {'feedback_form': form}),
            status=409)
    except Exception:
        # Log unexpected errors with traceback, then re-raise.
        logger.exception('')
        raise
Пример #35
0
    def retry(self, payload_class, args):
        """Retry a failed job when the payload class opts in.

        A ``retry_every`` attribute on *payload_class* schedules the job
        again that many seconds from now, until ``retry_timeout`` seconds
        have passed since the first attempt.

        :param payload_class: the Job-like class to retry.
        :param args: positional args for ``payload_class.perform``.
        :returns: True when a retry was scheduled, False otherwise.
        """
        retry_every = getattr(payload_class, "retry_every", None)
        retry_timeout = getattr(payload_class, "retry_timeout", 0)

        if retry_every:
            now = ResQ._utcnow()
            first_attempt = self._payload.get("first_attempt", now)
            # Retry window ends retry_timeout seconds after the first attempt.
            retry_until = first_attempt + timedelta(seconds=retry_timeout)
            retry_at = now + timedelta(seconds=retry_every)
            if retry_at < retry_until:
                self.resq.enqueue_at(retry_at, payload_class, *args, **{"first_attempt": first_attempt})
                return True
        return False
Пример #36
0
def add_repository_to_queue(user, repo_owner, repository_name, repo_object=None):
    """Register a GitHub repository for processing and enqueue a worker job.

    :param user: user whose OAuth token is used for the GitHub API.
    :param repo_owner: owner login of the repository.
    :param repository_name: repository name.
    :param repo_object: optional pre-fetched repo object; skips the API call.
    """
    res = ResQ()
    repo = None
    token = user.get_profile().extra_data['access_token']

    if not repo_object:
        # Fetch repository metadata from the GitHub API.
        gh = Github(login=user.email, token=token)

        repo = gh.repos.get(repo_owner, repository_name)
    else:
        repo = repo_object

    # Payload handed to the background worker.
    queue_data = {'email': user.email, 'token': token}
    queue_data['repo'] = {
        'name': repo.name,
        'url': repo.url,
        'git_url': repo.git_url,
        'html_url': repo.html_url,
        'language': repo.language
    }

    db_repo, created = Repository.objects.get_or_create(
            git_url=repo.git_url, defaults=queue_data['repo'])

    query_filter = {'user': user, 'repository': db_repo}
    query_filter.update({'defaults': {
            'user': user,
            "repository": db_repo
        }
    })

    as_contributor, created = Contributor.objects.get_or_create(**query_filter)

    # put a timestamp field in repository model to verify if there is need to 
    # process again. so only put in queue if is not created and timestamp > x time

    res.enqueue(RepositoryWorker, queue_data)
Пример #37
0
 def save(self, resq=None):
     """Persist this failure onto the 'resque:failed' Redis list."""
     resq = resq or ResQ()
     record = {
         'failed_at': str(datetime.datetime.now()),
         'payload': self._payload,
         'error': self._parse_message(self._exception),
         'backtrace': self._parse_traceback(self._traceback),
         'queue': self._queue,
     }
     if self._worker:
         record['worker'] = self._worker
     resq.redis.rpush('resque:failed', ResQ.encode(record))
Пример #38
0
def do_register():
    """Register (or refresh) an agent from the request JSON.

    Stores the agent's info hash, adds it to the global agent set and to
    one set per label, then enqueues an AgentAvailable job.
    """
    agent_id = request.json['id']

    info = {
        "ip": request.remote_addr,
        'nick': request.json.get('nick', ''),
        "port": request.json["port"],
        "state": jdb.AGENT_STATE_AVAIL,
        "seen": get_ts(),
        "labels": ",".join(request.json["labels"])
    }

    with g.db.pipeline() as pipe:
        pipe.hmset(jdb.KEY_AGENT % agent_id, info)
        pipe.sadd(jdb.KEY_ALL, agent_id)

        # Index the agent under each of its labels.
        for label in request.json["labels"]:
            pipe.sadd(jdb.KEY_LABEL % label, agent_id)
        pipe.execute()

    r = ResQ()
    r.enqueue(AgentAvailable, agent_id)
    return jsonify()
Пример #39
0
def pyres_web():
    """Parse CLI options and launch the resweb itty server."""
    parser = OptionParser("usage: %prog [options]")
    parser.add_option("--host",
                      dest="host",
                      default="localhost",
                      metavar="HOST")
    parser.add_option("--port", dest="port", type="int", default=8080)
    parser.add_option("--dsn", dest="dsn", help="redis server to display")
    options, args = parser.parse_args()
    if options.dsn:
        # Only import pyres when a backend was actually requested.
        from pyres import ResQ
        server.HOST = ResQ(options.dsn)
    run_itty(host=options.host, port=options.port)
Пример #40
0
 def save(self, resq=None):
     """Serialize this failure and append it to the failed-jobs list."""
     if not resq:
         resq = ResQ()
     data = {
         "failed_at": str(datetime.datetime.now()),
         "payload": self._payload,
         "error": self._parse_message(self._exception),
         "backtrace": self._parse_traceback(self._traceback),
         "queue": self._queue,
     }
     if self._worker:
         data["worker"] = self._worker
     data = ResQ.encode(data)
     # NOTE(review): standard redis-py exposes rpush, not ``push`` —
     # presumably an older/custom client; confirm before relying on this.
     resq.redis.push("resque:failed", data)
Пример #41
0
    def retry(self, payload_class, args):
        """Schedule a retry for a failed job if its class opts in.

        ``retry_every`` (seconds) on *payload_class* enables retries;
        ``retry_timeout`` bounds how long after the first attempt retries
        keep being scheduled.

        :param payload_class: the Job-like class to retry.
        :param args: positional args for ``payload_class.perform``.
        :returns: True if a retry was enqueued, False otherwise.
        """
        retry_every = getattr(payload_class, 'retry_every', None)
        retry_timeout = getattr(payload_class, 'retry_timeout', 0)

        if retry_every:
            now = ResQ._current_time()
            first_attempt = self._payload.get("first_attempt", now)
            # Retry window ends retry_timeout seconds after the first attempt.
            retry_until = first_attempt + timedelta(seconds=retry_timeout)
            retry_at = now + timedelta(seconds=retry_every)
            if retry_at < retry_until:
                self.resq.enqueue_at(retry_at, payload_class, *args,
                                     **{'first_attempt': first_attempt})
                return True
        return False
Пример #42
0
def main(args=None):
    """CLI entry point: configure logging, connect to Redis and Mongo,
    then run the wight worker loop.

    :param args: argv list; defaults to ``sys.argv[1:]``.
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', '-c', help="Path to configuration file.")
    parser.add_argument('--verbose', '-v', action='count', default=0, help='Log level: v=warning, vv=info, vvv=debug.')
    options = parser.parse_args(args)

    # Map the -v count to a logging level name via the LOGS table.
    log_level = LOGS[options.verbose].upper()
    logging.basicConfig(
        level=getattr(logging, log_level),
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    if options.conf:
        cfg = Config.load(abspath(expanduser(options.conf)))
    else:
        cfg = Config()

    conn = ResQ(server="%s:%s" % (cfg.REDIS_HOST, cfg.REDIS_PORT), password=cfg.REDIS_PASSWORD)
    conn.config = cfg

    connect(
        cfg.MONGO_DB,
        host=cfg.MONGO_HOST,
        port=cfg.MONGO_PORT,
        username=cfg.MONGO_USER,
        password=cfg.MONGO_PASS
    )

    # NOTE: the bare ``print`` statements below are Python 2 syntax.
    print
    print("--- Wight worker started ---")
    print
    Worker.run([WorkerJob.queue], conn)
    print
    print "--- Wight worker killed ---"
    print
Пример #43
0
    def __init__(self, queues=(), server="localhost:6379", password=None, timeout=None):
        """Initialise a worker: record process identity and connect to Redis."""
        self.queues = queues
        self.validate_queues()
        self._shutdown = False
        self.child = None
        # Identity used to register this worker with Redis.
        self.pid = os.getpid()
        self.hostname = os.uname()[1]
        self.timeout = timeout

        # Adopt an existing connection, or build one from a DSN string.
        if isinstance(server, ResQ):
            self.resq = server
        elif isinstance(server, string_types):
            self.resq = ResQ(server=server, password=password)
        else:
            raise Exception("Bad server argument")
Пример #44
0
 def save(self, resq=None):
     """Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
     if not resq:
         resq = ResQ()
     data = {
         # Unix timestamp (seconds) of when the failure was recorded.
         "failed_at": int(time.mktime(datetime.datetime.now().timetuple())),
         "payload": self._payload,
         "error": self._parse_message(self._exception),
         "backtrace": self._parse_traceback(self._traceback),
         "queue": self._queue,
     }
     if self._worker:
         data["worker"] = self._worker
     data = ResQ.encode(data)
     resq.redis.rpush("resque:failed", data)
Пример #45
0
 def save(self, resq=None):
     """Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
     if not resq:
         resq = ResQ()
     data = {
         # Unix timestamp (seconds) of when the failure was recorded.
         'failed_at' : int(time.mktime(datetime.datetime.now().timetuple())),
         'payload'   : self._payload,
         'error'     : self._parse_message(self._exception),
         'backtrace' : self._parse_traceback(self._traceback),
         'queue'     : self._queue
     }
     if self._worker:
         data['worker'] = self._worker
     data = ResQ.encode(data)
     resq.redis.rpush('resque:failed', data)
Пример #46
0
    def perform(text, url):
        '''
        Delayed task: post *text* to the configured VKontakte wall and
        enqueue a "like" job for every configured repost user.
        '''
        if not settings.PUBLISHING_VKONTAKTE_USER:
            return
        # Idiomatic form of the original ``not url is None``.
        if url is not None:
            text = '%s %s' % (text, url)

        method = 'wall.post'
        owner_id, token, extra_params = settings.PUBLISHING_VKONTAKTE_USER
        params = {'owner_id': owner_id,
                  'access_token': token,
                  'message': text}
        params.update(extra_params)
        resp = vk(method, params)
        post_id = resp.get("response", {}).get("post_id")
        if not post_id:
            raise ValueError('Post ID not provided.\nMethod %s\nParams %r'\
                                 '\n Response %r' % (method, params, resp))

        r = ResQ()
        # Iterate the keys directly: the dict values were unused, and plain
        # iteration works on both Python 2 and 3 (``iteritems`` is py2-only).
        for user in settings.PUBLISHING_VKONTAKTE_REPOST_USERS:
            r.enqueue(VkontakteLike, user, owner_id, post_id)
Пример #47
0
 def working(cls, host):
     """Return the Worker objects that currently have a job in progress.

     :param host: a ``"host:port"`` DSN string or an existing ResQ.
     NOTE(review): there is no ``else`` branch — an unsupported *host*
     type leaves ``resq`` unbound.
     """
     if isinstance(host, string_types):
         resq = ResQ(host)
     elif isinstance(host, ResQ):
         resq = host
     total = []
     for key in Worker.all(host):
         total.append('resque:worker:%s' % key)
     names = []
     for key in total:
         # A value stored under the worker key means it is mid-job.
         value = resq.redis.get(key)
         if value:
             w = Worker.find(key[14:], resq)  # strip 'resque:worker:' (14 chars)
             names.append(w)
     return names
Пример #48
0
 def save(self, resq=None):
     """Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
     if not resq:
         resq = ResQ()
     # Human-readable timestamp of the failure.
     timestamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
     record = {
         "failed_at": timestamp,
         "payload": self._payload,
         "exception": self._exception.__class__.__name__,
         "error": self._parse_message(self._exception),
         "backtrace": self._parse_traceback(self._traceback),
         "queue": self._queue,
     }
     if self._worker:
         record["worker"] = self._worker
     resq.redis.rpush("resque:failed", ResQ.encode(record))
Пример #49
0
 def save(self, resq=None):
     """Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
     if not resq:
         resq = ResQ()
     data = {
         # Human-readable timestamp, e.g. '2024/01/31 12:00:00'.
         'failed_at' : datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
         'payload'   : self._payload,
         'exception' : self._exception.__class__.__name__,
         'error'     : self._parse_message(self._exception),
         'backtrace' : self._parse_traceback(self._traceback),
         'queue'     : self._queue
     }
     if self._worker:
         data['worker'] = self._worker
     data = ResQ.encode(data)
     resq.redis.rpush('resque:failed', data)
Пример #50
0
    def perform(args):
        """Fetch an s2gs summary from Battle.net, archive it to S3, parse it
        into the DB, and always enqueue the ruby PostParse job.

        :param args: dict with at least 'hash' and 'gateway' keys.
        """
        try:
            sc2reader_to_esdb = SC2ReaderToEsdb()
            filename = args['hash'] + '.s2gs'
            gateway = args['gateway']
            # Battle.net's depot uses 'sg' for the SEA region.
            if gateway == 'sea':
                gateway = 'sg'

            # retrieve it from battlenet
            depoturl = 'http://{0}.depot.battle.net:1119/{1}'.format(
                gateway, filename)
            try:
                # NOTE(review): bare except below swallows *all* errors,
                # including KeyboardInterrupt; consider narrowing.
                s2gsfile = urllib2.urlopen(depoturl).read()
            except:
                logging.getLogger("jobs").info(
                    "couldnt retrieve {} s2gs hash {}. maybe its bad.".format(
                        gateway, args['hash']))
                return None

            # save it in S3 because we are pack rats
            bucket = boto.connect_s3(settings.AWS_ACCESS_KEY_ID,
                                     settings.AWS_SECRET_ACCESS_KEY)\
                                     .get_bucket(settings.S2GS_BUCKET_NAME)
            k = Key(bucket)
            k.key = filename
            k.set_contents_from_string(s2gsfile)

            # parse it and write stuff to DB
            summaryDB = sc2reader_to_esdb.processSummary(
                StringIO(s2gsfile), args['hash'])

        except Exception as e:
            # Parsing failures are logged with full context but not re-raised.
            tb = traceback.format_exc()
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.getLogger("jobs").info(
                "parsing failed for s2gs {}. oh well. exception={}. {} {} {} {}"
                .format(args['hash'], e, exc_type, fname, exc_tb.tb_lineno,
                        tb))
            pass

        finally:
            # Enqueue ruby PostParse job, always!
            ResQ(server=settings.REDIS_SERVER).enqueue_from_string(
                'ESDB::Jobs::Sc2::Summary::PostParse', 'summaries-high',
                {'hash': args['hash']})
Пример #51
0
    def retry(self, payload_class, args):
        """This method provides a way to retry a job after a failure.
        If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres
        will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class.

        :param payload_class: the Job-like class to retry.
        :param args: positional args for ``payload_class.perform``.
        :returns: True if a retry was scheduled, False otherwise.
        """
        retry_every = getattr(payload_class, "retry_every", None)
        retry_timeout = getattr(payload_class, "retry_timeout", 0)

        if retry_every:
            now = ResQ._current_time()
            first_attempt = self._payload.get("first_attempt", now)
            # Retry window ends retry_timeout seconds after the first attempt.
            retry_until = first_attempt + timedelta(seconds=retry_timeout)
            retry_at = now + timedelta(seconds=retry_every)
            if retry_at < retry_until:
                self.resq.enqueue_at(retry_at, payload_class, *args, **{"first_attempt": first_attempt})
                return True
        return False
Пример #52
0
def pyres_web():
    """Parse command-line options and launch the resweb itty server."""
    parser = OptionParser("usage: %prog [options]")
    parser.add_option("--host", dest="host", metavar="HOST",
                      default="localhost")
    parser.add_option("--port", dest="port", type="int", default=8080)
    parser.add_option("--dsn", dest="dsn", help="Redis server to display")
    parser.add_option("--server", dest="server", default='wsgiref',
                      help="Server for itty to run under.")
    options, _args = parser.parse_args()

    if options.dsn:
        # Point the web console at the requested Redis instance.
        from pyres import ResQ
        resweb_server.HOST = ResQ(options.dsn)

    run_itty(host=options.host, port=options.port, server=options.server)
Пример #53
0
    def retry(self, payload_class, args):
        """Schedule another attempt of a failed job.

        A ``retry_every`` attribute on *payload_class* enables retrying;
        ``retry_timeout`` bounds how long (counted from the first attempt)
        retries may continue.

        :returns: ``True`` when the job was re-enqueued, else ``False``.
        """
        interval = getattr(payload_class, 'retry_every', None)
        window = getattr(payload_class, 'retry_timeout', 0)

        if interval:
            now = ResQ._current_time()
            first = self._payload.get("first_attempt", now)
            when = now + timedelta(seconds=interval)
            # Only retry while the next attempt still falls inside the window.
            if when < first + timedelta(seconds=window):
                self.resq.enqueue_at(when, payload_class, *args,
                                     first_attempt=first)
                return True
        return False
Пример #54
0
def main(params=None):
    """Entry point for the pyres web console.

    Parses command-line arguments, configures logging, connects the web
    console to the requested Redis instance, and starts the itty server.

    :param params: argument list to parse; defaults to ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser(description='Runs pyres web console.')

    binding = parser.add_argument_group('Connection arguments')
    binding.add_argument('--host', default='localhost', help='Binding host')
    binding.add_argument('--port', default=8080, type=int,
                         help='Binding port')

    redis_group = parser.add_argument_group('Redis arguments')
    redis_group.add_argument('--redis-host', default='localhost',
                             help='Redis host')
    redis_group.add_argument('--redis-port', default=6379, type=int,
                             help='Redis port')
    redis_group.add_argument('--redis-database', default=0, type=int,
                             help='Redis database')
    redis_group.add_argument('--redis-password', default=None,
                             help='Redis password')

    misc = parser.add_argument_group('Other arguments')
    misc.add_argument('-l', '--level', default='debug', help='Logging level')

    arguments = parser.parse_args(sys.argv[1:] if params is None else params)
    logging.basicConfig(level=getattr(logging, arguments.level.upper()))

    # Hand the web console a ResQ bound to the configured Redis instance.
    connection = Redis(host=arguments.redis_host,
                       port=arguments.redis_port,
                       db=arguments.redis_database,
                       password=arguments.redis_password)
    resweb_server.HOST = ResQ(connection)
    run_itty(host=arguments.host, port=arguments.port, server='wsgiref')
Пример #55
0
    def run(self):
        """Set up logging and the Redis connection, then start working."""
        setproctitle('pyres_minion:%s: Starting' % (os.getppid(), ))

        # Resolve where this minion's log output should go.
        if self.log_path:
            if special_log_file(self.log_path):
                self.log_file = self.log_path
            else:
                self.log_file = os.path.join(self.log_path,
                                             'minion-%s.log' % self.pid)

        self.logger = setup_logging('minion', 'minion:%s' % self.pid,
                                    self.log_level, self.log_file)

        # Accept either a DSN string or an already-connected ResQ instance.
        if isinstance(self.server, string_types):
            self.resq = ResQ(server=self.server, password=self.password)
        elif isinstance(self.server, ResQ):
            self.resq = self.server
        else:
            raise Exception("Bad server argument")

        self.work()
Пример #56
0
    def all(cls, resq, start=0, count=1):
        """Get a slice of the items in the failure queue.

        Redis' documentation: `LRANGE <http://redis.io/commands/LRANGE>`_

        NOTE(review): ``count`` is passed straight through as LRANGE's
        *inclusive* end index, not a number of items -- e.g.
        ``all(resq, 0, 1)`` returns up to two entries. Documented as-is to
        avoid breaking callers that rely on this behavior.

        :param resq: The redis queue instance to check
        :type resq: :class:`ResQ`
        :param start: Index of the first failure to fetch.
        :type start: int
        :param count: Inclusive end index of the slice (LRANGE ``stop``).
        :type count: int

        :returns: Decoded failure payloads; each dict also carries the raw
            Redis entry base64-encoded under the ``redis_value`` key.
        :rtype: `list` of `dict`
        """
        items = resq.redis.lrange('resque:failed', start, count) or []

        ret_list = []
        for i in items:
            failure = ResQ.decode(i)
            failure['redis_value'] = b64encode(i)
            ret_list.append(failure)
        return ret_list
Пример #57
0
 def perform(fail_until):
     """Deliberately fail until the given timestamp, then succeed.

     Raises while ``ResQ._current_time()`` is still before *fail_until*;
     once that moment has passed, returns ``True``.
     """
     if ResQ._current_time() >= fail_until:
         return True
     raise Exception("Don't blame me!  I'm supposed to fail!")
Пример #58
0
 def job(self):
     """Return the decoded payload this worker is currently processing.

     Reads ``resque:worker:<self>`` from Redis; an empty dict means no
     entry is stored for this worker.
     """
     raw = self.resq.redis.get("resque:worker:%s" % self)
     return ResQ.decode(raw) if raw else {}