예제 #1
0
 def __init__(self, queues=None, server="localhost:6379", password=None):
     """Initialize the worker.

     :param queues: iterable of queue names to watch (default: empty list).
     :param server: a "host:port" string or an existing ResQ instance.
     :param password: Redis password, used only when *server* is a string.
     :raises Exception: if *server* is neither a string nor a ResQ.
     """
     # BUG FIX: the default used to be a shared mutable list (``queues=[]``),
     # so all instances constructed without *queues* shared one list object.
     self.queues = [] if queues is None else queues
     self.validate_queues()
     self._shutdown = False
     self.child = None
     self.pid = os.getpid()
     self.hostname = os.uname()[1]
     if isinstance(server, basestring):
         self.resq = ResQ(server=server, password=password)
     elif isinstance(server, ResQ):
         self.resq = server
     else:
         raise Exception("Bad server argument")
예제 #2
0
def pyres_web():
    """CLI entry point: parse --host/--port/--dsn and launch the itty web app."""
    opt_parser = OptionParser("usage: %prog [options]")
    opt_parser.add_option("--host",
                          dest="host",
                          default="localhost",
                          metavar="HOST")
    opt_parser.add_option("--port", dest="port", type="int", default=8080)
    opt_parser.add_option("--dsn", dest="dsn", help="redis server to display")
    options, _ = opt_parser.parse_args()
    if options.dsn:
        from pyres import ResQ
        # Point the resweb server module at the requested Redis instance.
        server.HOST = ResQ(options.dsn)
    run_itty(host=options.host, port=options.port)
예제 #3
0
 def save(self, resq=None):
     """Record this failed job on the 'resque:failed' Redis list.

     Keeps the job's original enqueue info; falls back to a default ResQ
     connection when none is supplied.
     """
     resq = resq or ResQ()
     failed_at = int(time.mktime(datetime.datetime.now().timetuple()))
     failure = {
         'failed_at': failed_at,
         'payload': self._payload,
         'error': self._parse_message(self._exception),
         'backtrace': self._parse_traceback(self._traceback),
         'queue': self._queue,
     }
     if self._worker:
         failure['worker'] = self._worker
     resq.redis.rpush('resque:failed', ResQ.encode(failure))
예제 #4
0
 def working(cls, host):
     """Return Worker objects for every worker currently registered on *host*.

     :param host: a server address string or an existing ResQ instance.
     :raises Exception: if *host* is neither a string nor a ResQ.
     """
     if isinstance(host, string_types):
         resq = ResQ(host)
     elif isinstance(host, ResQ):
         resq = host
     else:
         # BUG FIX: previously this fell through with ``resq`` unbound and
         # crashed later with an UnboundLocalError; fail fast instead.
         raise Exception("Bad host argument")
     prefix = 'resque:worker:'
     keys = [prefix + worker_id for worker_id in Worker.all(host)]
     names = []
     for key in keys:
         value = resq.redis.get(key)
         if value:
             # Strip the key prefix to recover the worker id.
             w = Worker.find(key[len(prefix):], resq)
             names.append(w)
     return names
예제 #5
0
 def save(self, resq=None):
     """Push this failed job onto the 'resque:failed' Redis list.

     Stores a human-readable timestamp plus the original enqueue payload;
     uses a default ResQ connection when *resq* is not given.
     """
     resq = resq or ResQ()
     entry = {
         'failed_at': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
         'payload': self._payload,
         'exception': self._exception.__class__.__name__,
         'error': self._parse_message(self._exception),
         'backtrace': self._parse_traceback(self._traceback),
         'queue': self._queue,
     }
     if self._worker:
         entry['worker'] = self._worker
     resq.redis.rpush('resque:failed', ResQ.encode(entry))
예제 #6
0
    def __init__(self, queues=(), server="localhost:6379", password=None, timeout=None, odoo_env=None):
        """Set up worker state and resolve *server* into a ResQ connection.

        ``server`` may be a "host:port" string or an existing ResQ instance;
        anything else raises.
        """
        self.queues = queues
        self.validate_queues()
        self._shutdown = False
        self.child = None
        self.pid = os.getpid()
        self.hostname = os.uname()[1]
        self.timeout = timeout
        self.odoo_env = odoo_env

        if isinstance(server, ResQ):
            self.resq = server
        elif isinstance(server, string_types):
            self.resq = ResQ(server=server, password=password)
        else:
            raise Exception("Bad server argument")
예제 #7
0
def dispatch():
    """Create a session from the posted build info and queue it for dispatch."""
    payload = request.json
    session_no = create_session(g.db,
                                payload['build_id'],
                                parent=payload['parent'],
                                labels=payload['labels'],
                                run_info=payload['run_info'],
                                state=SESSION_STATE_TO_BACKEND)
    session_id = '%s-%s' % (payload['build_id'], session_no)
    run_info = payload['run_info'] or {}
    joined_args = ", ".join(run_info.get('args', []))
    title = "%s(%s)" % (run_info.get('step_name', 'main'), joined_args)
    add_slog(g.db, payload['parent'], RunAsync(session_no, title))
    ResQ().enqueue(DispatchSession, session_id)
    return jsonify(session_id=session_id)
예제 #8
0
파일: jobs.py 프로젝트: nickelsen/ggpyjobs
    def perform(args):
        """Download an s2gs summary from Battle.net, archive it to S3, and
        parse it into the DB; always enqueue the ruby PostParse job.

        :param args: dict with at least 'hash' and 'gateway' keys.
        :returns: None when the download fails; otherwise falls off the end.
        """
        try:
            sc2reader_to_esdb = SC2ReaderToEsdb()
            filename = args['hash'] + '.s2gs'
            gateway = args['gateway']
            if gateway == 'sea':
                # The SEA region is served from the 'sg' depot host.
                gateway = 'sg'

            # retrieve it from battlenet
            depoturl = 'http://{0}.depot.battle.net:1119/{1}'.format(
                gateway, filename)
            try:
                s2gsfile = urllib2.urlopen(depoturl).read()
            except Exception:
                # BUG FIX: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                logging.getLogger("jobs").info(
                    "couldnt retrieve {} s2gs hash {}. maybe its bad.".format(
                        gateway, args['hash']))
                return None

            # save it in S3 because we are pack rats
            bucket = boto.connect_s3(settings.AWS_ACCESS_KEY_ID,
                                     settings.AWS_SECRET_ACCESS_KEY)\
                                     .get_bucket(settings.S2GS_BUCKET_NAME)
            k = Key(bucket)
            k.key = filename
            k.set_contents_from_string(s2gsfile)

            # parse it and write stuff to DB
            summaryDB = sc2reader_to_esdb.processSummary(
                StringIO(s2gsfile), args['hash'])

        except Exception as e:
            # Parsing is best-effort: log the failure and carry on so the
            # PostParse job in ``finally`` still runs.
            tb = traceback.format_exc()
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.getLogger("jobs").info(
                "parsing failed for s2gs {}. oh well. exception={}. {} {} {} {}"
                .format(args['hash'], e, exc_type, fname, exc_tb.tb_lineno,
                        tb))

        finally:
            # Enqueue ruby PostParse job, always!
            ResQ(server=settings.REDIS_SERVER).enqueue_from_string(
                'ESDB::Jobs::Sc2::Summary::PostParse', 'summaries-high',
                {'hash': args['hash']})
예제 #9
0
def check_in_available(agent_id):
    """Finish a session, mark the agent available, and queue AgentAvailable."""
    payload = request.json
    session_id = payload['session_id']
    build_id, num = session_id.split('-')
    with g.db.pipeline() as pipe:
        set_session_done(pipe, session_id, payload['result'],
                         payload['output'], payload['log_file'])
        if int(num) == 0:
            # Session number 0 is the root session; its result ends the build.
            Build.set_done(build_id, payload['result'], pipe=pipe)

        add_slog(pipe, session_id, SessionDone(payload['result']))

        pipe.hmset(jdb.KEY_AGENT % agent_id,
                   dict(state=jdb.AGENT_STATE_AVAIL, seen=get_ts()))
        pipe.execute()

    ResQ().enqueue(AgentAvailable, agent_id)
    return jsonify()
예제 #10
0
def pyres_web():
    """CLI entry point: parse options and launch resweb under itty."""
    opt_parser = OptionParser("usage: %prog [options]")
    opt_parser.add_option("--host",
                          dest="host",
                          default="localhost",
                          metavar="HOST")
    opt_parser.add_option("--port", dest="port", type="int", default=8080)
    opt_parser.add_option("--dsn", dest="dsn", help="Redis server to display")
    opt_parser.add_option("--server",
                          dest="server",
                          help="Server for itty to run under.",
                          default='wsgiref')
    options, _ = opt_parser.parse_args()

    if options.dsn:
        from pyres import ResQ
        # Point the resweb server module at the requested Redis instance.
        resweb_server.HOST = ResQ(options.dsn)
    run_itty(host=options.host, port=options.port, server=options.server)
예제 #11
0
def main(params=None):
    """Run the pyres web console under itty's wsgiref server."""
    if params is None:
        params = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Runs pyres web console.')

    binding = parser.add_argument_group('Connection arguments')
    binding.add_argument('--host', default='localhost', help='Binding host')
    binding.add_argument('--port',
                         default=8080,
                         type=int,
                         help='Binding port')

    redis_opts = parser.add_argument_group('Redis arguments')
    redis_opts.add_argument('--redis-host',
                            default='localhost',
                            help='Redis host')
    redis_opts.add_argument('--redis-port',
                            default=6379,
                            type=int,
                            help='Redis port')
    redis_opts.add_argument('--redis-database',
                            default=0,
                            type=int,
                            help='Redis database')
    redis_opts.add_argument('--redis-password',
                            default=None,
                            help='Redis password')

    misc = parser.add_argument_group('Other arguments')
    misc.add_argument('-l',
                      '--level',
                      default='debug',
                      help='Logging level')

    parsed = parser.parse_args(params)
    logging.basicConfig(level=getattr(logging, parsed.level.upper()))

    connection = Redis(host=parsed.redis_host,
                       port=parsed.redis_port,
                       db=parsed.redis_database,
                       password=parsed.redis_password)
    resweb_server.HOST = ResQ(connection)
    run_itty(host=parsed.host, port=parsed.port, server='wsgiref')
예제 #12
0
    def run(self):
        """Configure logging, connect to Redis, then enter the work loop."""
        setproctitle('pyres_minion:%s: Starting' % (os.getppid(), ))
        if self.log_path:
            # A "special" path (e.g. syslog/stderr sentinel) is used verbatim;
            # otherwise log into a per-pid file inside the directory.
            self.log_file = (
                self.log_path if special_log_file(self.log_path)
                else os.path.join(self.log_path, 'minion-%s.log' % self.pid))
        namespace = 'minion:%s' % self.pid
        self.logger = setup_logging('minion', namespace, self.log_level,
                                    self.log_file)
        if isinstance(self.server, ResQ):
            self.resq = self.server
        elif isinstance(self.server, string_types):
            self.resq = ResQ(server=self.server, password=self.password)
        else:
            raise Exception("Bad server argument")

        self.work()
예제 #13
0
def details():
    """Enqueue a Record job for every DB record with no details yet."""
    queue = ResQ()
    with closing(get_connection()) as connection:
        total = 0
        # First pass: count the pending records so tqdm can show progress.
        with closing(connection.cursor()) as cursor:
            query = '''
            SELECT COUNT(id) AS count
            FROM records
            WHERE details IS NULL
            '''
            cursor.execute(query)
            total = cursor.fetchone()['count']
        # Second pass: stream the rows via a named (server-side) cursor.
        with closing(connection.cursor('cursor')) as cursor:
            query = '''
            SELECT *
            FROM records
            WHERE details IS NULL
            '''
            cursor.execute(query)
            for row in tqdm(cursor, total=total):
                queue.enqueue(Record, row['id'])
예제 #14
0
def do_register():
    """Register an agent, index it by label, and queue an AgentAvailable job."""
    body = request.json
    agent_id = body['id']
    labels = body["labels"]

    info = {
        "ip": request.remote_addr,
        'nick': body.get('nick', ''),
        "port": body["port"],
        "state": jdb.AGENT_STATE_AVAIL,
        "seen": get_ts(),
        "labels": ",".join(labels)
    }

    with g.db.pipeline() as pipe:
        pipe.hmset(jdb.KEY_AGENT % agent_id, info)
        pipe.sadd(jdb.KEY_ALL, agent_id)
        # Index the agent under each of its labels for label-based dispatch.
        for label in labels:
            pipe.sadd(jdb.KEY_LABEL % label, agent_id)
        pipe.execute()

    ResQ().enqueue(AgentAvailable, agent_id)
    return jsonify()
예제 #15
0
def main(args=None):
    """Entry point for the wight worker: configure logging, load config,
    connect to Redis and Mongo, then run the worker loop.

    :param args: CLI argument list (defaults to ``sys.argv[1:]``).
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', '-c', help="Path to configuration file.")
    parser.add_argument('--verbose', '-v', action='count', default=0, help='Log level: v=warning, vv=info, vvv=debug.')
    options = parser.parse_args(args)

    log_level = LOGS[options.verbose].upper()
    logging.basicConfig(
        level=getattr(logging, log_level),
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    if options.conf:
        cfg = Config.load(abspath(expanduser(options.conf)))
    else:
        cfg = Config()

    conn = ResQ(server="%s:%s" % (cfg.REDIS_HOST, cfg.REDIS_PORT), password=cfg.REDIS_PASSWORD)
    conn.config = cfg

    connect(
        cfg.MONGO_DB,
        host=cfg.MONGO_HOST,
        port=cfg.MONGO_PORT,
        username=cfg.MONGO_USER,
        password=cfg.MONGO_PASS
    )

    # BUG FIX: this function mixed Python 2 print statements (bare ``print``
    # and ``print "..."``) with print() calls; ``print "..."`` is a
    # SyntaxError on Python 3. print("") emits a blank line on both 2 and 3.
    print("")
    print("--- Wight worker started ---")
    print("")
    Worker.run([WorkerJob.queue], conn)
    print("")
    print("--- Wight worker killed ---")
    print("")
예제 #16
0
파일: core.py 프로젝트: salimane/resweb
def before_request():
    """Open a pyres connection on ``g`` before each request is handled."""
    password = app.config.get('RESWEB_PASSWORD', None)
    g.pyres = ResQ(app.config['RESWEB_HOST'], password=password)
예제 #17
0
 def setUp(self):
     """Create a ResQ connection and start each test from an empty Redis."""
     connection = ResQ()
     self.resq = connection
     self.redis = connection.redis
     self.redis.flushall()
예제 #18
0
  def perform(args):
    """Fetch a replay from S3, canonicalize its key to ``{md5}.SC2Replay``,
    parse it into the DB (best effort), and always enqueue the ruby
    PostParse follow-up job with timing info.

    :param args: dict with 'hash', 'channel', 'uuid', 'provider_id' and
        the *_received_at timestamp keys used by PostParse.
    """
    performStart = datetime.now()
    md5 = None
    replayDB = None

    try:
      sc2reader_to_esdb = SC2ReaderToEsdb()

      #
      # at this point the 'hash' may actually be an S3 key like '/uploads/1234-5667-1234234/filename.sc2replay'
      # or simply '{md5}'
      #
      # not to worry, in a few lines, we'll rename the S3 key to be md5.sc2replay
      #
      filename = args['hash']
      # BUG FIX: the pattern was '.sc2replay' with an unescaped dot, which
      # matched any character before 'sc2replay' (e.g. 'xsc2replay'); escape
      # it so only a genuine '.sc2replay' extension suppresses appending one.
      if re.search(r'\.sc2replay', filename, re.IGNORECASE) is None:
        filename = filename + ".SC2Replay"

      bucket = boto.connect_s3(settings.AWS_ACCESS_KEY_ID,
                               settings.AWS_SECRET_ACCESS_KEY)\
                               .get_bucket(settings.REPLAY_BUCKET_NAME)

      k = bucket.get_key(filename)

      replaystring = k.get_contents_as_string()
      md5 = hashlib.md5(replaystring).hexdigest()

      #
      # rename the S3 key to simply be md5.SC2Replay, so it's easier for us to find it
      # when we need it.
      #
      # http://stackoverflow.com/questions/2481685/amazon-s3-boto-how-to-rename-a-file-in-a-bucket
      k.copy(settings.REPLAY_BUCKET_NAME, md5 + ".SC2Replay",
             metadata=None, preserve_acl=False)

      replayDB, blob = sc2reader_to_esdb.processReplay(StringIO(replaystring), args['channel'])

      if len(blob) > 0:
        # Store the parse blob under the match id for later retrieval.
        blobbucket = boto.connect_s3(settings.AWS_ACCESS_KEY_ID,
                                     settings.AWS_SECRET_ACCESS_KEY)\
                                     .get_bucket(settings.BLOB_BUCKET_NAME)
        k = Key(blobbucket)
        k.key = "%i" % (replayDB.match.id)
        blobdump = json.dumps(blob)
        k.set_contents_from_string(blobdump)

    except Exception as e:
      # Parsing is best effort; log and fall through so PostParse still runs.
      tb = traceback.format_exc()
      exc_type, exc_obj, exc_tb = sys.exc_info()
      fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
      logging.getLogger("jobs").info("parsing failed for replay {}. oh well. exception={}. {} {} {} {}".format(args['hash'].encode('ascii'), e, exc_type, fname, exc_tb.tb_lineno, tb))

    finally:
      alldone = datetime.now()

      # Enqueue ruby PostParse job, always.
      ResQ(server=settings.REDIS_SERVER).enqueue_from_string('ESDB::Jobs::Sc2::Replay::PostParse', 'replays-high', {
        'uuid': args['uuid'],
        'hash': md5,
        'provider_id': str(args['provider_id']),
        'ggtracker_received_at': args['ggtracker_received_at'],
        'esdb_received_at': args['esdb_received_at'],
        'preparse_received_at': args['preparse_received_at'],
        'jobspy_received_at': performStart.strftime('%s.%f'),
        'jobspy_done_at': alldone.strftime('%s.%f'),
      })

      # regarding converting times to floating point seconds since the
      # epoch, using %s above is dangerous because its not python, it
      # calls the underlying OS.  i tried using the solution here:
      # http://stackoverflow.com/questions/6999726/python-getting-millis-since-epoch-from-datetime/11111177#11111177
      # but i ran into timezone issues and did the lazy thing instead.

      matchId = 0
      if replayDB and hasattr(replayDB, "match") and replayDB.match.id:
        matchId = replayDB.match.id
      logging.getLogger("jobs").info("all done with match {}. total time in ParseReplay.perform() = {}".format(matchId, alldone - performStart))
def queue_job(input_coordinate=None,
              hg18_option='off',
              transcript_type='NA',
              refseq_correspondence='no',
              sample_id='yes',
              analysis_type=None,
              analysis_programs=None):
    """Stage a diagnostic-test job directory, write its job_info.txt, and
    enqueue a MasterAnalyzer run on the appropriate queue.

    Input files live under ``diagnostic_tests_dir``; the queue is chosen by
    input size (>= 5000 mutation lines uses the ``_LARGE`` queue).
    """
    # BUG FIX: ``analysis_type == None`` -> ``is None``; the Python 2
    # ``print`` statement form is a SyntaxError on Python 3 -> print() call.
    if analysis_type is None:
        print('Choose an analysis type.')
        return
    input_basename = input_coordinate + '_' + hg18_option + '_' + transcript_type + '_' + refseq_correspondence + '_' + sample_id + '.txt'
    job_name = input_basename[:-4] + '_' + analysis_type + '_' + analysis_programs + '_20121231_010101'
    job_dir = os.path.join(diagnostic_tests_dir, job_name)
    if not os.path.exists(job_dir):
        os.mkdir(job_dir)
    input_filename = os.path.join(job_dir, input_basename)
    shutil.copy(os.path.join(diagnostic_tests_dir, input_basename), input_filename)
    job_id = job_name
    mutation_filename = input_filename
    mutation_filename_fix = mutation_filename + '.fix'
    mutation_filename_error = mutation_filename + '.error'
    email = '*****@*****.**'
    classifier = '_Other'
    upload_filename = input_basename
    user_upload_dir = job_dir
    chosen_db_str = analysis_programs.strip('_')
    chosen_dbs = chosen_db_str.split('_')
    tsv_report = 'on'
    gene_annot = 'on'
    hg18 = hg18_option
    functional_annot = 'on'
    # BUG FIX: files were opened without close-on-error; use context managers
    # so the handles are released even if an exception is raised mid-way.
    no_input_line = 0
    with open(input_filename) as f:
        # Count non-comment input lines ('#', '>' and '!' mark headers).
        for line in f:
            if len(line) > 0 and line[0] not in ('#', '>', '!'):
                no_input_line += 1
    if no_input_line >= 5000:
        queue_name = queue_name_prefix + '_LARGE'
    else:
        queue_name = queue_name_prefix + '_SMALL'
    mupit_input = 'on'
    resubmit = 'no'
    with open(os.path.join(job_dir, 'job_info.txt'), 'w') as wf:
        wf.write(str(no_input_line)+'\n')
        wf.write(job_id+'\n')
        wf.write(chosen_db_str+'\n')
        wf.write('call_queuer_path\n')
        wf.write('python_path\n')
        wf.write('queuer_path\n')
        wf.write(email+'\n')
        wf.write(job_id+'\n')
        wf.write(classifier+'\n')
        wf.write(mutation_filename+'\n')
        wf.write(upload_filename+'\n')
        wf.write(user_upload_dir+'\n')
        wf.write(chosen_db_str+'\n')
        wf.write(tsv_report+'\n')
        wf.write(functional_annot+'\n')
        wf.write(hg18+'\n')
        wf.write(analysis_type+'\n')
        wf.write(str(no_input_line)+'\n')
        wf.write('error_output_path\n')
        wf.write(mupit_input+'\n')
    r = ResQ()
    argstr  = 'dummy'
    argstr += ' ' + 'yes'
    argstr += ' ' + user_upload_dir
    argstr += ' ' + resubmit
    argstr += ' ' + job_id
    argstr += ' -e ' + email
    argstr += ' -i ' + job_id
    argstr += ' -c ' + classifier
    argstr += ' -m ' + mutation_filename
    argstr += ' -u ' + upload_filename
    argstr += ' -D ' + user_upload_dir
    argstr += ' -d ' + chosen_db_str
    argstr += ' -t ' + tsv_report
    argstr += ' -f ' + functional_annot
    argstr += ' -r ' + hg18
    argstr += ' -y ' + analysis_type
    argstr += ' -n ' + str(no_input_line)
    argstr += ' -M ' + mupit_input
    argstr += ' -R ' + resubmit
    argstr += ' -Q ' + queue_name
    argstr += ' -T ' + 'yes' # -T option for 'test'
    r.enqueue(queue_name, masteranalyzer.MasterAnalyzer, argstr)
    global job_ids
    job_ids.append(job_id)
예제 #20
0
 def test_redis_property(self):
     """Assigning an invalid value through ``redis`` should raise."""
     from redis import Redis
     queue = ResQ(server="localhost:6379")
     connection = Redis()
     self.assertRaises(Exception, queue.redis, [Basic])
예제 #21
0
from pyres import ResQ
from django.conf import settings
# Module-level pyres connection shared by all PyresMixin subclasses; host and
# password come from Django settings (defaults: localhost:6379, no password).
pyres = ResQ(getattr(settings,'PYRES_HOST','localhost:6379'),getattr(settings,'PYRES_PASSWORD',None))

class PyresMixin(object):
    """Mixin adding a classmethod that runs a job via pyres or inline."""

    @classmethod
    def enqueue(cls, *args):
        # When the queue is disabled, run the job synchronously instead of
        # enqueueing it to the module-level pyres connection.
        if not settings.PYRES_USE_QUEUE:
            cls.perform(*args)
        else:
            pyres.enqueue(cls, *args)
예제 #22
0
import os
from itty import *
from pyres import ResQ
from pyres import failure
from views import (Overview, Queues, Queue, Workers, Working, Failed, Stats,
                   Stat, Worker, Delayed, DelayedTimestamp)
from base64 import b64decode

# Shared ResQ connection used by all resweb view functions below.
HOST = ResQ("localhost:6379")
# Directory that static media assets are served from.
MY_ROOT = os.path.join(os.path.dirname(__file__), 'media')
#resq = ResQ(HOST)


@get("/")
def index(request):
    """Render the resweb overview page."""
    page = Overview(HOST)
    return str(page.render())


@get("/working/")
def working(request):
    """Render the page listing currently working workers."""
    page = Working(HOST)
    return str(page.render())


@get("/queues/")
def queues(request):
    """Render the queue-listing page."""
    page = Queues(HOST)
    return str(page.render())


@get('/queues/(?P<queue_id>\w.+)/')
def queue(request, queue_id):
    start = int(request.GET.get('start', 0))
예제 #23
0
def get_resq(request):
    """Return a fresh ResQ connection using the default settings."""
    connection = ResQ()
    return connection
예제 #24
0
def get_pyres():
    """Build a ResQ connection from the REDIS_HOST/REDIS_PORT settings."""
    host = getattr(settings, 'REDIS_HOST', 'localhost')
    port = getattr(settings, 'REDIS_PORT', 6379)
    return ResQ('%s:%d' % (host, port))