Example #1
0
def creat(item: Item):
    """
    Create a lottery (lucky-draw) rule.

    Builds a shuffled pool of ``item.total`` entries — one entry per prize
    unit listed in ``item.award``, padded with ``item.other`` placeholders —
    pushes the pool into the Redis queue named ``item.luckname`` and records
    the per-name counts in a Redis hash under the same key.

    Returns ``{"ret": 0, ...}`` on success, or ``{"ret": 500, ...}`` when a
    queue with that name already exists.
    """
    myredis = MyRedis()
    rq = RedisQueue(item.luckname)
    if rq.qsize():
        # A non-empty queue means this lottery was already created.
        return {"ret": 500, "msg": "该抽奖已经存在,请删除后重试"}

    lucklist = []  # flat pool: one entry per winnable unit
    luckdict = {}  # prize name -> unit count (persisted in the hash)
    for ward in item.award:
        luckdict[ward.name] = ward.count
        lucklist.extend([ward.name] * ward.count)

    # Pad the pool up to item.total with "no prize" placeholders.
    # BUG FIX: `others` was previously bound only inside the `if`, raising
    # NameError whenever the prizes already filled the pool exactly.
    others = []
    othercount = item.total - len(lucklist)
    if othercount:
        luckdict[item.other] = othercount
        others = [item.other] * othercount

    lucklist += others
    random.shuffle(lucklist)
    for luck in lucklist:
        rq.put(luck)

    myredis.hmset(item.luckname, luckdict)

    # Typo fix: "succses" -> "success".
    return {'ret': 0, 'msg': "success"}
Example #2
0
 def _run_script_background(self, command, username, region):
     """Run *command* in a shell subprocess and report the outcome via Redis.

     Blocks until the command finishes, then pushes either '*finished*' or
     '*failed*<stderr>' onto the per-region/per-user deploy queue and records
     an execution log entry via add_script_exec_log.

     NOTE(review): shell=True means *command* is interpreted by the shell —
     presumably it is built from trusted, internal input; verify at callers.
     """
     p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     time.sleep(0.1)  # forget why i sleep here, if not sure, don't modify this
     rq = RedisQueue('script_serviceDeploy_%s_%s' % (region, username))
     rq.delete_queue()  # todo: delete key only
     # communicate() waits for process exit and drains both pipes.
     stdout, stderr = p.communicate()
     ret_code = p.poll()  # exit status; 0 (falsy) == success
     if not ret_code:
         script_result = stdout
         logger.info('run command: %s success, output:\n%s' % (command, stdout))
         # Sentinel consumed by whoever polls the deploy queue.
         rq.put('*finished*')
         self.add_script_exec_log(username, self.script_name, self.script_path, True, script_result)
     else:
         script_result = stdout + stderr
         logger.error('run command: %s failed, output:\n%s' % (command, script_result))
         # Drop blank lines from stderr before attaching it to the sentinel.
         errout = [err for err in stderr.splitlines() if err]
         errout = '\n'.join(errout)
         rq.put('*failed*%s' % errout)
         self.add_script_exec_log(username, self.script_name, self.script_path, False, script_result)
    def start():
        """Worker main loop: pull render tasks from Redis, render with
        Blender, push results via Mercurial, and report state changes to the
        'manager' queue.

        Python 2 code (print statements). Runs forever; sleeps 5 s when no
        task queue has messages or after an error.
        """
        round_robin = 0
        while True:
            try:
                manager_queue = RedisQueue('manager', host=settings.REDIS_HOST, port=settings.REDIS_PORT)
                # get next message - first find an available queue
                keys = RedisKeys(pattern='rendertaskqueue:*', host=settings.REDIS_HOST, port=settings.REDIS_PORT).keys
                if keys is not None and len(keys) > 0:
                    # Rotate across the discovered task queues so a single
                    # producer cannot starve the others.
                    round_robin = BlendClusterClient.next_queue(round_robin, keys)
                    queue_name = keys[round_robin] # just grab the first queue name
                    print 'round robin: ' + queue_name
                    queue = RedisQueue('namechangedbelow', host=settings.REDIS_HOST, port=settings.REDIS_PORT)
                    queue.key = queue_name # override key name (queue name) here as we have the qualified name
                    print 'getting render task message'
                    render_task_json = queue.get()
                    print render_task_json
                    render_task_obj = RenderTaskMessage.json_to_object(render_task_json)
                    hg_url = render_task_obj.hg_url
                    blend_file = render_task_obj.blend_file

                    # notify manager we are working
                    msg = RenderTaskStateChangeMessage.json_from_object({ 'id': render_task_obj.id,
                                                                          'state': 1}) # set working state
                    manager_queue.put(msg)
                    print 'Set task to WORKING state: ' + str(render_task_obj.id)
                    print '-------------------------'
                    print 'WORKING'
                    print '-------------------------'

                    # so some work
                    print 'About to prepare hg'
                    hg = HgUtils(url=hg_url)
                    print 'about to clone or pull hg'
                    hg.clone_or_pull()
                    full_blend_filename = hg.local_project_path + '/' + render_task_obj.blend_file
                    # basename without extension, e.g. 'scene.blend' -> 'scene'
                    short_blend_filename = ntpath.basename(full_blend_filename).split('.')[0]
                    render_dir = hg.local_project_path + '/renders/' + short_blend_filename + '/'\
                                 + render_task_obj.scene_name + '/'
                    print 'Rendering in dir: ' + render_dir
                    if not os.path.exists(render_dir):
                        # New render directory: create it and publish it so
                        # other workers see the same layout.
                        os.makedirs(render_dir)
                        hg.push_all_changes()

                    # Single-frame render job (start == end == requested frame).
                    blender = Blender(blend_file=full_blend_filename, scene_name=render_task_obj.scene_name,
                                      start_frame=render_task_obj.frame, end_frame=render_task_obj.frame,
                                      output_dir=render_dir)
                    blender.run()
                    # before pushing changes update local repo
                    hg.get_latest_changes()
                    hg.push_all_changes()
                    print 'done hg'

                    msg = RenderTaskStateChangeMessage.json_from_object({ 'id': render_task_obj.id,
                                                                          'state': 4}) # set success state
                    manager_queue.put(msg)
                    print '-------------------------'
                    print 'SUCCESSFUL'
                    print '-------------------------'
                else:
                    print('No messages available right now, will try again in 5 seconds...')
                    time.sleep(5) # wait 5 secs before trying again
            except Exception as e:
                print('Error raised: ' + e.message)
                # NOTE(review): if the failure happened before render_task_obj
                # was bound, the line below raises NameError, which falls into
                # the bare except beneath — the failure is then not reported.
                try:
                    msg = RenderTaskStateChangeMessage.json_from_object({ 'id': render_task_obj.id,
                                                                          'state': 3}) # set working state failed
                    manager_queue.put(msg)
                    print '-------------------------'
                    print 'FAILED'
                    print '-------------------------'

                except:
                    print('Trying to respond with a failure status but could not!')
                print('Will try again in 5 seconds...')
                time.sleep(5) # wait 5 secs before trying again
Example #4
0
    def handle(self):
        """Receive RFID reader data over TCP, buffer it in Redis, flush to Postgres.

        Every payload received on the socket is pushed onto a Redis queue keyed
        by the client address. When the peer closes the connection (empty recv),
        the queued records are drained and 'DEVID' records are inserted into
        tb_epc_record, then the handler exits.
        """
        print(self.client_address)
        sqstr = '{0}:{1}'.format(self.client_address[0],
                                 self.client_address[1])
        print(sqstr)
        q = RedisQueue(name=sqstr)
        while True:
            try:
                data = self.request.recv(1024)
                if data == b'':
                    # Empty read: peer closed the connection — flush the queue.
                    try:
                        conn = psycopg2.connect(database="materiel",
                                                user="******",
                                                password="******",
                                                host="58.214.232.165",
                                                port="5432")
                        cur = conn.cursor()
                        print('lin postgresql OK')
                    except:
                        print('InsertToPostgresql Connect Fail')
                        break
                    while True:
                        if q.isrun() == 0:
                            break
                        qdata = q.get_nowait()
                        if qdata is None:
                            # Queue drained: commit everything and close.
                            try:
                                conn.commit()
                                conn.close()
                                print('InsertToPostgresql close')
                                break
                            except:
                                print('InsertToPostgresql commit Fail')
                                break
                        print(qdata)
                        if b'DEVID' in qdata:
                            # Payload shape (from SOURCE): b'DEVID...:<dev>;<epc>'
                            devid, value = qdata.split(b':')
                            print('did:{0}'.format(devid))
                            lisoo = value.split(b';')
                            print('did:{0}-epc:{1}'.format(lisoo[0], lisoo[1]))
                            # SECURITY FIX: parameterized query instead of string
                            # formatting — the payload arrives from the network
                            # and must never be interpolated into SQL.
                            cur.execute(
                                "INSERT INTO tb_epc_record (device_id,epc,report_time)"
                                " VALUES (%s,%s,NOW()::timestamp)",
                                (lisoo[0].decode(encoding='utf-8'),
                                 lisoo[1].decode(encoding='utf-8')))
                    break
                q.put(data)
            except Exception as e:
                # Any socket/Redis error tears down the server and this handler.
                self.server.shutdown()
                self.request.close()
                break
        if ex[0]==17:   # directory already exists
            pass
        else:
            raise

def makeprocessqentry(config, jobid, path, imgname, timestamp, size):
    """Build a process-queue message for one downloaded file.

    ``config`` is unused but kept for signature parity with the other
    queue-entry builders.
    """
    entry = {
        "action": "process_file",
        "jobid": jobid,
        "path": path,
        "name": imgname,
        "changed": timestamp,
        "filesize": size,
    }
    return entry

if __name__=='__main__':
    config= json.load(open("config.json"))
    makedatadir(config)
    downloadq= RedisQueue(host=config["redis-host"], namespace=config["redis-namespace"], name=config["redis-download-queue"])
    processq= RedisQueue(host=config["redis-host"], namespace=config["redis-namespace"], name=config["redis-process-queue"])
    session= requests.Session()
    
    processq.clear()

    while True:
        row= json.loads(downloadq.get())
        print "%s => " % row['name'].encode('utf-8'),
        r= session.get(row['url'])
        print(r.status_code)
        if r.status_code!=200:
            raise RuntimeError("requests.get(%s) returned %s" % (row['url'], r.status_code))
        outputpath= os.path.join(os.path.expanduser(config["download-dir"]), row["name"])
        with open(outputpath, "w") as f:
            f.write(r.content)
        processq.put(json.dumps(makeprocessqentry(config, row["jobid"], outputpath, row["name"], row["changed"], row["filesize"])))
    
    
    
Example #6
0
def test_live_queue():
    """Integration test against a live Redis: connect, FIFO round-trip,
    uniqueness enforcement, and re-queueing a fetched task."""
    queue = RedisQueue("test_queue", MockTask, namespace="pytest")
    assert queue.connected is False

    first = MockTask()
    second = MockTask()
    first.uri = 'thisIsUnique'
    second.uri = 'thisIsUnique'

    # Connecting flips the connected flag.
    assert queue.connect(host=live_host, port=live_port,
                         password=live_pass) is True
    assert queue.connected is True

    # FIFO put/get round-trip on an emptied queue.
    queue.clear()
    assert queue.qsize == 0

    queue.put(first)
    assert queue.qsize == 1

    queue.put(second)
    assert queue.qsize == 2

    fetched = queue.get()
    assert isinstance(fetched, MockTask)
    assert fetched.uid == first.uid
    assert fetched.uri == 'thisIsUnique'

    queue.clear()

    # With unique set, two tasks sharing a hash must collide on put.
    first.unique = True
    second.unique = True

    assert first.unique_hash() == second.unique_hash()

    queue.put(first)

    with pytest.raises(TaskAlreadyInQueueException):
        queue.put(second)

    assert queue.qsize == 1

    queue.clear()

    # test getting and putting the same task into the queue
    assert queue.qsize == 0

    queue.put(first)
    refetched = queue.get()
    queue.put(refetched)

    assert queue.qsize == 1
                return

def makeorderqentry(jobid, resume):
    """Build a job-order queue message for one job."""
    return dict(jobid=jobid, resume=resume)
    
def makedownloadqentry(config, jobid, name, url, timestamp, size):
    """Build a download-queue message for one file.

    ``config`` is unused but kept for signature parity with the other
    queue-entry builders.
    """
    entry = dict(jobid=jobid, name=name, url=url)
    entry["changed"] = timestamp
    entry["filesize"] = size
    return entry

if __name__=='__main__':
    # Job feeder: enumerate commons files and push one order + one download
    # message per file, throttled by the configured queue cap.
    # FIX: load config with a context manager — the original leaked the handle.
    with open("config.json") as cfgfile:
        config = json.load(cfgfile)
    joborderq = RedisQueue(host=config["redis-host"], namespace=config["redis-namespace"], name=config["redis-job-order-queue"])
    downloadq = RedisQueue(host=config["redis-host"], namespace=config["redis-namespace"], name=config["redis-download-queue"])

    # Start from a clean slate so stale messages from a previous run cannot
    # be mixed into this batch.
    joborderq.clear()
    downloadq.clear()

    # xxxx remove stale files?

    jobid = 1
    for row in commonsfiles(sortkey='img_sha1', limit=50):
        # Back-pressure: poll once a second until the order queue drains
        # below the configured cap.
        # xxx todo: it would be nicer to have a blocking version of this, instead of polling every second
        while joborderq.qsize() >= config["redis-max-queued-jobs"]:
            time.sleep(1)
        joborderq.put(json.dumps(makeorderqentry(jobid, row['resume'])))
        downloadq.put(json.dumps(makedownloadqentry(config, jobid, row["img_name"], row["url"], row["img_timestamp"], row["img_size"])))
        print("pushed job %d (%s)..." % (jobid, row['img_name']))
        jobid += 1

    print("filequeue task done, exiting.")