def do_doom(self, t, code, queue_at, deadline):
    logger = logging.getLogger('mwtm_cleaner')
    if queue_at is not None and (deadline is None or deadline == 0 or
                                 deadline > queue_at):
        logger.debug('to retry task %s, queue at %s', t.uuid, queue_at)
        yield db_execute(RETRY_TASK, queue_at, code, t.id)
        #yield db_execute(RENEW_EVENT, t.uuid, 'retry')
        g_logger.info(trans2json(message="task_uuid:%s, "
                                 "site_asset_id:%s, deadline:%s, external_id:%s" %
                                 (t.uuid, t.site_asset_id, deadline, t.external_id),
                                 action="retry task"))
    else:
        logger.debug('to fail task %s', t.uuid)
        g_logger.info(trans2json(message="task_uuid:%s, "
                                 "site_asset_id:%s, external_id:%s" %
                                 (t.uuid, t.site_asset_id, t.external_id),
                                 action="to fail task"))
        rc, _ = yield db_query(CHECK_TASK, t.id)
        if rc <= 0:
            yield db_execute(FAIL_TASK, code, t.id)
            self.send_matches(t, unrecognized=True)
            task_status = db_txn(self.pool, partial(self.load_task_status, t.uuid))
            self.update_hbase_task(task_status)
            stats.incr(QUERY_FAILED, 1)

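# The retry/fail decision above can be read as a pure predicate: a task is
# requeued only when a queue time is given and the deadline (if any) still
# lies beyond it; otherwise the task is failed. A minimal sketch of that
# predicate, pulled out so it can be tested in isolation (the helper name
# is ours, not part of the module):
def should_retry(queue_at, deadline):
    # no queue time -> nothing to requeue
    if queue_at is None:
        return False
    # a deadline of None or 0 means "no deadline": always retry;
    # otherwise retry only while the next attempt precedes the deadline
    return deadline is None or deadline == 0 or deadline > queue_at

assert should_retry(100, None) is True
assert should_retry(100, 0) is True
assert should_retry(100, 50) is False
assert should_retry(None, 200) is False
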
def storeTaskMysql(self, task):
    rc, res = yield db_execute(STORE_TASK, str(task['task_uuid']),
                               str(task['external_id']), task['task_priority'],
                               task['created_at'], task['dna'],
                               task['company_id'], str(task['site_asset_id']))
    if rc:
        # res is assumed to carry the auto-increment id of the stored task
        task_id = res
        if len(task['query_scope']) != 0:
            for i in task['query_scope']:
                try:
                    r, re = yield db_query(CHECK_SCOPE, i)
                    #r = yield db_result(r)
                    if r:
                        rc, res = yield db_execute(STORE_SCOPE, task_id, i)
                    else:
                        self.logger.info(
                            'query_scope is not in vddbMetaContent table, '
                            'task_uuid: %s, site_asset_id: %s, query_scope: %s',
                            str(task['task_uuid']), str(task['site_asset_id']), i)
                except Exception:
                    self.logger.error(
                        'failed to store scope, task_uuid: %s, '
                        'site_asset_id: %s, query_scope: %s',
                        str(task['task_uuid']), str(task['site_asset_id']), i)
                    raise

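# CHECK_SCOPE and STORE_SCOPE are not defined in this listing; from the call
# sites a plausible shape would be the following (table and column names are
# guesses, not the module's actual SQL):
CHECK_SCOPE_SKETCH = 'SELECT 1 FROM vddbMetaContent WHERE meta_uuid = %s'
STORE_SCOPE_SKETCH = 'INSERT INTO taskScope (task_id, meta_uuid) VALUES (%s, %s)'
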
def store_matches(t, matches):
    for m in matches:
        c, _ = yield db_execute(
            STORE_MATCH, t.account, m['meta_uuid'], t.uuid, t.created_at,
            t.site_asset_id, m['match_type'], m['video_duration'],
            m['video_score'], m['video_sample_offset'], m['video_ref_offset'],
            m['audio_duration'], m['audio_score'], m['audio_sample_offset'],
            m['audio_ref_offset'], 'auto_match', 0,
            t.account, m['match_type'], m['video_duration'],
            m['video_score'], m['video_sample_offset'], m['video_ref_offset'],
            m['audio_duration'], m['audio_score'], m['audio_sample_offset'],
            m['audio_ref_offset'], 'auto_match', 0)
        if c > 0:
            # match stored, add meta if not existing
            yield db_execute(ADD_CONTENT, m['meta_uuid'],
                             m['meta_name'].encode('utf8'))

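# STORE_MATCH takes every match value twice, which strongly suggests an
# INSERT ... ON DUPLICATE KEY UPDATE upsert: the first 16 parameters feed the
# INSERT column list, the remaining 12 the UPDATE clause. The real SQL is not
# shown in this module; a plausible sketch, with column names guessed from
# the parameter order:
STORE_MATCH_SKETCH = '''
INSERT INTO matches (account, meta_uuid, task_uuid, created_at,
                     site_asset_id, match_type, video_duration, video_score,
                     video_sample_offset, video_ref_offset, audio_duration,
                     audio_score, audio_sample_offset, audio_ref_offset,
                     source, checked)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
    account = %s, match_type = %s, video_duration = %s, video_score = %s,
    video_sample_offset = %s, video_ref_offset = %s, audio_duration = %s,
    audio_score = %s, audio_sample_offset = %s, audio_ref_offset = %s,
    source = %s, checked = %s
'''
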
def storeTaskMysql(task):
    rc, res = yield db_execute(STORE_TASK, task['task_uuid'],
                               task['task_priority'], task['created_at'],
                               task['dna'], task['company_id'],
                               str(task['site_asset_ids']))
    if rc:
        # res is assumed to carry the auto-increment id of the stored task
        task_id = res
        if len(task['query_scope']) != 0:
            for i in task['query_scope']:
                try:
                    r, re = yield db_query(CHECK_SCOPE, i)
                    #r = yield db_result(r)
                    if r:
                        rc, res = yield db_execute(STORE_SCOPE, task_id, i)
                    else:
                        logger.info('query_scope is not in vddbMetaContent '
                                    'table, task_uuid: %s, query_scope: %s',
                                    task['task_uuid'], i)
                except Exception:
                    logger.error('failed to store scope, task_uuid: %s, '
                                 'query_scope: %s', task['task_uuid'], i)

def do_check():
    # NOTE: the algorithm uses record locking and automatic txn rollback
    # as a method of synchronization between distributed processes.
    # With the WHERE conditions below, only one contender can update the
    # original row, and when he updates it with his process info, he's the
    # king. Insertion works likewise (with the module name as a unique key).
    # First, try to sit on the throne that looks deserted, or looks
    # owned by us.
    rc, _ = yield db_execute(USURP, host, port, module, host, port, timeout)
    if rc == 1:
        # yes, we sat down
        yield db_result(True)
    # we couldn't sit down; is there actually a throne?
    rc, _ = yield db_query(CHECK_THRONE, module)
    if rc == 1:
        # yes there is, it's just occupied
        yield db_result(False)
    # the throne is not there yet, make one and sit on it!
    rc, _ = yield db_execute(MAKE_THRONE, module, host, port)
    # the throne belongs to its maker!
    yield db_result(rc == 1)

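# For the election above to work, the three statements only need the
# following shape; table and column names here are illustrative sketches,
# not this module's actual SQL. Parameter order matches the calls above.
USURP_SKETCH = '''
UPDATE throne SET host = %s, port = %s, seen_at = NOW()
WHERE module = %s
  AND (   (host = %s AND port = %s)                 -- already owned by us
       OR seen_at < NOW() - INTERVAL %s SECOND)     -- or deserted (stale)
'''
CHECK_THRONE_SKETCH = 'SELECT 1 FROM throne WHERE module = %s'
MAKE_THRONE_SKETCH = '''
INSERT IGNORE INTO throne (module, host, port, seen_at)
VALUES (%s, %s, %s, NOW())
'''
# The UNIQUE key on module is what makes the INSERT race-safe: of two
# concurrent makers, only one insert succeeds (rc == 1); the loser sees
# rc == 0 and concedes.
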
def do_setup(acc, user):
    print acc, user
    uid = str(uuid4())
    yield db_execute('''INSERT INTO task(task_identification, status,
                        site_asset_id, company_id, task_priority, user_id,
                        clip_duration, clip_format)
                        VALUES (%s, 'query', UUID(), %s, 128, %s, 600, 'mp4')''',
                     uid, acc, user)
    _, r = yield db_insert('''INSERT INTO taskQueryHis(task_identification)
                              VALUES (%s)''', uid)
    yield db_result(uid, r)

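# These coroutines never talk to MySQL directly: they yield db_execute /
# db_query / db_insert requests, and a driver such as db_txn (seen in
# do_doom above) runs them against a pooled connection inside a single
# transaction, sending each result back into the generator. A hedged usage
# sketch, assuming the db_txn(pool, thunk) signature used in do_doom;
# pool, account_id and user_id are placeholder names:
from functools import partial

uid, his_id = db_txn(pool, partial(do_setup, account_id, user_id))
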
def storeScope(pool, task):
    if len(task['query_scope']) != 0:
        # the numeric task id is assumed to ride along in the task dict
        task_id = task.get('task_id')
        for i in task['query_scope']:
            try:
                r, re = yield db_query(CHECK_SCOPE, i)
                #r = yield db_result(r)
                if r:
                    rc, res = yield db_execute(STORE_SCOPE, task_id, i)
                else:
                    logger.info('query_scope is not in vddbMetaContent '
                                'table, task_uuid: %s, query_scope: %s',
                                task['task_uuid'], i)
            except Exception:
                logger.error('failed to store scope, task_uuid: %s, '
                             'query_scope: %s', task['task_uuid'], i)

def record(t, pid=0):
    yield db_execute(BEGIN_QUERY, pid, t.id)

def do_renew():
    rc, _ = yield db_execute(RENEW_TASKS)
    yield db_result(rc)

def updateStatus(task_id, sid, external_id):
    # db_execute yields a (row_count, result) pair, as in the other coroutines
    rc, _ = yield db_execute(UPDATE_TASK_STATUS, str(sid), external_id,
                             str(task_id))

def update_task(t, code, mc, tr):
    # NOTE: status is always query success here
    if t.from_reverse and mc > 0:
        yield db_execute(FINISH_TASK, 'match', 1000, t.id)
    else:
        yield db_execute(FINISH_TASK, tr, code, t.id)

def record_finish(t, row, code, matches):
    yield db_execute(UPDATE_QUERY, t.uuid, code, matches, row)

def store_crr(t, crr):
    # record crr
    if crr is not None and crr != '':
        crr = crr.encode('utf8')
        yield db_execute(STORE_CRR, t.uuid, crr, crr)

def update_row():
    row = yield insert_row()
    row_count, _ = yield db_execute('UPDATE test SET a = 3 WHERE id = ?', row)
    assert row_count == 1
    yield db_result(row)

def bad_sql():
    with raises(OperationalError):
        yield db_execute('bad sql')

def delete_row(row):
    row_count, _ = yield db_execute('DELETE FROM test WHERE id = ?', row)
    assert row_count == 1

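# The test helpers above compose into one round trip, since a coroutine can
# delegate to another by yielding it (as update_row does with insert_row).
# A sketch, assuming the same db_txn driver and a pool bound to the test
# database:
def crud_round_trip():
    row = yield update_row()   # insert a row, then update it (asserts row_count)
    yield delete_row(row)      # delete it and assert exactly one row went

db_txn(pool, crud_round_trip)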