class OracleReaderIndexThread(BaseThread):

    def __init__(self, output, proc_setting=None, stats=None):
        BaseThread.__init__(self, proc_setting=proc_setting, stats=stats)
        self.output = output
        self.preData = deque()
        self.config = Config('reader.cfg')
        self.current_time = self.config.get('current_time', now())
        self.current_index = int(self.config.get('current_index', 0))
        self.now = self.config.get('now')
        logging.debug("Reader[%s] init." % (self.name))

    def new_connection(self):
        try:
            connection = cx_Oracle.connect(
                "%s/%s@%s:%s/%s" %
                (self.proc_setting['setting']['user'],
                 self.proc_setting['setting']['password'],
                 self.proc_setting['setting']['ip'],
                 self.proc_setting['setting']['port'],
                 self.proc_setting['setting']['dbname']))
            return connection
        except Exception:
            logging.error('database connect err:\n%s' % traceback.format_exc())
            return None

    def enqueueData(self):
        one = self.preData.popleft()
        try:
            self.output.put(one, block=False)
            logging.info('enqueue new data: %s, %s' %
                         (one['id'], one['ImageURL']))
            increase(self.stats, 'read_success_total', self.threadId)
        except QueueFull:
            # Output queue is full; keep the item at the head and retry later.
            self.preData.appendleft(one)
        return

    def process(self):
        if self.preData:
            self.enqueueData()
            return
        self.now = now()
        exec_sql = self.proc_setting['setting']['query_temp'] % self.current_index
        query_start = time.time()
        try:
            connection = self.new_connection()
            if connection is None:
                time.sleep(READ_CONNECT_RETRY_TIME)
                return
            cursor = connection.cursor()
            cursor.execute(exec_sql)
            rows = cursor.fetchall()
            connection.close()
        except Exception as e:
            query_cost = round((time.time() - query_start) * 1000, 2)
            logging.error(
                'error occurs when executing sql: %s, use time: %sms, sql: %s' %
                (e, query_cost, exec_sql))
            return
        query_cost = round((time.time() - query_start) * 1000, 2)
        logging.info('read time: %sms, result.size: %s <== sql: %s' %
                     (query_cost, rows and len(rows), exec_sql))
        if rows is not None and len(rows) != 0:
            for row in rows:
                data = data_mapping(row)
                if data['id'] > self.current_index:
                    self.current_index = data['id']
                if data['snapshotTime'] > to_mstimestamp(self.current_time):
                    self.current_time = to_string(data['snapshotTime'])
                if data['ImageURL'] is None:
                    increase(self.stats, 'read_false_total', self.threadId)
                    # logging.error('Data image url error: %s' % str(row))
                    continue
                if TOLL_FILTER_TYPE == 1:
                    if data['deviceId'] not in TOLLGATE_IDS:
                        continue
                elif TOLL_FILTER_TYPE == 2:
                    if data['deviceId'] in TOLLGATE_IDS:
                        continue
                self.preData.append(data)
        else:
            time.sleep(READ_EMPTY_SLEEP_TIME)
            return
        try:
            self.config.set('current_index', self.current_index)
            self.config.set('current_time', self.current_time)
            self.config.set('now', self.now)
            self.config.save()
        except Exception as e:
            # Saving failed: roll back to the last persisted cursor values.
            self.current_index = int(self.config.get('current_index', 0))
            self.current_time = self.config.get('current_time', now())
            logging.error("Failed to update config file %s: %s." %
                          (self.config.filepath, e))

    def run(self):
        logging.debug('thread is starting %s' % (self.name))
        while not self.exit.is_set():
            try:
                self.process()
            except Exception:
                logging.error('exception occurs when getting data! %s' %
                              (traceback.format_exc()))
                time.sleep(THREAD_PROCESS_RETRY_TIME)
        logging.debug('%s to stopping' % (self.name))
        # Drain any staged rows before the thread exits.
        while self.preData:
            logging.debug(
                "wait thread reader's predata to process, predata len: %s" %
                len(self.preData))
            self.enqueueData()
        logging.debug('thread is stopped %s' % (self.name))
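
# Illustrative sketch (not part of the original module): every reader above
# uses the same back-pressure pattern -- a batch of rows is staged in a local
# deque (preData) and moved to the shared output queue with a non-blocking
# put(); a full queue simply leaves the item at the head of the deque for the
# next process() call. The names output_queue, staging and drain_one below are
# hypothetical and exist only for this example.
def _backpressure_sketch():
    from collections import deque
    try:
        from queue import Queue, Full  # Python 3
    except ImportError:
        from Queue import Queue, Full  # Python 2
    output_queue = Queue(maxsize=2)
    staging = deque({'id': i} for i in range(5))  # pretend this is a DB batch

    def drain_one():
        one = staging.popleft()
        try:
            output_queue.put(one, block=False)
            return True
        except Full:
            staging.appendleft(one)  # keep ordering; retry on the next call
            return False

    while staging and drain_one():
        pass
    # With maxsize=2 this returns (2, 3): two items enqueued, three still staged.
    return output_queue.qsize(), len(staging)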
class DahuaReaderIndexThread(BaseThread):

    def __init__(self, output, proc_setting=None, stats=None):
        BaseThread.__init__(self, proc_setting=proc_setting, stats=stats)
        self.output = output
        self.preData = deque()
        self.config = Config('reader.cfg')
        self.current_index = int(self.config.get('current_index', 0))
        self.current_time = self.config.get('current_time', now())
        self.now = self.config.get('now')
        logging.debug("Reader[%s] init." % (self.name))

    def enqueueData(self):
        one = self.preData.popleft()
        try:
            self.output.put(one, block=False)
            logging.info('enqueue new data: %s, %s' %
                         (one['id'], one['ImageURL']))
            increase(self.stats, 'read_success_total', self.threadId)
        except QueueFull:
            self.preData.appendleft(one)
        return

    def process(self):
        if self.preData:
            self.enqueueData()
            return
        self.now = now()
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json;charset=UTF-8',
            'authorization': self.proc_setting['setting']['authcode']
        }
        query_data = {
            'startId': self.current_index,
            'endId': (self.current_index +
                      self.proc_setting['setting']['pagesize']),
            'page': {
                'pageNo': 1,
                'pageSize': self.proc_setting['setting']['pagesize']
            }
        }
        query_url = self.proc_setting['setting']['resturl'] + '?q=%s' % (
            json.dumps(query_data))
        session = requests.Session()
        try:
            response = session.get(url=query_url, headers=headers)
            if response and response.status_code == 200:
                res_data = json.loads(response.content)
                if res_data['code'] == 100:
                    rows = res_data['data']['rows']
                    logging.info('result.size: %s, startId: %s.' %
                                 (rows and len(rows), self.current_index))
                    if rows is not None and len(rows) > 1:
                        # The first row repeats the previous query's last record
                        # (startId is inclusive), so skip it.
                        for row in rows[1:]:
                            data = data_mapping(row)
                            if data['id'] > self.current_index:
                                self.current_index = data['id']
                            if data['snapshotTime'] > to_mstimestamp(
                                    self.current_time):
                                self.current_time = to_string(
                                    data['snapshotTime'])
                            if data['ImageURL'] is None:
                                increase(self.stats, 'read_false_total',
                                         self.threadId)
                                # logging.error('Data image url error: %s' % str(row))
                                continue
                            if TOLL_FILTER_TYPE == 1:
                                if data['deviceId'] not in TOLLGATE_IDS:
                                    continue
                            elif TOLL_FILTER_TYPE == 2:
                                if data['deviceId'] in TOLLGATE_IDS:
                                    continue
                            self.preData.append(data)
                            # self.output.put(data, block=True)
                            # logging.info('enqueue new data: %s, %s' % (data['id'], data['ImageURL']))
                    else:
                        time.sleep(READ_EMPTY_SLEEP_TIME)
                        return
                else:
                    logging.error(
                        'exception occurs when access rest api, url: %s, response err: %s'
                        % (query_url, res_data['msg']))
            else:
                logging.error(
                    'exception occurs when access rest api, url: %s, rest api may be stopped!'
                    % query_url)
        except Exception:
            logging.error(
                'exception occurs when access rest api, url: %s, err:\n%s' %
                (query_url, traceback.format_exc()))
            return
        try:
            self.config.set('current_index', self.current_index)
            self.config.set('current_time', self.current_time)
            self.config.set('now', self.now)
            self.config.save()
        except Exception as e:
            logging.error("Failed to update config file %s: %s." %
                          (self.config.filepath, e))
            # Saving failed: roll back to the last persisted cursor values.
            self.current_index = int(self.config.get('current_index', 0))
            self.current_time = self.config.get('current_time', now())
            self.now = self.config.get('now')

    def run(self):
        logging.debug('thread is starting %s' % (self.name))
        while not self.exit.is_set():
            try:
                self.process()
            except Exception:
                logging.error('exception occurs when getting data! %s' %
                              (traceback.format_exc()))
                time.sleep(THREAD_PROCESS_RETRY_TIME)
        logging.debug('%s to stopping' % (self.name))
        while self.preData:
            # logging.debug("wait thread reader's predata to process, predata len: %s" % len(self.preData))
            self.enqueueData()
        logging.debug('thread is stopped %s' % (self.name))
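
# Illustrative sketch (not part of the original module): how
# DahuaReaderIndexThread builds its polling URL -- the id window and paging
# parameters are JSON-encoded into a single `q` query parameter. The resturl
# default below is a placeholder, not a real endpoint.
def _dahua_query_url_sketch(current_index=1000, pagesize=200,
                            resturl='http://example.invalid/api/records'):
    import json
    query_data = {
        'startId': current_index,
        'endId': current_index + pagesize,
        'page': {'pageNo': 1, 'pageSize': pagesize}
    }
    # The reader appends the raw JSON text; a stricter variant would URL-encode
    # it, but the original code sends it as-is.
    return resturl + '?q=%s' % json.dumps(query_data)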
class MysqlReaderTimeThread(BaseThread):

    def __init__(self, output, proc_setting=None, stats=None):
        BaseThread.__init__(self, proc_setting=proc_setting, stats=stats)
        self.output = output
        self.preData = deque()
        self.repeatData = []
        self.config = Config('reader.cfg')
        self.current_time = self.config.get('current_time', now())
        self.now = self.config.get('now')
        logging.debug("Reader[%s] init." % (self.name))

    def new_connection(self):
        try:
            new_connection = MySQLdb.connect(
                host=self.proc_setting['setting']['ip'],
                port=self.proc_setting['setting']['port'],
                user=self.proc_setting['setting']['user'],
                passwd=self.proc_setting['setting']['password'],
                db=self.proc_setting['setting']['dbname'],
                # need to set or delete
                charset='utf8',
                connect_timeout=READ_CONNECT_RETRY_TIME)
            return new_connection
        except Exception:
            logging.error('database connect err:\n%s' % traceback.format_exc())
            return None

    def enqueueData(self):
        one = self.preData.popleft()
        try:
            self.output.put(one, block=False)
            logging.info('enqueue new data: %s, %s' %
                         (one['deviceId'], one['ImageURL']))
            increase(self.stats, 'read_success_total', self.threadId)
        except QueueFull:
            self.preData.appendleft(one)

    def process(self):
        if self.preData:
            self.enqueueData()
            return
        self.now = now()
        if seconds_between(self.current_time, self.now) <= READ_RESERVE_TIME:
            time.sleep(READ_RESERVE_SLEEP_TIME)
            return
        exec_sql = self.proc_setting['setting']['query_temp'] % self.current_time
        query_start = time.time()
        try:
            connection = self.new_connection()
            if connection is None:
                return
            cursor = connection.cursor()
            cursor.execute(exec_sql)
            rows = cursor.fetchall()
            connection.close()
        except Exception as e:
            query_cost = round((time.time() - query_start) * 1000, 2)
            logging.error(
                'error occurs when executing sql: %s, use time: %sms, sql: %s' %
                (e, query_cost, exec_sql))
            return
        query_cost = round((time.time() - query_start) * 1000, 2)
        logging.info('read time: %sms, result.size: %s <== sql: %s' %
                     (query_cost, rows and len(rows), exec_sql))
        if rows is not None and len(rows) != 0:
            last_data = data_mapping(rows[-1])
            # All snapshot times in this batch equal the maximum snapshot time
            # of the previous query, so advance current_time by one second to
            # avoid re-reading the same records forever.
            if last_data['snapshotTime'] == to_mstimestamp(self.current_time):
                self.current_time = time.strftime(
                    '%Y-%m-%d %H:%M:%S',
                    time.localtime(
                        time.mktime(
                            time.strptime(self.current_time,
                                          '%Y-%m-%d %H:%M:%S')) + 1))
                repeat_time = self.current_time
            else:
                repeat_time = last_data['snapshotTime']
            repeat_list = []
            for row in rows:
                data = data_mapping(row)
                if data['snapshotTime'] > to_mstimestamp(self.current_time):
                    self.current_time = to_string(data['snapshotTime'])
                if data['id'] in self.repeatData:
                    logging.error('repeat data: %s' % data['id'])
                    continue
                if data['snapshotTime'] == repeat_time:
                    repeat_list.append(data['id'])
                if data['ImageURL'] is None:
                    increase(self.stats, 'read_false_total', self.threadId)
                    # logging.error('Data image url error: %s' % str(row))
                    continue
                self.preData.append(data)
            self.repeatData = repeat_list
            logging.debug('repeat time: %s, new repeat list: %s.' %
                          (to_string(repeat_time), str(self.repeatData)))
        else:
            time.sleep(READ_EMPTY_SLEEP_TIME)
            return
        try:
            self.config.set('current_time', self.current_time)
            self.config.set('now', self.now)
            self.config.save()
        except Exception as e:
            # Saving failed: roll back to the last persisted time cursor.
            self.current_time = self.config.get('current_time', now())
            logging.error("Failed to update config file %s: %s." %
                          (self.config.filepath, e))
        return

    def run(self):
        logging.debug('thread is starting %s' % (self.name))
        while not self.exit.is_set():
            try:
                self.process()
            except Exception:
                logging.error('exception occurs when getting data! %s' %
                              (traceback.format_exc()))
                time.sleep(THREAD_PROCESS_RETRY_TIME)
        logging.debug('%s to stopping' % (self.name))
        while self.preData:
            logging.debug('wait predata to process, the len is: %s' %
                          len(self.preData))
            self.enqueueData()
        logging.debug('thread is stopped %s' % (self.name))
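
# Illustrative sketch (not part of the original module): the one-second bump
# MysqlReaderTimeThread applies when every row of a batch carries the same
# snapshot time as the previous query's maximum, so the time cursor can never
# get stuck on a single second.
def _advance_one_second_sketch(current_time='2020-01-01 12:00:00'):
    import time
    bumped = time.strftime(
        '%Y-%m-%d %H:%M:%S',
        time.localtime(
            time.mktime(time.strptime(current_time, '%Y-%m-%d %H:%M:%S')) + 1))
    return bumped  # -> '2020-01-01 12:00:01'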
class PostgresqlReaderTimeWalkThread(BaseThread):

    def __init__(self, output, proc_setting=None, stats=None):
        BaseThread.__init__(self, proc_setting=proc_setting, stats=stats)
        self.output = output
        self.preData = deque()
        self.repeatData = []
        self.config = Config('reader.cfg')
        self.current_time = self.config.get('current_time', now())
        self.now = self.config.get('now')
        logging.debug("Reader[%s] init." % (self.name))

    def new_connection(self):
        try:
            new_connection = psycopg2.connect(
                host=self.proc_setting['setting']['ip'],
                port=self.proc_setting['setting']['port'],
                user=self.proc_setting['setting']['user'],
                password=self.proc_setting['setting']['password'],
                dbname=self.proc_setting['setting']['dbname'],
                connect_timeout=READ_CONNECT_RETRY_TIME,
                # client_encoding='utf-8',
            )
            return new_connection
        except Exception:
            logging.error('database connect err:\n%s' % traceback.format_exc())
            return None

    def enqueueData(self):
        one = self.preData.popleft()
        try:
            self.output.put(one, block=False)
            logging.info('enqueue new data: %s, %s' %
                         (one['deviceId'], one['ImageURL']))
            increase(self.stats, 'read_success_total', self.threadId)
        except QueueFull:
            self.preData.appendleft(one)

    def process(self):
        if self.preData:
            self.enqueueData()
            return
        self.now = now()
        # Walk forward a fixed window of READ_ONCE_WALK_TIME seconds per query.
        next_time = time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.localtime(
                time.mktime(
                    time.strptime(self.current_time, '%Y-%m-%d %H:%M:%S')) +
                READ_ONCE_WALK_TIME))
        if seconds_between(next_time, self.now) <= READ_RESERVE_TIME:
            time.sleep(READ_RESERVE_SLEEP_TIME)
            return
        exec_sql = self.proc_setting['setting']['query_temp'] % (
            self.current_time, next_time)
        query_start = time.time()
        try:
            connection = self.new_connection()
            if connection is None:
                time.sleep(READ_CONNECT_RETRY_TIME)
                return
            cursor = connection.cursor()
            cursor.execute(exec_sql)
            rows = cursor.fetchall()
            connection.close()
        except Exception as e:
            query_cost = round((time.time() - query_start) * 1000, 2)
            logging.error(
                'error occurs when executing sql: %s, use time: %sms, sql: %s' %
                (e, query_cost, exec_sql))
            return
        query_cost = round((time.time() - query_start) * 1000, 2)
        logging.info('read time: %sms, result.size: %s <== sql: %s' %
                     (query_cost, rows and len(rows), exec_sql))
        if rows is not None and len(rows) != 0:
            for row in rows:
                data = data_mapping(row)
                if data['ImageURL'] is None:
                    increase(self.stats, 'read_false_total', self.threadId)
                    continue
                if TOLL_FILTER_TYPE == 1:
                    if data['deviceId'] not in TOLLGATE_IDS:
                        continue
                elif TOLL_FILTER_TYPE == 2:
                    if data['deviceId'] in TOLLGATE_IDS:
                        continue
                self.preData.append(data)
        # Advance the window even when it returned no rows.
        self.current_time = next_time
        try:
            self.config.set('current_time', self.current_time)
            self.config.set('now', self.now)
            self.config.save()
        except Exception as e:
            self.current_time = self.config.get('current_time', now())
            logging.error("Failed to update config file %s: %s." %
                          (self.config.filepath, e))
        return

    def run(self):
        logging.debug('thread is starting %s' % (self.name))
        while not self.exit.is_set():
            try:
                self.process()
            except Exception:
                logging.error('exception occurs when getting data! %s' %
                              (traceback.format_exc()))
                time.sleep(THREAD_PROCESS_RETRY_TIME)
        logging.debug('%s to stopping' % (self.name))
        while self.preData:
            logging.debug(
                "wait thread reader's predata to process, predata len: %s" %
                len(self.preData))
            self.enqueueData()
        logging.debug('thread is stopped %s' % (self.name))
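
# Illustrative sketch (not part of the original module): the fixed time-walk
# window used by PostgresqlReaderTimeWalkThread and HuazunReaderTimeWalkThread.
# Each query covers [current_time, current_time + walk_seconds); the window is
# only queried once it lags `now` by more than a reserve margin, so records
# that land in the database late are not skipped. walk_seconds, reserve_seconds
# and the inline _to_epoch helper stand in for READ_ONCE_WALK_TIME,
# READ_RESERVE_TIME and the module's seconds_between and are assumptions of
# this sketch, not the original helpers.
def _time_walk_window_sketch(current_time, now_time,
                             walk_seconds=60, reserve_seconds=30):
    import time

    def _to_epoch(s):
        return time.mktime(time.strptime(s, '%Y-%m-%d %H:%M:%S'))

    next_time = time.strftime(
        '%Y-%m-%d %H:%M:%S',
        time.localtime(_to_epoch(current_time) + walk_seconds))
    if _to_epoch(now_time) - _to_epoch(next_time) <= reserve_seconds:
        return None  # window end too close to "now": caller sleeps and retries
    return (current_time, next_time)

# Example: _time_walk_window_sketch('2020-01-01 12:00:00', '2020-01-01 12:05:00')
# returns ('2020-01-01 12:00:00', '2020-01-01 12:01:00').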
class HuazunReaderTimeWalkThread(BaseThread):

    def __init__(self, output, proc_setting=None, stats=None):
        BaseThread.__init__(self, proc_setting=proc_setting, stats=stats)
        self.output = output
        self.preData = deque()
        self.repeatData = []
        self.config = Config('reader.cfg')
        self.current_time = self.config.get('current_time', now())
        self.current_startno = int(self.config.get('current_startno', 1))
        self.regist_stat = False
        self.now = self.config.get('now')
        logging.debug("Reader[%s] init." % (self.name))

    def register(self):
        register = Register(resturl=self.proc_setting['setting']['resturl'],
                            username=self.proc_setting['setting']['username'],
                            passwd=self.proc_setting['setting']['passwd'],
                            identify=self.proc_setting['setting']['identify'])
        ret = register.register()
        if ret:
            self.regist_stat = True

    def keepalive(self, times=3):
        if times < 1:
            return False
        header = {
            'Content-Type': 'application/json;charset=UTF-8',
            'User-Identify': self.proc_setting['setting']['identify']
        }
        query_param = {
            'KeepaliveObject': {
                'DeviceID': self.proc_setting['setting']['identify']
            }
        }
        keepurl = self.proc_setting['setting']['resturl'] + '/VIID/System/Keepalive'
        session = requests.Session()
        try:
            response = session.get(url=keepurl,
                                   headers=header,
                                   data=json.dumps(query_param))
            if response.status_code == 200:
                res_data = json.loads(response.content)
                if 'ResponseStatusObject' in res_data and res_data[
                        'ResponseStatusObject']['StatusCode'] == 0:
                    logging.debug('keepalive success')
                    return True
                else:
                    logging.error(
                        'access register url result: %s' %
                        res_data['ResponseStatusObject']['StatusString'])
                    return self.keepalive(times=times - 1)
            else:
                logging.error('access register url http err: %s' %
                              response.status_code)
                return self.keepalive(times=times - 1)
        except Exception:
            logging.error(traceback.format_exc())

    def enqueueData(self):
        one = self.preData.popleft()
        try:
            self.output.put(one, block=False)
            logging.info('enqueue new data: %s, %s' %
                         (one['id'], one['ImageURL']))
            increase(self.stats, 'read_success_total', self.threadId)
        except QueueFull:
            self.preData.appendleft(one)
        return

    def process(self):
        if self.preData:
            self.enqueueData()
            return
        if not self.regist_stat:
            self.register()
            return
        self.now = now()
        next_time = time.strftime(
            '%Y-%m-%d %H:%M:%S',
            time.localtime(
                time.mktime(
                    time.strptime(self.current_time, '%Y-%m-%d %H:%M:%S')) +
                READ_ONCE_WALK_TIME))
        if seconds_between(next_time, self.now) <= READ_RESERVE_TIME:
            time.sleep(READ_RESERVE_SLEEP_TIME)
            return
        headers = {
            'Content-Type': 'application/json;charset=UTF-8',
            'User-Identify': self.proc_setting['setting']['identify']
        }
        query_param = (
            "/VIID/MotorVehicles?RecordStartNo=%s&PageRecordNum=%s"
            "&(MotorVehicle.CreateTime BETWEEN '%s' and '%s')"
            "&(Sort = MotorVehicle.CreateTime)" %
            (self.current_startno, self.proc_setting['setting']['pagesize'],
             self.current_time, next_time))
        query_url = self.proc_setting['setting']['resturl'] + query_param.replace(
            ' ', '%20')
        session = requests.Session()
        try:
            response = session.get(url=query_url, headers=headers)
            if response and response.status_code == 200:
                res_data = json.loads(response.content)
                if isinstance(res_data, dict):
                    if 'ResponseStatusObject' in res_data and res_data[
                            'ResponseStatusObject']['StatusCode'] == 4:
                        # Registration expired; re-register on the next round.
                        self.regist_stat = False
                        return
                    else:
                        logging.error(
                            'huazun rest api err, please call the data access admin')
                        logging.error(response.content)
                        return
                else:
                    if self.current_startno <= res_data[0][
                            'MotorVehiclesListObject']['Pages']:
                        logging.info(
                            'result.size: %s, StartDateTime: %s, RecordStartNo: %s' %
                            (res_data[0]['MotorVehiclesListObject']
                             ['PageRecordNum'], self.current_time,
                             self.current_startno))
                        logging.debug(
                            'RecordStartNo:%s,PageRecordNum:%s,MaxNumRecordReturn:%s,Offset:%s,Pages:%s'
                            % (res_data[0]['MotorVehiclesListObject']
                               ['RecordStartNo'], res_data[0]
                               ['MotorVehiclesListObject']['PageRecordNum'],
                               res_data[0]['MotorVehiclesListObject']
                               ['MaxNumRecordReturn'],
                               res_data[0]['MotorVehiclesListObject']['Offset'],
                               res_data[0]['MotorVehiclesListObject']['Pages']))
                        for row in res_data[0]['MotorVehiclesListObject'][
                                'MotorVehiclesObject']:
                            data = data_mapping(row)
                            if data['ImageURL'] is None:
                                increase(self.stats, 'read_false_total',
                                         self.threadId)
                                # logging.error('Data image url error: %s' % str(row))
                                continue
                            if data['deviceId'] is None:
                                increase(self.stats, 'read_false_total',
                                         self.threadId)
                                continue
                            if TOLL_FILTER_TYPE == 1:
                                if data['deviceId'] not in TOLLGATE_IDS:
                                    continue
                            elif TOLL_FILTER_TYPE == 2:
                                if data['deviceId'] in TOLLGATE_IDS:
                                    continue
                            self.preData.append(data)
                        self.current_startno += 1
                    else:
                        # All pages of this window consumed; move the window on.
                        self.current_startno = 1
                        self.current_time = next_time
            else:
                logging.error(
                    'exception occurs when access rest api, url: %s, rest api may be stopped!'
                    % query_url)
        except Exception:
            logging.error(
                'exception occurs when access rest api, url: %s, err:\n%s' %
                (query_url, traceback.format_exc()))
            return
        try:
            self.config.set('current_time', self.current_time)
            self.config.set('current_startno', self.current_startno)
            self.config.set('now', self.now)
            self.config.save()
        except Exception as e:
            logging.error("Failed to update config file %s: %s." %
                          (self.config.filepath, e))
            # Saving failed: roll back to the last persisted cursor values.
            self.current_time = self.config.get('current_time', now())
            self.current_startno = int(self.config.get('current_startno', 1))
            self.now = self.config.get('now')

    def run(self):
        logging.debug('thread is starting %s' % (self.name))
        while not self.exit.is_set():
            try:
                self.process()
            except Exception:
                logging.error('exception occurs when getting data! %s' %
                              (traceback.format_exc()))
                time.sleep(THREAD_PROCESS_RETRY_TIME)
        logging.debug('%s to stopping' % (self.name))
        while self.preData:
            # logging.info("wait thread reader's predata to process, predata len: %s" % len(self.preData))
            self.enqueueData()
        logging.debug('thread is stopped %s' % (self.name))
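
# Illustrative sketch (not part of the original module): the VIID MotorVehicles
# query string HuazunReaderTimeWalkThread builds for one page of one time
# window. The resturl default is a placeholder; the space-to-%20 replacement
# mirrors the original code rather than using a full URL-encoding helper.
def _huazun_query_url_sketch(startno=1, pagesize=100,
                             start='2020-01-01 12:00:00',
                             end='2020-01-01 12:01:00',
                             resturl='http://example.invalid:8080'):
    query_param = (
        "/VIID/MotorVehicles?RecordStartNo=%s&PageRecordNum=%s"
        "&(MotorVehicle.CreateTime BETWEEN '%s' and '%s')"
        "&(Sort = MotorVehicle.CreateTime)" % (startno, pagesize, start, end))
    return resturl + query_param.replace(' ', '%20')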