def ServerHandler(self, clientID, clientsock, addr, replyHandler):
    # Now block thread until dispatched with new output level
    # t = threading.currentThread()
    clientsock.settimeout(TXTIMEOUT)
    while self.running:
        log.info("ServerHandler Running -- %s" % threading.currentThread().name)
        try:
            rxdata = clientsock.recv(BUFF)
        except socket.timeout:
            # If there's nothing to receive, check for a pending dispatch to send
            # log.info('Server RX Timeout, Checking Lock')
            if self.slock[clientID].acquire(blocking=False):
                # Process a pending dispatch send
                data = json.dumps({'id': clientID,
                                   'dispatch': self.valueOutMap[clientID]})
                # print 'Sending data ' + data
                clientsock.send(data)
            continue
        # Handle rxdata, which should be JSON with a "returnData" field
        try:
            jdata = json.loads(rxdata.strip())
        except ValueError:
            # log.info("Exception in ServerHandler while trying to parse JSON string: %s" % repr(rxdata))
            continue
        log.info('ServerHandler RX data: %s' % repr(jdata["returnData"]))
        replyHandler(clientID, jdata["returnData"])
    # Cleanup
    clientsock.close()
    log.info("%s Leaving ServerHandler" % threading.currentThread().name)
def run_unit_tests(module_name, dbname, position=runs_at_install):
    """
    :returns: ``True`` if all of ``module_name``'s tests succeeded,
              ``False`` if any of them failed.
    :rtype: bool
    """
    global current_test
    current_test = module_name
    mods = get_test_modules(module_name)
    threading.currentThread().testing = True
    r = True
    for m in mods:
        tests = unwrap_suite(unittest.TestLoader().loadTestsFromModule(m))
        suite = unittest.TestSuite(itertools.ifilter(position, tests))
        if suite.countTestCases():
            t0 = time.time()
            t0_sql = ecore.sql_db.sql_counter
            _logger.info('%s running tests.', m.__name__)
            result = unittest.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
            if time.time() - t0 > 5:
                _logger.log(25, "%s tested in %.2fs, %s queries",
                            m.__name__, time.time() - t0,
                            ecore.sql_db.sql_counter - t0_sql)
            if not result.wasSuccessful():
                r = False
                _logger.error("Module %s: %d failures, %d errors",
                              module_name, len(result.failures), len(result.errors))
    current_test = None
    threading.currentThread().testing = False
    return r
def run(self):
    data = 'dummy'
    while len(data):
        data = self.conn.recv(2048)
        print repr(data)
        if data.strip() == 'quit':
            print '\n[-] client IP: ' + str(self.address[0]) + ' closing connection ....'
            # self.conn was stored on the instance when the handler was created
            self.conn.close()
            print '[-] client IP: ' + str(self.address[0]) + ' shutting down server ....'
            tcpSocket.close()
            print '[-] ' + str(threading.currentThread()) + ' terminated ....'
            print '[+] threads remaining: ' + str(threading.enumerate())
            # break out so we do not recv() on a closed socket
            break
        else:
            print 'Thread: ' + str(threading.currentThread())
            print '\tClient Hostname: ' + str(socket.gethostname())
            print '\tClient IP: ' + str(self.address[0])
            print '\tClient Port: ' + str(self.address[1])
            print '\tSent:', data
            self.conn.send(data)
def register_thread(self, uid):
    """Register the current thread for redirected IO under the given uid."""
    mythread = threading.currentThread()
    mythread.setName(uid)
    input_buffers[uid] = StringBuffer()
    threads[uid] = threading.currentThread()
    debug_msg("registering thread for uid=%s" % uid, 8)
def theThread(self, lockType):
    import sys
    if sys.version_info[0] < 3:
        name = currentThread().getName()
    else:
        name = currentThread().name

    if lockType == db.DB_LOCK_WRITE:
        lt = "write"
    else:
        lt = "read"

    anID = self.env.lock_id()
    if verbose:
        print "%s: locker ID: %s" % (name, anID)

    for i in xrange(1000):
        lock = self.env.lock_get(anID, "some locked thing", lockType)
        if verbose:
            print "%s: Acquired %s lock: %s" % (name, lt, lock)
        self.env.lock_put(lock)
        if verbose:
            print "%s: Released %s lock: %s" % (name, lt, lock)

    self.env.lock_id_free(anID)
def getTask(self, args):
    while True:
        if self.has_work_left() > 0:
            try:
                req = self.q_request.get(block=True, timeout=5)
            except:
                continue
        else:
            threadname = threading.currentThread().getName()
            print threadname + ': all tasks done, worker shutting down'
            break

        with self.lock:  # keep this update atomic -- entering the critical section
            self.running = self.running + 1

        threadname = threading.currentThread().getName()
        print 'thread ' + threadname + ' issuing request: '
        ans = self.do_job(self.task, req, threadname, args)
        # ans = self.connectpool.getConnect(req)
        self.q_finish.put((req, ans))

        with self.lock:
            self.running -= 1
        threadname = threading.currentThread().getName()
        print 'thread ' + threadname + ' finished the request'
        self.q_request.task_done()
def acquire_inputstream(self, urlpath):
    streaminfo = None
    # First check mappers, without locking, assuming video stream URL paths
    # won't match mappers
    for mapper in self.mappers:
        streaminfo = mapper.get(urlpath)
        # print >>sys.stderr, "videoserv: get_inputstream: Got streaminfo", `streaminfo`, "from", `mapper`
        if streaminfo is not None and (streaminfo['statuscode'] == 200 or streaminfo['statuscode'] == 301):
            return streaminfo

    if DEBUGLOCK:
        print >>sys.stderr, "vs: acq_input: lock", urlpath, currentThread().getName()
    self.lock.acquire()
    try:
        streaminfo = self.urlpath2streaminfo.get(urlpath, None)
    finally:
        if DEBUGLOCK:
            print >>sys.stderr, "vs: acq_input: unlock", urlpath, currentThread().getName()
        self.lock.release()

    # Grab the video stream's own lock so other threads cannot read from it.
    # Do this outside self.lock.
    if streaminfo is not None and 'lock' in streaminfo:
        if DEBUGLOCK:
            print >>sys.stderr, "vs: acq_input: stream: lock", urlpath, currentThread().getName()
        streaminfo['lock'].acquire()
    return streaminfo
def checkBASE64_ExpDict(self, BASE64_ExpDict, unique_id):
    '''Check BASE64-encrypted fields.'''
    for table in BASE64_ExpDict:
        fields = BASE64_ExpDict[table].keys()
        values = BASE64_ExpDict[table].values()
        if not fields:
            continue
        PrintLog('debug', '[%s] checking BASE64-encrypted field data: fields read from the test case: %s\nvalues: %s',
                 threading.currentThread().getName(), fields, values)
        query_where = (unique_id,)
        query_fields = ''
        for field in fields:
            query_fields = query_fields + field + ', '
        query_str = 'SELECT ' + query_fields[:-2] + ' FROM ' + table + ' WHERE uid = %s'
        PrintLog('info', '[%s] executing SQL query: query_str: %s %s',
                 threading.currentThread().getName(), query_str, query_where)
        self.curMy.execute(query_str, query_where)
        self.obj.connMy.commit()
        result = self.curMy.fetchone()  # take the first record of the query result
        if result is None:
            raise TableNoneError(u"%s is NONE" % table)
        expvalues = tuple(values)
        for i in range(len(fields)):
            expvalue = expvalues[i]
            field = fields[i]
            de_result = EncryptLib.getde_base64(result[i])
            PrintLog('info', '[%s] checking BASE64-encrypted field: %s data: de_result: %s\nexpvalue: %s',
                     threading.currentThread().getName(), field, de_result, expvalue)
            if type(expvalue) is dict:
                try:
                    de_resultDict = json_tools.loads(de_result)
                except:
                    # log the raw decoded value; de_resultDict is unbound if parsing failed
                    PrintLog('info', '[%s] BASE64-encrypted field: %s data does not match the expected type: de_result: %s',
                             threading.currentThread().getName(), field, de_result)
                    raise AssertionError, u'BASE64-encrypted field: %s data does not match the expected type' % field
                if 'fanyilist' in expvalue:
                    PrintLog('info', '[%s] checking BASE64-encrypted field: %s, subfield: %s',
                             threading.currentThread().getName(), field, 'fanyilist')
                    self.check_fanyilist(de_resultDict['fanyilist'], expvalue['fanyilist'])
                    del expvalue['fanyilist']
                for key in expvalue:
                    assert key in de_resultDict, u'BASE64-encrypted field: %s has no subfield: %s' % (field, key)
                    if key == 'MsgBody':
                        MsgBody = expvalue['MsgBody']
                        if type(MsgBody) is dict:
                            if 'fanyilist' in MsgBody:
                                PrintLog('info', '[%s] checking BASE64-encrypted field: %s, in %s, subfield: %s',
                                         threading.currentThread().getName(), field, key, 'fanyilist')
                                self.check_fanyilist(de_resultDict[key]['fanyilist'], expvalue[key]['fanyilist'])
                                del MsgBody['fanyilist']
                            for k in MsgBody:
                                PrintLog('info', '[%s] checking BASE64-encrypted field: %s, in %s, subfield: %s',
                                         threading.currentThread().getName(), field, key, k)
                                assert de_resultDict[key][k] == expvalue[key][k], \
                                    u'BASE64-encrypted field: %s, in %s, subfield %s does not match the expected data' % (field, key, k)
                        else:
                            PrintLog('info', '[%s] checking BASE64-encrypted field: %s data: de_resultDict[%s]: %s\nexpvalue[%s]: %s',
                                     threading.currentThread().getName(), field, 'MsgBody', de_resultDict['MsgBody'], 'MsgBody', expvalue['MsgBody'])
                            assert de_resultDict['MsgBody'] == expvalue['MsgBody'], \
                                u'BASE64-encrypted field: %s, subfield MsgBody does not match the expected data' % (field)
                    else:
                        PrintLog('info', '[%s] checking BASE64-encrypted field: %s data: de_resultDict[%s]: %s\nexpvalue[%s]: %s',
                                 threading.currentThread().getName(), field, key, de_resultDict[key], key, expvalue[key])
                        assert de_resultDict[key] == expvalue[key], \
                            u'BASE64-encrypted field: %s, subfield %s does not match the expected data' % (field, key)
            else:
                PrintLog('info', '[%s] checking BASE64-encrypted field %s data: de_result: %s\nexpvalue: %s',
                         threading.currentThread().getName(), fields[i], de_result, expvalue)
                assert de_result == expvalue, \
                    u'BASE64-encrypted field: %s does not match the expected data' % fields[i]
def CCSAssert(self, obj, ExpectationDict, unique_id):
    '''CCS assertion entry point.'''
    try:
        self.obj = obj
        self.obj.connMy.select_db(self.obj.dbnameMy)  # select the database
        self.curMy = self.obj.connMy.cursor()
        ExpDict, BASE64_ExpDict = self.parseExpectationDict(ExpectationDict)
        PrintLog('debug', '[%s] extracted encrypted-field data: ExpDict: %s\nBASE64_ExpDict: %s',
                 threading.currentThread().getName(), ExpDict, BASE64_ExpDict)
        # check the BASE64-encrypted data
        self.checkBASE64_ExpDict(BASE64_ExpDict, unique_id)
        # check the plaintext data
        self.checkExpDict(ExpDict, unique_id)
        return 'PASS',
    except TableNoneError as e:
        PrintLog('info', '[%s] TableNoneError: TableName: %s', threading.currentThread().getName(), unicode(e))
        return 'NONE', unicode(e)
    except AssertionError as e:
        PrintLog('info', '[%s] AssertionError: %s', threading.currentThread().getName(), unicode(e.args[0]))
        return 'FAIL', unicode(e.args[0])
    except Exception as e:
        PrintLog('exception', e)
        return 'ERROR', unicode(e)
def _load_data(cr, module_name, idref, mode, kind):
    """
    kind: data, demo, test, init_xml, update_xml, demo_xml.

    noupdate is False, unless it is demo data or it is csv data in
    init mode.
    """
    try:
        if kind in ('demo', 'test'):
            threading.currentThread().testing = True
        for filename in _get_files_of_kind(kind):
            _logger.info("loading %s/%s", module_name, filename)
            noupdate = False
            if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')):
                noupdate = True
            if tools.config.options.get('noupdate_if_unchanged'):
                # `pathname` and `fp` are expected to be provided by the
                # surrounding loader for the file being processed
                cr.execute('select value from ir_values where name=%s and key=%s',
                           (pathname, 'digest'))
                olddigest = (cr.fetchone() or (None,))[0]
                if olddigest is None:
                    cr.execute('insert into ir_values (name, model, key, value) values (%s, %s, %s, NULL)',
                               (pathname, 'ir_module_module', 'digest',))
                digest = md5.md5(fp.read()).hexdigest()
                fp.seek(0)
                if digest == olddigest:
                    noupdate = True
                else:
                    cr.execute('update ir_values set value=%s where name=%s and key=%s',
                               (digest, pathname, 'digest'))
            tools.convert_file(cr, module_name, filename, idref, mode, noupdate, kind, report)
    finally:
        if kind in ('demo', 'test'):
            threading.currentThread().testing = False
def _getProperList(self):
    """
    Walk providers for propers
    """
    propers = {}
    search_date = datetime.datetime.today() - datetime.timedelta(days=2)

    # for each provider get a list of propers
    origThreadName = threading.currentThread().name
    providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.isActive()]
    for curProvider in providers:
        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

        logger.log(u"Searching for any new PROPER releases from " + curProvider.name)

        try:
            curPropers = curProvider.findPropers(search_date)
        except AuthException, e:
            logger.log(u"Authentication error: " + ex(e), logger.DEBUG)
            continue
        except Exception, e:
            logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.DEBUG)
            logger.log(traceback.format_exc(), logger.DEBUG)
            continue
def searchForNeededEpisodes(show, episodes):
    foundResults = {}
    didSearch = False

    # ask all providers for any episodes they find
    origThreadName = threading.currentThread().name
    providers = [x for x in sickbeard.providers.sortedProviderList() if x.isActive() and not x.backlog_only]
    for curProviderCount, curProvider in enumerate(providers):
        if curProvider.anime_only and not show.is_anime:
            logger.log(u"" + str(show.name) + " is not an anime, skipping ...")
            continue

        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

        try:
            logger.log(u"Searching RSS cache ...")
            curFoundResults = curProvider.searchRSS(episodes)
        except exceptions.AuthException, e:
            logger.log(u"Authentication error: " + ex(e), logger.ERROR)
            if curProviderCount != len(providers):
                continue
            break
        except Exception, e:
            logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
            if curProviderCount != len(providers):
                continue
            break
def ClientHandler(self, replyHandler):
    t = threading.currentThread()
    log.info("Running %s" % t.name)
    while self.active:
        # blocks on recv, but may time out
        try:
            rxdata = self.sock.recv(BUFF)
            log.debug("Data Received: %s" % (repr(rxdata)))
        except socket.timeout:
            continue
        try:
            jdata = json.loads(rxdata.strip())
        except ValueError:
            log.info("ClientHandler could not parse JSON string: %s" % repr(rxdata))
            continue
        log.debug('Client RX jdata: %s' % (repr(jdata)))
        replyHandler(jdata)
    # cleanup
    self.sock.close()
    log.info("Leaving %s" % threading.currentThread().name)
def writerThread(self, d, keys, readers):
    import sys
    if sys.version_info[0] < 3:
        name = currentThread().getName()
    else:
        name = currentThread().name

    if verbose:
        # `start` and `stop` describe the key range and are expected to be
        # set by the enclosing test scaffolding
        print "%s: creating records %d - %d" % (name, start, stop)

    count = len(keys) // len(readers)
    count2 = count
    for x in keys:
        key = '%04d' % x
        dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
        if verbose and x % 100 == 0:
            print "%s: records %d - %d finished" % (name, start, x)
        count2 -= 1
        if not count2:
            readers.pop().start()
            count2 = count

    if verbose:
        print "%s: thread finished" % name
def readerThread(self, d, readerNum):
    import sys
    if sys.version_info[0] < 3:
        name = currentThread().getName()
    else:
        name = currentThread().name

    finished = False
    while not finished:
        try:
            txn = self.env.txn_begin(None, self.txnFlag)
            c = d.cursor(txn)
            count = 0
            rec = c.first()
            while rec:
                count += 1
                key, data = rec
                self.assertEqual(self.makeData(key), data)
                rec = c.next()
            if verbose:
                print "%s: found %d records" % (name, count)
            c.close()
            txn.commit()
            finished = True
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
            if verbose:
                print "%s: Aborting transaction (%s)" % (name, val.args[1])
            c.close()
            txn.abort()
def _checkdbdata(self, obj, tablemaxid, ExpectationDict):
    '''Check the data in the database tables.'''
    obj.connMy.select_db(obj.dbnameMy)  # select the database
    curMy = obj.connMy.cursor()
    for table in ExpectationDict.keys():
        fields = ExpectationDict[table].keys()
        values = ExpectationDict[table].values()
        query_id = str(tablemaxid[table] + 1)
        query_fields = ''
        for field in fields:
            query_fields = query_fields + field + ','
        query_fields = query_fields[:-1]
        query_str = 'SELECT ' + query_fields + ' FROM ' + table + ' WHERE id = ' + query_id
        PrintLog('info', '[%s] executing SQL query: query_str: %s',
                 threading.currentThread().getName(), query_str)
        curMy.execute(query_str)
        obj.connMy.commit()
        result = curMy.fetchone()
        if result is None:
            raise TableNoneError(u"%s is NONE" % table)
        expvalues = tuple(values)
        PrintLog('debug', '[%s] comparing table data with the expected data: result: %s expvalues: %s',
                 threading.currentThread().getName(), result, expvalues)
        assert result == expvalues, u'the stored data is incorrect'
def UBASAssert(self, obj, response, tablemaxid, ExpectationDict):
    '''UBAS assertion entry point.'''
    try:
        # check the response
        response.encoding = response.apparent_encoding
        assert response.status_code == 200, u'wrong HTTP status code'
        responseContent = unicode(response.content, "utf-8")
        responseContentDict = json.loads(responseContent)
        Expectation_HTTPResponse = ExpectationDict['HTTPResponse']
        Expectation_fieltlist = Expectation_HTTPResponse.keys()
        Expectation_valuelist = Expectation_HTTPResponse.values()
        PrintLog('debug', '[%s] comparing each field of the response with the expected data: Expectation_HTTPResponse: %s responseContentDict: %s',
                 threading.currentThread().getName(), Expectation_HTTPResponse, responseContentDict)
        for i in xrange(len(Expectation_fieltlist)):
            assert Expectation_valuelist[i] == responseContentDict[Expectation_fieltlist[i]], \
                u'wrong value for response field %s' % Expectation_fieltlist[i]
        del ExpectationDict['HTTPResponse']
        self._checkdbdata(obj, tablemaxid, ExpectationDict)
        return 'PASS',
    except TableNoneError as e:
        PrintLog('info', '[%s] TableNoneError: TableName: %s', threading.currentThread().getName(), unicode(e))
        return 'NONE', unicode(e)
    except AssertionError as e:
        PrintLog('info', '[%s] AssertionError: %s', threading.currentThread().getName(), unicode(e.args[0]))
        return 'FAIL', unicode(e.args[0])
    except Exception as e:
        PrintLog('exception', e)
        return 'ERROR', unicode(e)
def UPSLabelAssert(self, obj, expectation, userid):
    '''UPSLabel assertion entry point.'''
    # expectation: {"userlabelquery": {"labelIds": [1010001]}, "userlabel": {"labelIds": [1010001]}}
    try:
        tables = []
        values = []
        for table in expectation:
            tables.append(table)
            values.append(expectation[table])
        if set(tables) == set(["userlabelquery", "userlabel"]):
            PrintLog('debug', '[%s] calling label check function: _checkLabel, args: %s',
                     threading.currentThread().getName(), (obj, values, userid))
            self._checkLabel(obj, values, userid)
        if set(tables) == set(["!userlabelquery", "!userlabel"]):
            PrintLog('debug', '[%s] calling label check function: _checkNotLabel, args: %s',
                     threading.currentThread().getName(), (obj, values, userid))
            self._checkNotLabel(obj, values, userid)
        return 'PASS',
    except TableNoneError as e:
        PrintLog('debug', '[%s] TableNoneError: TableName: %s', threading.currentThread().getName(), unicode(e))
        return 'NONE', unicode(e)
    except AssertionError as e:
        PrintLog('debug', '[%s] AssertionError: %s', threading.currentThread().getName(), unicode(e.args[0]))
        return 'FAIL', unicode(e.args[0])
    except Exception as e:
        PrintLog('exception', e)
        return 'ERROR', unicode(e)
def test_multithreaded():
    if '__pypy__' in sys.builtin_module_names:
        py.test.skip("not supported on pypy just yet")
    import threading
    finished = []

    def f():
        t0 = time.time()
        while time.time() - t0 < 1.5:
            pass  # busy loop
        finished.append("foo")

    threads = [threading.Thread(target=f), threading.Thread(target=f)]
    prof = vmprof.Profiler()
    with prof.measure():
        for t in threads:
            t.start()
        f()
        for t in threads:
            t.join()

    stats = prof.get_stats()
    all_ids = set([x[2] for x in stats.profiles])
    cur_id = threading.currentThread().ident
    assert all_ids == set([threading.currentThread().ident, threads[0].ident, threads[1].ident])

    lgt1 = len([x[2] for x in stats.profiles if x[2] == cur_id])
    total = len(stats.profiles)
    # each of the three threads should get roughly a third of the samples (33% +/- 10%)
    assert (0.23 * total) <= lgt1 <= (0.43 * total)
    assert len(finished) == 3
def RunUPSLabelCase(self, sheet, testid, TestData, TestEnvironment):
    '''Run a user-profile label test case.'''
    ModUPSLabelO = ModUPSLabel.ModUPSLabel()
    dbinfo = ModUPSLabelO.getRuncaseEnvironment_Userdb(TestEnvironment)
    function = ModUPSLabelO.DriverCbFunction

    # read the timeout
    timeouttask = ModUPSLabelO.getRuncaseEnvironment_Timeouttask_label(TestEnvironment)

    # read the delay
    Expectation = self.TestCaseO.get_Expectation(sheet, testid)
    parseRt = ModUPSLabelO.parseExpForDriver(Expectation)
    if parseRt is True:
        timeoutdelay = ModUPSLabelO.getRuncaseEnvironment_Timeoutdelay_label(TestEnvironment)
    else:
        timeoutdelay = 0

    # parse the test data
    TestData = ModUPSLabelO.parseParamsForDriver(TestData)

    # run the driver to obtain the unique userid it returns
    PrintLog('debug', '[%s] driver run: TestData: %s CbFunction: %s',
             threading.currentThread().getName(), TestData, function.__name__)
    DriverO = Interface_Driver.Interface_DoData(dbinfo)
    DriverResult = DriverO.insert(TestData, function)  # run the test case
    PrintLog('debug', '[%s] run result: DriverResult: %s',
             threading.currentThread().getName(), DriverResult)

    # load the task arguments
    if DriverResult is False:
        return False
    taskargs = DriverResult
    return timeouttask, timeoutdelay, taskargs
def algorithm(self, parameters=None):
    """
    Actually runs the code
    """
    logging.debug("Running JSM.JobCreator")
    try:
        self.pollSubscriptions()
    except WMException:
        # self.close()
        myThread = threading.currentThread()
        if getattr(myThread, 'transaction', False) \
                and getattr(myThread.transaction, 'transaction', False):
            myThread.transaction.rollback()
        raise
    except Exception as ex:
        # self.close()
        myThread = threading.currentThread()
        if getattr(myThread, 'transaction', False) \
                and getattr(myThread.transaction, 'transaction', False):
            myThread.transaction.rollback()
        # Handle temporary connection problems
        if "(InterfaceError) not connected" in str(ex):
            logging.error('There was a connection problem during the JobCreator algorithm, I will try again next cycle')
        else:
            msg = "Failed to execute JobCreator. Error: %s" % str(ex)
            logging.exception(msg)
            raise JobCreatorException(msg)
def RunCCSCase(self, sheet, testid, TestData, TestEnvironment):
    '''Run a credit-granting interface test case.'''
    ModCCSO = ModCCS.ModCCS()
    dbinfo = ModCCSO.getRuncaseEnvironment_db(TestEnvironment)

    # read the timeout
    timeouttask = ModCCSO.getRuncaseEnvironment_Timeouttask(TestEnvironment)
    timeoutdelay = 0

    # parse the test data
    parseResult = ModCCSO.parseParamsForDriver(TestData, sheet, testid)
    if parseResult is False:
        raise ValueError('parseParamsForDriver is Fail')
    TestData, unique_id = parseResult

    # run the driver to obtain the returned unique id
    PrintLog('debug', '[%s] driver run: TestData: %s\nunique_id: %s',
             threading.currentThread().getName(), TestData, unique_id)
    DriverO = Interface_Driver.Interface_DoData(dbinfo)
    DriverResult = DriverO.insert(TestData)  # run the test case
    PrintLog('debug', '[%s] run result: DriverResult: %s',
             threading.currentThread().getName(), DriverResult)

    # load the task arguments
    if DriverResult is False:
        return False
    taskargs = unique_id
    return timeouttask, timeoutdelay, taskargs
def processXmlQuerySync(rspec, url=None):
    # check if provisioning / monitoring / etc
    if threading.currentThread().callBackURL:
        url = threading.currentThread().callBackURL
    if rspec.query.provisioning is not None:
        status = SyncThread.startMethodAndJoin(
            ProvisioningDispatcher.processProvisioning,
            rspec.query.provisioning, url)
        return status
def fetchData(self):
    threadname = threading.currentThread().name
    if "Thread-" in threadname:
        threading.currentThread().name = "API"

    if self.data == 'OK':
        args = []
        if 'name' in self.kwargs:
            args.append({"name": self.kwargs['name']})
        if 'id' in self.kwargs:
            args.append({"id": self.kwargs['id']})
        if 'group' in self.kwargs:
            args.append({"group": self.kwargs['group']})
        if 'value' in self.kwargs:
            args.append({"value": self.kwargs['value']})
        if 'wait' in self.kwargs:
            args.append({"wait": "True"})
        if args == []:
            args = ''
        logger.info('Received API command: %s %s' % (self.cmd, args))
        methodToCall = getattr(self, "_" + self.cmd)
        methodToCall(**self.kwargs)
        if 'callback' not in self.kwargs:
            if isinstance(self.data, basestring):
                return self.data
            else:
                return json.dumps(self.data)
        else:
            self.callback = self.kwargs['callback']
            self.data = json.dumps(self.data)
            self.data = self.callback + '(' + self.data + ');'
            return self.data
    else:
        return self.data
def setUp(self):
    """Make a logger instance and create tables."""
    self.testInit = TestInit(__file__)
    self.testInit.setLogging()
    self.testInit.setDatabaseConnection()
    self.testInit.setSchema()

    myThread = threading.currentThread()
    if myThread.dialect == 'MySQL':
        myThread.create = """
            create table test (bind1 varchar(20), bind2 varchar(20)) ENGINE=InnoDB"""
    if myThread.dialect == 'SQLite':
        myThread.create = """
            create table test (bind1 varchar(20), bind2 varchar(20))"""
    myThread.insert = """
        insert into test (bind1, bind2) values (:bind1, :bind2)"""
    myThread.insert_binds = \
        [{'bind1': 'value1a', 'bind2': 'value2a'},
         {'bind1': 'value1b', 'bind2': 'value2b'},
         {'bind1': 'value1c', 'bind2': 'value2d'}]
    myThread.select = "select * from test"

    myThread.transaction = Transaction(myThread.dbi)
    myThread.transaction.processData(myThread.create)
    myThread.transaction.processData(myThread.insert, myThread.insert_binds)
    myThread.transaction.commit()
    return
def __call__(self, parameters):
    DefaultSlave.__call__(self, parameters)

    # Handle the message
    args = self.messageArgs
    logging.debug("Handling AddWorkflowToManage message: %s" % str(args))
    myThread = threading.currentThread()

    # Validate arguments
    if "FilesetMatch" in args and "WorkflowId" in args \
            and "SplitAlgo" in args and "Type" in args:
        try:
            myThread.transaction.begin()
            self.addManagedWorkflow.execute(workflow=args['WorkflowId'],
                                            fileset_match=args['FilesetMatch'],
                                            split_algo=args['SplitAlgo'],
                                            type=args['Type'],
                                            conn=myThread.transaction.conn,
                                            transaction=True)
            myThread.transaction.commit()
        except:
            myThread.transaction.rollback()
            raise
    else:
        logging.error("Received malformed parameters: %s" % str(args))

    # Report as done
    myThread.msgService.finish()
def failJobs(self, failedJobs):
    """
    _failJobs_

    Dump those jobs that have failed due to timeout
    """
    if len(failedJobs) == 0:
        return

    myThread = threading.currentThread()

    # Load DAOs
    setFWJRAction = self.daoFactory(classname="Jobs.SetFWJRPath")
    loadAction = self.daoFactory(classname="Jobs.LoadFromID")

    jrBinds = []
    for job in failedJobs:
        jrPath = os.path.join(job.getCache(), "Report.%i.pkl" % (job["retry_count"]))
        jrBinds.append({"jobid": job["id"], "fwjrpath": jrPath})
        # job.setFWJRPath(os.path.join(job.getCache(),
        #                              'Report.%i.pkl' % (job['retry_count'])))

    # Set all paths at once
    myThread.transaction.begin()
    setFWJRAction.execute(binds=jrBinds)
    self.changeState.propagate(failedJobs, "jobfailed", "executing")
    logging.info("Failed %i jobs" % (len(failedJobs)))
    myThread.transaction.commit()
    return
def __call__(self, parameters):
    DefaultSlave.__call__(self, parameters)

    # Handle the message
    args = self.messageArgs
    msg = "Handling RemoveFromWorkflowManagementLocationList message: %s" % str(args)
    logging.debug(msg)
    myThread = threading.currentThread()

    # Validate arguments
    if "FilesetMatch" in args and "WorkflowId" in args and "Locations" in args:
        locations = args['Locations'].split(",")
        try:
            myThread.transaction.begin()
            for loc in locations:
                self.unmarkLocation.execute(workflow=args['WorkflowId'],
                                            fileset_match=args['FilesetMatch'],
                                            location=loc,
                                            conn=myThread.transaction.conn,
                                            transaction=True)
            myThread.transaction.commit()
        except:
            myThread.transaction.rollback()
            raise
    else:
        logging.error("Received malformed parameters: %s" % str(args))

    # Report as done
    myThread.msgService.finish()
def main(self, argv, opts_etc=None):
    if threading.currentThread().getName() == "MainThread":
        threading.currentThread().setName("mt")

    err, opts, source, sink = self.opt_parse(argv)
    if err:
        return err

    if opts_etc:
        opts.etc = opts_etc  # Used for unit tests, etc.

    logging.info(self.name + "...")
    logging.info(" source : %s", source)
    logging.info(" sink : %s", sink)
    logging.info(" opts : %s", opts.safe)

    source_class, sink_class = self.find_handlers(opts, source, sink)
    if not source_class:
        return "error: unknown type of source: " + source
    if not sink_class:
        return "error: unknown type of sink: " + sink

    err = sink_class.check_source(opts, source_class, source, sink_class, sink)
    if err:
        return err

    try:
        return pump.PumpingStation(opts, source_class, source,
                                   sink_class, sink).run()
    except KeyboardInterrupt:
        return "interrupted."
def RunUBASCase(self, sheet, testid, TestData, TestEnvironment):
    '''Run a data-backflow interface test case.'''
    ModUBASO = ModUBAS.ModUBAS()
    dbinfo = ModUBASO.getRuncaseEnvironment_db(TestEnvironment)
    url = ModUBASO.getRuncaseEnvironment_Url(TestEnvironment)
    headers = ModUBASO.getRuncaseEnvironment_Headers(TestEnvironment)

    # read the timeout
    timeouttask = ModUBASO.getRuncaseEnvironment_Timeouttask(TestEnvironment)
    timeoutdelay = 0

    # get each table's max id before the data is inserted
    Expectation = self.TestCaseO.get_Expectation(sheet, testid)
    TableList = ModUBASO.parseExpForDriver(Expectation)
    if TableList is False:
        return False
    DriverOO = Interface_Driver.Interface_DoData(dbinfo)
    TableMaxid = DriverOO.getTableMaxid(TableList)

    # run the driver to obtain the response
    PrintLog('debug', '[%s] driver run: headers: %s TestData: %s',
             threading.currentThread().getName(), headers, TestData)
    DriverO = Interface_Driver.Interface_Http(url)
    DriverResult = DriverO.post(headers, TestData)  # run the test case
    PrintLog('debug', '[%s] run result: DriverResult: %s',
             threading.currentThread().getName(), DriverResult)

    # load the task arguments
    if DriverResult is False:
        return False
    taskargs = DriverResult, TableMaxid
    return timeouttask, timeoutdelay, taskargs
def run(self):
    for _ in range(10):
        print(threading.currentThread().getName())
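# The run() above only takes effect on a threading.Thread subclass launched
# via start(). A minimal, self-contained usage sketch (the PrinterThread
# class name is hypothetical, not from the original source):
import threading

class PrinterThread(threading.Thread):
    def run(self):
        for _ in range(3):
            print(threading.currentThread().getName())

t = PrinterThread(name="printer-1")
t.start()
t.join()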
def get_current_stack():
    """Return the current thread's execution stack."""
    return threading.currentThread().stack
def calculate():
    global ddir
    global host
    global user, passwd, dbname, table

    # connect to the database and create the table if needed
    lock.acquire()
    conn, cur = db.get_or_create_db(host, user, passwd, dbname)
    db.create_twtable_if_needed(cur, table)
    lock.release()

    # begin fetching
    curthread = threading.currentThread().getName()
    search_str = get_next()
    while search_str is not None:
        print(curthread, "begin fetching", search_str)

        # load existing data from the database
        index_set, exist_set = db.load_exist(cur, search_str, table)
        index = max([int(x.split(".")[0]) for x in index_set], default=-1) + 1
        print("now start from index ", index)

        # testing shows only fewer than 10 pages can be fetched (otherwise no
        # content is returned), so 12 is enough
        for i in range(0, 12):
            print("start loop", i)
            time.sleep(1)
            # tbs = 'isz:l'  # only big-size images, but too few
            params = dict(q=search_str, ijn=i, start=i * 100, tbm='isch')
            url = base_url + urlencode(params)

            retry = 0
            while retry < 5:
                try:
                    page = requests.get(url, headers=headers)
                    break
                except Exception as e:
                    print("request get error, retry...")
                    retry += 1
                    time.sleep(2)
            if retry >= 5:
                print("current url failed", url)
                continue

            soup = BeautifulSoup(page.text, 'lxml')
            image_divs = soup.find_all('div', class_='rg_meta')

            # handle 100 imgs
            for div in image_divs:
                meta = json.loads(div.text)

                # set the image's name
                info = dict()
                info['imgsavename'] = str(index) + ".jpg"
                info['desc'] = meta.get('pt', None)
                info['fromUrl'] = meta.get('ru', None)
                info['name'] = search_str
                if 'ou' in meta:
                    info['imgurl'] = meta['ou']
                    info['height'] = meta['oh']
                    info['width'] = meta['ow']
                elif 'tu' in meta:
                    info['imgurl'] = meta['tu']
                    info['height'] = meta['th']
                    info['width'] = meta['tw']
                else:
                    info['imgurl'] = None
                    info['height'] = None
                    info['width'] = None

                if info['imgurl'] in exist_set or info['imgurl'] is None:
                    print("img %s of %s exists, skip" % (search_str, info['imgurl']))
                    continue
                print("got image:", info['imgurl'])

                if download_img(info, os.path.join(ddir, search_str.replace(" ", "_"))):
                    print("finish page", info['imgsavename'])
                    index += 1
                else:
                    print("skip page", info['imgsavename'])

                # store into db as each image is downloaded
                try:
                    print(curthread, "begin store into database...")
                    lock.acquire()
                    db.insert_info_one(conn, cur, table, info, ddir)
                    lock.release()
                    exist_set.add(info['imgurl'])
                    print(curthread, "finish store into database...")
                except ProgrammingError as e:
                    lock.release()
                    conn.commit()
                    cur.close()
                    conn.close()
                    print(e, "exception happened in ", curthread)

        # reopen, to avoid cursor timeout
        conn.commit()
        cur.close()
        conn.close()
        conn, cur = db.get_or_create_db(host, user, passwd, dbname)
        search_str = get_next()

    print(curthread, "thread done")
    lock_inc()
def release(self, args):
    logger.info("Thread {0} released the semaphore.".format(
        threading.currentThread().getName()))
    self._semaphore.release()
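# A hedged sketch of the acquire counterpart implied by release() above
# (the Gate class name and the semaphore/logger wiring are assumptions,
# not from the original source):
import logging
import threading

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class Gate(object):
    def __init__(self, slots=2):
        self._semaphore = threading.Semaphore(slots)

    def acquire(self, args=None):
        # blocks until one of the slots is free
        self._semaphore.acquire()
        logger.info("Thread {0} acquired the semaphore.".format(
            threading.currentThread().getName()))

    def release(self, args=None):
        logger.info("Thread {0} released the semaphore.".format(
            threading.currentThread().getName()))
        self._semaphore.release()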
def f():
    ident.append(threading.currentThread().ident)
    done.set()
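# A self-contained sketch of the harness this callback implies -- `ident`
# collects the worker's thread id and `done` signals completion (the names
# come from the snippet; the surrounding test wiring is an assumption):
import threading

ident = []
done = threading.Event()

def f():
    ident.append(threading.currentThread().ident)
    done.set()

worker = threading.Thread(target=f)
worker.start()
done.wait(timeout=5)
assert ident and ident[0] == worker.ident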
def _get_db(self):
    # find the current DB based on the thread/worker db name (see netsvc)
    db_name = getattr(threading.currentThread(), 'dbname', None)
    if db_name:
        return sql_db.db_connect(db_name)
def check():
    print('%s is checking' % currentThread().getName())
    time.sleep(5)
    event.set()
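# The waiting side that check() implies, as a runnable sketch (the Event
# and thread wiring are assumptions based on the snippet):
import threading
import time
from threading import currentThread

event = threading.Event()

def check():
    print('%s is checking' % currentThread().getName())
    time.sleep(2)
    event.set()

def wait_for_check():
    print('%s is waiting' % currentThread().getName())
    event.wait()  # blocks until check() calls event.set()
    print('%s saw the event fire' % currentThread().getName())

threading.Thread(target=check, name='checker').start()
wait_for_check()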
def testSaveTransaction(self):
    """
    _testSaveTransaction_

    Create a job and a job mask and save them both to the database.
    Load the job from the database and verify that everything was written
    correctly. Begin a new transaction and update the job mask again.
    Load the mask and verify that it's correct. Finally, rollback the
    transaction and reload the mask to verify that it is in the correct
    state.
    """
    testJobA = self.createTestJob()

    testJobA["mask"]["FirstEvent"] = 1
    testJobA["mask"]["LastEvent"] = 2
    testJobA["mask"]["FirstLumi"] = 3
    testJobA["mask"]["LastLumi"] = 4
    testJobA["mask"]["FirstRun"] = 5
    testJobA["mask"]["LastRun"] = 6

    testJobA.save()

    testJobB = Job(id=testJobA["id"])
    testJobB.loadData()

    assert testJobA["mask"] == testJobB["mask"], \
        "ERROR: Job mask did not load properly"

    myThread = threading.currentThread()
    myThread.transaction.begin()

    testJobA["mask"]["FirstEvent"] = 7
    testJobA["mask"]["LastEvent"] = 8
    testJobA["mask"]["FirstLumi"] = 9
    testJobA["mask"]["LastLumi"] = 10
    testJobA["mask"]["FirstRun"] = 11
    testJobA["mask"]["LastRun"] = 12
    testJobA["name"] = "stevesJob"
    testJobA["couch_record"] = "someCouchRecord"
    testJobA["location"] = "test2.site.ch"

    testJobA.save()
    testJobC = Job(id=testJobA["id"])
    testJobC.loadData()

    assert testJobA["mask"] == testJobC["mask"], \
        "ERROR: Job mask did not load properly"
    assert testJobC["name"] == "stevesJob", \
        "ERROR: Job name did not save"
    assert testJobC["couch_record"] == "someCouchRecord", \
        "ERROR: Job couch record did not save"
    assert testJobC["location"] == "test2.site.ch", \
        "ERROR: Job site did not save"

    myThread.transaction.rollback()

    testJobD = Job(id=testJobA["id"])
    testJobD.loadData()

    assert testJobB["mask"] == testJobD["mask"], \
        "ERROR: Job mask did not load properly"

    return
def delayed():
    print 'Worker running', threading.currentThread().getName()
    return
def test_acceptability_criterial_buttons(self):
    cr, uid = self.cr, self.uid
    self.test_create_method()
    threading.currentThread().testing = True

    # Search the user and the user story whose criteria will be changed
    user_id = self.user.search(cr, uid, [('name', '=', 'User Test')])
    story_id = self.story.search(cr, uid, [('name', '=', 'User Story Test')])
    user_brw = user_id and self.user.browse(cr, uid, user_id[0])
    if not user_brw.email:
        user_brw.write({'email': '*****@*****.**'})
    story_brw = story_id and self.story.browse(cr, uid, story_id[0])

    # Approve an acceptability criteria with a specific user and check
    # that the generated message is sent by the user who approved it.
    approve_user_id = self.user.create(cr, uid, {
        'name': 'Approver User',
        'login': '******',
        'email': '*****@*****.**',
    })
    self.assertTrue(approve_user_id)
    user_brw = self.user.browse(cr, uid, approve_user_id)

    # Add the user story group to the user created previously
    us_manager_group = self.data.get_object(cr, uid, 'user_story',
                                            'group_user_story_manager')
    self.user.write(cr, uid, [approve_user_id],
                    {'groups_id': [(4, us_manager_group.id)]})

    i = 0
    for criterial in user_brw and story_brw and story_brw.accep_crit_ids:
        if i == 0:
            mes = 'El criterio%{0}%ha sido aceptado por%'.format(criterial.name)
            self.assertFalse(criterial.accepted)
            self.criterial.approve(cr, user_brw.id, [criterial.id])
            m_id = self.message.search(cr, uid, [('res_id', '=', story_brw.id),
                                                 ('body', 'ilike', mes)])
            self.assertTrue(m_id, "The message was not created")
            msg_data = self.message.read(cr, uid, m_id, [
                'model', 'author_id', 'create_uid', 'write_uid',
                'email_from', 'notified_partner_ids', 'partner_ids',
            ])[0]
            self.partner = self.registry('res.partner')
            author_id = msg_data.get('author_id')[0]
            approver_partner = self.user.browse(cr, uid, approve_user_id).partner_id.id
            self.assertEqual(approver_partner, author_id)
            cri_brw = self.criterial.browse(cr, uid, criterial.id)
            self.assertTrue(cri_brw.accepted, "The criterial was not accepted")
        elif i == 1:
            mes = 'El criterio%{0}%ha sido terminado por%'.format(criterial.name)
            self.criterial.ask_review(cr, user_brw.id, [criterial.id])
            m_id = self.message.search(cr, uid, [('res_id', '=', story_brw.id),
                                                 ('body', 'ilike', mes)])
            self.assertTrue(m_id, "The message was not created")
        i += 1
def connect(self, host=None, port=None, user=None, password=None, encryption=None,
            smtp_debug=False, mail_server_id=None):
    """Returns a new SMTP connection to the given SMTP server.

    When running in test mode, this method does nothing and returns `None`.

    :param host: host or IP of SMTP server to connect to, if mail_server_id not passed
    :param int port: SMTP port to connect to
    :param user: optional username to authenticate with
    :param password: optional password to authenticate with
    :param string encryption: optional, ``'ssl'`` | ``'starttls'``
    :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o will be output in logs)
    :param mail_server_id: ID of specific mail server to use (overrides other parameters)
    """
    # Do not actually connect while running in test mode
    if getattr(threading.currentThread(), 'testing', False):
        return None

    mail_server = smtp_encryption = None
    if mail_server_id:
        mail_server = self.sudo().browse(mail_server_id)
    elif not host:
        mail_server = self.sudo().search([], order='sequence', limit=1)

    if mail_server:
        smtp_server = mail_server.smtp_host
        smtp_port = mail_server.smtp_port
        smtp_user = mail_server.smtp_user
        smtp_password = mail_server.smtp_pass
        smtp_encryption = mail_server.smtp_encryption
        smtp_debug = smtp_debug or mail_server.smtp_debug
    else:
        # we were passed individual smtp parameters or nothing and there is no default server
        smtp_server = host or tools.config.get('smtp_server')
        smtp_port = tools.config.get('smtp_port', 25) if port is None else port
        smtp_user = user or tools.config.get('smtp_user')
        smtp_password = password or tools.config.get('smtp_password')
        smtp_encryption = encryption
        if smtp_encryption is None and tools.config.get('smtp_ssl'):
            smtp_encryption = 'starttls'  # smtp_ssl => STARTTLS as of v7

    if not smtp_server:
        raise UserError((_("Missing SMTP Server") + "\n" +
                         _("Please define at least one SMTP server, "
                           "or provide the SMTP parameters explicitly.")))

    if smtp_encryption == 'ssl':
        if 'SMTP_SSL' not in smtplib.__all__:
            raise UserError(
                _("Your Odoo Server does not support SMTP-over-SSL. "
                  "You could use STARTTLS instead. "
                  "If SSL is needed, an upgrade to Python 2.6 on the server-side "
                  "should do the trick."))
        connection = smtplib.SMTP_SSL(smtp_server, smtp_port, timeout=SMTP_TIMEOUT)
    else:
        connection = smtplib.SMTP(smtp_server, smtp_port, timeout=SMTP_TIMEOUT)
    connection.set_debuglevel(smtp_debug)

    if smtp_encryption == 'starttls':
        # starttls() will perform ehlo() if needed first
        # and will discard the previous list of services
        # after successfully performing STARTTLS command,
        # (as per RFC 3207) so for example any AUTH
        # capability that appears only on encrypted channels
        # will be correctly detected for next step
        connection.starttls()

    if smtp_user:
        # Attempt authentication - will raise if AUTH service not supported.
        # The user/password must be converted to bytestrings in order to be
        # usable for certain hashing schemes, like HMAC.
        # See also bug #597143 and python issue #5285
        smtp_user = pycompat.to_text(ustr(smtp_user))
        smtp_password = pycompat.to_text(ustr(smtp_password))
        connection.login(smtp_user, smtp_password)

    # Some methods of SMTP don't check whether EHLO/HELO was sent.
    # Anyway, as it may have been sent by login(), all subsequent usages
    # should consider this command as sent.
    connection.ehlo_or_helo_if_needed()

    return connection
def testAutoIncrementCheck(self):
    """
    _AutoIncrementCheck_

    Test and see whether we can find and set the auto_increment values
    """
    myThread = threading.currentThread()
    if not myThread.dialect.lower() == 'mysql':
        return

    testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                            name="wf001", task="Test")
    testWorkflow.create()

    testFileset = Fileset(name="TestFileset")
    testFileset.create()

    testSubscription = Subscription(fileset=testFileset, workflow=testWorkflow)
    testSubscription.create()

    testFileA = File(lfn=makeUUID(), locations="test.site.ch")
    testFileB = File(lfn=makeUUID(), locations="test.site.ch")
    testFileA.create()
    testFileB.create()

    testFileset.addFile([testFileA, testFileB])
    testFileset.commit()
    testSubscription.acquireFiles([testFileA, testFileB])

    testJobGroup = JobGroup(subscription=testSubscription)
    testJobGroup.create()

    incrementDAO = self.daoFactory(classname="Jobs.AutoIncrementCheck")
    incrementDAO.execute()

    testJob = Job()
    testJob.create(group=testJobGroup)
    self.assertEqual(testJob.exists(), 1)

    incrementDAO.execute()
    testJob = Job()
    testJob.create(group=testJobGroup)
    self.assertEqual(testJob.exists(), 2)

    incrementDAO.execute(input=10)
    testJob = Job()
    testJob.create(group=testJobGroup)
    self.assertEqual(testJob.exists(), 11)

    incrementDAO.execute(input=5)
    testJob = Job()
    testJob.create(group=testJobGroup)
    self.assertEqual(testJob.exists(), 12)
    return
def hello(): print("hello world!{}".format(threading.currentThread())) print("start.....{}".format(threading.currentThread())) yield from asyncio.sleep(10) print("Done......{}".format(threading.currentThread())) print("Hello again!{}".format(threading.currentThread()))
def send_email(self, message, mail_server_id=None, smtp_server=None, smtp_port=None,
               smtp_user=None, smtp_password=None, smtp_encryption=None,
               smtp_debug=False, smtp_session=None):
    """Sends an email directly (no queuing).

    No retries are done, the caller should handle MailDeliveryException
    in order to ensure that the mail is never lost.

    If the mail_server_id is provided, sends using this mail server,
    ignoring other smtp_* arguments. If mail_server_id is None and
    smtp_server is None, use the default mail server (highest priority).
    If mail_server_id is None and smtp_server is not None, use the
    provided smtp_* arguments. If both mail_server_id and smtp_server
    are None, look for an 'smtp_server' value in server config, and
    fail if not found.

    :param message: the email.message.Message to send. The envelope sender
        will be extracted from the ``Return-Path`` (if present), or will be
        set to the default bounce address. The envelope recipients will be
        extracted from the combined list of ``To``, ``CC`` and ``BCC``
        headers.
    :param smtp_session: optional pre-established SMTP session. When
        provided, overrides `mail_server_id` and all the `smtp_*`
        parameters. Passing the matching `mail_server_id` may yield better
        debugging/log messages. The caller is in charge of disconnecting
        the session.
    :param mail_server_id: optional id of ir.mail_server to use for
        sending. Overrides other smtp_* arguments.
    :param smtp_server: optional hostname of SMTP server to use
    :param smtp_encryption: optional TLS mode, one of 'none', 'starttls'
        or 'ssl' (see ir.mail_server fields for explanation)
    :param smtp_port: optional SMTP port, if mail_server_id is not passed
    :param smtp_user: optional SMTP user, if mail_server_id is not passed
    :param smtp_password: optional SMTP password to use, if mail_server_id
        is not passed
    :param smtp_debug: optional SMTP debug flag, if mail_server_id is not
        passed
    :return: the Message-ID of the message that was just sent, if
        successfully sent, otherwise raises MailDeliveryException and logs
        the root cause.
    """
    # Use the default bounce address **only if** no Return-Path was
    # provided by caller. Caller may be using Variable Envelope Return
    # Path (VERP) to detect no-longer valid email addresses.
    smtp_from = message['Return-Path'] or self._get_default_bounce_address() or message['From']
    assert smtp_from, "The Return-Path or From header is required for any outbound email"

    # The email's "Envelope From" (Return-Path), and all recipient
    # addresses must only contain ASCII characters.
    from_rfc2822 = extract_rfc2822_addresses(smtp_from)
    assert from_rfc2822, ("Malformed 'Return-Path' or 'From' address: %r - "
                          "It should contain one valid plain ASCII email") % smtp_from
    # use the last extracted email, to support rarities like 'Support@MyComp <*****@*****.**>'
    smtp_from = from_rfc2822[-1]
    email_to = message['To']
    email_cc = message['Cc']
    email_bcc = message['Bcc']
    del message['Bcc']

    smtp_to_list = [
        address
        for base in [email_to, email_cc, email_bcc]
        for address in extract_rfc2822_addresses(base)
        if address
    ]
    assert smtp_to_list, self.NO_VALID_RECIPIENT

    x_forge_to = message['X-Forge-To']
    if x_forge_to:
        # `To:` header forged, e.g. for posting on mail.channels, to avoid confusion
        del message['X-Forge-To']
        del message['To']  # avoid multiple To: headers!
        message['To'] = x_forge_to

    # Do not actually send emails in testing mode!
    if getattr(threading.currentThread(), 'testing', False) or self.env.registry.in_test_mode():
        _test_logger.info("skip sending email in test mode")
        return message['Message-Id']

    try:
        message_id = message['Message-Id']
        smtp = smtp_session
        smtp = smtp or self.connect(
            smtp_server, smtp_port, smtp_user, smtp_password,
            smtp_encryption, smtp_debug, mail_server_id=mail_server_id)
        smtp.sendmail(smtp_from, smtp_to_list, message.as_string())
        # do not quit() a pre-established smtp_session
        if not smtp_session:
            smtp.quit()
    except smtplib.SMTPServerDisconnected:
        raise
    except Exception as e:
        params = (ustr(smtp_server), e.__class__.__name__, ustr(e))
        msg = _("Mail delivery failed via SMTP server '%s'.\n%s: %s") % params
        _logger.info(msg)
        raise MailDeliveryException(_("Mail Delivery Failed"), msg)
    return message_id
def transfer_analysed_files(self, analysed_files_queue):
    """
    Transfer the analysed files to the webserver location and delete
    the mp4 files.
    """
    current_thread = threading.currentThread()
    self.__database_manager = DatabaseManager.DatabaseManager()
    while getattr(current_thread, 'is_running', True):
        try:
            file_name = analysed_files_queue.get(False)
        except Queue.Empty:
            time.sleep(1)
            continue

        # delete the mp4 file
        clear_command = ['rm', file_name]
        clear_process = subprocess.Popen(clear_command)
        while clear_process.poll() is None:
            time.sleep(0.1)

        # convert the h264 file to mp4 for the website
        file_name = os.path.splitext(file_name)[0] + '.h264'
        converted_file_name = os.path.splitext(file_name)[0] + '.mp4'
        convert_command = [MP4BOX, "-add", file_name, converted_file_name]
        convert_process = subprocess.Popen(convert_command)
        while convert_process.poll() is None:
            time.sleep(0.1)

        # delete the h264 file
        clear_command = ['rm', file_name]
        clear_process = subprocess.Popen(clear_command)
        while clear_process.poll() is None:
            time.sleep(0.1)

        # move the converted file to the website location
        move_command = ['mv', converted_file_name, '/var/www/html/videos/']
        move_process = subprocess.Popen(move_command)
        while move_process.poll() is None:
            time.sleep(0.1)

        timest = time.time()
        timestamp = datetime.datetime.fromtimestamp(timest).strftime('%Y-%m-%d %H:%M:%S')
        db_file_record = [converted_file_name, timestamp]
        self.__database_manager.insert_data_in_database(
            db_file_record, 'HOME_SCANNER_VIDEO_FILES')
        analysed_files_queue.task_done()
def __init__(self, msg, *args, **kwargs):
    index = getattr(threading.currentThread(), 'index', 0)
    if index:
        msg = '\n'.join('%d> %s' % (index, l) for l in msg.splitlines())
    super(Error, self).__init__(msg, *args, **kwargs)
def start(self):
    if self.is_runnning:
        self.is_runnning = False
        gui.button_start_download['text'] = 'Start download'
        return

    gui.button_start_download['text'] = 'Stop'
    self.is_runnning = True
    self.total_downloaded = 0
    self.finished_downloads = 0
    self.active_downloads_count = 0
    self.current_speed = 0
    last_total_downloaded = 0
    downloaded_since_last_measurement = 0
    self.download_left = vid_list.total_size
    self.time_left = 0

    if gui.is_pending_checked.get() == 1:
        total_size = vid_list.pending_delete_total_size
        total_videos = vid_list.pending_delete_count
    else:
        total_size = vid_list.total_size
        total_videos = vid_list.total_videos

    t0 = time.time()
    i = 0
    self.t = threading.currentThread()
    while self.is_runnning:
        time.sleep(0.1)
        if self.active_downloads_count < int(gui.spin_threads_count.get()):
            try:
                if (gui.is_pending_checked.get() == 0
                        or vid_list.data["videos"][i]["stale"] == True):
                    url = "https:" + vid_list.data["videos"][i]["files"]["mp4"]["url"]
                    title = vid_list.data["videos"][i]["title"]
                    fileID = vid_list.data["videos"][i]["file_id"]
                    # the filename should always start with fileID, because
                    # it's unique, and end with .mp4; if the video has a
                    # title, append it
                    name = fileID
                    if title:
                        # the video title needs to be sanitized before it can
                        # be used in a filename
                        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
                        title = ''.join(c for c in title if c in valid_chars)
                        name = name + "_" + title
                    name = name + ".mp4"
                    self.d = threading.Thread(target=download_manager.get_video,
                                              args=(url, name))
                    self.d.start()
                    i += 1
                else:
                    i += 1
            except:
                i += 1

        time_since_last_speed_measure = time.time() - t0
        if time_since_last_speed_measure >= 2:
            self.download_left = total_size - self.total_downloaded
            downloaded_since_last_measurement = self.total_downloaded - last_total_downloaded
            self.current_speed = downloaded_since_last_measurement / 2
            if self.current_speed > 0:
                self.time_left = self.download_left / self.current_speed
            else:
                self.time_left = 3600
            last_total_downloaded = self.total_downloaded
            t0 = time.time()

        percentage = round(float(self.total_downloaded) / float(total_size) * 100)
        gui.text_total_downloaded['text'] = (sizeof_fmt(self.total_downloaded)
                                             + "/" + sizeof_fmt(total_size))
        gui.text_progress_finished['text'] = (str(self.finished_downloads)
                                              + "/" + str(total_videos))
        gui.text_current_speed['text'] = (sizeof_fmt(self.current_speed) + "/s ("
                                          + str(datetime.timedelta(seconds=round(self.time_left))) + ")")
        gui.progress_bar['value'] = percentage

        if self.finished_downloads >= total_videos:
            messagebox.showinfo('Done', 'All videos downloaded')
            gui.button_start_download['text'] = 'Start download'
            return
def tearDown_test_web_server():
    if sickrage.srCore:
        sickrage.srCore.srWebServer.shutdown()


def load_tests(loader, tests):
    global TESTALL
    TESTALL = True
    return tests


# =================
# test globals
# =================
threading.currentThread().setName('TESTS')

PROG_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir, 'sickrage'))
if PROG_DIR not in sys.path:
    sys.path.insert(0, PROG_DIR)

sickrage.DATA_DIR = TESTDIR = os.path.abspath(os.path.dirname(__file__))
sickrage.CONFIG_FILE = TESTCONFIGNAME = os.path.abspath(
    os.path.join(TESTDIR, "config.ini"))
sickrage.NOLAUNCH = True
sickrage.srCore = Core()

TESTALL = False
TESTSKIPPED = ['test_issue_submitter', 'test_ssl_sni']
TESTDBNAME = "sickrage.db"
async def hello():
    print('Hello world! (%s)' % threading.currentThread())
    await asyncio.sleep(1)
    print('Hello again! (%s)' % threading.currentThread())
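# A sketch of how the coroutine above is typically driven (asyncio.run is
# Python 3.7+; it reuses hello() defined above). Running two copies
# concurrently makes it visible that both share one thread:
import asyncio

async def main():
    await asyncio.gather(hello(), hello())

asyncio.run(main())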
def run(self, force=False):
    if self.amActive or (not sickrage.app.config.use_subtitles
                         or sickrage.app.developer) and not force:
        return

    self.amActive = True

    # set thread name
    threading.currentThread().setName(self.name)

    if len(sickrage.subtitles.getEnabledServiceList()) < 1:
        sickrage.app.log.warning(
            'Not enough services selected. At least 1 service is required to '
            'search subtitles in the background')
        return

    sickrage.app.log.info('Checking for subtitles')

    # get episodes on which we want subtitles
    # criteria is:
    #  - show subtitles = 1
    #  - episode subtitles != config wanted languages or 'und' (depends on config multi)
    #  - search count < 2 and diff(airdate, now) > 1 week : now -> 1d
    #  - search count < 7 and diff(airdate, now) <= 1 week : now -> 4h -> 8h -> 16h -> 1d -> 1d -> 1d
    today = datetime.date.today().toordinal()

    results = []
    for s in sickrage.app.showlist:
        for e in (e for e in sickrage.app.main_db.get_many('tv_episodes', s.indexerid)
                  if s.subtitles == 1
                  and e['location'] != ''
                  and e['subtitles'] not in sickrage.subtitles.wanted_languages()
                  and (e['subtitles_searchcount'] <= 2
                       # the <= 7 bound follows the criteria comment above
                       or (e['subtitles_searchcount'] <= 7 and (today - e['airdate']) <= 7))):
            results += [{
                'show_name': s.name,
                'showid': e['showid'],
                'season': e['season'],
                'episode': e['episode'],
                'status': e['status'],
                'subtitles': e['subtitles'],
                'searchcount': e['subtitles_searchcount'],
                'lastsearch': e['subtitles_lastsearch'],
                'location': e['location'],
                'airdate_daydiff': (today - e['airdate'])
            }]

    if len(results) == 0:
        sickrage.app.log.info('No subtitles to download')
        return

    rules = self._getRules()
    now = datetime.datetime.now()
    for epToSub in results:
        if not os.path.isfile(epToSub['location']):
            sickrage.app.log.debug(
                'Episode file does not exist, cannot download subtitles for '
                'episode %dx%d of show %s'
                % (epToSub['season'], epToSub['episode'], epToSub['show_name']))
            continue

        # http://bugs.python.org/issue7980#msg221094
        # I don't think this needs to be done here, but keeping it to be safe
        datetime.datetime.strptime('20110101', '%Y%m%d')

        if ((epToSub['airdate_daydiff'] > 7 and epToSub['searchcount'] < 2
             and now - datetime.datetime.strptime(epToSub['lastsearch'], dateTimeFormat)
             > datetime.timedelta(hours=rules['old'][epToSub['searchcount']]))
                or (epToSub['airdate_daydiff'] <= 7 and epToSub['searchcount'] < 7
                    and now - datetime.datetime.strptime(epToSub['lastsearch'], dateTimeFormat)
                    > datetime.timedelta(hours=rules['new'][epToSub['searchcount']]))):
            sickrage.app.log.debug(
                'Downloading subtitles for episode %dx%d of show %s'
                % (epToSub['season'], epToSub['episode'], epToSub['show_name']))

            showObj = findCertainShow(int(epToSub['showid']))
            if not showObj:
                sickrage.app.log.debug('Show not found')
                return

            epObj = showObj.get_episode(int(epToSub["season"]), int(epToSub["episode"]))
            if isinstance(epObj, str):
                sickrage.app.log.debug('Episode not found')
                return

            existing_subtitles = epObj.subtitles

            try:
                epObj.download_subtitles()
            except Exception as e:
                sickrage.app.log.debug('Unable to find subtitles')
                sickrage.app.log.debug(str(e))
                return

            newSubtitles = frozenset(epObj.subtitles).difference(existing_subtitles)
            if newSubtitles:
                sickrage.app.log.info(
                    'Downloaded subtitles for S%02dE%02d in %s'
                    % (epToSub["season"], epToSub["episode"], ', '.join(newSubtitles)))

    self.amActive = False
def isMainThread():
    return mainThread is threading.currentThread()
def hello():
    print('Hello world! (%s)' % threading.currentThread())
    yield from asyncio.sleep(5)
    print('Hello again! (%s)' % threading.currentThread())
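# Generator-style coroutines like the one above predate async/await and are
# driven with the explicit event-loop API (a sketch assuming the Python
# 3.4-3.9 era of asyncio; it reuses hello() defined above):
import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(hello())
loop.close()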
def run(self):
    for x in range(0, 3):
        print('painting %s' % threading.currentThread())
        time.sleep(1)
def TerminateTask():
    if type(threading.currentThread()) == ooTask:
        threading.currentThread().terminate()
    else:
        print 'ERROR: TerminateTask called in Invalid Context.'
        sys.exit(-1)
    for curProvider in providers:
        threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

        logger.log(u"Searching for any new PROPER releases from " + curProvider.name)

        try:
            curPropers = curProvider.findPropers(search_date)
        except exceptions.AuthException, e:
            logger.log(u"Authentication error: " + ex(e), logger.ERROR)
            continue
        except Exception, e:
            logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
            logger.log(traceback.format_exc(), logger.DEBUG)
            continue
        finally:
            threading.currentThread().name = origThreadName

        # if they haven't been added by a different provider then add the proper to the list
        for x in curPropers:
            name = self._genericName(x.name)
            if not name in propers:
                logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                x.provider = curProvider
                propers[name] = x

    # take the list of unique propers and sort it by date
    sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)

    finalPropers = []
    for curProper in sortedPropers:
import time
import atexit
import select
import threading
import multiprocessing
import re

from . import conf, ircutils, log, registry
from .utils import minisix

startedAt = time.time()  # Just in case it doesn't get set later.
starting = False

mainThread = threading.currentThread()


def isMainThread():
    return mainThread is threading.currentThread()


threadsSpawned = 1  # Starts at one for the initial "thread."


class SupyThread(threading.Thread, object):
    def __init__(self, *args, **kwargs):
        global threadsSpawned
        threadsSpawned += 1
        super(SupyThread, self).__init__(*args, **kwargs)
        log.debug('Spawning thread %q.', self.getName())
def runThreads(numThreads, threadFunction, cleanupFunction=None, forwardException=True, threadChoice=False, startThreadMsg=True):
    threads = []

    kb.threadContinue = True
    kb.threadException = False

    if threadChoice and numThreads == 1 and not (kb.injection.data and not any(_ not in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) for _ in kb.injection.data)):
        while True:
            message = "please enter number of threads? [Enter for %d (current)] " % numThreads
            choice = readInput(message, default=str(numThreads))
            if choice:
                skipThreadCheck = False
                if choice.endswith('!'):
                    choice = choice[:-1]
                    skipThreadCheck = True
                if choice.isdigit():
                    if int(choice) > MAX_NUMBER_OF_THREADS and not skipThreadCheck:
                        errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
                        logger.critical(errMsg)
                    else:
                        conf.threads = numThreads = int(choice)
                        break

    if numThreads == 1:
        warnMsg = "running in a single-thread mode. This could take a while"
        logger.warn(warnMsg)

    try:
        if numThreads > 1:
            if startThreadMsg:
                infoMsg = "starting %d threads" % numThreads
                logger.info(infoMsg)
        else:
            threadFunction()
            return

        # Start the threads
        for numThread in xrange(numThreads):
            thread = threading.Thread(target=exceptionHandledFunction, name=str(numThread), args=[threadFunction])
            setDaemon(thread)
            try:
                thread.start()
            except Exception as ex:
                errMsg = "error occurred while starting new thread ('%s')" % ex
                logger.critical(errMsg)
                break
            threads.append(thread)

        # And wait for them to all finish
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.isAlive():
                    alive = True
                    time.sleep(0.1)
    except (KeyboardInterrupt, SqlmapUserQuitException) as ex:
        print()
        kb.prependFlag = False
        kb.threadContinue = False
        kb.threadException = True

        if numThreads > 1:
            logger.info("waiting for threads to finish%s" % (" (Ctrl+C was pressed)" if isinstance(ex, KeyboardInterrupt) else ""))
        try:
            while (threading.activeCount() > 1):
                pass
        except KeyboardInterrupt:
            raise SqlmapThreadException("user aborted (Ctrl+C was pressed multiple times)")

        if forwardException:
            raise
    except (SqlmapConnectionException, SqlmapValueException) as ex:
        print()
        kb.threadException = True
        logger.error("thread %s: '%s'" % (threading.currentThread().getName(), ex))

        if conf.get("verbose") > 1:
            traceback.print_exc()
    except:
        from lib.core.common import unhandledExceptionMessage

        print()
        kb.threadException = True
        errMsg = unhandledExceptionMessage()
        logger.error("thread %s: %s" % (threading.currentThread().getName(), errMsg))
        traceback.print_exc()
    finally:
        kb.bruteMode = False
        kb.threadContinue = True
        kb.threadException = False

        for lock in kb.locks.values():
            if lock.locked():
                try:
                    lock.release()
                except:
                    pass

        if conf.get("hashDB"):
            conf.hashDB.flush(True)

        if cleanupFunction:
            cleanupFunction()
def initThread(self, thread_index):
    """
    The ReqMgr expects the DBI to be contained in the Thread
    """
    myThread = threading.currentThread()
    # Get it from the DBFormatter superclass
    myThread.dbi = self.dbi
def philosopher(left, right):
    while True:
        with left:
            print(threading.currentThread(), 'grabbed left...')
            # time.sleep(0.01)
            with right:
                print(threading.currentThread(), 'eating')
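# A runnable driver for philosopher() above (the fork setup is an assumption,
# and the loop is bounded so the sketch terminates). Acquiring left-then-right
# at every seat can deadlock; the classic fix used here is a global lock
# order -- each thread grabs the lower-numbered fork first:
import threading

N = 5
forks = [threading.Lock() for _ in range(N)]

def philosopher(first, second, meals=3):
    for _ in range(meals):
        with first:
            with second:
                print(threading.currentThread().name, 'eating')

threads = []
for i in range(N):
    a, b = forks[i], forks[(i + 1) % N]
    # order the pair by index so there is no circular wait
    first, second = (a, b) if i < (i + 1) % N else (b, a)
    threads.append(threading.Thread(target=philosopher, args=(first, second),
                                    name='philosopher-%d' % i))
for t in threads:
    t.start()
for t in threads:
    t.join()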
def CheckForAbsences():
    global w_service

    # Build a service object for interacting with the API for thread w
    api_root = 'https://homesteadapi.appspot.com/_ah/api'
    api = 'deviceApi'
    version = 'v1'
    discovery_url = '%s/discovery/v1/apis/%s/%s/rest' % (api_root, api, version)
    w_service = build(api, version, discoveryServiceUrl=discovery_url)

    print "\n" + str(datetime.now().time()) + ' in CheckForAbsences function'
    logging.info('in CheckForAbsences function, Present keys are ' + str(PRESENT.keys()))
    print "\n Present Keys are: " + str(PRESENT.keys())

    # loop through the PRESENT dict
    for key in PRESENT.keys():
        print str(datetime.now().time()) + ' looping through PRESENT dict at key: ' + key + ' which is ' + allDevices[key]
        logging.info('looping through PRESENT dict at key: ' + key + ' which is ' + allDevices[key])
        rightnow = datetime.now()
        print 'now time is: ' + str(rightnow)
        lastNotified = PRESENT[key]
        print 'lastNotified time is ' + str(lastNotified)
        d = rightnow - lastNotified
        print str(datetime.now().time()) + " time delta (now - last notified on this device)= " + str(d) + " for key: " + key + " which is " + allDevices[key]

        if d > timedelta(minutes=(PollTime + 5)):
            print str(datetime.now().time()) + "---> key (" + allDevices[key] + ") hasn't been seen in " + str(PollTime) + " mins, and is being removed from the PRESENT dict : " + str(key)
            # SendEmail(key, ' is no longer being detected - is being removed from the PRESENT dict')
            logging.info('this mac has been removed from the PRESENT dict: ' + allDevices[key])

            # call the API with the patch method for this mac: home=false
            print str(datetime.now().time()) + "---> about to call API with id = " + key
            response = w_service.patch(macid=key, body={'home': 'false'}).execute()

            # now remove the key from the local dict
            del PRESENT[key]

            # adjust the servo
            if key == 'e8:50:8b:41:d1:33':
                os.system("sudo echo 0=0% > /dev/servoblaster")

    logging.debug('Exiting ' + threading.currentThread().getName())
    print "\n" + str(datetime.now().time()) + " Exiting " + threading.currentThread().getName()
    threading.Timer((PollTime * 60), CheckForAbsences).start()