def test_funcTimeout(self):
    '''
        test_funcTimeout - Check that func_timeout passes through the wrapped
          function's return value when it completes in time, raises
          FunctionTimedOut when it does not, and supports mixing positional
          and keyword arguments.
    '''
    sleepFn = getSleepLambda(1.25)
    expected = 5 + 13

    # Baseline: the unwrapped function must return the expected sum.
    calledAt = time.time()
    got = sleepFn(5, 13)
    finishedAt = time.time()
    assert got == expected , 'Did not get return from sleepFunction'

    # A 1.5s timeout is generous for a 1.25s function; no exception expected.
    try:
        got = func_timeout(1.5, sleepFn, args=(5, 13))
    except FunctionTimedOut as te:
        raise AssertionError('Got unexpected timeout at 1.5 second timeout for 1.25 second function: %s' %(str(te),))

    assert got == expected , 'Got wrong return from func_timeout.\nGot: %s\nExpected: %s\n' %(repr(got), repr(expected))

    # A 1s timeout is too short for a 1.25s function; expect FunctionTimedOut.
    sawTimeout = False
    try:
        got = func_timeout(1, sleepFn, args=(5, 13))
    except FunctionTimedOut as te:
        sawTimeout = True

    assert sawTimeout , 'Expected to get FunctionTimedOut exception for 1.25 sec function at 1s timeout'

    # Mixing args and kwargs must behave like the all-positional call.
    try:
        got = func_timeout(1.5, sleepFn, args=(5,), kwargs={ 'b' : 13})
    except FunctionTimedOut as te:
        raise AssertionError('Got unexpected timeout at 1.5 second timeout for 1.25 second function: %s' %(str(te), ))
    except Exception as e:
        raise AssertionError('Got unknown exception mixing args and kwargs: < %s > %s' %(e.__class__.__name__, str(e)))

    assert got == expected , 'Got wrong result when mixing args and kwargs'
def test_exception(self):
    '''
        test_exception - Validate the attributes carried by a raised
          FunctionTimedOut exception (msg, timedOutAfter, timedOutFunction,
          timedOutArgs, timedOutKwargs).
    '''
    sleepFn = getSleepLambda(.5)
    expected = 5 + 19

    caught = None
    wasRaised = False

    # A .3s timeout against a .5s function must raise FunctionTimedOut.
    begin = time.time()
    try:
        result = func_timeout(.3, sleepFn, args=(5, 19))
    except FunctionTimedOut as fte:
        caught = fte
        wasRaised = True
    finish = time.time()

    assert wasRaised , 'Expected to get exception'

    # The exception records the message, the elapsed timeout, and the
    # original call (function, args, kwargs).
    assert 'timed out after ' in caught.msg , 'Expected message to be constructed. Got: %s' %(repr(caught.msg), )
    assert round(caught.timedOutAfter, 1) == .3 , 'Expected timedOutAfter to equal timeout ( .3 ). Got: %s' %(str(round(caught.timedOutAfter, 1)), )
    assert caught.timedOutFunction == sleepFn , 'Expected timedOutFunction to equal sleepFunction'
    assert caught.timedOutArgs == (5, 19) , 'Expected args to equal (5, 19)'
    assert caught.timedOutKwargs == {} , 'Expected timedOutKwargs to equal {}'
def copy_pdf_to_hw_staging_dir(file_title_map, output_dir, doi, current_zipfile): """ we will attempt to generate a headless pdf and move this pdf to the ftp staging site. if this headless creation fails, we will raise an error to [email protected], and try to copy the original pdf file to ftp staging the function that we call to decapitate the pdf is contained in decapitatePDF.py. It manages some error handline, and tries to determine witheher the pdf cover content has been celanly removed. TODO: - elife - ianm - tidy up paths to temporary pdf decpitation paths """ for name in file_title_map.keys(): # we extract the pdf from the zipfile title = file_title_map[name] if title == "Merged PDF": print title new_name = gen_new_name_for_file(name, title, doi) file = current_zipfile.read(name) print new_name decap_name = "decap_" + new_name decap_name_plus_path = tmp_dir + "/" + decap_name # we save the pdf to a local file temp_file = open(decap_name_plus_path, "wb") temp_file.write(file) temp_file.close() decap_status = None try: # pass the local file path, and the path to a temp dir, to the decapitation script decap_status = func_timeout(PDF_DECAPITATE_TIMEOUT, decapitate_pdf_with_error_check, args=(decap_name_plus_path, decap_dir + "/")) except FunctionTimedOut: decap_status = False timeout_message = "PDF decap did not finish within {x} seconds".format( x=PDF_DECAPITATE_TIMEOUT) logger.error(timeout_message) if decap_status: # pass the local file path, and teh path to a temp dir, to the decapiation script try: move_file = open(decap_dir + "/" + decap_name, "rb").read() out_handler = open(output_dir + "/" + new_name, "wb") out_handler.write(move_file) out_handler.close() print "decapitaiton worked" except: # The decap may return true but the file does not exist for some reason # allow the transformation to continue in order to processes the supplementary files alert_message = "decap returned true but the pdf file is missing " + new_name logger.error(alert_message) else: 
# if the decapitation script has failed, we move the original pdf file move_file = file alert_message = "could not decapitate " + new_name logger.error(alert_message) alert_production(alert_message)
def upload_file(self, parent_file_id: str = 'root', path: str = None, upload_timeout: float = 10, retry_num: int = 3, force: bool = False, chunk_size: int = None, c: bool = False):
    """
    Upload a file to the drive, with rapid-upload, chunked upload and
    breakpoint-resume support.

    :param parent_file_id: id of the target (parent) directory
    :param path: local path of the file to upload
    :param upload_timeout: per-chunk upload timeout in seconds
    :param retry_num: retries per chunk before restarting the whole upload
    :param force: force-overwrite an existing remote file
    :param chunk_size: chunk size in bytes
    :param c: resume an interrupted upload (breakpoint resume)
    :return: the remote file_id on success, False on failure
    """
    path = Path(path)
    file_size = path.stat().st_size
    file_name = path.name
    self._chunk_size = chunk_size or self._chunk_size
    # content hash (sha1) identifies the file across tasks
    content_hash = get_sha1(path, self._chunk_size)
    # build the chunk descriptor list
    part_info_list = []
    count = int(file_size / self._chunk_size) + 1
    for i in range(count):
        part_info_list.append({"part_number": i + 1})
    json = {
        "size": file_size,
        "part_info_list": part_info_list,
        "content_hash": content_hash
    }
    path_list = []
    existed = False
    # a task for this content already exists
    if content_hash in GLOBAL_VAR.tasks:
        # task path may be a single path or an iterable of paths
        if isinstance(GLOBAL_VAR.tasks[content_hash].path, Iterable) and not isinstance(
                GLOBAL_VAR.tasks[content_hash].path, str):
            path_list.extend(GLOBAL_VAR.tasks[content_hash].path)
        else:
            path_list.append(GLOBAL_VAR.tasks[content_hash].path)
        path_list = list(
            set([str(Path(path_).absolute()) for path_ in path_list]))
        flag = False
        # does the file already exist in the cloud drive?
        if GLOBAL_VAR.tasks[content_hash].upload_time and path_list:
            existed = True
            for path_ in path_list:
                # is this local path already recorded?
                if path.absolute() == Path(path_).absolute():
                    flag = True
                    break
        # file exists remotely, path recorded, and resume requested
        if existed and flag and c:
            # already uploaded: skip
            self._print.upload_info(path, status=True, existed=True)
            GLOBAL_VAR.tasks[content_hash].path = path_list[0] if len(
                path_list) == 1 else path_list
            GLOBAL_VAR.file_set.add((content_hash, str(path.absolute())))
            return GLOBAL_VAR.tasks[content_hash].file_id
    # resume requested, task exists, but the file is not in the cloud yet
    if c and content_hash in GLOBAL_VAR.tasks and not existed:
        upload_id = GLOBAL_VAR.tasks[content_hash].upload_id
        file_id = GLOBAL_VAR.tasks[content_hash].file_id
        self._chunk_size = GLOBAL_VAR.tasks[content_hash].chunk_size
        part_number = GLOBAL_VAR.tasks[content_hash].part_number
        # fetch the upload-url list for the remaining chunks
        part_info_list = self.get_upload_url(path, upload_id, file_id,
                                             self._chunk_size, part_number)
        if not part_info_list:
            # stale task: restart the upload from scratch
            if str(path.absolute()) in path_list:
                # BUG FIX: `del path_list[str(...)]` raised TypeError (str
                # index on a list); remove the failed task's path instead.
                path_list.remove(str(path.absolute()))
                GLOBAL_VAR.tasks[content_hash].path = path_list[0] if len(
                    path_list) == 1 else path_list
            return self.upload_file(parent_file_id=parent_file_id,
                                    path=path,
                                    upload_timeout=upload_timeout,
                                    retry_num=retry_num,
                                    force=force,
                                    chunk_size=chunk_size,
                                    c=c)
        elif part_info_list == 'AlreadyExist.File':
            # slipped through the earlier existence check
            self._print.upload_info(path, status=True, existed=True)
            path_list.append(str(path.absolute()))
            path_list = list(set(path_list))
            GLOBAL_VAR.tasks[content_hash].path = path_list[0] if len(
                path_list) == 1 else path_list
            GLOBAL_VAR.file_set.add((content_hash, str(path.absolute())))
            return GLOBAL_VAR.tasks[content_hash].file_id
    else:
        # request creation of the remote file
        r = self.create_file(file_name=file_name,
                             parent_file_id=parent_file_id,
                             file_type=True,
                             json=json,
                             force=force)
        if 'rapid_upload' not in r.json():
            message = r.json()['message']
            logger.error(message)
            raise AliyunpanException(message)
        task_info = {
            'path': str(path.absolute()),
            'upload_id': None,
            'file_id': None,
            'chunk_size': self._chunk_size,
            'part_number': None
        }
        rapid_upload = r.json()['rapid_upload']
        # rapid upload succeeded (server already had the content)
        if rapid_upload:
            self._print.upload_info(path, status=True, rapid_upload=True)
            file_id = r.json()['file_id']
            task_info['file_id'] = file_id
            task_info['upload_time'] = time.time()
            GLOBAL_VAR.tasks[content_hash] = task_info
            GLOBAL_VAR.file_set.add((content_hash, str(path.absolute())))
            if existed:
                # same hash, different local path
                path_list.append(str(path.absolute()))
                path_list = list(set(path_list))
                GLOBAL_VAR.tasks[content_hash].path = path_list[0] if len(
                    path_list) == 1 else path_list
            return file_id
        else:
            upload_id = r.json()['upload_id']
            file_id = r.json()['file_id']
            part_info_list = r.json()['part_info_list']
            task_info['upload_id'] = upload_id
            task_info['file_id'] = file_id
            task_info['part_number'] = 1
            GLOBAL_VAR.tasks[content_hash] = task_info
    upload_bar = UploadBar(size=file_size)
    upload_bar.upload_info(path)
    upload_bar.update(refresh_line=False)
    logger.debug(
        f'upload_id: {upload_id}, file_id: {file_id}, part_info_list: {part_info_list}'
    )
    for i in part_info_list:
        part_number, upload_url = i['part_number'], i['upload_url']
        GLOBAL_VAR.tasks[content_hash].part_number = part_number
        # read just this chunk from disk
        with path.open('rb') as f:
            f.seek((part_number - 1) * self._chunk_size)
            chunk = f.read(self._chunk_size)
        if not chunk:
            break
        size = len(chunk)
        retry_count = 0
        while True:
            upload_bar.update(refresh_line=True)
            logger.debug(
                f'(upload_id={upload_id}, file_id={file_id}, size={size}): Upload part of {part_number} to {upload_url}.'
            )
            try:
                # perform the chunk upload, bounded by upload_timeout
                func_timeout.func_timeout(
                    upload_timeout,
                    lambda: self._req.put(upload_url, data=chunk))
                break
            except func_timeout.exceptions.FunctionTimedOut:
                # FIX: logger.warn is a deprecated alias of logger.warning
                logger.warning('Upload timeout.')
                # FIX: compare ints with ==, not identity (`is`)
                if retry_count == retry_num:
                    self._print.error_info(f'上传超时{retry_num}次,即将重新上传',
                                           refresh_line=True)
                    time.sleep(1)
                    return self.upload_file(parent_file_id=parent_file_id,
                                            path=path,
                                            upload_timeout=upload_timeout,
                                            retry_num=retry_num,
                                            force=force,
                                            chunk_size=chunk_size,
                                            c=c)
                self._print.error_info('上传超时', refresh_line=True)
                retry_count += 1
                time.sleep(1)
            except KeyboardInterrupt:
                raise
            except:
                # deliberately broad: any transport error is retried forever
                # until the chunk succeeds or the user interrupts
                logger.error(sys.exc_info())
                exc_type = sys.exc_info()[0]
                self._print.error_info(exc_type.__name__, refresh_line=True)
                time.sleep(1)
        self._print.wait_info(refresh_line=True)
        k = part_number / part_info_list[-1]['part_number']
        upload_bar.update(ratio=k, refresh_line=True)
    # all chunks uploaded: finalize the file
    url = 'https://api.aliyundrive.com/v2/file/complete'
    json = {
        "ignoreError": True,
        "drive_id": self.drive_id,
        "file_id": file_id,
        "upload_id": upload_id
    }
    r = self._req.post(url, json=json)
    if r.status_code == 200:
        upload_bar.upload_info(path,
                               status=True,
                               t=upload_bar.time,
                               average_speed=upload_bar.average_speed,
                               refresh_line=True)
        GLOBAL_VAR.tasks[content_hash].upload_time = time.time()
        GLOBAL_VAR.file_set.add((content_hash, str(path.absolute())))
        return r.json()['file_id']
    else:
        upload_bar.upload_info(path, status=False, refresh_line=True)
        return False
def play_game(PlayerWhite, PlayerBlack, max_time_per_move_white=None, max_time_per_move_black=None, time_control_white=None, time_control_black=None, seed=0, board=chess.Board(fen=chess.STARTING_FEN), draw_time_white=5, trash_talk_time_white=1, draw_time_black=5, trash_talk_time_black=1, verbose=False, write=None):
    """
    Initializes game.

    ----------
    Parameters
    ----------
    PlayerWhite: Class
        Player class corresponding to the white pieces.
    PlayerBlack: Class
        Player class corresponding to the black pieces.
    max_time_per_move_white: float (default: None)
        Max. thinking time (in sec) to be passed to white.
    max_time_per_move_black: float (default: None)
        Max. thinking time (in sec) to be passed to black.
    time_control_white: 2-tuple of floats (default: None)
        The time control for white, formatted as (x, y) where the time
        control is x minutes with a y second increment. This argument is
        distinct from max_time_per_move_white.
    time_control_black: 2-tuple of floats (default: None)
        The time control for black, formatted as (x, y) where the time
        control is x minutes with a y second increment. This argument is
        distinct from max_time_per_move_black.
    seed: int (default: 0)
        Random seed used to initialize state.
    board: Board (default: chess.Board())
        Initial board configuration (the default is just the normal board).
    draw_time_white: float (default: 5)
        Time in seconds that a player is allowed to take to decide to offer
        a draw or to accept one.
    trash_talk_time_white: float (default: 1)
        Time in seconds that a player is allowed to take to produce trash
        talk.
    draw_time_black: float (default: 5)
        Time in seconds that a player is allowed to take to decide to offer
        a draw or to accept one.
    trash_talk_time_black: float (default: 1)
        Time in seconds that a player is allowed to take to produce trash
        talk.
    verbose: bool (default: False)
        If True, print out the board at each step for diagnostic purposes.
    write: str (default: None)
        If specified, write the game into a file with the filename specified
        here.
    """
    # NOTE(review): `board` is a mutable default argument; it is deepcopied
    # below before mutation, so the shared default is never modified, but the
    # pattern is fragile.
    np.random.seed(seed)
    board = copy.deepcopy(board)
    init_fen = board.fen()
    # Sort the players based on which one is going first in this board configuration
    white = PlayerWhite(side='white', board=copy.copy(board), max_time_per_move=max_time_per_move_white, time_control=time_control_white)
    black = PlayerBlack(side='black', board=copy.copy(board), max_time_per_move=max_time_per_move_black, time_control=time_control_black)
    # "Player 0" is whoever moves first in this position; all *0 / *1
    # variables below follow move order, not color.
    if board.turn:
        players = [white, black]
        player_names = ['white', 'black']
        player_bools = [True, False]
        time_control0 = time_control_white
        max_time_per_move0 = max_time_per_move_white
        time_control1 = time_control_black
        max_time_per_move1 = max_time_per_move_black
        draw_time0 = draw_time_white
        draw_time1 = draw_time_black
        trash_talk_time0 = trash_talk_time_white
        trash_talk_time1 = trash_talk_time_black
    else:
        players = [black, white]
        player_names = ['black', 'white']
        player_bools = [False, True]
        time_control0 = time_control_black
        max_time_per_move0 = max_time_per_move_black
        time_control1 = time_control_white
        max_time_per_move1 = max_time_per_move_white
        draw_time0 = draw_time_black
        draw_time1 = draw_time_white
        trash_talk_time0 = trash_talk_time_black
        trash_talk_time1 = trash_talk_time_white
    # Convert (minutes, increment-seconds) time controls to a running clock.
    if time_control0 is not None:
        total_time0 = 60 * time_control0[0]
        increment0 = time_control0[1]
    if time_control1 is not None:
        total_time1 = 60 * time_control1[0]
        increment1 = time_control1[1]
    # Start game
    # NOTE(review): player0_times / player1_times are never appended to or
    # read anywhere in this function — apparently dead state.
    player0_times = []
    player1_times = []
    first_turn = True
    move_log = ''
    while True:
        ########################################################################
        # PLAYER 0
        ########################################################################
        # Player 0 time control: the per-move timeout is the tighter of the
        # remaining clock and the per-move cap, when both are set.
        if time_control0 is not None and max_time_per_move0 is not None:
            timeout0 = np.min([total_time0, max_time_per_move0])
        elif time_control0 is not None:
            timeout0 = total_time0
        elif max_time_per_move0 is not None:
            timeout0 = max_time_per_move0
        else:
            timeout0 = None
        # Construct player 0 move. On the first turn there is no opponent
        # move to relay; afterwards turn0 closes over move1 from the loop.
        if first_turn:
            def turn0():
                move0 = players[0].make_move()
                return move0
            first_turn = False
        else:
            if timeout0 is not None:
                def turn0():
                    players[0].receive_move(move1, time_left=timeout0)
                    move0 = players[0].make_move()
                    return move0
            else:
                def turn0():
                    players[0].receive_move(move1, time_left=None)
                    move0 = players[0].make_move()
                    return move0
        # Attempt to perform player 0 move
        try:
            start = time.time()
            move0 = func_timeout(timeout=timeout0, func=turn0)
            time0 = time.time() - start
            if timeout0 is not None:
                # if python delay finishes up but external code runs over, correct time
                time0 = np.min([time0, timeout0])
            # Log move
            move_log += f'{player_names[0]} t={time0} | move:{move0}\n'
        except FunctionTimedOut:
            # runs out of time
            if board.has_insufficient_material(player_bools[1]):
                game_result = f'Draw:timeout with insufficient material'
            else:
                game_result = f'Win {player_names[1]}:timeout'
            break
        except:
            # another error is thrown
            game_result = f'Win {player_names[1]}:runtime error'
            break
        # Update player 0 time control
        if time_control0 is not None:
            total_time0 -= time0
            total_time0 += increment0
        # Attempt to offer draw
        draw_request = False
        try:
            if func_timeout(timeout=draw_time0, func=players[0].request_draw):
                move_log += f'{player_names[0]} | offers draw\n'
                draw_request = True
        except FunctionTimedOut:
            move_log += f'{player_names[0]} | draw solicitation timed out\n'
        except:
            move_log += f'{player_names[0]} | draw solicitation threw error\n'
        if draw_request:
            try:
                if func_timeout(timeout=draw_time1, func=players[1].respond_draw):
                    move_log += f'{player_names[1]} | accepts draw\n'
                    game_result = 'Draw:agreement'
                    break
                else:
                    move_log += f'{player_names[1]} | declines draw\n'
            except FunctionTimedOut:
                move_log += f'{player_names[1]} | draw response timed out\n'
            except:
                move_log += f'{player_names[1]} | draw response threw error\n'
        # Solicit trash talk
        trash_talk = None
        try:
            trash_talk = func_timeout(timeout=trash_talk_time0, func=players[0].solicit_trash_talk)
            if trash_talk is not None:
                if isinstance(trash_talk, str):
                    move_log += f'{player_names[0]} | says:{trash_talk}\n'
                else:
                    move_log += f'{player_names[0]} | trash talk solicitation gave invalid type\n'
        except FunctionTimedOut:
            move_log += f'{player_names[0]} | trash talk solicitation timed out\n'
        except:
            move_log += f'{player_names[0]} | trash talk solicitation threw error\n'
        if trash_talk is not None:
            try:
                func_timeout(timeout=trash_talk_time1, func=players[1].receive_trash_talk, args=(trash_talk,))
            except FunctionTimedOut:
                move_log += f'{player_names[1]} | trash talk reception timed out\n'
            except:
                move_log += f'{player_names[1]} | trash talk reception threw error\n'
        # Attempt to push move.
        # NOTE(review): `legality` is computed but never read — the legality
        # test below re-parses and re-checks the move instead.
        legality = False
        try:
            legality = board.is_legal(chess.Move.from_uci(move0))
        except:
            # invalid move
            game_result = f'Win {player_names[1]}:invalid move'
            break
        if not board.is_legal(chess.Move.from_uci(move0)):
            # illegal move
            game_result = f'Win {player_names[1]}:illegal move'
            break
        board.push(chess.Move.from_uci(move0))
        # Check if game ends naturally
        if board.is_game_over():
            if board.is_checkmate():
                game_result = f'Win {player_names[0]}:checkmate'
            if board.is_stalemate():
                game_result = f'Draw:stalemate'
            if board.is_insufficient_material():
                game_result = f'Draw:insufficient material'
            if board.can_claim_threefold_repetition():
                game_result = f'Draw:threefold repetition'
            if board.can_claim_fifty_moves():
                game_result = f'Draw:fifty-move rule'
            break
        if verbose:
            print(f'white: {time0} s')
            print(board)
            print('\n')
        ########################################################################
        # PLAYER 1
        ########################################################################
        # Player 1 time control
        if time_control1 is not None and max_time_per_move1 is not None:
            timeout1 = np.min([total_time1, max_time_per_move1])
        elif time_control1 is not None:
            timeout1 = total_time1
        elif max_time_per_move1 is not None:
            timeout1 = max_time_per_move1
        else:
            timeout1 = None
        # Construct player 1 move (turn1 closes over move0 from this iteration)
        if timeout1 is not None:
            def turn1():
                players[1].receive_move(move0, time_left=timeout1)
                move1 = players[1].make_move()
                return move1
        else:
            def turn1():
                players[1].receive_move(move0, time_left=None)
                move1 = players[1].make_move()
                return move1
        # Attempt to perform player 1 move
        try:
            start = time.time()
            move1 = func_timeout(timeout=timeout1, func=turn1)
            time1 = time.time() - start
            if timeout1 is not None:
                # if python delay finishes up but external code runs over, correct time
                time1 = np.min([time1, timeout1])
            # Log move
            move_log += f'{player_names[1]} t={time1} | move:{move1}\n'
        except FunctionTimedOut:
            # runs out of time
            if board.has_insufficient_material(player_bools[0]):
                game_result = f'Draw:timeout with insufficient material'
            else:
                game_result = f'Win {player_names[0]}:timeout'
            break
        except:
            # another error is thrown
            game_result = f'Win {player_names[0]}:runtime error'
            break
        # Update player 1 time control
        if time_control1 is not None:
            total_time1 -= time1
            total_time1 += increment1
        # Attempt to offer draw
        draw_request = False
        try:
            if func_timeout(timeout=draw_time1, func=players[1].request_draw):
                move_log += f'{player_names[1]} | offers draw\n'
                draw_request = True
        except FunctionTimedOut:
            move_log += f'{player_names[1]} | draw solicitation timed out\n'
        except Exception as e:
            print(e)
            move_log += f'{player_names[1]} | draw solicitation threw error\n'
        if draw_request:
            try:
                if func_timeout(timeout=draw_time0, func=players[0].respond_draw):
                    move_log += f'{player_names[0]} | accepts draw\n'
                    game_result = 'Draw:agreement'
                    break
                else:
                    move_log += f'{player_names[0]} | declines draw\n'
            except FunctionTimedOut:
                move_log += f'{player_names[0]} | draw response timed out\n'
            except:
                move_log += f'{player_names[0]} | draw response threw error\n'
        # Solicit trash talk
        trash_talk = None
        try:
            trash_talk = func_timeout(timeout=trash_talk_time1, func=players[1].solicit_trash_talk)
            if trash_talk is not None:
                if isinstance(trash_talk, str):
                    move_log += f'{player_names[1]} | says:{trash_talk}\n'
                else:
                    move_log += f'{player_names[1]} | trash talk solicitation gave invalid type\n'
        except FunctionTimedOut:
            move_log += f'{player_names[1]} | trash talk solicitation timed out\n'
        except:
            move_log += f'{player_names[1]} | trash talk solicitation threw error\n'
        if trash_talk is not None:
            try:
                func_timeout(timeout=trash_talk_time0, func=players[0].receive_trash_talk, args=(trash_talk,))
            except FunctionTimedOut:
                move_log += f'{player_names[0]} | trash talk reception timed out\n'
            except:
                move_log += f'{player_names[0]} | trash talk reception threw error\n'
        # Attempt to push move (same unused-`legality` pattern as player 0)
        legality = False
        try:
            legality = board.is_legal(chess.Move.from_uci(move1))
        except:
            # invalid move
            game_result = f'Win {player_names[0]}:invalid move'
            break
        if not board.is_legal(chess.Move.from_uci(move1)):
            # illegal move
            game_result = f'Win {player_names[0]}:illegal move'
            break
        board.push(chess.Move.from_uci(move1))
        # Check if game ends naturally
        if board.is_game_over():
            if board.is_checkmate():
                game_result = f'Win {player_names[1]}:checkmate'
            if board.is_stalemate():
                game_result = f'Draw:stalemate'
            if board.is_insufficient_material():
                game_result = f'Draw:insufficient material'
            if board.can_claim_threefold_repetition():
                game_result = f'Draw:threefold repetition'
            if board.can_claim_fifty_moves():
                game_result = f'Draw:fifty-move rule'
            break
        if verbose:
            print(f'black: {time1} s')
            print(board)
            print('\n')
    # Game over: report and optionally persist the record.
    if verbose:
        print(board)
        print('\n')
        print(game_result)
        print(move_log)
    if write is not None:
        text = '--Setup--\n'
        text += f'fname:{write}\n'
        text += f'white:{white.name}\n'
        text += f'black:{black.name}\n'
        text += f'init_fen:{init_fen}\n'
        text += f'game_result:{game_result}\n'
        text += '--Time Control--\n'
        text += f'max_time_per_move_white:{max_time_per_move_white}\n'
        text += f'max_time_per_move_black:{max_time_per_move_black}\n'
        text += f'time_control_white:{time_control_white}\n'
        text += f'time_control_black:{time_control_black}\n'
        text += f'draw_time_white:{draw_time_white}\n'
        text += f'draw_time_black:{draw_time_black}\n'
        text += f'trash_talk_time_white:{trash_talk_time_white}\n'
        text += f'trash_talk_time_black:{trash_talk_time_black}\n'
        text += '--Game--\n'
        text += move_log
        f = open(write, 'w')
        f.write(text)
        f.close()
def runChess(game_nos, time_limits, MAX_TIME=300):
    """
    Run one game between two student AIs.

    Input: game_nos - the two players' student ids (white, black);
           time_limits - per-step soft time limit in seconds;
           MAX_TIME - hard per-move timeout in seconds.
    Output: the game result and its intermediate record, e.g.
        result = {'white':xxx ,'black':xxx,'winStu':xxx,'start_time':xxx,'end_time':xxx,'steps':[],'time_by_step':[],'OutOFMAXTIME':True,'OUTOFKERNELTIME':True}
    Process:
        1. Import each AI module by student id
        2. Build the shared in-game state
        3. Record intermediate results
        4. Record timeouts

    NOTE(review): the dict sketch above is the original author's; the keys
    actually written below are 'win_Stu', 'WinSituation', 'steps',
    'start_time', 'end_time' — confirm against callers.
    """
    result = {}
    result['white'] = game_nos[0]
    result['black'] = game_nos[1]
    # Build the shared in-game state
    list_white = []  # AI_WHITE's stones
    list_black = []  # AI_BLACK's stones
    list_summary = []  # all stones played so far
    steps_times = []
    list_all = []
    out_time_White = 0
    out_time_Black = 0
    # Every cell of the (COLUMN+1) x (ROW+1) board
    for i in range(COLUMN + 1):
        for j in range(ROW + 1):
            list_all.append((i, j))
    change = 0  # move counter; even = white to move, odd = black
    g = 0  # game-over flag
    # Dynamically import each AI module by student id
    ai_white = importlib.import_module('ais.' + game_nos[0]).ai
    ai_black = importlib.import_module('ais.' + game_nos[1]).ai
    result['start_time'] = time.time()
    result['WinSituation'] = 'StrengthRolling'
    while g == 0:
        step_start = time.time()
        # Cells still empty this turn
        blank_list = list(set(list_all).difference(set(list_summary)))
        try:
            pos = blank_list[0]
            if change % 2 == 0:
                # White's move, bounded by the hard timeout
                pos = func_timeout(MAX_TIME, ai_white,
                                   (list_white, list_black, list_all))
                list_white.append(pos)
                list_summary.append(pos)
            else:
                # Black's move, bounded by the hard timeout
                pos = func_timeout(MAX_TIME, ai_black,
                                   (list_black, list_white, list_all))
                list_black.append(pos)
                list_summary.append(pos)
            # An occupied/off-board position is signalled by deliberately
            # raising ZeroDivisionError, handled below as 'PosInvaild'.
            if pos not in blank_list:
                raise ZeroDivisionError
        except FunctionTimedOut:
            # The mover ran out of the hard per-move budget; opponent wins.
            print('[{}FunctionTimedOut] WINER={}'.format(
                result['white'] if change % 2 == 0 else result['black'],
                result['white'] if change % 2 != 0 else result['black']))
            g = 1
            result['win_Stu'] = result['white'] if change % 2 != 0 else result[
                'black']
            result['WinSituation'] = 'OUTOFTIME'
        except ZeroDivisionError:
            # Invalid position played; the mover forfeits.
            print('[PosInvaild]')
            g = 1
            result['win_Stu'] = result['white'] if change % 2 != 0 else result[
                'black']
            result['WinSituation'] = 'PosInvaild'
        except BaseException:
            # Any other crash inside the student AI also forfeits the game.
            print('[BaseException]')
            g = 1
            result['win_Stu'] = result['white'] if change % 2 != 0 else result[
                'black']
            result['WinSituation'] = 'BaseException'
        step_end = time.time()
        step_spend = step_end - step_start
        # Record the time taken for this step
        steps_times.append(step_spend)
        # Count soft-limit overruns; three of them loses the game below.
        if step_spend > time_limits:
            if change % 2 == 0:
                out_time_White += 1
            else:
                out_time_Black += 1
        if out_time_Black > 2 or out_time_White > 2 or game_win(
                list_white) or game_win(list_black) or len(
                    list_summary) == len(list_all):
            print('game over')
            if out_time_Black > 2 or game_win(list_white):
                result['win_Stu'] = result['white']
            elif out_time_White > 2 or game_win(list_black):
                result['win_Stu'] = result['black']
            elif len(list_summary) == len(list_all):
                # Board full with no winner: a draw
                result['win_Stu'] = 'NOONE'
            g = 1
        change += 1
    result['end_time'] = time.time()
    result['steps'] = list_summary
    return result
Newer tests for all features found in "tests" directory.
'''
# NOTE(review): the line above closes a module docstring whose opening
# triple-quote is outside this chunk.

from func_timeout import func_timeout, FunctionTimedOut

import time
import sys


def doit(howmany):
    # Sleeps long enough that a 1-second timeout fires but a 4-second one
    # does not, then returns a value derived from its argument.
    time.sleep(2)
    return 17 + howmany


if __name__ == '__main__':
    # Generous timeout (4s > 2s sleep): the wrapped return value comes through.
    print ( "Should get return value of 23:" )
    print ( "\tGot Return: %s\n" %(str(func_timeout(4, doit, args=(6,))),) )

    # Short timeout (1s < 2s sleep): FunctionTimedOut is raised; keep the
    # exception object so it can be retried below.
    print ( "\nShould time out (exception):" )
    myException = None
    try:
        print ("\tGot Return: %s\n" %(str(func_timeout(1, doit, kwargs={'howmany' : 16})),))
    except FunctionTimedOut as e:
        sys.stderr.write('\tGot Exception: %s\n' %(str(e),))
        myException = e
        pass

    # FunctionTimedOut.retry(timeout) re-runs the original call with a new
    # timeout value.
    print ( "\nRetrying with longer timeout, should get 16+17=33:" )
    if myException is not None:
        print ( "\nGot: %s\n" %( str(myException.retry(2.5)), ) )
    else:
        sys.stderr.write('Did not get exception before?\n')
def noteeval(code, resetpickle, picklefile, workingdir):
    """Execute one notebook note's code and capture its output.

    The note's environment dict is persisted to `picklefile` (via dill) so
    later notes see earlier notes' variables. Execution is capped at 5
    seconds via func_timeout to stop infinite loops.

    :param code: source text of the note to exec
    :param resetpickle: if True, discard any previously pickled environment
    :param picklefile: path of the pickled environment ('' disables persistence)
    :param workingdir: directory to chdir into while the note runs
    :return: (stdout_text, stderr_or_error_text, runcount)

    SECURITY NOTE: this exec()s arbitrary note code and dill-loads the pickle
    file — both must only ever come from trusted sources.
    """
    # save current dir and change to this notebook's dir
    oldwd = os.getcwd()
    os.chdir(workingdir)

    # Load the environment to be used (this allows persistence, so note 2
    # gets note 1's vars etc.)
    environment = {'__runcount': 0}
    if not resetpickle and picklefile != '':
        try:
            with open(picklefile, "rb") as pfile:
                environment = dill.load(pfile)
        except IOError:
            # No pickle yet: start fresh
            environment = {'__runcount': 0}

    # redirect stdout and stderr so the note's prints are captured
    try:
        old_stdout = sys.stdout
        redir_out = sys.stdout = StringIO()
        old_stderr = sys.stderr
        redir_err = sys.stderr = StringIO()
        # Using https://pypi.org/project/func-timeout/ to timeout infinite loops
        # Look at os.setuid(uid) for using a different user (needs a pool, and
        # copying files...) https://docs.python.org/3/library/os.html#os.setuid
        if environment['__runcount'] == 0:
            # Force a headless matplotlib backend on the first run
            exec("import matplotlib\nmatplotlib.use('Agg')\n", environment)
        try:
            func_timeout(5, exec, args=(code, environment))
        except FunctionTimedOut:
            # BUG FIX: the original returned here without restoring
            # sys.stdout/sys.stderr or the working directory, leaving the
            # process's streams pointing at dead StringIO buffers.
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            os.chdir(oldwd)
            output = ""
            errors = "Error: Timed out. (Do you have an infinite loop in your code?)"
            return (output, errors, environment['__runcount'])
        sys.stdout = old_stdout
        sys.stderr = old_stderr
    except Exception as e:
        # tidy up if there's a problem
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        output = ""
        errors = "Error: " + str(e)
        os.chdir(oldwd)
        return (output, errors, environment['__runcount'])

    # get the note's output (and errors)
    output = str(redir_out.getvalue())
    errors = str(redir_err.getvalue())
    environment['__runcount'] = environment['__runcount'] + 1

    # save the environment
    if picklefile != '':
        with open(picklefile, "wb") as pfile:
            dill.dump(environment, pfile)

    # restore the directory and return the output (or error message.)
    os.chdir(oldwd)
    return (output, errors, environment['__runcount'])
# NOTE(review): the next four statements are the tail of convert_pdf_to_txt();
# the function's def line is above this chunk.
    fp.close()
    device.close()
    retstr.close()
    return text


#file paths
file_name = '4'
file_path = r'M:\PDF Data Extraction\Vet Record Examples (1)\Vet Record Examples\{}.pdf'.format(
    file_name)

#date and time (timestamp intended for the commented-out output filename below)
now = dt.now()
dt_string = now.strftime("%Y%m%d%H%M")

#timeout: bound the PDF-to-text conversion to 5 seconds
# NOTE(review): the bare `except` also hides real conversion errors (not
# only timeouts) behind the timeout message — consider catching
# to.FunctionTimedOut specifically.
try:
    text = to.func_timeout(5, convert_pdf_to_txt, args=(file_path, ))
    print(text)
except:
    print(
        "Conversion could not complete within 5 seconds and was terminated.\n")

#print(convert_pdf_to_txt(file_path))

# create and write to text file
#f = open(r'M:\PDF Data Extraction\Vet Record Examples (1)\Vet Record Text Output\{}_{}.txt'.format(file_name, dt_string), 'w+')
#text = convert_pdf_to_txt(file_path)
#f.write(text)
#f.close()
#print(text)
def simulate_with_timeout(experiment_id, policy_name, throughputs_file,
                          cluster_spec, lam, seed, interval, jobs_to_complete,
                          fixed_job_duration, solver, generate_multi_gpu_jobs,
                          generate_multi_priority_jobs, simulate_steady_state,
                          log_dir, timeout, verbose, checkpoint_threshold,
                          profiling_percentage, num_reference_models,
                          num_gpus_per_server, ideal):
    """Run one scheduler simulation, optionally bounded by a wall-clock timeout.

    All simulator stdout/stderr is redirected into a per-lambda log file in
    log_dir. If `timeout` is not None and the simulation exceeds it, the run
    is abandoned and (inf, 1.0) is reported.

    Returns a tuple (average_jct, utilization).
    """
    lam_str = 'lambda=%f.log' % (lam)
    checkpoint_file = None
    if checkpoint_threshold is not None:
        checkpoint_file = os.path.join(log_dir, 'lambda=%f.pickle' % lam)
    cluster_spec_str = 'v100:%d|p100:%d|k80:%d' % (
        cluster_spec['v100'], cluster_spec['p100'], cluster_spec['k80'])
    policy = utils.get_policy(policy_name, solver=solver, seed=seed)
    if verbose:
        current_time = datetime.datetime.now()
        print('[%s] [Experiment ID: %2d] '
              'Configuration: cluster_spec=%s, policy=%s, '
              'seed=%d, lam=%f, '
              'profiling_percentage=%f, '
              'num_reference_models=%d' % (current_time, experiment_id,
                                           cluster_spec_str, policy.name,
                                           seed, lam, profiling_percentage,
                                           num_reference_models))
    # Everything the simulator prints goes to the per-lambda log file.
    with open(os.path.join(log_dir, lam_str), 'w') as f:
        with contextlib.redirect_stderr(f), contextlib.redirect_stdout(f):
            sched = scheduler.Scheduler(
                policy,
                throughputs_file=throughputs_file,
                seed=seed,
                time_per_iteration=interval,
                simulate=True,
                profiling_percentage=profiling_percentage,
                num_reference_models=num_reference_models)
            if timeout is None:
                # Unbounded run
                sched.simulate(
                    cluster_spec,
                    lam=lam,
                    jobs_to_complete=jobs_to_complete,
                    fixed_job_duration=fixed_job_duration,
                    generate_multi_gpu_jobs=generate_multi_gpu_jobs,
                    generate_multi_priority_jobs=generate_multi_priority_jobs,
                    simulate_steady_state=simulate_steady_state,
                    checkpoint_file=checkpoint_file,
                    checkpoint_threshold=checkpoint_threshold,
                    num_gpus_per_server=num_gpus_per_server,
                    ideal=ideal)
                average_jct = sched.get_average_jct(jobs_to_complete)
                utilization = 1.0
                if not ideal:
                    utilization = sched.get_cluster_utilization()
            else:
                # Bounded run: abandon the simulation if it runs too long.
                try:
                    func_timeout(
                        timeout,
                        sched.simulate,
                        args=(cluster_spec, ),
                        kwargs={
                            'lam': lam,
                            'jobs_to_complete': jobs_to_complete,
                            'fixed_job_duration': fixed_job_duration,
                            'generate_multi_gpu_jobs': generate_multi_gpu_jobs,
                            'generate_multi_priority_jobs':
                                generate_multi_priority_jobs,
                            'simulate_steady_state': simulate_steady_state,
                            'checkpoint_file': checkpoint_file,
                            'checkpoint_threshold': checkpoint_threshold,
                            'num_gpus_per_server': num_gpus_per_server,
                            'ideal': ideal
                        })
                    average_jct = sched.get_average_jct(jobs_to_complete)
                    utilization = sched.get_cluster_utilization()
                except FunctionTimedOut:
                    # Timed-out runs report sentinel metrics.
                    average_jct = float('inf')
                    utilization = 1.0
    if verbose:
        current_time = datetime.datetime.now()
        print('[%s] [Experiment ID: %2d] '
              'Results: average JCT=%f, utilization=%f' %
              (current_time, experiment_id, average_jct, utilization))
    sched.shutdown()
    return average_jct, utilization
def process_messages(messages, dirName):
    """Store new messages in the database and download media into dirName.

    Messages are handled newest-first.  Media downloads are bounded to 5
    seconds each via func_timeout; after more than 10 consecutive skipped
    downloads the loop stops entirely.
    """
    # Newest messages first.
    reverse_messages = sorted(messages, key=lambda x: x.timestamp, reverse=True)
    logging.info("Loaded message count: " + str(len(reverse_messages)))
    # Counts consecutive failed/timed-out media downloads.
    skip_count = 0
    for message in reverse_messages:
        logging.info('class: ' + str(message.__class__.__name__))
        logging.info('message: ' + str(message))
        logging.info('id: ' + str(message.id))
        logging.info('type: ' + str(message.type))
        logging.info('timestamp: ' + str(message.timestamp))
        logging.info('chat_id: ' + str(message.chat_id))
        logging.info('sender: ' + str(message.sender))
        logging.info('sender.id: ' + str(message.sender.id))
        logging.info('sender.safe_name: ' + str(message.sender.get_safe_name()))
        # Skip messages that were already stored on a previous run.
        with db_conn.cursor() as cur:
            cur.execute(check_if_processed, (str(message.id), ))
            result_set = cur.fetchone()
        if result_set is None:
            if message.type == 'chat':
                logging.info('-- Chat')
                logging.info('safe_content: ' + str(message.safe_content))
                logging.info('content: ' + str(message.content))
                # Insert the message row, then the chat body keyed to it.
                with db_conn.cursor() as cur:
                    cur.execute(insert_to_messages,
                                (str(message.id), str(message.type),
                                 str(message.timestamp),
                                 str(message.chat_id['user'][:12]),
                                 str(message.sender.get_safe_name()),
                                 str(mobile_number)))
                    message_id = cur.fetchone()[0]
                    cur.execute(insert_to_chats,
                                (str(message.content), int(message_id)))
                    chat_id = cur.fetchone()[0]
                    db_conn.commit()
            elif message.type == 'image' or message.type == 'video':
                logging.info('-- Image or Video')
                logging.info('filename: ' + str(message.filename))
                logging.info('size: ' + str(message.size))
                logging.info('mime: ' + str(message.mime))
                logging.info('caption: ' + str(message.caption))
                logging.info('media_key: ' + str(message.media_key))
                logging.info('client_url: ' + str(message.client_url))
                # Stored file name gets the sender's identifier as a suffix.
                file_split = os.path.splitext(str(message.filename))
                new_file_name = file_split[0] + f"_{iden_number}" + file_split[
                    1]
                try:
                    if skip_count <= 10:
                        # Give each download at most 5 seconds.
                        downloaded_file = func_timeout(5, message.save_media,
                                                       args=(dirName, True))
                        os.rename(downloaded_file,
                                  os.path.join(dirName, new_file_name))
                        logging.info(f"Photo downloaded to {dirName} folder")
                        status = 'downloaded'
                        skip_count = 0
                    else:
                        logging.info(
                            "Consecutive skipped photo count reached to the thershold 10, will not try to download any more from this sender"
                        )
                        break
                except (Exception, FunctionTimedOut) as ex:
                    # Record the failure but keep processing; the DB row
                    # below is still written with status 'skipped'.
                    logging.exception("Cannot download photo, skipping")
                    status = 'skipped'
                    skip_count = skip_count + 1
                with db_conn.cursor() as cur:
                    cur.execute(insert_to_messages,
                                (str(message.id), str(message.type),
                                 str(message.timestamp),
                                 str(message.chat_id['user'][:12]),
                                 str(message.sender.get_safe_name()),
                                 str(mobile_number)))
                    message_id = cur.fetchone()[0]
                    cur.execute(insert_to_downloads,
                                (new_file_name, status, None, int(message_id),
                                 int(message.size), str(message.mime),
                                 str(message.caption), str(message.media_key)))
                    download_id = cur.fetchone()[0]
                    db_conn.commit()
            else:
                logging.info('-- Other')
        else:
            process_time = result_set[0]
            logging.info("Already processed on " + str(process_time))
def upload_file(access_token, drive_id, parent_file_id='root', path=None, timeout=10, retry_num=3):
    """Upload a file to Aliyun Drive in 5 MB chunks.

    :param access_token: token sent in the ``Authorization`` header
    :param drive_id: target drive id
    :param parent_file_id: id of the directory to upload into
    :param path: local path of the file to upload
    :param timeout: per-chunk upload timeout in seconds
    :param retry_num: timeouts tolerated per chunk before the whole upload
        is restarted from scratch
    :return: None
    """
    split_size = 5242880  # fixed 5 MB chunk size required by the API (do not change)
    file_size = os.path.getsize(path)
    _, file_name = os.path.split(path)
    # Compute the file's SHA-1 so the server can deduplicate ("rapid upload").
    with open(path, 'rb') as f:
        sha1 = hashlib.sha1()
        count = 0
        while True:
            chunk = f.read(split_size)
            if not chunk:
                break
            count += 1
            sha1.update(chunk)
        content_hash = sha1.hexdigest()
    # One descriptor per chunk.
    part_info_list = []
    for i in range(count):
        part_info_list.append({"part_number": i + 1})
    json = {
        "name": file_name,
        "type": "file",
        "size": file_size,
        "drive_id": drive_id,
        "parent_file_id": parent_file_id,
        "part_info_list": part_info_list,
        "content_hash_name": "sha1",
        "content_hash": content_hash,
        # Auto-rename when a file with the same name already exists.
        "check_name_mode": "auto_rename"
    }
    # Ask the server to create the file; it returns per-chunk upload URLs.
    url = 'https://api.aliyundrive.com/v2/file/create'
    headers = {'User-Agent': None, 'Authorization': access_token}
    r = requests.post(url, headers=headers, json=json)
    # If the server already has content with this hash, nothing is uploaded.
    rapid_upload = r.json()['rapid_upload']
    if rapid_upload:
        print('快速上传成功')
    else:
        upload_id = r.json()['upload_id']
        file_id = r.json()['file_id']
        part_info_list = r.json()['part_info_list']
        # NOTE(review): part_info_list_new is never populated before the
        # "complete" call; the request sets "ignoreError": True, so this is
        # presumably tolerated by the API — verify against the API docs.
        part_info_list_new = []
        total_time = 0
        count_size = 0
        k = 0
        upload_info = f'\r上传中... [{"*" * 10}] %0'
        for i in part_info_list:
            part_number, upload_url = i['part_number'], i['upload_url']
            with open(path, 'rb') as f:
                f.seek((part_number - 1) * split_size)
                chunk = f.read(split_size)
            if not chunk:
                break
            size = len(chunk)
            retry_count = 0
            start_time = time.time()
            while True:
                if upload_info:
                    sys.stdout.write(upload_info)
                try:
                    # Upload one chunk, bounded by `timeout` seconds.
                    func_timeout.func_timeout(
                        timeout,
                        lambda: requests.put(upload_url, headers=headers,
                                             data=chunk, timeout=timeout))
                    break
                except requests.exceptions.RequestException:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    sys.stdout.write(f'\rError:{exc_type.__name__}')
                    time.sleep(1)
                except func_timeout.exceptions.FunctionTimedOut:
                    # BUG FIX: was `retry_count is retry_num` — identity
                    # comparison of ints is unreliable; compare by value.
                    if retry_count == retry_num:
                        sys.stdout.write(f'\rError:上传超时{retry_num}次,即将重新上传')
                        time.sleep(1)
                        # BUG FIX: forward retry_num on the recursive restart
                        # instead of silently resetting it to the default.
                        return upload_file(access_token, drive_id,
                                           parent_file_id, path, timeout,
                                           retry_num)
                    sys.stdout.write(f'\rError:上传超时')
                    retry_count += 1
                    time.sleep(1)
                    # Countdown before retrying this chunk.
                    n = 3
                    while n:
                        sys.stdout.write(f'\r{n}秒后重试')
                        n -= 1
                        time.sleep(1)
                    sys.stdout.write('\r')
            end_time = time.time()
            t = end_time - start_time
            total_time += t
            k += size / file_size
            count_size += size
            # Progress bar with cumulative percentage and average speed.
            upload_info = f'\r上传中{"." * (part_number % 4)} [{"=" * int(k * 10)}{"*" * int((1 - k) * 10)}] %{math.ceil(k * 1000) / 10} {round(count_size / 1024 / 1024 / total_time, 2)}MB/s'
            sys.stdout.write(upload_info)
        # Tell the server the upload is finished.
        url = 'https://api.aliyundrive.com/v2/file/complete'
        json = {
            "ignoreError": True,
            "drive_id": drive_id,
            "file_id": file_id,
            "upload_id": upload_id,
            "part_info_list": part_info_list_new
        }
        r = requests.post(url, headers=headers, json=json)
        if r.status_code == 200:
            total_time = int(total_time * 100) / 100
            print(
                f'\n上传成功,耗时{int(total_time * 100) / 100}秒,平均速度{round(file_size / 1024 / 1024 / total_time)}MB/s'
            )
        else:
            print('\n上传失败')
def download_images(d_img_dict): try: func_timeout(media_wait_between_requests, tor_img_download_loop, args = [d_img_dict]) except FunctionTimedOut: print ("Torsession terminated after {} seconds tor_timeout.".format(media_wait_between_requests)) return
def get_emails(host, port, username, password, timeout):
    """Download every message in the Gmail INBOX to a Desktop folder.

    Each message is exported via ``export_inbox`` with a per-message timeout;
    messages that time out are recorded, their partial folders deleted, and
    their raw RFC822 payloads fetched into ``failed_data_list`` instead.

    Returns a tuple ``(failed_data_list, failed_uid)``.
    """
    fetch_protocol = '(RFC822)'
    print('\nConnecting to Gmail\'s IMAP server .....')
    try:
        conn = imaplib.IMAP4_SSL(host, port)
        print('\tConnected.\n')
    except socket.gaierror:
        print('\tCheck your internet connection.\n')
        print('Restart the program to try again.')
        # BUG FIX: input() used to come AFTER sys.exit(), making it
        # unreachable; pause first so the user can read the message.
        dummy_var = input()
        sys.exit()
    try:
        print('Attempting to login .....')
        conn.login(username, password)
        print('\tLogin Successful.\n')
        print('Reading your INBOX .....\n')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed here.
        print(
            '\tLogin Failed.\n\nPossible reasons :\n\n\t1. Access to less-secure apps is "turned off" in your Google account\'s settings.\n\n\tResolve here : {}'
            .format(app_access_url))
        print(
            '\n\t2. "Enable IMAP" option is turned off in your Gmail settings.\n\n\tResolve here : {}\n\n\t3. You entered invalid credentials.'
            .format(enable_imap_url))
        print('\nRestart the program to try again.')
        # BUG FIX: pause before exiting (was unreachable after sys.exit()).
        dummy_var = input()
        sys.exit()
    conn.select('INBOX')
    # Search returns ascending UIDs; reverse to process newest first.
    inbox_bytes = conn.uid('search', None, 'ALL')[1][0].split()[::-1]
    print(f'\tYou have {len(inbox_bytes)} mails in your inbox.\n\n')
    download_dir = os.path.join(
        os.environ.get('userprofile'),
        'Desktop\\{}-INBOX'.format(re.sub('[<>|?*:"/\\\\]', '.', username)))
    check_isdir(download_dir)
    os.chdir(download_dir)
    failed_uid = []
    display_value = 1
    for fetch_uid in inbox_bytes:
        try:
            print(
                f'Fetching email {display_value} of {len(inbox_bytes)} .....')
            # Bound each export so one slow message can't hang the whole run.
            func_timeout(timeout, export_inbox,
                         args=(conn, fetch_uid, fetch_protocol))
            print(f'\tDone.\n')
        except FunctionTimedOut:
            print(f'\tFailed.\n')
            failed_uid.append(fetch_uid)
        display_value += 1
    os.chdir(download_dir)
    # Remove partial folders left behind by timed-out fetches.
    for uid in [x.decode('utf-8') for x in failed_uid]:
        for folder in os.listdir('.'):
            if uid in folder:
                os.system(f'echo y | rmdir /s {folder}')  # Windows
                #os.system(f'sudo rm -r {folder}')  # Linux
    # Fetch the raw payloads of the messages that could not be exported.
    failed_data_list = [
        conn.uid('fetch', uid, fetch_protocol)[1][0][1] for uid in failed_uid
    ]
    print('Logging out .....')
    conn.logout()
    print('\tSuccessfully logged out.\n')
    os.chdir(os.environ.get('userprofile'))
    print(f'All mails have been saved to {download_dir}\n')
    if bool(failed_uid):
        print(
            f'Fetch failure for these mail uid(s) : {[int(x.decode("utf-8")) for x in failed_uid]}\n'
        )
    return (failed_data_list, failed_uid)
def intent_and_entity_rerouter(text):
    """Classify `text` with the NLU parser and dispatch it to a handler.

    Returns True when the text was routed somewhere, False when the NLU call
    failed, confidence was below threshold, or no intent branch matched.
    """
    logging.info('Intent classifier received: %s' % text)
    # Phrases on the ignore list bypass NLU and go straight to the
    # command handler.
    for ignorable in nlu_ignore:
        if (text.lower().startswith(ignorable)):
            command_handler(text)
            return True
    # Minimum intent confidence required to act on the NLU result.
    THRESHOLD = 0.75
    try:
        # Bound the NLU call to 1 second.
        nlu_response = func_timeout(1, nlu_parser, args=(text, ))
    except (FunctionTimedOut, Exception) as e:
        # NOTE(review): FunctionTimedOut is itself an Exception subclass,
        # so this clause catches every error, not just timeouts.
        logging.error(f'Error getting NLU data: {e}')
        return False
    logging.debug('Intent classifier response: %s' % nlu_response)
    if nlu_response['intent']['confidence'] >= THRESHOLD:
        intent = nlu_response['intent']['name']
        entities = nlu_response['entities']
        has_entities = isinstance(entities, list) and len(entities) > 0
        if intent == 'get_weather':
            logging.info(
                'Weather request acknowledged. Sending through designated path.'
            )
            # First entity (if any) is treated as the location.
            if entities:
                weather.get_weather(False, False, *[entities[0]['value']])
            else:
                weather.get_weather()
            return True
        elif intent == 'find_info' and has_entities:
            # TODO: consider how to make images and local lookup optional
            # possible intents: toggle_image_display (translated entities: on/off); toggle_air_gap (grants or removes Luna's access to the internet,
            # and, more importantly, the internets access to Luna)
            action = intel_handler.informant(entities[0]['value'].title(),
                                             True, 0, False)
            logging.info(f'Caller received action: {action}')
            if action:
                handle_user_input(action)
            return True
        elif intent == 'find_images':
            if utils.is_online():
                entity = entities[0]['value']
                try:
                    # Fetch candidate image URLs and render them on a
                    # background thread.
                    image_urls = wikipedia.page(entity).images
                    render_images = StoppableThread(
                        target=display_manager.fetch_images,
                        args=(
                            entities[0]['value'],
                            image_urls,
                        ))
                    render_images.daemon = True
                    render_images.start()
                    H()
                    sprint(random.choice(pending_image_search_responses))
                except Exception as e:
                    logging.error(e)
                    H()
                    sprint('For some reason I could not comply.')
            else:
                H()
                sprint('I need an internet connection to comply.')
            return True
        elif intent == 'find_related_info' and has_entities:
            utils.find_related(entities[0]['value'])
            return True
        elif intent == 'directions' and has_entities:
            origin = None
            destination = None
            # Pull source/destination out of the entity list.
            for entity in entities:
                if entity['entity'] == 'source':
                    origin = entity['value']
                elif entity['entity'] == 'destination':
                    destination = entity['value']
            logging.info(
                'Parsing direction query with destination: %s and origin: %s'
                % (destination, origin))
            if destination:
                utils.directions(destination, origin)
            else:
                H()
                sprint('No destination found.')
            return True
        elif intent == 'find_location' and has_entities:
            utils.find_location(entities[0]['value'])
            return True
        elif intent == 'find_more_info' and has_entities:
            action = intel_handler.informant(entities[0]['value'].title(),
                                             False, 0, True)
            logging.info(f'Caller received action: {action}')
            # NOTE(review): asymmetric — only the no-action path returns True
            # here; with an action this falls through to the final return
            # False.  Looks suspicious; confirm against callers.
            if action:
                handle_user_input(action)
            else:
                return True
        else:
            return False
    return False
def split(l, n): # For item i in a range that is a length of l, for i in range(0, len(l), n): # Create an index range for l of n items: yield l[i:i + n] # MAIN EXECUTION BEGINS # Check if Bucket Exists s3CheckIfBucketExists(s3Resource, params["athenaResultBucket"]) # Fetch Athena result file from S3 try: s3_filename = func_timeout(params['timeout'], athena_to_s3, args=(athenaClient, params)) except FunctionTimedOut: print("Athena Show Partition query timed out.") print() print("#~ FAILURE ~#") print() print() raise #s3_filename = athena_to_s3(athenaClient, params) print("Athena Result File At :") print(params['athenaResultBucket'] + '/' + params["athenaResultFolder"] + '/' + s3_filename) print("----------------------------------") print()
'Vet Record Examples (1)', 'Vet Record Text Multiple Output\Success Folder') filelist = os.listdir(input_path) #error checking error_list = [] success_list = [] #Check file name has correct format conversion_list = (name_format(filelist))[0] error_append((name_format(filelist))[1], input_path, error_list) for i in conversion_list: path = input_path + '\{}'.format(i) file = os.path.splitext(i)[0] try: text = to.func_timeout(15, convert_pdf_to_txt, args=(path, )) x = re.search("[a-zA-Z]", text) if x == None: error_list.append(str(path)) print('{} Error - Empty Document'.format(file)) continue else: f = open(output_path + '\{}_{}.txt'.format(file, dt_string), 'w+') f.write(text) f.close() success_list.append(str(path)) print('{} Success'.format(file)) except to.exceptions.FunctionTimedOut: error_list.append(str(path)) print('{} conversion timed out'.format(file)) # NOTE - This UnicodeEncodeError Try/Except is a bodge until I can sort out encoding
def Run(self):
    """Play a complete game.

    Repeatedly asks each agent for a move (with a per-move time limit),
    executes it, and scores rounds until some player completes a row.
    Agents that exceed their time budget collect warnings; hitting the
    warning limit ends the game immediately.  Returns the player traces
    produced by _EndGame.
    """
    # Turn order starts at first_player and wraps around.
    player_order = []
    for i in range(self.game_state.first_player, len(self.players)):
        player_order.append(i)
    for i in range(0, self.game_state.first_player):
        player_order.append(i)
    game_continuing = True
    round_count = 0
    move_count = 0
    for plr in self.game_state.players:
        plr.player_trace.StartRound()
    # Give each agent its StartRound callback on a deep copy of the state.
    for i in player_order:
        gs_copy = copy.deepcopy(self.game_state)
        try:
            func_timeout(self.startRound_time_limit,
                         self.players[i].StartRound,
                         args=(gs_copy, ))
        except FunctionTimedOut:
            self.warnings[i] += 1
            if self.displayer is not None:
                self.displayer.TimeOutWarning(self, i)
            # -1 marks a timeout in StartRound rather than during a move.
            self.warning_positions.append((i, round_count, -1))
            if self.warnings[i] == self.warning_limit:
                player_traces = self._EndGame(player_order,
                                              isTimeOut=True,
                                              id=i)
                return player_traces
        except AttributeError:
            # Agents without a StartRound method are simply skipped.
            pass
        # Re-seed the RNG from the pre-generated seed list after each agent
        # call (presumably to isolate agent randomness — verify).
        random.seed(self.seed_list[self.seed_idx])
        self.seed_idx += 1
    if self.displayer is not None:
        self.displayer.StartRound(self.game_state)
    while game_continuing:
        for i in player_order:
            plr_state = self.game_state.players[i]
            moves = plr_state.GetAvailableMoves(self.game_state)
            # Agents only ever see copies of the state and the move list.
            gs_copy = copy.deepcopy(self.game_state)
            moves_copy = copy.deepcopy(moves)
            try:
                selected = func_timeout(self.time_limit,
                                        self.players[i].SelectMove,
                                        args=(moves_copy, gs_copy))
            except FunctionTimedOut:
                self.warnings[i] += 1
                if self.displayer is not None:
                    self.displayer.TimeOutWarning(self, i)
                self.warning_positions.append((i, round_count, move_count))
                if self.warnings[i] == self.warning_limit:
                    player_traces = self._EndGame(player_order,
                                                  isTimeOut=True,
                                                  id=i)
                    return player_traces
                # Fall back to a random legal move for the timed-out agent.
                selected = random.choice(moves)
            assert (ValidMove(selected, moves))
            random.seed(self.seed_list[self.seed_idx])
            self.seed_idx += 1
            self.game_state.ExecuteMove(i, selected)
            if self.displayer is not None:
                self.displayer.ExcuteMove(i, selected, self.game_state)
            if not self.game_state.TilesRemaining():
                break
        # Have we reached the end of round?
        if self.game_state.TilesRemaining():
            move_count += 1
            continue
        # It is the end of round
        self.game_state.ExecuteEndOfRound()
        if self.displayer is not None:
            self.displayer.EndRound(self.game_state)
        # Is it the end of the game?
        for i in player_order:
            plr_state = self.game_state.players[i]
            completed_rows = plr_state.GetCompletedRows()
            if completed_rows > 0:
                game_continuing = False
                break
        # Set up the next round
        if game_continuing:
            round_count += 1
            move_count = 0
            self.game_state.SetupNewRound()
            # Recompute turn order; first_player may have changed.
            player_order = []
            for i in range(self.game_state.first_player, len(self.players)):
                player_order.append(i)
            for i in range(0, self.game_state.first_player):
                player_order.append(i)
            for i in player_order:
                gs_copy = copy.deepcopy(self.game_state)
                try:
                    func_timeout(self.startRound_time_limit,
                                 self.players[i].StartRound,
                                 args=(gs_copy, ))
                except FunctionTimedOut:
                    self.warnings[i] += 1
                    if self.displayer is not None:
                        self.displayer.TimeOutWarning(self, i)
                    self.warning_positions.append((i, round_count, -1))
                    if self.warnings[i] == self.warning_limit:
                        player_traces = self._EndGame(player_order,
                                                      isTimeOut=True,
                                                      id=i)
                        return player_traces
                except AttributeError:
                    pass
                random.seed(self.seed_list[self.seed_idx])
                self.seed_idx += 1
            if self.displayer is not None:
                self.displayer.StartRound(self.game_state)
    # Score player bonuses
    player_traces = self._EndGame(player_order, isTimeOut=False)
    # Return scores
    return player_traces
def mccv(learner, X, y, target_size=.9, r=0.0, min_stages=3, timeout=None, seed=0, repeats=10):
    """Conducts a 90/10 MCCV (imitating a bit a 10-fold cross validation).

    Repeatedly trains `learner` on a random `target_size` fraction of the
    data and evaluates the error rate on the remainder.  An optional overall
    `timeout` (seconds) bounds the whole procedure; runs past the deadline
    are cut off.

    Returns (mean error rate or nan, list of per-repeat error rates).
    """
    def evaluate(learner_inst, X, y, num_examples, seed=0, timeout=None, verbose=False):
        # Single MCCV iteration: train on a random subset of num_examples
        # rows, return the error rate on the held-out rest.
        deadline = None if timeout is None else time.time() + timeout
        random.seed(seed)
        n = X.shape[0]
        indices_train = random.sample(range(n), num_examples)
        mask_train = np.zeros(n)
        mask_train[indices_train] = 1
        mask_train = mask_train.astype(bool)
        mask_test = (1 - mask_train).astype(bool)
        X_train = X[mask_train].copy()
        y_train = y[mask_train]
        X_test = X[mask_test].copy()
        y_test = y[mask_test]
        learner_inst = sklearn.base.clone(learner_inst)
        eval_logger.info(
            f"Training {format_learner(learner_inst)} on data of shape {X_train.shape} using seed {seed}."
        )
        if deadline is None:
            learner_inst.fit(X_train, y_train)
        else:
            # Bound the fit by the remaining time budget.
            func_timeout(deadline - time.time(), learner_inst.fit,
                         (X_train, y_train))
        y_hat = learner_inst.predict(X_test)
        error_rate = 1 - sklearn.metrics.accuracy_score(y_test, y_hat)
        eval_logger.info(
            f"Training ready. Obtaining predictions for {X_test.shape[0]} instances. Error rate of model on {len(y_hat)} instances is {error_rate}"
        )
        return error_rate

    eval_logger.info(f"Running mccv with seed {seed}")
    # FIX: was `if not timeout is None:` — PEP 8 identity-test spelling.
    if timeout is not None:
        deadline = time.time() + timeout
    scores = []
    n = X.shape[0]
    num_examples = int(target_size * n)
    seed *= 13
    # FIX: loop variable was `r`, silently shadowing the `r` parameter.
    for _ in range(repeats):
        eval_logger.info(
            f"Seed in MCCV: {seed}. Training on {num_examples} examples. That is {np.round(100 * num_examples / X.shape[0])}% of the data (testing on rest)."
        )
        if timeout is None:
            try:
                scores.append(evaluate(learner, X, y, num_examples, seed))
            except KeyboardInterrupt:
                raise
            # FIX: narrowed from a bare `except:` so SystemExit is not
            # swallowed; failed repeats are still skipped, not counted.
            except Exception:
                eval_logger.info("AN ERROR OCCURRED, not counting this run!")
        else:
            try:
                # Stop early once the overall deadline has passed.
                if deadline <= time.time():
                    break
                scores.append(
                    func_timeout(deadline - time.time(), evaluate,
                                 (learner, X, y, num_examples, seed)))
            except FunctionTimedOut:
                break
            except KeyboardInterrupt:
                raise
            except Exception:
                eval_logger.info("AN ERROR OCCURRED, not counting this run!")
        seed += 1
    return np.mean(scores) if len(scores) > 0 else np.nan, scores
vif = compute_vif(gt_image, enhanced_image) except: vif = "null" ms_ssim = compute_msssim(np.array([gt_image]), np.array([enhanced_image]), max_val=255) try: uqi = compute_uqi(gt_image, enhanced_image) except: uqi = "null" try: fsim = func_timeout(10, oc1.FeatureSIM, args=(gt_image, enhanced_image)) except FunctionTimedOut: fsim = "null" except Exception as e: fsim = "null" # oc.addpath("./Visible_Edges_Ratio") # e1, ns1 = oc.EvaluationDescriptorCalculation(gt_path, enhanced) # metrics["ns1"] += ns1 # metrics["e1"] += e1 # print("ver") try: essim = func_timeout(7, oc2.ESSIM, args=(gt_image, enhanced_image))
def play_the_game(words, options):
    '''
    The game loop

    Runs a word game (single game or tournament, per `options`): each active
    player produces the next word within `time_limit` seconds or is dropped;
    the last player standing wins the round.
    '''
    # variable that controls whether the game is on or not
    game_on = True
    # stores the latest played word (by man or machine)
    previous_word = None
    # list of words available for the ongoing game
    playable_words = words
    # stores the winners and keeps count of wins per tournament
    winner_dict = {}
    # variable that holds the players and their state (active/dropped)
    players_dict = initialize_player_dict(options['computer_player_count'])
    # number of rounds to be played, decreased after each game round
    rounds = options['tournament_rounds']
    # convinience flag that tells whether game is in tournament mode or not (True/False)
    tournament_mode = options['tournament_mode']
    # time limit for player's move in seconds. Defaults to 2 minutes.
    time_limit = options['timer_time'] or 120
    # difficulty level for machine players, used to randomize machine's answer.
    difficulty_level = options['difficulty_level']
    # used to count the time spent in the game or tournament. Extra.
    game_start_time = time()
    print_header(options)
    while game_on:
        for player in players_dict:
            if player_active(player, players_dict):
                try:
                    # use func_timeout module to trigger time limit
                    word = func_timeout(time_limit,
                                        get_next_word,
                                        args=(difficulty_level, player,
                                              playable_words, previous_word),
                                        kwargs=None)
                    previous_word = validate(player, word, previous_word,
                                             playable_words)
                except FunctionTimedOut:
                    # Player ran out of time: drop them from the game.
                    print_exception_and_drop_player(
                        player, players_dict, EXCEPTION_MSG_DICT['timeout'])
                except Exception as invalid_word_exception:
                    # Invalid word: drop the offending player.
                    print_exception_and_drop_player(player, players_dict,
                                                    invalid_word_exception)
            if only_one_player_left(players_dict):
                # tournament related
                if tournament_mode:
                    declare_round_winner(find_winner(players_dict),
                                         winner_dict)
                    rounds = rounds - 1
                    if rounds > 0:
                        print_tournament_round_info(rounds)
                        # initialize_players and word list for the next round
                        players_dict = initialize_player_dict(
                            options['computer_player_count'])
                        playable_words = read_playable_words_from_file()
                        game_on = True
                        previous_word = None  # reset the previous word
                    if rounds == 0:
                        print_tournament_end_message(winner_dict)
                        print_elapsed_time(game_start_time)
                        game_on = False
                    break
                else:
                    # Single-game mode: declare the winner and stop.
                    declare_game_winner(find_winner(players_dict))
                    print_elapsed_time(game_start_time)
                    game_on = False
                    break
##--------------------------------------------------------------## if args.skipDownload == "no": # Initialise file where to store PMID2GSE info geoInfoFile = geoInfoFile.format(outRootFolder, anCode) # Initialise pandas dataframe that stores data PMID2GSE = pd.DataFrame( columns=["PMID", "gseCodes", "platforms", "ftpLinks"]) # Iterating over each block of PMIDs and retrieve the data (easier for backup purposes) for index, pmids in enumerate(PMIDsChunks): KeepLog( logFile, "Retrieve GSE codes for batch {0}/{1} \n".format( index + 1, len(PMIDsChunks))) # We need to put a timeout to the function, otherwise (if blocked IP or connection, it will stay for forever) try: tmpPMID2GSE = func_timeout(timoutThr, RetrieveGeoInfoFromPMID, args=(pmids, logFile)) SaveIntoDF(tmpPMID2GSE, geoInfoFile, "pandas") PMID2GSE = PMID2GSE.append(tmpPMID2GSE, ignore_index=True) except FunctionTimedOut: KeepLog( logFile, "Sorry, timeout riched for {0}/{1} \n".format( index + 1, len(PMIDsChunks))) continue # continue the for loop if function RetrieveGeoInfoFromPMID takes more than 40 second # Deleting tmpPMID2GSE, geoInfoFile variable and release garbage memory try: del tmpPMID2GSE, geoInfoFile except: KeepLog(logFile, "[WARNING] No need to clear VAR\n")
descision_folder = os.path.join(root_path, "results") if not os.path.isdir(descision_folder): os.makedirs(descision_folder) global best_solution, best_score for path in data_paths[5:]: graph_name = path.split(os.sep)[-1] print(graph_name) res_path = os.path.join(descision_folder, graph_name + '.txt') dataset = Dataset(path) best_solution = 0 best_score = 0 solver = BranchAndBoundSolver(dataset, root_path) start_time = time.time() try: doitReturnValue = func_timeout(1800, solver, args=()) except Exception as e: print(e) total_time = time.time() - start_time print(total_time) def save_results(descision, calc_time, n_vertices, path): with open(path, 'w+') as f: res_string = "N vertices: {} \n Calculation time: {} seconds \n Descision: {}".format( n_vertices, calc_time, descision) f.write(res_string) def check_if_clique(descision, graph): for i in descision: for j in descision: if (i != j):
def test_retry(self): sleepFunction = getSleepLambda(.5) expectedResult = 5 + 19 gotException = False functionTimedOut = None startTime = time.time() try: result = func_timeout(.3, sleepFunction, args=(5, 19)) except FunctionTimedOut as fte: functionTimedOut = fte gotException = True endTime = time.time() assert gotException , 'Expected to get exception' assert compareTimes(endTime, startTime, .3, 3, None, .10) == 0 , 'Expected to wait .3 seconds. Was: %f - %f = %f' %(endTime, startTime, round(endTime - startTime, 3)) gotException = False startTime = time.time() try: result = functionTimedOut.retry() except FunctionTimedOut: gotException = True endTime = time.time() assert gotException , 'Expected to get exception on retry.' assert compareTimes(endTime, startTime, .3, 3, None, .10) == 0 , 'Expected retry with no arguments to use same timeout of .3' gotException = False startTime = time.time() try: result = functionTimedOut.retry(None) except FunctionTimedOut: gotException = True endTime = time.time() assert not gotException , 'Did NOT to get exception with no timeout' assert compareTimes(endTime, startTime, .5, 3, None, .10) == 0 , 'Expected retry with None as timeout to last full length of function' gotException = False startTime = time.time() try: result = functionTimedOut.retry(.4) except FunctionTimedOut: gotException = True finally: endTime = time.time() assert gotException , 'Expected to time out after .4 seconds when providing .4' assert compareTimes(endTime, startTime, .4, 3, .05, None) == 0 , 'Expected providing .4 would allow timeout of up to .4 seconds' threadsCleanedUp = False for i in range(5): time.sleep(1) gc.collect() if threading.active_count() == 1: threadsCleanedUp = True break assert threadsCleanedUp , 'Expected other threads to get cleaned up after gc collection'
def theTests(path_to_code_to_check="../me"):
    """Run all the tests."""
    print("\nWelcome to the exam!")
    print("May the odds be ever in your favour.\nEspecially today!")
    # Only run the exam when the student's exercise file loads at all.
    if ex_runs(path_to_code_to_check, exerciseNumber=1, weekNumber=WEEK_NUMBER):
        exam = loadExerciseFile(
            path_to_code_to_check, weekNumber=WEEK_NUMBER, exerciseNumber=1
        )

        # testResults.append(test(test_flake8(ex1path), "pass the linter"))

        # Each exam_test compares the student function's output against the
        # expected value, optionally via a finishing_function transform.
        exam_test(
            True,
            [],
            exam.string_please,
            finishing_function=lambda x: type(x) is str,
            extra_message="Don't over think this! just return a string!",
        )
        exam_test(
            True,
            [],
            exam.list_please,
            finishing_function=lambda x: type(x) is list,
            extra_message="Don't over think this! just return a list!",
        )
        exam_test(
            True,
            [],
            exam.dictionary_please,
            finishing_function=lambda x: type(x) is dict,
            extra_message="Don't over think this! just return a dictionary!",
        )
        exam_test(True, [5], exam.is_it_5)
        exam_test(False, [4], exam.is_it_5)
        exam_test(False, ["cats"], exam.is_it_5)
        exam_test(0, [5], exam.take_five)
        exam_test(5, [10], exam.take_five)
        exam_test(-5, [0], exam.take_five)
        exam_test("Hello the Queen", ["the Queen"], exam.greet)
        exam_test("Hello Pr♂nc♀♂", ["Pr♂nc♀♂"], exam.greet)
        exam_test(4, [[3, 3, 3, 3, 1]], exam.three_counter)
        exam_test(0, [[0, 1, 2, 5, -9]], exam.three_counter)
        exam_test(2, [7], exam.n_counter)
        exam_test(5, [0, [0, 0, 0, 0, 0, [0]]], exam.n_counter)
        # fmt: off
        fizza = [
            1, 2, "Fizz", 4, "Buzz", "Fizz", 7, 8, "Fizz", "Buzz", 11, "Fizz",
            13, 14, "FizzBuzz", 16, 17, "Fizz", 19, "Buzz", "Fizz", 22, 23,
            "Fizz", "Buzz", 26, "Fizz", 28, 29, "FizzBuzz", 31, 32, "Fizz",
            34, "Buzz", "Fizz", 37, 38, "Fizz", "Buzz", 41, "Fizz", 43, 44,
            "FizzBuzz", 46, 47, "Fizz", 49, "Buzz", "Fizz", 52, 53, "Fizz",
            "Buzz", 56, "Fizz", 58, 59, "FizzBuzz", 61, 62, "Fizz", 64,
            "Buzz", "Fizz", 67, 68, "Fizz", "Buzz", 71, "Fizz", 73, 74,
            "FizzBuzz", 76, 77, "Fizz", 79, "Buzz", "Fizz", 82, 83, "Fizz",
            "Buzz", 86, "Fizz", 88, 89, "FizzBuzz", 91, 92, "Fizz", 94,
            "Buzz", "Fizz", 97, 98, "Fizz", "Buzz",
        ]
        # fmt: on
        exam_test(fizza, [], exam.fizz_buzz)
        exam_test(
            "|a| |s|e|r|i|a|l| |k|i|l|l|e|r|", ["a serial killer"], exam.put_behind_bars
        )
        exam_test("|a| |b|a|r|t|e|n|d|e|r|", ["a bartender"], exam.put_behind_bars)
        exam_test(["red fox"], ["x"], exam.pet_filter)
        exam_test([], ["q"], exam.pet_filter)
        exam_test(
            ["pig", "sheep", "guinea pig", "pigeon", "alpaca", "guppy"],
            ["p"],
            exam.pet_filter,
        )
        exam_test("e", [], exam.best_letter_for_pets)
        word_lengths = [[3, 3, 3], [4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]]
        exam_test(
            word_lengths,
            [],
            exam.make_filler_text_dictionary,
            lambda x: [[len(w) for w in x[k]] for k in x.keys()],
        )
        exam_test(
            True,
            [50],
            exam.random_filler_text,
            lambda x: len(x.split(" ")) == 50 and len(x) > 3 * 50,
        )
        exam_test(
            True,
            [1000],
            exam.random_filler_text,
            lambda x: len(x.split(" ")) == 1000 and len(x) > 3 * 1000,
        )
        clean_out_old_env()
        exam_test(
            True,
            [100],
            exam.fast_filler,
            lambda x: len(x.split(" ")) == 100 and len(x) > 3 * 100,
            # chdir=True,  # NFI what this does :(
        )
        # exam_test(True, ["./week8/dict_racey.json"], os.path.exists)
        exam_test(
            True,
            [10],
            exam.fast_filler,
            lambda x: x[0] in string.ascii_uppercase and x[1] in string.ascii_lowercase,
            "Test if fast_filler is capitalised",
        )
        exam_test(
            True,
            [10],
            exam.fast_filler,
            lambda x: x[-1] == ".",
            "Test if fast_filler finishes with a .",
        )
        print(
            "The point of saving the dictionary is that it's fast!",
            "The pattern of saving a value locally so that you don't",
            "need to go and get it is called caching.",
            "This test runs fast_filler 10 times, and if it manages it in less",
            "than a second, then you're good to go!",
            sep="\n",
        )
        # Caching check: 10 fast_filler calls must finish inside one second.
        try:
            TIMEOUT_IN_SECONDS = 1
            func_timeout(
                TIMEOUT_IN_SECONDS,
                lambda: [exam.fast_filler(1000) for _ in range(10)],
                args=[],
            )
            testResults.append(test(True, "subsequent fast_filler"))
        except FunctionTimedOut as t:
            m = (
                "Timed out trying to run fast filler 10 times in 1 second, "
                "subsequent fast_filler probably wasn't fast enough"
            )
            print(m, str(t))
            testResults.append(test(False, m + str(t)))
        except Exception as e:
            testResults.append(test(False, "subsequent fast_filler failed: " + str(e)))

    message = (
        "Cowabunga! You've got all the tests passing!\n"
        "Well done, that's all the exercises for this term out of the way!"
    )
    print(testResults)
    return finish_up(testResults, message, nyan_cat())
def process(self, item, input_item): logging.debug("Processor starting item {} with id {}".format( item.full_name, item.id)) try: if item.type() == Item.INPUT_TYPE: params = item.getParamValueDict() path = params['input_path'] output = np.array(Image.open(path)) elif item.type() == Item.FILTER_TYPE: input_array = input_item.output params = item.getParamValueDict() fn = item.fn try: output = func_timeout(self.TIMEOUT, fn, args=(input_array, params)) except FunctionTimedOut: raise ProcessingError("Filter timed out!") elif item.type() == Item.GROUP_TYPE: if not item.hasChildren(): raise ProcessingError("Group has no children!") children = [child for child in item.children()] child_count = item.rowCount() last_active_child = None pos = child_count - 1 while pos >= 0: child = children[pos] if child.is_active: last_active_child = child break pos -= 1 if last_active_child is None: raise ProcessingError("Group has no active children") else: output = np.copy(last_active_child.output) elif item.type() == Item.MODIFIER_TYPE: if not item.hasChildren(): raise ProcessingError("Modifier has no children!") child_count = item.rowCount() mode = item.params['mode']['value'] clip = item.params['clip']['value'] img_list = [] coeff_list = [] for child in item.children(): if child.is_active: img_list.append(child.output) coeff_list.append( child.params['modifier_coefficient']['value']) if len(img_list) == 0: raise ProcessingError("Modifier has no active children!") output = self._modifier(img_list, coeff_list, mode, clip) except Exception as e: logging.warning("Processor has error: {}".format(repr(e))) item.output = None item.is_processed = False item.has_processing_error = True item.status_message = repr(e) else: logging.debug("Processor was successful") item.output = output item.is_processed = True item.has_processing_error = False item.status_message = "Processing ok." finally: self.signals.item_processed.emit(item) logging.debug("Processor returning item")
def act(action): return func_timeout(inference_timeout, story_manager.act, (action,))
# with tqdm.tqdm(total=len(index)) as pbar: # for future in concurrent.futures.as_completed(future_to_opt): # opt, status = future.result() # idx = NameIndex[opt.gid] # Opts[idx].results = opt.results # pbar.set_postfix(gname=opt.gid, n_var = opt.n_var) # pbar.update(1) with tqdm.tqdm(total=len(Opts)) as pbar: for i in range(len(Opts)): x0 = initialize_flow(Opts[i], np.sum(Opts[i].weights)) try: x, _ = func_timeout(10, barrier_method, kwargs={ "opt": Opts[i], "x0": x0, "stop_criteria": 1e-14 }) Opts[i].results = x except: opt, _ = optimizegraph(Opts[i], max_iter=3000, max_cpu_time=100) Opts[i] = opt pbar.update(1) # add back the forbidden edges Opts = [ opt for opt in Opts if (not (opt.results is None)) and ( not np.any(np.isnan(opt.results))) and (
metadata = { 'ip': ip, 'state': 'unknown', 'whois_country': info.get('asn_country_code'), 'whois_info': info.get("network").get("name"), 'shodan_country': shodan.get('country_code'), 'shodan_os': shodan.get('os'), 'shodan_vulns': vulns, 'shodan_bluekeep': bluekeep } try: count, offline_count, vuln_count, patched_count, offline_dict, patched_dict, vuln_dict = func_timeout( 30, do_scan, args=(ip, count, offline_count, vuln_count, patched_count, offline_dict, patched_dict, vuln_dict, metadata, writer)) except FunctionTimedOut as e: error("unable to complete connection: {}".format(e)) continue print('\nWe found ' + str(vuln_count) + ' vulnerable IP addresses:') for key in vuln_dict: print(key) print('\nWe found ' + str(patched_count) + ' patched IP addresses:') for key in patched_dict: print(key) print('\nWe found ' + str(offline_count) + ' offline IP addresses:')
def fun_time_decorate(func, arg0, arg1, arg2):
    """Call func(arg0, arg1, arg2) with a 100-second ceiling.

    Returns func's result on success, or the pair ('', '') if the call is
    cut off by FunctionTimedOut.
    """
    try:
        outcome = func_timeout(100, func, args=(arg0, arg1, arg2))
    except FunctionTimedOut:
        return '', ''
    return outcome
def fit(self, X, y):
    """
    Tune the wrapped estimator's hyperparameters with grid search, then
    refit the best configuration on the full data and return self.

    Falls back to LogisticRegression when no estimator was supplied. The
    operator is wrapped in an Observing instance so an optional observer
    receives startObserving/failObserving/endObserving callbacks around the
    optimization, and so planned (non-trainable) operators expose a `fit`
    method to GridSearchCV. If `max_opt_time` is set, the search is bounded
    via func_timeout (when installed).
    """
    if self._hyperparams["estimator"] is None:
        op = lale.lib.sklearn.LogisticRegression
    else:
        op = self._hyperparams["estimator"]
    observed_op = op
    obs = self._hyperparams["observer"]
    # We always create an observer.
    # Otherwise, we can have a problem with PlannedOperators
    # (that are not trainable):
    # GridSearchCV checks if a fit method is present before
    # configuring the operator, and our planned operators
    # don't have a fit method
    # Observing always has a fit method, and so solves this problem.
    observed_op = Observing(op=op, observer=obs)
    hp_grid = self._hyperparams["hp_grid"]
    # Schema describing the cross-validation folds of (X, y).
    data_schema = lale.helpers.fold_schema(
        X, y, self._hyperparams["cv"], op.is_classifier()
    )
    if hp_grid is None:
        # Derive the parameter grid automatically from the operator's schema.
        hp_grid = lale.search.lale_grid_search_cv.get_parameter_grids(
            observed_op,
            num_samples=self._hyperparams["lale_num_samples"],
            num_grids=self._hyperparams["lale_num_grids"],
            pgo=self._hyperparams["pgo"],
            data_schema=data_schema,
        )
    else:
        # if hp_grid is specified manually, we need to add a level of nesting
        # since we are wrapping it in an observer
        if isinstance(hp_grid, list):
            hp_grid = lale.helpers.nest_all_HPparams("op", hp_grid)
        else:
            assert isinstance(hp_grid, dict)
            hp_grid = lale.helpers.nest_HPparams("op", hp_grid)
    if not hp_grid and isinstance(op, lale.operators.IndividualOp):
        # Empty grid for an individual operator: search over its defaults.
        hp_grid = [
            lale.search.lale_grid_search_cv.get_defaults_as_param_grid(observed_op)  # type: ignore
        ]
    be: lale.operators.TrainableOperator
    if hp_grid:
        if obs is not None:
            impl = observed_op._impl  # type: ignore
            # Tell the observer the optimization is starting.
            impl.startObserving(
                "optimize",
                hp_grid=hp_grid,
                op=op,
                num_samples=self._hyperparams["lale_num_samples"],
                num_grids=self._hyperparams["lale_num_grids"],
                pgo=self._hyperparams["pgo"],
            )
        try:
            self.grid = lale.search.lale_grid_search_cv.get_lale_gridsearchcv_op(
                observed_op,
                hp_grid,
                cv=self._hyperparams["cv"],
                verbose=self._hyperparams["verbose"],
                scoring=self._hyperparams["scoring"],
                n_jobs=self._hyperparams["n_jobs"],
            )
            if self._hyperparams["max_opt_time"] is not None:
                if func_timeout_installed:
                    # Cap the whole grid search at max_opt_time seconds.
                    try:
                        func_timeout(
                            self._hyperparams["max_opt_time"], self.grid.fit, (X, y)
                        )
                    except FunctionTimedOut:
                        # NOTE(review): raising bare BaseException is unusual;
                        # presumably chosen so it escapes `except Exception`
                        # handlers — confirm before changing.
                        raise BaseException("GridSearchCV timed out.")
                else:
                    raise ValueError(
                        f"""max_opt_time is set to {self._hyperparams["max_opt_time"]} but the Python package required for timeouts is not installed. Please install `func_timeout` using `pip install func_timeout` or set max_opt_time to None."""
                    )
            else:
                self.grid.fit(X, y)
            be = self.grid.best_estimator_
        except BaseException as e:
            # Report the failure to the observer, then re-raise unchanged.
            if obs is not None:
                assert isinstance(obs, Observing)  # type: ignore
                impl = observed_op.impl  # type: ignore
                impl.failObserving("optimize", e)
            raise
        # Unwrap the Observing layer from the best estimator, if present.
        impl = getattr(be, "impl", None)
        if impl is not None:
            assert isinstance(be, Observing)  # type: ignore
            be = impl.getOp()
        if obs is not None:
            obs_impl = observed_op._impl  # type: ignore
            obs_impl.endObserving("optimize", best=be)
    else:
        # Nothing to search: the operator must already be trainable.
        assert isinstance(op, lale.operators.TrainableOperator)
        be = op
    # Refit the chosen estimator on the full data set.
    self._best_estimator = be.fit(X, y)
    return self
# NOTE(review): `compact`, `conf`, `instance_name`, `s`, `p` come from outside
# this span — presumably an enclosing loop over instances/parameters; confirm.
# Reset the cutting-plane iteration counter for this run.
compact.iterationsCuts = 0
try:
    compact.model.clear()
    # Choose the problem formulation from the "problem" config property.
    if conf.get_property("problem") == 0:  # Big-M
        compact.constructProblemM()
    elif conf.get_property("problem") == 1:  # McCormick
        compact.constructProblemMcCormick()
    elif conf.get_property("problem") == 2:  # McCormick Non Negative
        compact.constructProblemMcCormickNonNegative()
    compact.model.verbose = 0
    # Solve the LP relaxation first, then run the split-cut loop under a
    # 3-hour (10800 s) wall-clock cap.
    compact.model.optimize(relax=True)
    start = time.time()
    compact.model.write('{}.lp'.format(instance_name))
    doitReturnValue = func_timeout(10800, compact.splitCuts)
    end = time.time()
except FunctionTimedOut:
    # Timed out: append a 'timeout' row to the CSV and dump the model.
    with open("{}.csv".format(instance_name), "a") as f:
        f.write("{};{};{};{};{}\n".format(compact.iterationsCuts, s, p,
                                          compact.model.objective_value,
                                          'timeout'))
    compact.model.write('{}_{}_{}.lp'.format(instance_name, s, p))
    print(compact.iterationsCuts, s, p, compact.model.objective_value,
          'timeout')
except Exception as e:
    # Best-effort: other failures are only reported, not re-raised.
    print('error ', e)
# with open("{}.csv".format(instance_name), "a") as f:
#     f.write("basic+general_cliques;iter;select;parameter;obj;time\n")
# selects = [0, 3]
def run(self):
    """
    Run one full game between the black (X) and white (O) players.

    Alternates turns until the game ends normally, a human quits with "Q",
    or a player forfeits (three illegal moves, direct board tampering, or a
    move timeout). Per-step and cumulative thinking times are tracked and
    displayed for both sides.
    """
    # Cumulative thinking time per side.
    total_time = {"X": 0, "O": 0}
    # Thinking time of the most recent move per side.
    step_time = {"X": 0, "O": 0}
    # Winner code (0 black, 1 white, 2 draw) and piece-count difference.
    winner = None
    diff = -1
    # Game starts.
    print('\n=====开始游戏!=====\n')
    # Show the initial board.
    self.board.display(step_time, total_time)
    while True:
        # Switch the side to move: None/white -> black, black -> white.
        self.current_player = self.switch_player(self.black_player, self.white_player)
        start_time = datetime.datetime.now()
        # Colour of the side to move.
        color = "X" if self.current_player == self.black_player else "O"
        # Legal moves available to the side to move.
        legal_actions = list(self.board.get_legal_actions(color))
        # print("%s合法落子坐标列表:"%color,legal_actions)
        if len(legal_actions) == 0:
            # No legal move: either the game is over for both sides...
            if self.game_over():
                winner, diff = self.board.get_winner()
                break
            else:
                # ...or only this side must pass; hand the turn over.
                continue
        # Snapshot the board so direct (illegal) mutation can be detected.
        board = deepcopy(self.board._board)
        try:
            # Up to three attempts to produce a legal move, each attempt
            # capped at 60 seconds by func_timeout.
            for i in range(0, 3):
                action = func_timeout(60, self.current_player.get_move,
                                      kwargs={'board': self.board})
                # "Q" means a human wants to end the game and score it by
                # the current piece count.
                if action == "Q":
                    break
                if action not in legal_actions:
                    # Illegal move: ask the player to move again.
                    print("你落子不符合规则,请重新落子!")
                    continue
                else:
                    # Legal move obtained.
                    break
            else:
                # Three illegal attempts in a row: forfeit.
                winner, diff = self.force_loss(is_legal=True)
                break
        except FunctionTimedOut:
            # Move computation timed out: forfeit.
            winner, diff = self.force_loss(is_timeout=True)
            break
        end_time = datetime.datetime.now()
        if board != self.board._board:
            # The player mutated the board directly: forfeit.
            winner, diff = self.force_loss(is_board=True)
            break
        if action == "Q":
            # Human requested to end the game; score by piece count.
            winner, diff = self.board.get_winner()
            break
        if action is None:
            continue
        else:
            # Whole seconds spent on this move.
            es_time = (end_time - start_time).seconds
            if es_time > 60:
                # More than 60 s of thinking ends the game.
                print('\n{} 思考超过 60s'.format(self.current_player))
                winner, diff = self.force_loss(is_timeout=True)
                break
            # Apply the move for the current colour.
            self.board._move(action, color)
            # Accumulate per-side timing statistics.
            if self.current_player == self.black_player:
                step_time["X"] = es_time
                total_time["X"] += es_time
            else:
                step_time["O"] = es_time
                total_time["O"] += es_time
            # Show the updated board.
            self.board.display(step_time, total_time)
            # Stop when the game is over.
            if self.game_over():
                winner, diff = self.board.get_winner()
                break
    print('\n=====游戏结束!=====\n')
    self.board.display(step_time, total_time)
    self.print_winner(winner)
    # Map the winner code to a result string; diff is the piece-count margin.
    if winner is not None and diff > -1:
        result = {0: 'black_win', 1: 'white_win', 2: 'draw'}[winner]
        # NOTE(review): the visible span ends here; presumably a
        # `return result, diff` (or similar) follows — confirm.
def copy_pdf_to_hw_staging_dir(file_title_map, output_dir, doi, current_zipfile):
    """
    Extract the merged PDF from *current_zipfile*, attempt to generate a
    headless ("decapitated") copy, and move the result into the FTP staging
    directory *output_dir*.

    If headless creation fails, an error is raised to [email protected],
    and we try to copy the original pdf file to ftp staging instead.

    The function that we call to decapitate the pdf is contained in
    decapitatePDF.py. It manages some error handling, and tries to determine
    whether the pdf cover content has been cleanly removed.

    NOTE(review): Python 2 code. `tmp_dir`, `decap_dir`, `logger`,
    `PDF_DECAPITATE_TIMEOUT` and the helpers appear to be module globals —
    confirm against the full file. The loop body below may continue past
    this view (supplementary-file handling is mentioned but not visible).

    TODO:
    - elife - ianm - tidy up paths to temporary pdf decapitation paths
    """
    for name in file_title_map.keys():
        # we extract the pdf from the zipfile
        title = file_title_map[name]
        if title == "Merged PDF":
            print title
            new_name = gen_new_name_for_file(name, title, doi)
            # NOTE(review): `file` shadows the Python builtin.
            file = current_zipfile.read(name)
            print new_name
            decap_name = "decap_" + new_name
            decap_name_plus_path = tmp_dir + "/" + decap_name
            # we save the pdf to a local file
            temp_file = open(decap_name_plus_path, "wb")
            temp_file.write(file)
            temp_file.close()
            decap_status = None
            try:
                # pass the local file path, and the path to a temp dir, to
                # the decapitation script; cap it with a hard timeout
                decap_status = func_timeout(
                    PDF_DECAPITATE_TIMEOUT,
                    decapitate_pdf_with_error_check,
                    args=(
                        decap_name_plus_path,
                        decap_dir + "/"))
            except FunctionTimedOut:
                # Treat a timeout like a failed decapitation.
                decap_status = False
                timeout_message = "PDF decap did not finish within {x} seconds".format(x=PDF_DECAPITATE_TIMEOUT)
                logger.error(timeout_message)
            if decap_status:
                # copy the decapitated pdf into the staging directory
                # NOTE(review): the open(...).read() handle below is never
                # closed — consider `with` blocks.
                try:
                    move_file = open(decap_dir + "/" + decap_name, "rb").read()
                    out_handler = open(output_dir + "/" + new_name, "wb")
                    out_handler.write(move_file)
                    out_handler.close()
                    print "decapitaiton worked"
                except:
                    # The decap may return true but the file does not exist
                    # for some reason; allow the transformation to continue
                    # in order to process the supplementary files
                    alert_message = "decap returned true but the pdf file is missing " + new_name
                    logger.error(alert_message)
            else:
                # if the decapitation script has failed, we move the
                # original pdf file
                move_file = file
                alert_message = "could not decapitate " + new_name
                logger.error(alert_message)
                alert_production(alert_message)