def save(config, data):
    """Save data and create backup, creating a new data file if necessary.

    The data file is written atomically: entries go to a temp file which is
    flushed, fsynced and then moved over the real data file.  The backup is
    refreshed when missing or older than BACKUP_THRESHOLD seconds.
    """
    create_dir(os.path.dirname(config['data_path']))

    # atomically save by writing to temporary file and moving to destination
    try:
        # write to temp file
        with open(
                config['tmp_path'],
                'w',
                encoding='utf-8',
                errors='replace') as f:
            for path, weight in data.items():
                if is_python3():
                    f.write(("%s\t%s\n" % (weight, path)))
                else:
                    f.write(unicode(
                        "%s\t%s\n" % (weight, path)).encode('utf-8'))
            f.flush()
            # fsync is documented to take a file descriptor
            os.fsync(f.fileno())
    except IOError as ex:
        # bug fix: '"..." % ex' had no %s placeholder and raised TypeError,
        # masking the real I/O error
        print("Error saving autojump data (disk full?): %s" % ex,
              file=sys.stderr)
        sys.exit(1)

    # create backup file if it doesn't exist or is older than BACKUP_THRESHOLD
    if not os.path.exists(config['backup_path']) or \
            (time() - os.path.getmtime(config['backup_path'])
             > BACKUP_THRESHOLD):
        move_file(config['data_path'], config['backup_path'])

    # move temp_file -> autojump.txt
    move_file(config['tmp_path'], config['data_path'])
def handle_video_dir(d, dry_run=True):
    """Sort out a downloaded video directory.

    If one file dominates (>= 95% of the total size) treat the directory
    as a single movie/episode; otherwise rename every file and move the
    whole directory to the watch folder.
    """
    sizes = [f.size for f in d.walkfiles()]
    total_size = sum(sizes) * 1.0
    ratios = sorted([s / total_size for s in sizes], reverse=True)
    if ratios[0] >= 0.95:
        # a single dominant file: classify it via guessit
        vid = sorted(d.walkfiles(), key=lambda f: f.size)[-1]
        info = guessit.guess_video_info(str(vid))
        kind = info["type"]
        if kind == "movie":
            renamed = vid.rename("%s%s" % (info["title"], vid.ext))
            move_file(renamed, HOME_DIR("Videos/Movies"), dry_run)
        elif kind == "episode":
            base = "%(series)s S%(season)02dE%(episodeNumber)02d" % info
            renamed = vid.rename("%s%s" % (base, vid.ext))
            move_file(renamed, HOME_DIR("Videos/TO WATCH"), dry_run)
        # remove the now-redundant directory
        if not dry_run:
            d.rmtree()
    else:
        # multiple video files: rename each, then move the directory itself
        for child in d.files():
            rename(child, dry_run)
        if not dry_run:
            moved = rename(d, dry_run)
            moved.move(HOME_DIR("Videos/TO WATCH"))
def ingest_input_file(self):
    """Ingest self.tops_by_language into the wikimedia SQLite table, then
    archive the input file under ./history/.

    Files already located under ./history are assumed ingested and skipped.
    """
    if './history' in self.file_name:
        logger.warning(
            'the data should be ingested already! check database!')
        return

    # make sure the directory exists
    os.makedirs(DB_PATH, exist_ok=True)
    # connect to the database
    db = sqlite3.connect(DB_PATH + DB_NAME + '.sqlite3')
    try:
        # if the table does not exist, create one
        db.execute(
            'CREATE TABLE IF NOT EXISTS wikimedia (id integer primary key autoincrement not null, language STRING, page_name STRING, non_unique_views INTEGER, timestamp TEXT, last_update datetime default current_timestamp )'
        )
        # insert the new data
        logger.info('ingesting the new data.....')
        # NOTE(review): bare `.values` (no call) suggests a pandas
        # DataFrame -- confirm the type of self.tops_by_language
        db.executemany(
            "insert into wikimedia(language, page_name, non_unique_views, timestamp) values (?, ?, ?, ?)",
            [tuple(x) for x in self.tops_by_language.values])
        db.commit()
    finally:
        # bug fix: the connection used to leak when an insert failed
        db.close()
    logger.info('finished ingesting the new data!')

    # move the file from waiting to history
    src = self.file_name
    dst = './history/' + self.file_name.split('/')[-1]
    move_file(src, dst)
    # bug fix: log after the move actually happened, not before
    logger.info('moved the new data to history file!')
def create(self, session):
    """Create and start a new ices station.

    Fills a temp config, installs it and the per-station playlist module,
    commits the session and (when active) starts the ices process.  On any
    failure the created files are removed, the session is rolled back and
    the original exception propagates.
    """
    ices_tmp_conf = None
    try:
        ices_conf_name = '%s_ices.xml' % self.id
        ices_tmp_conf = copy_file(config.ICES_BASE_CONFIG_PATH,
                                  config.TMP_FOLDER, ices_conf_name)
        self.fill_ices_config(ices_tmp_conf)
        ices_conf_perm_path = config.ICES_CONFIGS_PATH + ices_conf_name
        move_file(ices_tmp_conf, ices_conf_perm_path)
        copy_file(config.ICES_PYTHON_BASE_MODULE_PATH,
                  config.ICES_PYTHON_MODULES_PATH,
                  "playlist_%s.py" % self.id)
        session.commit()
        if self.active:
            self.start_ices()
            if not self.running:
                msg = "Ices station was saved and configured, " \
                      "but can't run. Please see logs"
                flash(msg)
                raise IcesException(msg)
    except Exception:
        session.rollback()
        try:
            # Delete all created files if something went wrong
            self.delete_ices_from_file_system(ices_tmp_conf)
        except OSError:
            pass
        # bug fix: re-raise the original exception with its type and
        # traceback instead of wrapping it in a bare Exception(e)
        raise
def move_tv_episodes(dry_run=False):
    """ handle tv episodes downloaded by sabnzbd """
    candidates = []
    for d in loop_completed_dl_dirs():
        # guessit wants a file name, so fake an extension on the dir name
        guessed = guessit.guess_video_info(str(d) + ".avi")
        if guessed["type"] == "episode":
            candidates.append(d)

    for d in candidates:
        # the largest file in the directory is the episode itself
        vid = sorted(d.files(), key=lambda f: f.size)[-1]
        info = guessit.guess_video_info(str(vid))
        new_base = "%(series)s S%(season)02dE%(episodeNumber)02d" % info
        moved = vid.rename("%s%s" % (new_base, vid.ext))
        move_file(moved, HOME_DIR("Videos/TO WATCH"), dry_run)

    # finally drop the emptied directories
    if not dry_run:
        for d in candidates:
            d.rmtree()
def build_source(self, temp_path):
    """Compile the project with tsc and, if a declaration file was emitted,
    move it into the build's declarations directory.

    Returns the tsc exit status (0 on success).
    """
    result = os.system(f'tsc -p "{self.get_tsconfig()}"')
    declaration_path = f"{splitext(temp_path)[0]}.d.ts"
    # idiom fix: `if(...)` -> `if ...`; removed dead `#result = 0` line
    if isfile(declaration_path):
        move_file(
            declaration_path,
            join(make_config.get_path("toolchain/build/project/declarations"),
                 basename(declaration_path)))
    return result
def smart_move_file(fname, video_dirs, dry_run=False):
    """
    attempts to copy the file for permanent storage if possible
    dry_run determines if the actual copy is performed
    """
    # see how good of a match we can get: compare the name without the
    # trailing episode-number token (hoisted out of the loop)
    prefix = ' '.join(fname.namebase.split()[:-1])
    for v in video_dirs:
        if prefix.startswith(v.name):
            # bug fix: dry_run was documented but never forwarded, so a
            # dry run still moved the file (all sibling call sites pass it)
            move_file(fname, v, dry_run)
def rename_video(f, dry_run=True):
    """ rename a video file using guessit info """
    info = guessit.guess_video_info(str(f))
    kind = info["type"]
    if kind == "movie":
        new_base = info["title"]
        target = HOME_DIR("Videos/Movies")
    elif kind == "episode":
        new_base = "%(series)s S%(season)02dE%(episodeNumber)02d" % info
        target = HOME_DIR("Videos/TO WATCH")
    else:
        # unknown type: leave the file untouched
        return
    renamed = f.rename("%s%s" % (new_base, f.ext))
    move_file(renamed, target, dry_run)
def build_source(self, temp_path):
    """Compile with tsc (--noEmitOnError), refresh the build flag, and move
    the generated .d.ts into the typescript-headers directory.

    Returns tsc's exit status.
    """
    exit_code = os.system(f'tsc -p "{self.get_tsconfig()}" --noEmitOnError')
    self.remove_flags(temp_path)
    self.place_flag(temp_path)
    declaration = f"{splitext(temp_path)[0]}.d.ts"
    if isfile(declaration):
        headers_dir = make_config.get_path("toolchain/build/typescript-headers")
        move_file(declaration, join(headers_dir, basename(declaration)))
    return exit_code
def remote_download(ip, remote_root, local_root, file, auth):
    """Download *file* from the remote peer into local_root.

    The body is written to a uniquely-named cache file first and only moved
    over the real file after the old copy is removed, so a failed download
    never clobbers the local file.  Returns True on success, False otherwise.
    """
    _url = "{}/download?root={}&file={}&auth={}".format(
        ip, quote(remote_root), quote(file), auth)
    cache_file = os.path.join(local_root, utils.cache_dir,
                              os.path.basename(file) + "_" + str(uuid.uuid4()))
    try:
        response = requests.get(_url, timeout=30)
        if response.status_code == 200:
            # bug fix: response.content is bytes; comparing against the str
            # "ERROR" was always unequal, so server errors went undetected
            if response.content != b"ERROR":
                with open(cache_file, "wb") as f:
                    f.write(response.content)
                abs_file = os.path.join(local_root, file)
                if not utils.remove_file(local_root, file):
                    print("download remove local error:{}".format(abs_file))
                else:
                    if utils.move_file(cache_file, abs_file):
                        return True
                    else:
                        print("download file cache -> local error:{}".format(
                            cache_file))
            else:
                print("download error,maybe server not root:{} file:{}".format(
                    remote_root, file))
        else:
            print("response.status_code: {}".format(response.status_code))
    except Exception as e:
        # narrowed from BaseException so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        import traceback
        traceback.print_exc()
        print(e)
    if os.path.exists(cache_file):
        os.remove(cache_file)
    return False
def create_ads(num):
    """Generate *num* pt entries, dump them to nodejs/pts.json, and run the
    createAds shell script.  Returns 0 on success, -1 on failure."""
    pts = gen_pt(num)
    with open('nodejs/pts.json', 'w') as json_file:
        json.dump(pts, json_file, indent=4)
    print('new create ' + str(len(pts)) + ' pt saved.')
    # create ads.
    if os.system('sh shell-script/createAds.sh') != 0:
        print('create ads failure!')
        return -1
    print('create ads success!')
    # archive the delivered pts file under a timestamped name
    archived = tool.DELIVERY_PTS_DIR + "/" + str(time.time()) + '.pts.json'
    tool.move_file('nodejs/pts.json', archived)
    return 0
def migrate_osx_xdg_data(config):
    """
    Older versions incorrectly used Linux XDG_DATA_HOME paths on OS X.
    This migrates autojump files from ~/.local/share/autojump to
    ~/Library/autojump
    """
    assert is_osx(), "This function should only be run on OS X."

    xdg_data_home = os.path.join(os.path.expanduser('~'), '.local', 'share')
    xdg_aj_home = os.path.join(xdg_data_home, 'autojump')
    # bug fix: trailing commas made these 1-tuples, so os.path.exists()
    # never saw a path and the migration silently did nothing
    data_path = os.path.join(xdg_aj_home, 'autojump.txt')
    backup_path = os.path.join(xdg_aj_home, 'autojump.txt.bak')

    if os.path.exists(data_path):
        move_file(data_path, config['data_path'])
    if os.path.exists(backup_path):
        move_file(backup_path, config['backup_path'])

    # cleanup
    shutil.rmtree(xdg_aj_home)
    if len(os.listdir(xdg_data_home)) == 0:
        shutil.rmtree(xdg_data_home)
def umount():
    # Unmount the cryptmount filesystem NAME and clean up the mountpoint,
    # temp files and the backed-up cmtab.
    # NOTE(review): Python 2 syntax (bare `print mountData`) while other
    # parts of this file use Python 3 idioms -- confirm intended interpreter.
    if not path.isfile(CMTAB_FP):
        print('Fatal Error of losing cmtab - Attempting recovery...')
        try:
            # regenerate /etc/cryptmount/cmtab from the known parameters
            assert system('echo "%s" > /etc/cryptmount/cmtab' % cmtab(
                MOUNTED_TO, ENCRYPTED_DEV, NAME,
                path.join('/etc/cryptmount/keys/', NAME + '.key')).make()) == 0
        except AssertionError:
            raise AssertionError('Could not umount encrypted fs')
    # `sys` here is a shell-exec helper returning command output,
    # not the sys module -- presumably defined elsewhere in this file
    mountData = sys('cryptmount -u ' + NAME + ' && sync')
    if 'is not recognized' in mountData:
        # cryptmount unavailable: fall back to a plain umount of the mapper
        assert system('umount /dev/mapper/' + NAME) == 0
    else:
        print mountData
    del mountData
    print('unmounted ' + NAME + ' from ' + str(MOUNTED_TO))
    print('Cleaning up...')
    data = []
    # inspect only the top level of the mountpoint (break after first walk)
    for root, dirs, files in walk(MOUNTED_TO):
        data.extend(files)
        data.extend(dirs)
        break
    # remove the mountpoint only when it is empty
    if len(data) == 0:
        removedirs(MOUNTED_TO)
    if path.isfile(path.join(TARGET, 'tempSave.tmp')):
        remove(path.join(TARGET, 'tempSave.tmp'))
    try:
        # restore the original cmtab backed up earlier
        if path.isfile(path.join(TARGET, 'cmtab.bak')):
            move_file(path.join(TARGET, 'cmtab.bak'), CMTAB_FP, True)
    except:
        raise OSError(
            'Couldn\'t write original cmtab into place, manually move it.')
    print('done.')
def move_movies(dry_run=False):
    """
    handle movies downloaded from 300mbunited, renaming them and moving them
    to the movies folder
    """
    movie_dirs = []
    for d in loop_completed_dl_dirs():
        # look for any movie directories: they hold only a handful of entries
        entry_count = len(d.listdir())
        if entry_count > 10 or entry_count == 0:
            continue
        # look at the distribution of file sizes
        sizes = [f.size for f in d.files()]
        total = sum(sizes)
        shares = [f.size * 1.0 / total for f in d.files()]
        # the movie file should be more than 95% of the total size
        if sorted(shares)[-1] > 0.95:
            movie_dirs.append(d)

    for d in movie_dirs:
        vid = sorted(d.files(), key=lambda f: f.size)[-1]
        info = guessit.guess_video_info(str(vid))
        try:
            renamed = vid.rename("%s%s" % (info["title"], vid.ext))
            move_file(renamed, HOME_DIR("Videos/Movies"), dry_run)
        # not a movie file, just bail
        except KeyError:
            return

    # remove the emptied directories
    if not dry_run:
        for d in movie_dirs:
            d.rmtree()
def move_demuxed_files(self):
    """Move every demuxed file from the temp dir into the per-region output
    layout and point self.files at the new locations."""
    dest_dir = os.path.join(self.output_dir, self.series, self.number)
    logger.info('Moving files from %s to %s, please wait a moment...',
                self.temp_dir, dest_dir)
    track_types = ['video', 'audio', 'subs', 'chapters',
                   'retimed_subs', 'retimed_audio']
    relocated = {}
    for region in self.files:
        region_dir = os.path.join(dest_dir, region)
        create_dir(region_dir)
        relocated[region] = {}
        for track in track_types:
            if track not in self.files[region]:
                continue
            relocated[region][track] = []
            for src in self.files[region][track]:
                if not os.path.isfile(src):
                    continue
                dest_fname = os.path.join(region_dir, os.path.basename(src))
                logger.debug('Moving %s...', src)
                move_file(src, dest_fname)
                relocated[region][track].append(dest_fname)
                logger.debug('Complete.')
    self.files = relocated
    logger.info('Move complete! Demuxed files in %s', dest_dir)
def edit(self, session):
    """Re-fill this station's ices config in place.

    The current config is backed up to the temp folder first and restored
    if filling fails; the backup is always deleted afterwards.  The ices
    process is then restarted (or stopped when the station is inactive).
    """
    backup_conf_name = 'ices_%s_backup_xml' % self.id
    copy_file(self.ices_config_path, config.TMP_FOLDER, backup_conf_name)
    try:
        self.fill_ices_config(self.ices_config_path)
    except Exception:
        # bug fix: restore from the backup's full temp path; the bare file
        # name pointed at the current working directory, not TMP_FOLDER
        move_file(config.TMP_FOLDER + backup_conf_name, self.ices_config_path)
        session.rollback()
    finally:
        delete_file(config.TMP_FOLDER + backup_conf_name)
    if self.active:
        try:
            self.restart_ices()
        except IcesException as e:
            # a stopped process is fine -- we are about to (re)configure it
            if 'Process already stopped' in e.message:
                pass
        if not self.running:
            msg = "Ices station was saved and configured, " \
                  "but can't run. Please see logs"
            flash(msg)
            raise IcesException(msg)
    else:
        self.stop_ices()
async def post(self):
    """Handle a file upload.

    The request body is written to a uniquely named cache file first; the
    existing target is then removed and the cache moved over it, so a failed
    upload never corrupts the target.  Responds with a JSON status dict
    (status 0 = success, 1 = failure with a message).
    """
    root = self.get_argument("root", None)
    relative_file = self.get_argument("file", None)
    if not self.is_auth(root):
        return
    logging.info("UploadHandler root:{} file: {} ".format(
        root, relative_file))
    abs_file = os.path.join(root, relative_file)
    # refuse early when the target exists but is not writable
    if os.path.exists(abs_file) and not os.access(abs_file, os.W_OK):
        self.write(
            json.dumps(
                dict(status=1,
                     message="no write permission:{}".format(abs_file))))
    else:
        if abs_file:
            logging.info("download file start:{}".format(abs_file))
            # unique cache name so concurrent uploads never collide
            cache_file = os.path.join(
                root, utils.cache_dir,
                os.path.basename(relative_file) + "_" + str(uuid.uuid4()))
            async with aiofiles.open(cache_file, 'wb') as f:
                await f.write(self.request.body)
            logging.info("download file end:{}".format(abs_file))
            # swap: remove the old file, then move the cache into place
            if utils.remove_file(root, relative_file):
                if utils.move_file(cache_file, abs_file):
                    self.write(json.dumps(dict(status=0)))
                else:
                    logging.warning("move file error {} -> {}".format(
                        cache_file, abs_file))
                    self.write(
                        json.dumps(
                            dict(status=1,
                                 message="cache to file move error")))
            else:
                logging.warning("remove file error {}".format(abs_file))
                self.write(
                    json.dumps(dict(status=1, message="file not remove")))
            # best-effort cleanup of any leftover cache file
            if os.path.exists(cache_file):
                os.remove(cache_file)
        else:
            self.write(json.dumps(dict(status=1, message="file is none")))
# NOTE(review): this span appears to be the body of a loop over file paths --
# `i` and `oldFileName` are bound outside the visible text; confirm context.
# Keep only the digits of the path as the new image name.
newFileName = re.sub(r'\D', "", i) + '.png'
path = i.replace(oldFileName, '')
if oldFileName == 'img.png':
    print(path + oldFileName + '=====>' + path + newFileName)
    os.rename(path + oldFileName, path + newFileName)

# Collect files whose basename (without extension) is purely numeric,
# split them into directory and file-name lists, and move each one into
# the Mask_RCNN img directory.  Windows-style backslash paths throughout.
fileList = [
    i for i in utils.read_file(DIR)
    if i.split('\\')[-1].split('.')[0].isdigit()
]
oldDir = [i.replace(i.split('\\')[-1], '') for i in fileList]
file = [i.split('\\')[-1] for i in fileList]
dst_path = r"F:\OneDrive - THE GOD'S CHILD PROJECT\骨质疏松症项目\代码\整理代码\Mask_RCNN-master\img"
for index in range(len(oldDir)):
    src_path = oldDir[index]
    utils.move_file(src_path, dst_path, file[index])

os.chdir(
    r"F:\OneDrive - THE GOD'S CHILD PROJECT\骨质疏松症项目\代码\整理代码\Mask_RCNN-master")
os.getcwd()

import cv2
from mrcnn.visualize import display_instances
from mrcnn.config import Config
from mrcnn import model as modellib


# NOTE(review): class body continues beyond this span; only the docstring
# is visible here.
class BalloonConfig(Config):
    """Configuration for training on the toy dataset.
    Derives from the base Config class and overrides some values.
    """
# NOTE(review): Python 2 (`print` statements); this span is the body of a
# per-file loop (`continue`, current_excel_file bound outside) -- confirm.
print "Current file:", current_excel_file
orders_list = excel.read_excel(excel_path, current_excel_file, date_row_col,
                               shop_row_col, prod_row_col)
count = len(orders_list)
print "Got %d. " % count + ("" if count > 0 else "Move next...")
order_date = orders_list[0]["orderDate"]
print "Order date:", order_date
if orders.already_loaded(current_excel_file, order_date):
    print "Already loaded [%s, %s]! Move next...\n" % (
        order_date, current_excel_file
    )
    # todo: send email alert
    # duplicates are parked in the errors folder before skipping this file
    utils.move_file(excel_path + current_excel_file, errors_path)
    continue
# todo: FSP
# need for FSP
# loaded_prods = ads.select_sql(sql.SQL_ALREADY_LOADED_PROD, None, (order_date, "orderdate"))
print "Get products from db"
# choose the product query depending on whose product codes we key on
if use_own_prod_code:
    sql_str = sql.SQL_PRODS
    prods = ads.select_sql(sql_str)
else:
    sql_str = sql.SQL_PRODS_CLIENT
    prods = ads.select_sql(sql_str, None, (client_code, client_code))
orders.set_our_prod_codes(orders_list, prods)
def move_downloaded_file(outFilePath):
    """Move a freshly downloaded file into ./waiting/, tagging it .txt."""
    destination = './waiting/' + outFilePath + '.txt'
    move_file(outFilePath, destination)
def once_complex(self, dc, one_dc):
    """Transcode one source video with optional frame/watermark/registration
    number overlays and optional intro/outro concatenation.

    dc holds the global UI settings; one_dc describes this job (input file,
    index, output dirs, overlay files/sizes, intro/outro durations).
    Side effects: writes the final video, updates UI status, sets
    self.final_video.
    """
    set_title = self.start_btn.update_query
    update_status = self.update_status
    # unpack per-job settings
    need_number = one_dc['need_number']
    num_file = one_dc["number_file"]
    num_size = one_dc['number_size']
    num_join_str = one_dc['number_join_str']
    num_join_short_str = one_dc['number_join_short_str']
    if not num_join_short_str:
        num_join_short_str = ''
    else:
        num_join_short_str = " " + num_join_short_str
    num_second = 0
    # iQiyi registration numbers force a fixed 8M bitrate below
    is_iqy = True if num_join_str == '爱奇艺备案号' else False
    raw_mp4 = one_dc['rawMP4']
    i = one_dc['index']
    number_second = int(dc["number_second"])
    total = one_dc['total']
    out_dir = one_dc['output_dir']
    temp_dir = one_dc['temp_dir']
    pt_second = one_dc['pt_second']
    pw_second = one_dc['pw_second']
    pt_out_file = one_dc['pt_out_file']
    pw_out_file = one_dc['pw_out_file']
    frame_size = one_dc['frame_size']
    water_size = one_dc['water_size']
    # map the fps radio-button index to a target fps ('0' = keep source)
    rad_var = dc['fps']
    if rad_var == 2:
        fps = '24'
    elif rad_var == 3:
        fps = '25'
    elif rad_var == 4:
        fps = '30'
    else:
        fps = '0'
    target_fps = fps
    radio_select_var = dc["bit"]
    pt_file = dc["pt_file"]
    pw_file = dc["pw_file"]
    frame_file = dc["frame_file"]
    watermark_file = dc["watermark_file"]
    pt_select = dc['pt_select']
    pw_select = dc['pw_select']
    need_frame = dc["frame_select"]
    need_watermark = dc["watermark_select"]
    double_fix_select = utils.str_to_bool(dc["select_double_fix"])
    select_30m = utils.str_to_bool(dc["select_30m"])
    fast_mode_select = False
    # fast_mode_select = dc['fast_mode_select']
    # skip_content_mp4 = False
    count = i + 1
    set_title("")
    format_str = "(%d/%d)" % (count, total) + ' %s'
    arr = utils.get_file_names(raw_mp4)
    f_name = arr[1]
    f_type = arr[2]
    f_full_name = f_name + f_type
    out_file_type = ".mpg" if select_30m else ".mp4"
    temp_video = temp_dir + "-" + f_name + out_file_type
    final_video = out_dir + f_name + out_file_type
    # registration-number jobs are grouped into a sub-folder named after it
    if need_number and num_join_str:
        temp_path = Path(out_dir) / num_join_str
        temp_path = str(temp_path) + os.sep
        utils.make_dir(temp_path)
        final_video = temp_path + f_name + out_file_type
    vb_str = ""
    need_same_bit_rate = False
    # 1) transcode the main feature
    set_title(format_str % f_full_name)
    update_status(i, '10%' + num_join_short_str)
    # match size and fps of the source
    tdc = ff.get_video_info(raw_mp4, False)
    v_size = tdc["v_size"] if tdc["v_size"] else "1920x1080"
    tdc["v_size"] = v_size
    fps = tdc["fps"] if tdc["fps"] else "24"
    tdc["fps"] = fps if target_fps == '0' else target_fps
    duration = tdc['duration'] if tdc["duration"] else '0'
    duration = float(duration)
    if is_iqy:
        vb_str = "8M"
    else:
        # bitrate selection
        if radio_select_var == 1:
            # keep the source bitrate
            need_same_bit_rate = True
            # tdc["crf"] = 1
            vb_str = ''
        elif radio_select_var == 2:
            # automatic (crf-based)
            tdc["crf"] = 18
            vb_str = ''
        if radio_select_var == 3:
            vb_str = "4M"
        elif radio_select_var == 4:
            vb_str = "6M"
        elif radio_select_var == 5:
            vb_str = "8M"
        elif radio_select_var == 6:
            vb_str = "10M"
        elif radio_select_var == 7:
            vb_str = "30M"
    obj = ff.create_obj()
    obj.input_file = raw_mp4
    obj.output_file = temp_video
    obj.need_same_bit_rate = need_same_bit_rate
    obj.need_30m = select_30m
    # obj.set_video_info(tdc)
    # obj.fps = fps
    # obj.size = v_size
    obj.set_video_info(tdc, vb_str)
    # when does the registration number overlay end (-1 = full length)
    if need_number:
        if number_second == -1:
            num_second = duration + pt_second + pw_second
        else:
            num_second = number_second
    # double-duration fix: clamp the output to the probed duration
    if double_fix_select and duration:
        obj.time_start = 0
        obj.time_to = duration
        duration_string = ff.millisecond_to_str(int(duration * 1000))
        set_title(format_str % ("*[双倍时长修正]该视频时长:" + duration_string))
    # collect the overlays (frame, watermark, registration number)
    png_list = []
    msg_str = '正在转换 正片('
    if need_frame:
        png_list.append(["加幕布", frame_file, frame_size, 0])
    if need_watermark:
        png_list.append([" 加水印", watermark_file, water_size, 0])
    if need_number:
        t = num_second - pt_second
        png_list.append([" 加备案号", num_file, num_size, t])
    if len(png_list):
        sizes = []
        times = []
        npngs = []
        for p in png_list:
            msg_str += p[0]
            npngs.append(p[1])
            sizes.append(p[2])
            times.append(p[3])
        png_list = npngs
        obj.set_overlay(png_list, sizes, times)
    msg_str += ')……'
    msg_str = msg_str.replace('()', '')
    set_title(format_str % msg_str)
    # cases where the feature itself would not need a real transcode;
    # with no compositing selected it still gets one pass, handled below
    if not need_frame and not need_watermark and not need_number and not double_fix_select:
        skip_content_mp4 = True
    else:
        skip_content_mp4 = False
    update_status(i, '20%' + num_join_short_str)
    obj.execute()
    # 2) intro and/or outro need to be concatenated
    if pt_select or pw_select:
        # build the concat list and transcode intro/outro
        subs = []
        # 1: intro
        if pt_select:
            nobj = ff.create_obj()
            nobj.input_file = pt_file
            nobj.output_file = pt_out_file
            nobj.need_30m = select_30m
            nobj.need_same_bit_rate = need_same_bit_rate
            # nobj.fps = fps
            # nobj.size = v_size
            nobj.set_video_info(tdc, vb_str)
            # the registration number may need overlaying onto the intro
            msg_str = "正在转换 片头"
            if need_number and num_second:
                msg_str += '(加备案号)'
                if pt_second < num_second:
                    nobj.set_overlay([num_file], [num_size])
                else:
                    nobj.set_overlay([num_file], [num_size], [pt_second])
            msg_str += '……'
            set_title(format_str % msg_str)
            update_status(i, '40%' + num_join_short_str)
            nobj.execute()
            subs.append(pt_out_file)
        # 2: the feature
        if skip_content_mp4:
            if fast_mode_select and ff.compare_video(raw_mp4, pt_out_file):
                subs.append(raw_mp4)
                # let the raw feature join the final concat (never delete it)
                msg_str = "没有水印等,不转换正片,直接进行合并"
                set_title(format_str % msg_str)
            else:
                # parameters differ from the intro's: one transcode pass
                obj.set_video_info(tdc, vb_str)  # restores most earlier params
                msg_str = "正在转换 正片"
                msg_str += '……'
                set_title(format_str % msg_str)
                update_status(i, '50%' + num_join_short_str)
                obj.execute()
                subs.append(temp_video)
        else:
            subs.append(temp_video)
        # 3: outro
        if pw_select:
            nobj = ff.create_obj()
            nobj.input_file = pw_file
            nobj.output_file = pw_out_file
            nobj.need_same_bit_rate = need_same_bit_rate
            nobj.need_30m = select_30m
            # nobj.fps = fps
            # nobj.size = v_size
            nobj.set_video_info(tdc, vb_str)
            # overlay the registration number if it extends into the outro
            msg_str = "正在转换 片尾"
            t = pt_second + duration
            if need_number and t < num_second:
                msg_str += '(加备案号)'
                new_t = num_second - t
                nobj.set_overlay([num_file], [num_size], [new_t])
            msg_str += "……"
            set_title(format_str % msg_str)
            update_status(i, '60%' + num_join_short_str)
            nobj.execute()
            subs.append(pw_out_file)
        # concatenate the pieces
        set_title(format_str % "拼接中……")
        update_status(i, '90%' + num_join_short_str)
        sub_txt = temp_dir + "concat_" + f_name + ".txt"
        ff.concat(subs, final_video, sub_txt)
        # remove concat.txt and the temp mp4
        utils.remove_file(sub_txt)
        if not skip_content_mp4:
            utils.remove_file(temp_video)
    else:
        # no intro/outro: only the single transcode of the feature
        if skip_content_mp4:
            obj.execute()
            utils.move_file(temp_video, final_video)
        else:
            utils.move_file(temp_video, final_video)
    self.final_video = final_video
    update_status(i, 'OK')
def retime_ac3(episode, src_file, dst_file, bitrate,
               offset_override=None, region='R1'):
    ''' Retime an AC3 file based on offsets

    A copy of src_file is modified in a temp dir via successive delaycut
    passes (one per non-zero offset), then moved to dst_file.
    '''
    tmp_dir = tempfile.mkdtemp()
    # in the case of unexpected exit, we don't want to
    # keep temp files around
    atexit.register(delete_temp, tmp_dir)
    logger.debug('Audio temp folder: %s', tmp_dir)
    if os.path.isfile(src_file):
        logger.debug('%s found! Proceeding with retiming...', src_file)
    else:
        logger.error('%s not found. Skipping...', src_file)
        return
    try:
        # copy source to tempfile for surgery
        shutil.copy(src_file, tmp_dir)
        working_file = os.path.join(tmp_dir, os.path.basename(src_file))
    except IOError as e:
        logger.error("Unable to copy file. %s", e)
        return
    r2_chaps = episode.r2_chapters
    # an explicit override wins over the episode's own offsets
    offsets = episode.offsets if not offset_override else offset_override
    if episode.is_pioneer and region == 'PIONEER':
        offsets = episode.pioneer_offsets
    if isinstance(offsets, list):
        # movies: an ordered list of {frame, offset} dicts
        totalOffset = 0
        for o in offsets:
            # skip scenes with offset of 0
            if o['offset'] == 0:
                continue
            if episode.is_special:
                # frames are at R2 chapter breakpoints
                # accounted for in the json
                chapter = o['frame']
            else:
                # chapters are based on reel changes in R1 file;
                # compensate for offsets already applied
                chapter = o['frame'] - totalOffset
            offset = o['offset']
            prev_chapter_end, chapter_begin, delay = frame_to_ms(chapter,
                                                                 offset)
            delaycut_chain(episode.delaycut, working_file, prev_chapter_end,
                           chapter_begin, delay, bitrate)
            totalOffset += offset
    else:
        # TV: offsets keyed by named scene
        for key in ['op', 'prologue', 'partB', 'ED', 'NEP']:
            if key in offsets.keys():
                # skip scenes with offset of 0
                if offsets[key]['offset'] == 0:
                    continue
                chapter = r2_chaps[key]
                offset = offsets[key]['offset']
                prev_chapter_end, chapter_begin, delay = frame_to_ms(chapter,
                                                                     offset)
                delaycut_chain(episode.delaycut, working_file,
                               prev_chapter_end, chapter_begin, delay,
                               bitrate)
    # install the retimed file and drop the scratch space
    move_file(working_file, dst_file)
    delete_temp(tmp_dir)
for url in urls: site_name = re.sub('http(s)?://', '', url).replace('/', '').rstrip() current_file_path = os.path.join(absolute_path, 'image/' + site_name + '/current') previous_file_path = os.path.join(absolute_path, 'image/' + site_name + '/previous') current_file_name = current_file_path + '/screen.png' previous_file_name = previous_file_path + '/screen.png' utils.mkdir(current_file_path) utils.mkdir(previous_file_path) if utils.is_file_exists(current_file_name): utils.move_file(current_file_name, previous_file_name) selenium_crawler = Crawler() selenium_crawler.get_screenshot(url, current_file_name) if utils.is_file_exists(current_file_name) and utils.is_file_exists( previous_file_name): if not utils.diff_image(current_file_name, previous_file_name): message = MIMEMultipart() message['Subject'] = '差分検知[' + site_name + ']' message['From'] = from_address message['To'] = to_address message['Date'] = formatdate() current_file_size = os.path.getsize(
def load_backup(config):
    """Restore the data file from its backup (if present) and load it;
    return an empty dict when no backup exists."""
    if not os.path.exists(config['backup_path']):
        return {}
    move_file(config['backup_path'], config['data_path'])
    return load(config)
def order_files(language: str, compiler: str, project_name: str,
                logger_needed: bool, package_name: str):
    """
    Create project and all other needed folders
    Copy generated files into right folders
    :param language: target language ("scala", "java" or "python")
    :param compiler: build tool name (e.g. "sbt")
    :param project_name: name of the project folder created next to this one
    :param logger_needed: whether log4j.properties should be installed
    :param package_name: dotted package name used for the src layout
    :return:
    """
    folder_path = "../" + project_name + "/"
    package_path = package_name.replace(".", "/") + "/"
    # Remove older folder if it exists
    clean_directory(folder_path)
    # Create main folder
    create_folder(folder_path)
    # Create and order resources files folder
    # TODO : Add other languages here
    if language == "scala" or language == "java":
        create_folder(folder_path + "src/main/resources/")
        copy_file("target/application.conf",
                  folder_path + "src/main/resources/application.conf")
        if logger_needed:
            copy_file("target/log4j.properties",
                      folder_path + "src/main/resources/log4j.properties")
    elif language == "python":
        create_folder(folder_path + "resources/")
    # Create and order code files folder
    # TODO : Add other languages here
    if language == "scala" or language == "java":
        create_folder(folder_path + "src/main/" + language + "/" +
                      package_path)
        files = [
            f for f in os.listdir("target/")
            if "." + language in f and "test" not in f
        ]
        for file in files:
            copy_file(
                "target/" + file, folder_path + "src/main/" + language +
                "/" + package_path + file)
    elif language == "python":
        create_folder(folder_path + "src/")
        files = [
            f for f in os.listdir("target/") if ".py" in f and "test" not in f
        ]
        # NOTE(review): code files land in folder_path root even though a
        # "src/" folder was just created -- confirm whether
        # folder_path + "src/" + file was intended here.
        for file in files:
            copy_file("target/" + file, folder_path + file)
    # Create and order test files folder
    # TODO : Add other languages here
    if language == "scala" or language == "java":
        create_folder(folder_path + "src/test/")
        files = [
            f for f in os.listdir("target/") if "."
            + language in f and "test" in f
        ]
        for file in files:
            copy_file(
                "target/" + file, folder_path + "src/test/" + language +
                "/" + package_path + file)
    elif language == "python":
        create_folder(folder_path + "test/")
        files = [
            f for f in os.listdir("target/") if ".py" in f and "test" in f
        ]
        for file in files:
            copy_file("target/" + file, folder_path + "test/" + file)
    # Put compiler, script and doc files
    files = [
        f for f in os.listdir("target/")
        if ".xml" in f or ".sbt" in f or ".sh" in f or ".md" in f
        or ".adoc" in f or ".properties" in f
    ]
    for file in files:
        copy_file("target/" + file, folder_path + file)
    # Arrange sbt compiler files
    if compiler == "sbt":
        create_folder(folder_path + "project/")
        move_file(folder_path + "build.properties",
                  folder_path + "project/build.properties")
        move_file(folder_path + "assembly.sbt",
                  folder_path + "project/assembly.sbt")
        move_file(folder_path + "plugins.sbt",
                  folder_path + "project/plugins.sbt")
    # Make .sh files executable
    sh_files = [f for f in os.listdir(folder_path) if f.endswith(".sh")]
    for file in sh_files:
        os.chmod(os.path.join(folder_path, file), 0o755)
    logger.info("Finished to order files for language : %s and compiler : %s",
                language, compiler)
def __postOptimizeStep(self):
    """
    aim: writes a khc file with the true MODL cost
    require: inputDataFileName - the data file
        _variable1MicroCluster, _variable2MicroCluster - clusters of
        values (micro or mini clusters)
        _variable1ValuesFrequency, _variable2ValuesFrequency - true value
        frequencies dictionaries
    ensures: outputCoclusteringRapport - writes the output coclustering file
    """
    utl.globalTrace(
        "{0}\t Run post optimization coclustering step \n".format(
            datetime.datetime.now()))
    pathFiles = const.KHC_TemporaryDir + const.labelPostOpt + '/'
    utl.detect_path(pathFiles)
    utl.globalTrace("{0}\t Run Recode mini clusters \n".format(
        datetime.datetime.now()))
    dataFileName = os.path.basename(os.path.abspath(
        self.inputDataFileName)).split('.')[0]
    outputDataFileName = pathFiles + dataFileName + '_micro.txt'
    # recode the raw data file in terms of the micro clusters
    cluster1Frequency, cluster2Frequency, outputDataFileName = utl.recodeListFileWithClusters(
        [self.inputDataFileName], self._variable1MicroCluster,
        self._variable2MicroCluster, outputDataFileName)
    utl.globalTrace(
        "{0}\t End Recode mini clusters \t Output file: \t {1}\n".format(
            datetime.datetime.now(), outputDataFileName))
    utl.globalTrace("{0}\t Run KHCc on mini clusters \n".format(
        datetime.datetime.now()))
    outputKHCFilePath = const.labelPostOpt + dataFileName + '_micro.khc'
    #compute clusters of micro clusters
    variable1Clusters, variable2Clusters, _ = khc.KHCc(
        outputDataFileName, cluster1Frequency, cluster2Frequency, const.Imax,
        const.labelPostOpt, outputKHCFilePath)
    if not const.KHC_KeepRecodedFiles:
        utl.delete_file(outputDataFileName)
    Ix_mini = len(variable1Clusters)
    Iy_mini = len(variable2Clusters)
    utl.globalTrace(
        "{0}\t End KHCc on mini clusters \t Ix mini:\t {1}\t Iy mini:\t {2}\n"
        .format(datetime.datetime.now(), Ix_mini, Iy_mini))
    utl.globalTrace(
        "{0}\t Run format mini clusters to true values \n".format(
            datetime.datetime.now()))
    # expand clusters-of-mini-clusters back to clusters of true values
    variable1TrueValuesClusters = utl.fromClustersOfMiniClustersToTrueValueClusters(
        self._variable1MicroCluster, variable1Clusters)
    variable2TrueValuesClusters = utl.fromClustersOfMiniClustersToTrueValueClusters(
        self._variable2MicroCluster, variable2Clusters)
    Ix_mini = len(variable1TrueValuesClusters)
    Iy_mini = len(variable2TrueValuesClusters)
    utl.globalTrace(
        "{0}\t End format mini clusters to true values \t Ix mini:\t {1}\t Iy mini:\t {2}\n"
        .format(datetime.datetime.now(), Ix_mini, Iy_mini))
    # #run KHCe
    utl.globalTrace("{0}\t Run KHCe\n".format(datetime.datetime.now()))
    level = khc.KHCe(self.inputDataFileName,
                     self._variable1ValuesFrequency,
                     self._variable2ValuesFrequency, const.Imax,
                     const.labelPostOpt, variable1TrueValuesClusters,
                     variable2TrueValuesClusters,
                     self.outputCoclusteringFile)
    pathF = os.path.dirname(os.path.abspath(
        self.inputDataFileName)) + '/' + const.labelPostOpt + '/'
    destination_path = pathF + const.labelPostOpt + '.khc'
    source_path = pathFiles + const.labelPostOpt + '.khc'
    # NOTE(review): destination_path is passed first here, while every other
    # move_file in this codebase uses (src, dst) -- confirm utl.move_file's
    # argument order; these may be swapped.
    utl.move_file(destination_path, source_path)
    utl.globalTrace("{0}\t End KHCe \t level: \t {1}\n".format(
        datetime.datetime.now(), level))
    utl.globalTrace(
        "{0}\t End post optimization coclustering step \n".format(
            datetime.datetime.now()))
def classify_batch(self, directory, ratio=0.75, min_matches=50,
                   types=("png", "jpg", "jpeg", "tif"), verbose=True):
    """Classify every image in *directory*, moving confident matches into a
    sub-folder named after the predicted label, and report a summary.

    A model (vocabulary, dictionary and svm) must already be loaded.
    """
    if self.vocabulary is None or self.dictionary is None or self.svm is None:
        print("[ERROR] Please create or load a model first.")
        return

    # gather all candidate images by extension
    images = []
    for ext in types:
        images.extend(glob(path.join(directory, "*." + ext)))

    results = {}
    classified = 0
    for imgfile in images:
        name = path.basename(imgfile)
        # predict image class
        label, matches = self.best_match(imgfile, ratio=ratio, visualize=0,
                                         verbose=verbose)
        if label is not None and matches is not None:
            if matches >= min_matches:
                # confident enough: file it under a folder named as the label
                move_file(imgfile, path.join(directory, label))
                classified += 1
                if label in results:
                    results[label] += 1
                else:
                    results[label] = 1
                if verbose:
                    print("[INFO] {} assigned to {} ({} matches)".format(
                        name, label, matches))
            elif verbose:
                print(
                    "[INFO] {} NOT classified (best prediction: {} with {} matches)"
                    .format(name, label, matches))

    # provide diagnostic information
    if verbose:
        total = len(images)
        left = total - classified
        print("[INFO] Classification Results:\n"
              "Number of images: {}".format(total))
        for keys, values in results.items():
            print("{}: {} images".format(keys, values))
        print("{} images were not classified".format(left))