def _run():
    # create dirs if they do not exist
    check_dirs(_DIRS)
    iter_object = (_DIRS["features"].iterdir() if args.ncbi is None
                   else [ncbi_to_feature(x) for x in args.ncbi])
    dirnum = len(list(_DIRS["features"].glob('*.txt')))
    # go annotation after annotation in annotations directory
    for ix, annotation_file in enumerate(iter_object, start=1):
        # convert in case of an object
        ncbi = feature_to_ncbi(annotation_file)
        palindrome_file = ncbi_to_palindrome(ncbi)
        if not annotation_file.is_file():
            print(f"Feature file {annotation_file} doesn't exist! Skipping...")
            continue
        if not palindrome_file.is_file():
            print(f"Feature file {annotation_file} doesn't have matching palindrome file in `palindromes` folder! Skipping...")
            continue
        print(f"\t=== Analysing batch {ncbi} ... ({ix} / {dirnum}) ===")
        try:
            if len(list(_DIRS["comparison"].glob(f"{ncbi}.xlsx"))) > 0:
                print(f"\tFeature {ncbi} already processed in comparison folder. Skipping...")
                continue
            features = process_feature_file(ncbi)
            stats(features, ncbi)
        except Exception as exc:
            with open(_DIRS["comparison"] / f"{ncbi}.err", "w") as err:
                err.write(str(exc))
    aggregate_analysis()
def extract_subtitles_batch(data):
    for settings in data:
        val_to_utf8_str(settings, ['input_dir', 'output_dir'])
        check_dirs(settings)
        input_lang = 'zh-TW'
        output_lang = 'zh-TW'
        extension = 'vtt'
        if 'input_lang' in settings:
            input_lang = to_utf8_str(settings['input_lang'])
        if 'output_lang' in settings:
            output_lang = to_utf8_str(settings['output_lang'])
        if 'extension' in settings:
            extension = to_utf8_str(settings['extension'])
        get_it_path, get_ot_path = gen_path_tools(settings, ['input_dir', 'output_dir'])
        # materialise as lists so they can be indexed and measured with len() below
        input_names = list(filter(filter_media, listdir(settings['input_dir'])))
        input_paths = list(map(get_it_path, input_names))
        output_paths = list(map(get_ot_path, input_names))
        output_paths = list(map(gen_extend_path('-', extension), output_paths))
        final_settings = []
        for i in range(len(input_names)):
            final_settings.append({
                'input_path': input_paths[i],
                'output_path': output_paths[i],
                'input_lang': input_lang,
                'output_lang': output_lang,
                'extension': extension,
            })
        show_info('Input', settings['input_dir'])
        start = time.time()
        extract_subtitles(final_settings)
        show_time(start)
        show_info('Input', settings['input_dir'], is_end=True)
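# Aside (minimal standalone sketch, no project helpers assumed): the list()
# calls added above matter because under Python 3 filter()/map() return lazy
# single-pass iterators, so len(input_names) would raise TypeError and a
# second map() over an already-consumed iterator would yield nothing; under
# Python 2, where this batch helper presumably originated, they returned lists.
names_lazy = map(str.upper, filter(None, ['a.mp4', '', 'b.mp4']))
# len(names_lazy) -> TypeError; iterating it a second time yields nothing
names = list(filter(None, ['a.mp4', '', 'b.mp4']))   # materialise once
paths = list(map(str.upper, names))                   # now safe to len() and index
assert len(names) == 2 and paths[0] == 'A.MP4'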
def main(_):
    # Make sure that the directories to save data have been created
    check_dirs([config.chk_dir, config.smp_dir])

    # Set up TensorFlow to only use the GPU resources it needs, and to grow when more is necessary
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        # Create and train the GAN, then visualize the results
        dcgan = DCGAN(sess, config)
        visualize(sess, dcgan, config, option=1)
def check_static_image(file_path):
    app.logger.debug('check_static_image file_path = %s' % file_path)
    filename, suffix = os.path.splitext(file_path)
    if suffix != '.png' and suffix != '.jpg' and suffix != '.gif':
        return
    category = split_category(file_path)
    if category is None:
        return
    is_origin = True
    if filename[len(filename) - 7:] != '_origin':
        is_origin = False
    else:
        filename = filename[:len(filename) - 7]
    markdown_image_path = utils.fix_sep(
        os.path.join(MARKDOWN_ROOT, filename + suffix), os.sep)
    if not os.path.isfile(markdown_image_path):
        return
    static_image_path = utils.fix_sep(
        os.path.join(STATIC_ROOT, filename + suffix), os.sep)
    if not os.path.exists(static_image_path) \
            or os.path.getmtime(markdown_image_path) != os.path.getmtime(static_image_path):
        utils.check_dirs(static_image_path)
        # Copy the image from the markdown directory to the static directory,
        # keeping the original modification timestamp
        shutil.copy2(markdown_image_path, static_image_path)
        app.logger.debug('copy image success')
    thumb_path = '%s_thumb%s' % os.path.splitext(static_image_path)
    if not os.path.exists(thumb_path) \
            or os.path.getmtime(markdown_image_path) != os.path.getmtime(thumb_path):
        # Build the thumbnail
        img = Image.open(static_image_path)
        width, height = img.size
        if width <= IMAGE_MAX_WIDTH:
            return app.send_static_file(filename + suffix)
        if os.path.exists(thumb_path):
            os.remove(thumb_path)
        rate = IMAGE_MAX_WIDTH / width
        img.thumbnail((math.floor(width * rate), math.floor(height * rate)))
        img.save(thumb_path)
        os.utime(thumb_path, (os.path.getatime(markdown_image_path),
                              os.path.getmtime(markdown_image_path)))
        app.logger.debug('make thumb success')
    if is_origin:
        # Return the original image
        return app.send_static_file(filename + suffix)
    # Return the thumbnail
    return app.send_static_file('%s_thumb%s' % (filename, suffix))
def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # check the needed dirs of config
    check_dirs()

    cudnn.benchmark = True
    torch.cuda.set_device(params.gpu_id[0])  # set current device

    print('=== Build model ===')
    # gpu mode
    generator = Generator()
    discriminator = Discriminator()
    generator = nn.DataParallel(generator, device_ids=params.gpu_id).cuda()
    discriminator = nn.DataParallel(discriminator, device_ids=params.gpu_id).cuda()

    # restore trained model
    if params.generator_restored:
        generator = restore_model(generator, params.generator_restored)
    if params.discriminator_restored:
        discriminator = restore_model(discriminator, params.discriminator_restored)

    # container of training
    trainer = Trainer(generator, discriminator)

    if params.mode == 'train':
        # data loader
        print('=== Load data ===')
        train_dataloader = get_data_loader(params.dataset)
        print('=== Begin training ===')
        trainer.train(train_dataloader)
        print('=== Generate {} images, saving in {} ==='.format(
            params.num_images, params.save_root))
        trainer.generate_images(params.num_images, params.save_root)
    elif params.mode == 'test':
        if params.generator_restored:
            print('=== Generate {} images, saving in {} ==='.format(
                params.num_images, params.save_root))
            trainer.generate_images(params.num_images, params.save_root)
        else:
            assert False, '[*]load Generator model first!'
    else:
        assert False, "[*]mode must be 'train' or 'test'!"
def create_steps(steps=STEPS, default_directories=DIRS):
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s')
    file_handler = RotatingFileHandler(os.path.join(LOG_DIR, 'steps.log'), 'a', 1000000, 1)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # stream_handler = logging.StreamHandler()
    # stream_handler.setLevel(logging.DEBUG)
    # logger.addHandler(stream_handler)
    status, msg = check_dirs(default_directories, ["ref"])
    statuses, msgs = [], []
    if status is False:
        return status, msg
    else:
        # logger.info("STEPS - Executing steps: {}.".format(steps))
        for s in steps:
            f = getattr(sys.modules[__name__], s)
            try:
                logger.info("STEPS - Executing {}()".format(s))
                f(default_directories)
                # print(s, status, msg)
            except Exception as e:
                logger.critical("STEPS - {}(). Exception: {}".format(s, e))
    return all(statuses), ",".join(msgs)
def insert(dirs):
    '''
    ``insert()`` populates the database from source_dir and reference_dir:
    - insert_records
    - insert_games
    '''
    # check directories
    status, msg = check_dirs(dirs, ["ref", "to"])
    if status is False:
        logger.critical(msg)
        return status, msg
    # check tables
    for table in REF_TABLES:
        status, msg = check_tables(REF_TABLES)
        if status is False:
            return status, msg
    db.records.drop()
    insert_records(dirs["to"])
    if status is False:
        logger.critical(msg)
        return status, msg
    # else:
    #     msg = msg + " {} record inserted".format(count)
    insert_games()
    return status, msg
def get_icon_themes(self):
    dirs = ('/usr/share/icons', os.path.join(self.homedir, ".icons"))
    # filters: if only a single filter string were passed, the 'for' block in
    # check_dirs() would split that string into individual characters...
    filters = ('cursors', '#')
    valid = utils.check_dirs(dirs, filters, False)
    valid.sort()
    return valid
def get_cursor_themes(self):
    dirs = ('/usr/share/icons', os.path.join(self.homedir, ".icons"))
    # filters: if only a single filter string were passed, the 'for' block in
    # check_dirs() would split that string into individual characters...
    filters = ('cursors', '')
    valid = utils.check_dirs(dirs, filters, True)
    valid.sort()
    return valid
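# Aside (plain-Python illustration of the comment in the two theme getters
# above; check_dirs itself is not shown here): iterating a bare string walks
# its characters, which is why the filters are passed as tuples even when only
# one meaningful filter is needed.
assert [c for c in 'cursors'] == ['c', 'u', 'r', 's', 'o', 'r', 's']
assert [f for f in ('cursors', '')] == ['cursors', '']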
def make_markdown_html(file_path):
    app.logger.debug('make_markdown_html file_path = %s' % file_path)
    category = split_category(file_path)
    blog_list = calc_markdown_thumbnail_list(category=category)
    # Convert the markdown to HTML and write it to a file
    markdown_path = utils.fix_sep(os.path.join(MARKDOWN_ROOT, file_path), os.sep)
    html_path = utils.fix_sep(
        os.path.join(STATIC_ROOT, os.path.splitext(file_path)[0] + '.html'), os.sep)
    data = thumbnail.fetchone(utils.fix_sep(markdown_path, '/'))
    title = data[0][thumbnail.TITLE] if len(data) != 0 else '博客'
    with codecs.open(markdown_path, 'r', 'utf-8') as infile:
        utils.check_dirs(html_path)
        with codecs.open(html_path, 'w', 'utf-8', errors='xmlcharrefreplace') as outfile:
            blog_html = markdown_to_html(infile.read())
            outfile.write(
                render_html(title, category, blog_list['labels'],
                            blog_list['dates'], blog_html))
    app.logger.info('convert and write "%s" success' % file_path)
def create_step(step, default_directories=DIRS):
    status, msg = True, ""
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s')
    file_handler = RotatingFileHandler(
        os.path.join(LOG_DIR, 'step.{}.log'.format(step)), 'a', 1000000, 1)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # stream_handler = logging.StreamHandler()
    # stream_handler.setLevel(logging.DEBUG)
    # logger.addHandler(stream_handler)
    if step not in STEPS:
        msg = "STEP - step {}() doesn't exist. Abort execution".format(step)
        # logger.critical(msg)
        return False, msg
    try:
        status, msg = check_dirs(default_directories)
    except:
        status, msg = check_dirs(DIRS)
    if status is False:
        return status, msg
    else:
        f = getattr(sys.modules[__name__], step)
        try:
            logger.info("STEPS - Execute {}()".format(step))
            try:
                f(default_directories)
            except:
                f(DIRS)
            return status, msg
        except Exception as e:
            logger.critical("STEPS - Execute {}() : Error : {}".format(
                step, e))
            return False, e
    return True, ""
def get_sound_themes(self):
    bakDir = '/var/lib/youker-assistant-daemon/sound-theme/'
    dirs = ('/usr/share/sounds', os.path.join(self.homedir, ".sounds"))
    filters = ('index.theme', '')
    valid = utils.check_dirs(dirs, filters, True)
    valid.sort()
    # check and back up sounds
    for st in valid:
        if not os.path.exists(bakDir + st):
            if os.path.exists('/usr/share/sounds/' + st):
                shutil.copytree('/usr/share/sounds/' + st, bakDir + st)
            else:
                shutil.copytree(os.path.join(self.homedir, '.sounds', st), bakDir + st)
    return valid
                    help='Save path for the image')
parser.add_argument('--cuda', type=bool, default=True,
                    help='Use GPU if available')

# neural style options
parser.add_argument('--w_content', type=float, default=1,
                    help='Content weight')
parser.add_argument('--w_style', type=float, default=1e3,
                    help='Style weight')

args = parser.parse_args()

check_dirs(args.photo_dir)
check_dirs(args.art_dir)
check_dirs(args.out_dir, make=True)

ip_tfs = tf.Compose([tf.ToTensor()])
pil_tf = tf.ToPILImage()

vgg19 = models.vgg19(pretrained=True).features
model = nn.Sequential()
gm = GramMatrix()

dtype = torch.FloatTensor
if torch.cuda.is_available():
    dtype = torch.cuda.FloatTensor
if args.cuda:
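# Aside (standalone sketch, not part of the script above): argparse's
# type=bool does not parse the string "False" as False -- any non-empty
# string is truthy -- so `--cuda False` still leaves args.cuda True.
# A store_true/store_false flag is the usual alternative.
import argparse

p = argparse.ArgumentParser()
p.add_argument('--cuda', type=bool, default=True)
print(p.parse_args(['--cuda', 'False']).cuda)  # prints True, not False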
    for sentence in sentences:
        tokens = word_tokenize(sentence)
        tokens = filter(remove_special_chars, tokens)
        tokens = map(remove_fullstop_end, tokens)
        tokens = filter(lambda x: x, tokens)
        tokens_list.extend(tokens)
    # content = re.sub("[',\"^(){};/<>*!@#$%.+=|-?~:]+", " ", content)
    return tokens_list


def main(args):
    print(args)
    obj = Parser(args)
    obj.parse()


if __name__ == "__main__":
    utils.check_dirs()
    parser = argparse.ArgumentParser(description="Parse arguments for parser")
    parser.add_argument("-d", "--debug", action="store_true")
    parser.add_argument('-case', '--casefolding', action='store_true')
    parser.add_argument('-stem', '--stemming', action='store_true')
    parser.add_argument('-stop', '--stopping', action='store_true')
    parser.add_argument('-stopf', '--stopwordsfile', default="", type=str)
    args = parser.parse_args()
    main(args)
def main():
    """Main entry point of the program.

    Implements the main program logic.
    """
    check_dirs(images_dir, bgm_dir)
    if mkdirs(tmp_dir):
        console.log(tmp_dir + " created successfully")
    if mkdirs(tmp_image_dir):
        console.log(tmp_image_dir + " created successfully")
    if mkdirs(tmp_music_dir):
        console.log(tmp_music_dir + " created successfully")
    if mkdirs(output_dir):
        console.log(output_dir + " created successfully")

    img_paths = read_dir(images_dir)
    for img_file in track(img_paths, description="Resizing images..."):
        resize_image(img_file, tmp_image_dir, display_size, bg_color)
    img_paths = read_dir(tmp_image_dir)
    bgm_paths = read_dir(bgm_dir)

    # Compute the durations
    video_total_time, music_total_time = computed_time(img_paths, per_img_display_time,
                                                       start_img_duration, end_img_duration)
    clips = []

    # Create the opening title text
    console.log("Creating the opening text")
    video_start_info_img_path = make_info_img(
        display_size,
        bg_color,
        font_color,
        des_text,
        font_path,
        100,
        tmp_image_dir,
        "start_info.jpg",
    )
    video_start_info_img_clip = make_image_clip(
        video_start_info_img_path,
        start_img_duration,
        fps,
        0,
        start_img_duration,
        start_img_fade_time,
    )
    clips.append(video_start_info_img_clip)
    console.log("Opening text created")

    count = 0
    for img_path in track(img_paths, description="Adding image frames..."):
        tmp_space_start = per_img_display_time * count + start_img_duration
        tmp_space_end = per_img_display_time * (count + 1) + start_img_duration
        img_clip = make_image_clip(
            img_path,
            per_img_display_time,
            fps,
            tmp_space_start,
            tmp_space_end,
            per_img_fade_time,
        )
        clips.append(img_clip)
        count = count + 1

    # Create the closing text
    console.log("Creating the closing text")
    video_end_info_img_path = make_info_img(
        display_size,
        bg_color,
        font_color,
        end_text,
        font_path,
        100,
        tmp_image_dir,
        "end_info.jpg",
    )
    video_end_info_img_clip = make_image_clip(
        video_end_info_img_path,
        start_img_duration,
        fps,
        video_total_time - end_img_duration,
        video_total_time,
        end_img_fade_time,
    )
    clips.append(video_end_info_img_clip)
    console.log("Closing text created")

    bgm_tmp_file_path = make_bgm(bgm_paths, tmp_music_dir, "bgm.mp3",
                                 music_total_time, bgm_fade_time)
    console.log("Path of the processed audio file: " + bgm_tmp_file_path)
    bgm_clip = AudioFileClip(bgm_tmp_file_path)
    console.log("Background music clip ready")

    # Composite the clips
    console.log("Merging frame clips into the video file")
    final_clip = CompositeVideoClip(clips)
    console.log("Merging the background music clip")
    final_clip = final_clip.set_audio(bgm_clip)
    console.log("Exporting the video file to " + output_file)
    final_clip.write_videofile(output_file)

    # Remove intermediate files
    console.log("Cleaning up intermediate cache files")
    rmdirs(tmp_dir)
    console.log("Intermediate cache files cleaned up")
def trajectory_feature_generation_geolife(f, lat_range, lon_range,
                                          path='./data/Porto', min_length=50):
    f.read_data('small.json')
    fname = 'geolife'
    trajs = f.data
    trajs = [v for k, v in trajs.items()]
    traj_index = {}
    max_len = 0
    preprocessor = Preprocesser(delta=0.001, lat_range=lat_range, lon_range=lon_range)
    print(preprocessor.get_grid_index((lon_range[0], lat_range[0])))
    print(preprocessor.get_grid_index((lon_range[1], lat_range[1])))
    in_range_cnt = 0
    for i, traj in enumerate(trajs):
        new_traj = []
        coor_traj = []
        if (len(traj) > min_length):
            inrange = True
            for p in traj:
                lon, lat = p[0], p[1]
                if not ((lat > lat_range[0]) & (lat < lat_range[1]) &
                        (lon > lon_range[0]) & (lon < lon_range[1])):
                    inrange = False
                new_traj.append([0, p[1], p[0]])
            if inrange:
                in_range_cnt += 1
                coor_traj = preprocessor.traj2grid_seq(new_traj, isCoordinate=True)
                # print (coor_traj)
                if len(coor_traj) == 0:
                    print(len(coor_traj))
                if ((len(coor_traj) > 10) & (len(coor_traj) < 150)):
                    if len(traj) > max_len:
                        max_len = len(traj)
                    traj_index[i] = new_traj
        if i % 200 == 0:
            print(coor_traj)
            print(i, len(traj_index.keys()))
    # print (max_lat,max_lon,min_lat,min_lon)
    print(max_len)
    print(len(traj_index.keys()))
    # print(in_range_cnt, len(trajs))
    check_dirs(['./features/{}'.format(fname)])
    traj_open = './features/{}/{}_traj_index'.format(fname, fname)
    cPickle.dump(traj_index, open(traj_open, 'wb'))
    trajs, useful_grids, max_len = preprocessor.preprocess(traj_index, isCoordinate=True)
    print(trajs[0])
    cPickle.dump((trajs, [], max_len),
                 open('./features/{}/{}_traj_coord'.format(fname, fname), 'wb'))
    # traj_grids = cPickle.load(open('./data_taxi/porto_traj_coord'))
    all_trajs_grids_xy = []
    min_x, min_y, max_x, max_y = 2000, 2000, 0, 0
    for i in trajs:
        for j in i:
            x, y, index = preprocessor.get_grid_index((j[1], j[0]))
            if x < min_x:
                min_x = x
            if x > max_x:
                max_x = x
            if y < min_y:
                min_y = y
            if y > max_y:
                max_y = y
    print(min_x, min_y, max_x, max_y)
    for i in trajs:
        traj_grid_xy = []
        for j in i:
            x, y, index = preprocessor.get_grid_index((j[1], j[0]))
            x = x - min_x
            y = y - min_y
            grids_xy = [y, x]
            traj_grid_xy.append(grids_xy)
        all_trajs_grids_xy.append(traj_grid_xy)
    print(all_trajs_grids_xy[0])
    print(len(all_trajs_grids_xy))
    print(all_trajs_grids_xy[0])
    cPickle.dump((all_trajs_grids_xy, [], max_len),
                 open('./features/{}/{}_traj_grid'.format(fname, fname), 'wb'))
    return len(all_trajs_grids_xy)
def main():
    """Main method"""
    global config
    config = configparser.ConfigParser()
    config.read('extract.ini')
    check_dirs([
        config['OpenStreetMap']['DataDir'],
        config['angel.co']['DataDir'],
        os.path.join(config['angel.co']['DataDir'], 'cities'),
        os.path.join(config['angel.co']['DataDir'], 'startups')
    ])

    cities = None
    cities_file = config['main']['input']
    output_file = config['main']['output']

    with open(output_file, 'wt') as out:
        with io.open(cities_file, 'r', encoding='utf-8') as f:
            cities = json.load(f)['cities']

        for city in cities:
            import_osm_data(city, config['OpenStreetMap']['DataDir'])

        osm_amenity = ['atm', 'bank', 'library', 'college', 'university',
                       'pub', 'bar', 'restaurant', 'cafe']
        osm_public_transport = ['station', 'platform']
        angel_data = ['startups', 'investors']
        factual_data = ['finance', 'university', 'industry']

        print('cities', end='', file=out)
        for col in osm_amenity + osm_public_transport:
            print(', osm_{0}'.format(col), end='', file=out)
        for col in angel_data:
            print(', angelco_{0}'.format(col), end='', file=out)
        for col in factual_data:
            print(', factual_{0}'.format(col), end='', file=out)
        print(file=out)

        angel = AngelcoClient(config['angel.co']['AccessToken'],
                              config['angel.co']['DataDir'])
        factual = FactualClient(config['factual']['key'], config['factual']['secret'])

        for city in cities:
            db = city['osm']['db']
            osm = OsmClient(db, getpass.getuser())

            # Print city name
            print(city['name'].replace(',', ''), end='', file=out)

            # Print OSM count
            for amenity in osm_amenity:
                print(', {:d}'.format(osm.get_amenity_count(amenity)), end='', file=out)
            for key in osm_public_transport:
                print(', {:d}'.format(osm.get_public_transport_count(key)), end='', file=out)

            # Print angelco count
            if (city['angelco']):
                city_tag = city['angelco']['tag_id']
                print(', {:d}'.format(angel.get_startup_count(city_tag)), end='', file=out)
                print(', {:d}'.format(angel.get_investor_count(city_tag)), end='', file=out)
                if (config['angel.co'].getboolean('DumpData')):
                    angel.dump_all_startups(city_tag)
                    angel.dump_all_investors(city_tag)
            else:
                print(', NA, NA', end='', file=out)

            # Print factual count
            if (city['factual']):
                print(', {:d}'.format(factual.get_bank_count(city['factual'])), end='', file=out)
                print(', {:d}'.format(factual.get_college_count(city['factual'])), end='', file=out)
                print(', {:d}'.format(factual.get_industry_count(city['factual'])), end='', file=out)
            else:
                for col in factual_data:
                    print(', NA', end='', file=out)

            print(file=out)
            print('{0} finished.'.format(city['name']))

    print('Done.')
def get_window_themes(self):
    dirs = ('/usr/share/themes', os.path.join(self.homedir, ".themes"))
    filters = ['metacity-1']
    valid = utils.check_dirs(dirs, filters, True)
    valid.sort()
    return valid
def main():
    opt = get_options()

    # seed
    random.seed(opt.seed)

    # load model
    VGGweights = os.path.join(base_dir, 'Models/vgg_normalised.caffemodel')
    VGGmodel = os.path.join(base_dir, 'Models/VGG_ave_pool_deploy.prototxt')
    imagenet_mean = np.array([0.40760392, 0.45795686, 0.48501961])  # mean for color channels (bgr)
    # im_dir = os.path.join(base_dir, 'Images/')
    caffe.set_mode_gpu()  # for cpu mode do 'caffe.set_mode_cpu()'
    caffe.set_device(opt.cuda_id)

    # set dataset
    dataset_path = os.path.join(opt.data_root, opt.dataset, 'train' if opt.use_train else 'val')
    file_list = glob(os.path.join(dataset_path, '*/*.JPEG'))
    num_data = len(file_list)
    print('Number of data:', num_data)
    random.shuffle(file_list)

    # check the target dir
    target_dataset_path = os.path.join(opt.data_root, opt.tar_dir, 'train' if opt.use_train else 'val')
    dir_list = glob(os.path.join(dataset_path, '*'))
    check_dirs(target_dataset_path, dir_list)

    # iteration for dataset
    print("Start Iteration")
    for itr, source_path in enumerate(file_list):
        # save textured image
        class_dir = source_path.split('/')[-2]
        filename = source_path.split('/')[-1]
        save_path = os.path.join(opt.data_root, opt.tar_dir,
                                 'train' if opt.use_train else 'val', class_dir, filename)
        if os.path.exists(save_path):
            print(itr + 1, "is passed.")
            continue

        start = time.time()

        # load source image
        source_img_org = caffe.io.load_image(source_path)
        # check_array(np.array(img), 'img')
        [source_img, net] = load_image(source_path, VGGmodel, VGGweights, imagenet_mean)
        # check_array(source_img, 'img after processed')
        im_size = np.asarray(source_img.shape[-2:])

        # pass image through the network and save the constraints on each layer
        constraints = OrderedDict()
        net.forward(data=source_img)
        for l, layer in enumerate(opt.tex_layers):
            constraints[layer] = constraint(
                [LossFunctions.gram_mse_loss],
                [{
                    'target_gram_matrix': gram_matrix(net.blobs[layer].data),
                    'weight': opt.tex_weights[l]
                }])

        # get optimisation bounds
        bounds = get_bounds([source_img], im_size)

        # generate new texture
        result = ImageSyn(net, constraints, bounds=bounds,
                          callback=lambda x: show_progress(x, net),
                          minimize_options={
                              'maxiter': opt.max_iter,
                              'maxcor': opt.max_cor,
                              'ftol': 0,
                              'gtol': 0
                          })

        # match histogram of new texture with that of the source texture and show both images
        new_texture = result['x'].reshape(*source_img.shape[1:]).transpose(
            1, 2, 0)[:, :, ::-1]
        new_texture = histogram_matching(new_texture, source_img_org)
        new_texture = (new_texture * 255).astype(np.uint8)

        # save textured image
        Image.fromarray(new_texture).save(save_path)

        print('itr: {:d}/{:d} '
              'elapsed: {:.1f} '.format(itr + 1, num_data, time.time() - start))

        if itr + 1 == opt.stop_itr:
            break
def main():
    """
    # Examples to run this script

    ## Example 1: feed arguments from a yaml/yml file

    * Step 1. Edit your yaml file,

    ```
    $ vim my_settings.yaml
    ```

    The following is a yaml/yml file example:

    ```
    ---
    data:
      - input_dir: input_videos_1
        output_dir: output_videos_1
        combnations:
          - input_name: 1-final-a.mp4
            intervals:
              - start_time: 0.50
                end_time: 1.56
              - start_time: 1.00
                end_time: 2.00
          - input_name: 2-final-a.mp4
            intervals:
              - start_time: 0.50
                end_time: 1.56
              - start_time: 1.00
                end_time: 2.00
      - input_dir: input_videos_2
        output_dir: output_videos_2
        combnations:
          - input_name: 1-final-b.mp4
            intervals:
              - start_time: 0.50
                end_time: 1.56
              - start_time: 1.00
                end_time: 2.00
          - input_name: 2-final-b.mp4
            intervals:
              - start_time: 0.50
                end_time: 1.56
              - start_time: 1.00
                end_time: 2.00
    ...
    ```

    * Step 2. Run the command

    ```
    $ pipenv run python cut_batch.py my_settings.yaml
    ```

    If you don't provide a yaml file and run the command like this:

    ```
    $ pipenv run python cut_batch.py
    ```

    "cut_batch.py" automatically uses "cut_batch.yaml" as the default; if
    "cut_batch.yaml" does not exist, the program raises an error.
    """
    yaml_path = get_yaml_path('cut_batch.yaml')
    config = read_yaml(yaml_path)
    global_start = time.time()
    for settings in config['data']:
        val_to_utf8_str(settings, ['input_dir', 'output_dir'])
        check_dirs(settings)
        show_info('Input', settings['input_dir'])
        build_subclips(settings)
        show_info('Input', settings['input_dir'], is_end=True)
    show_time(global_start, prefix='Total')
custom_plasticity = config["custom_plasticity"]
snn_use_softmax = config["snn_use_softmax"]
sparse_reg = config["sparse_reg"]
v_threshold = config["v_threshold"]
tau = config["tau"]
lif = config["LIF"]
loss_ann = config["loss_ann"]
loss_snn = config["loss_snn"]
T = config["snn_T"]
debug = config["debug"]
save_k = config["save_every_k_epoch"]

mnist_mean = 0.1307
mnist_std = 0.3081

logs_path, save_path = utils.check_dirs(logs_path="train_seq", save_path="train_seq")
utils.set_seed(seed)

if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == "cuda":
        print("Training on {} !".format(torch.cuda.get_device_name()))
    else:
        print("Training on CPU :( ")

    dataset_train_1 = dataset.dataset_prepare(labels_phase_1, data_root, train=True)
    dataset_test_1 = dataset.dataset_prepare(labels_phase_1, data_root,