def worker():
    """Queue consumer: resolve, download, and play queued videos.

    Runs forever. Pulls work items (dicts) from the module-level queue ``q``;
    each item carries the video reference plus optional callback "run specs"
    (dicts with args/kwargs consumed by ``run``): ``on_duration_limit``,
    ``on_is_live_error``, ``on_start``, ``on_end``, ``on_error`` and the
    mandatory ``play_function``.

    Relies on module globals: ``q``, ``ydl``, ``DUR_LIMIT``, ``SUDO_USERS``,
    ``run``, ``generate_image``, ``format_duration``, ``requests``.
    """
    while True:
        item = q.get()
        try:
            info = ydl.extract_info(item["video"], download=False)
            sender = item["play_function"]["kwargs"]["sent_by_id"]
            # Reject videos longer than DUR_LIMIT minutes unless the sender
            # is privileged.
            if int(info["duration"] / 60) > DUR_LIMIT and sender not in SUDO_USERS:
                if item.get("on_duration_limit"):
                    item["on_duration_limit"]["args"][0] = (
                        item["on_duration_limit"]["args"][0].format(DUR_LIMIT))
                    run(item["on_duration_limit"])
            elif info["is_live"]:
                # Live streams cannot be downloaded; notify caller if asked to.
                if item.get("on_is_live_error"):
                    run(item["on_is_live_error"])
            else:
                file_name = info["id"] + "." + info["ext"]
                _log = item["play_function"]["kwargs"]["log"]
                if file_name not in os.listdir("downloads"):
                    # Not cached yet: announce, fetch thumbnail, download.
                    if item.get("on_start"):
                        run(item["on_start"])
                    if _log:
                        # was: open(...).write(...) — handle leaked; close promptly.
                        with open("downloads/" + info["id"] + ".png", "wb+") as thumb:
                            thumb.write(
                                requests.get(info["thumbnails"][-1]["url"]).content)
                    ydl.download([item["video"]])
                if _log:
                    _log["kwargs"]["photo"] = generate_image(
                        "downloads/" + info["id"] + ".png",
                        info["title"],
                        item["play_function"]["kwargs"]["sent_by_name"])
                run(
                    item["play_function"],
                    file="downloads/" + file_name,
                    title=info["title"],
                    duration=format_duration(info["duration"]),
                    url="https://youtu.be/" + info["id"],
                    log=_log,
                )
                if item.get("on_end"):
                    run(item["on_end"])
        except Exception:
            # Narrowed from a bare `except:` (which also caught SystemExit /
            # KeyboardInterrupt). Best-effort error callback; keep the thread alive.
            if item.get("on_error"):
                run(item["on_error"])
        finally:
            # Exactly one task_done() per get(), even when a callback raises.
            q.task_done()
def worker():
    """Positional-args variant of the download worker.

    Work items carry callbacks as ``(callable, args_list)`` pairs under the
    keys ``on_dur_limit``, ``on_is_live_err``, ``on_start``, ``on_end``,
    ``on_err`` and ``play_func``. Several magic indices into the play_func
    arg list are mutated in place before dispatch: ``args[0]`` = file path,
    ``args[3]`` = title, ``args[4]`` = url, ``args[8]`` = duration string,
    ``args[7]`` = log spec, ``args[1][5]`` (via ``play_func``) = sender id.

    Relies on module globals: ``q``, ``ydl``, ``DUR_LIMIT``, ``SUDO_USERS``,
    ``format_dur``, ``generate_image``, ``requests``.
    """
    while True:
        # get() outside the try so `item` is always bound in the except
        # handler (previously a q.get() failure raised NameError there).
        item = q.get()
        try:
            file_name = ""
            info = ydl.extract_info(item["video"], download=False)
            if (int(info["duration"] / 60) > DUR_LIMIT
                    and item["play_func"][1][5] not in SUDO_USERS):
                args = item["on_dur_limit"][1]
                args[0] = args[0].format(DUR_LIMIT)
                item["on_dur_limit"][0](*args)
                q.task_done()
            elif info["is_live"]:
                item["on_is_live_err"][0](*item["on_is_live_err"][1])
                q.task_done()
            else:
                file_name = info["id"] + "." + info["ext"]
                args = item["play_func"][1]
                args[0] = "downloads/" + file_name
                args[3] = info["title"]
                args[4] = "https://youtu.be/" + info["id"]
                args[8] = format_dur(info["duration"])
                if file_name not in os.listdir("downloads"):
                    item["on_start"][0](*item["on_start"][1])
                    # was: open(...).write(...) — handle leaked; close promptly.
                    with open("downloads/" + info["id"] + ".png", "wb+") as thumb:
                        thumb.write(
                            requests.get(info["thumbnails"][-1]["url"]).content)
                    ydl.download([item["video"]])
                    # The downloader writes into the CWD; move the finished
                    # file (matched by extension) into downloads/.
                    os.rename(
                        [i for i in os.listdir() if i.endswith(info["ext"])][0],
                        "downloads/" + file_name)
                # Slot [1][1] of the log spec receives the generated banner.
                args[7][1][1] = generate_image(
                    "downloads/" + info["id"] + ".png",
                    info["title"], args[8], args[6])
                item["play_func"][0](*args)
                if args[0] == "downloads/" + file_name:
                    item["on_end"][0](*item["on_end"][1])
                q.task_done()
        except Exception:
            # Narrowed from a bare `except:` (which also caught SystemExit /
            # KeyboardInterrupt). Best-effort error callback.
            item["on_err"][0](*item["on_err"][1])
            q.task_done()
def begin_training(params):
    """Build the full GAN training graph and run the training loop.

    Parameters
    ----------
    params : tuple
        ``(model_name, Generator, Discriminator, epochs, restore)`` —
        ``Generator``/``Discriminator`` are architecture callables handed to
        the graph-building helpers, ``epochs`` is the number of epochs to
        train for, and ``restore`` selects checkpoint restoration.

    Relies on module-level names: FLAGS, BATCH_SIZE, DEVICES, EPOCH, MODE,
    CRITIC_ITERS, train_data_list, helpers, fid, input_pipeline,
    split_and_setup_costs, setup_train_ops, add_summaries, fake_batch_stats,
    frechet_distance, tqdm, np, tf.
    """
    model_name, Generator, Discriminator, epochs, restore = params
    # Base paths; the dataset-specific stats filename is appended below.
    fid_stats_file = "./tmp/"
    inception_path = "./tmp/"
    TRAIN_FOR_N_EPOCHS = epochs
    MODEL_NAME = model_name + "_" + FLAGS.dataset
    SUMMARY_DIR = 'summary/' + MODEL_NAME + "/"
    SAVE_DIR = "./saved_models/" + MODEL_NAME + "/"
    OUTPUT_DIR = './outputs/' + MODEL_NAME + "/"
    # Recreate (or keep, when restoring) the output/summary/save directories.
    helpers.refresh_dirs(SUMMARY_DIR, OUTPUT_DIR, SAVE_DIR, restore)
    with tf.Graph().as_default():
        with tf.variable_scope('input'):
            all_real_data_conv = input_pipeline(
                train_data_list, batch_size=BATCH_SIZE)
            # Split data over multiple GPUs:
            split_real_data_conv = tf.split(all_real_data_conv, len(DEVICES))
        global_step = tf.train.get_or_create_global_step()
        # Losses plus the tensors needed later for distance/score summaries.
        gen_cost, disc_cost, pre_real, pre_fake, gradient_penalty, real_data, fake_data, disc_fake, disc_real = split_and_setup_costs(
            Generator, Discriminator, split_real_data_conv)
        gen_train_op, disc_train_op, gen_learning_rate = setup_train_ops(
            gen_cost, disc_cost, global_step)
        performance_merged, distances_merged = add_summaries(
            gen_cost, disc_cost, fake_data, real_data, gen_learning_rate,
            gradient_penalty, pre_real, pre_fake)
        saver = tf.train.Saver(max_to_keep=1)
        all_fixed_noise_samples = helpers.prepare_noise_samples(
            DEVICES, Generator)
        # Pre-computed training-set statistics for FID scoring.
        fid_stats_file += FLAGS.dataset + "_stats.npz"
        assert tf.gfile.Exists(
            fid_stats_file), "Can't find training set statistics for FID (%s)" % fid_stats_file
        f = np.load(fid_stats_file)
        # NOTE(review): mu_fid/sigma_fid are loaded but never referenced in
        # this block — presumably consumed elsewhere (e.g. by
        # fake_batch_stats); confirm before removing.
        mu_fid, sigma_fid = f['mu'][:], f['sigma'][:]
        f.close()
        inception_path = fid.check_or_download_inception(inception_path)
        fid.create_inception_graph(inception_path)
        # Create session
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        if FLAGS.use_XLA:
            config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
        with tf.Session(config=config) as sess:
            # Restore variables if required
            ckpt = tf.train.get_checkpoint_state(SAVE_DIR)
            if restore and ckpt and ckpt.model_checkpoint_path:
                print("Restoring variables...")
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Variables restored from:\n',
                      ckpt.model_checkpoint_path)
            else:
                # Initialise all the variables
                print("Initialising variables")
                sess.run(tf.local_variables_initializer())
                sess.run(tf.global_variables_initializer())
                print('Variables initialised.')
            # Start input enqueue threads
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            print('Queue runners started.')
            # Sanity check: print a few raw pixel values from one real batch.
            real_im = sess.run([all_real_data_conv])[0][0][0][0:5]
            print("Real Image range sample: ", real_im)
            summary_writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
            helpers.sample_dataset(sess, all_real_data_conv, OUTPUT_DIR)
            # Training loop
            try:
                # Resume epoch/step counters from the (restored) global step.
                ep_start = (global_step.eval(sess)) // EPOCH
                for epoch in tqdm(range(ep_start, TRAIN_FOR_N_EPOCHS),
                                  desc="Epochs passed"):
                    step = (global_step.eval(sess)) % EPOCH
                    for _ in tqdm(range(step, EPOCH),
                                  desc="Current epoch %i" % epoch,
                                  mininterval=0.5):
                        # train gen
                        _, step = sess.run([gen_train_op, global_step])
                        # Train discriminator
                        if (MODE == 'dcgan') or (MODE == 'lsgan'):
                            disc_iters = 1
                        else:
                            disc_iters = CRITIC_ITERS
                        for _ in range(disc_iters):
                            _disc_cost, _ = sess.run(
                                [disc_cost, disc_train_op])
                        if step % (128) == 0:
                            # Periodic performance/distance summaries.
                            _, _, _, performance_summary, distances_summary = sess.run(
                                [gen_train_op, disc_cost, disc_train_op, performance_merged, distances_merged])
                            summary_writer.add_summary(
                                performance_summary, step)
                            summary_writer.add_summary(
                                distances_summary, step)
                        if step % (512) == 0:
                            # Checkpoint, render fixed-noise samples, and log
                            # FID/IS/KID plus discriminator-output statistics.
                            saver.save(sess, SAVE_DIR, global_step=step)
                            helpers.generate_image(step, sess, OUTPUT_DIR,
                                                   all_fixed_noise_samples,
                                                   Generator, summary_writer)
                            fid_score, IS_mean, IS_std, kid_score = fake_batch_stats(
                                sess, fake_data)
                            pre_real_out, pre_fake_out, fake_out, real_out = sess.run(
                                [pre_real, pre_fake, disc_fake, disc_real])
                            scalar_avg_fake = np.mean(fake_out)
                            scalar_sdev_fake = np.std(fake_out)
                            scalar_avg_real = np.mean(real_out)
                            scalar_sdev_real = np.std(real_out)
                            frechet_dist = frechet_distance(
                                pre_real_out, pre_fake_out)
                            kid_score = np.mean(kid_score)
                            # Hand-built Summary so plain Python floats can be
                            # written to TensorBoard.
                            inception_summary = tf.Summary()
                            inception_summary.value.add(
                                tag="distances/FD", simple_value=frechet_dist)
                            inception_summary.value.add(
                                tag="distances/FID", simple_value=fid_score)
                            inception_summary.value.add(
                                tag="distances/IS_mean", simple_value=IS_mean)
                            inception_summary.value.add(
                                tag="distances/IS_std", simple_value=IS_std)
                            inception_summary.value.add(
                                tag="distances/KID", simple_value=kid_score)
                            inception_summary.value.add(
                                tag="distances/scalar_mean_fake",
                                simple_value=scalar_avg_fake)
                            inception_summary.value.add(
                                tag="distances/scalar_sdev_fake",
                                simple_value=scalar_sdev_fake)
                            inception_summary.value.add(
                                tag="distances/scalar_mean_real",
                                simple_value=scalar_avg_real)
                            inception_summary.value.add(
                                tag="distances/scalar_sdev_real",
                                simple_value=scalar_sdev_real)
                            summary_writer.add_summary(inception_summary, step)
            except KeyboardInterrupt as e:
                print("Manual interrupt occurred.")
            except Exception as e:
                # NOTE(review): broad catch keeps the shutdown/save path below
                # running, but it only prints the exception — consider
                # logging a full traceback or re-raising after cleanup.
                print(e)
            finally:
                # Stop input threads, then save a final checkpoint. NOTE(review):
                # `step` is unbound here if an exception fires before the first
                # training iteration — the final save would raise; confirm.
                coord.request_stop()
                coord.join(threads)
                print('Finished training.')
                saver.save(sess, SAVE_DIR, global_step=step)
                print("Model " + MODEL_NAME +
                      " saved in file: {} at step {}".format(SAVE_DIR, step))