#model.freeze_encoder()
if args.generate:
    if args.tokens_path:
        model.do_generate_from_tokens(paths, args.tokens_path, verbose=True)
    else:
        model.do_generate(paths, data_path, index, args.test_speakers,
                          args.test_utts_per_speaker, use_half=use_half,
                          verbose=True, only_discrete=args.only_gen_discrete)
else:
    logger.set_logfile(paths.logfile_path())
    logger.log('------------------------------------------------------------')
    logger.log('-- New training session starts here ------------------------')
    logger.log(time.strftime('%c UTC', time.gmtime()))
    logger.log('beta={}'.format(args.beta))
    logger.log('num_group={}'.format(args.num_group))
    logger.log('test_speakers={}'.format(args.test_speakers))
    logger.log('num_sample={}'.format(args.num_sample))

    writer = SummaryWriter(paths.logfile_path() + '_tensorboard')
    writer.add_scalars('Params/Train', {'beta': args.beta})
    writer.add_scalars('Params/Train', {'num_group': args.num_group})
    writer.add_scalars('Params/Train', {'num_sample': args.num_sample})

    #model.do_train(paths, dataset, optimiser, writer, epochs=args.epochs, test_epochs=args.test_epochs, batch_size=args.batch_size, step=step, epoch=epoch, use_half=use_half, valid_index=test_index, beta=args.beta)
    if model_type[:5] == 'vqvae':
        model.do_train(paths, dataset, optimiser, writer, epochs=args.epochs,
                       test_epochs=args.test_epochs, batch_size=args.batch_size,
                       step=step, epoch=epoch, use_half=use_half,
                       valid_index=test_index, beta=args.beta)
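# A minimal, self-contained sketch (not the project's code) of the same
# hyperparameter bookkeeping done above. It assumes torch.utils.tensorboard
# (tensorboardX exposes the same add_scalars call); the function name, the
# log directory and the example values are hypothetical.
from torch.utils.tensorboard import SummaryWriter

def log_run_params(logdir, beta, num_group, num_sample):
    writer = SummaryWriter(logdir)
    # one scalar group per run; global_step=0 pins the values to the run start
    writer.add_scalars('Params/Train',
                       {'beta': beta, 'num_group': num_group, 'num_sample': num_sample},
                       global_step=0)
    writer.flush()
    return writer

# usage (hypothetical values):
# writer = log_run_params('runs/vqvae_demo', beta=0.25, num_group=2, num_sample=1)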
ssvae_parser.add_argument('--continue-from', type=str,
                          help="model file path to continue from")
ssvae_parser.add_argument('wav_files', type=str, nargs='+',
                          help="list of wav files for prediction")

args = parser.parse_args()
if args.model is None:
    parser.print_help()
    sys.exit(1)

# some assertions to make sure that batching math assumptions are met
assert parse_torch_version() >= (0, 2, 1), "you need pytorch 0.2.1 or later"

set_logfile(Path(args.log_dir, "predict.log"))
logger.info(f"Prediction started with command: {' '.join(sys.argv)}")
args_str = [f"{k}={v}" for (k, v) in vars(args).items()]
logger.info(f"args: {' '.join(args_str)}")

if args.use_cuda:
    torch.set_default_tensor_type("torch.cuda.FloatTensor")

# run prediction
predict = Predict(args)
predict(args.wav_files, logging=True)
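# The version check above relies on a parse_torch_version() helper whose body
# is not shown in this excerpt. A plausible sketch, assuming it returns a
# comparable tuple of ints built from torch.__version__ (suffixes such as
# '+cu117' or 'a0' are stripped):
import re
import torch

def parse_torch_version():
    # '1.13.1+cu117' -> (1, 13, 1); '0.2.1' -> (0, 2, 1)
    release = torch.__version__.split('+')[0]
    parts = []
    for piece in release.split('.'):
        match = re.match(r'\d+', piece)
        parts.append(int(match.group()) if match else 0)
    return tuple(parts)

# usage: assert parse_torch_version() >= (0, 2, 1), "you need pytorch 0.2.1 or later"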
ssvae_parser.add_argument('--continue-from', default=None, type=str,
                          help="model file path to continue training from")

args = parser.parse_args()
if args.model is None:
    parser.print_help()
    sys.exit(1)

# some assertions to make sure that batching math assumptions are met
assert parse_torch_version() >= (0, 2, 1), "you need pytorch 0.2.1 or later"

set_logfile(Path(args.log_dir, "train.log"))
logger.info(f"Training started with command: {' '.join(sys.argv)}")
args_str = [f"{k}={v}" for (k, v) in vars(args).items()]
logger.info(f"args: {' '.join(args_str)}")

if args.use_cuda:
    logger.info("using cuda")
    torch.set_default_tensor_type("torch.cuda.FloatTensor")

if args.seed is not None:
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if args.use_cuda:
        torch.cuda.manual_seed(args.seed)
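# The seeding block above covers torch, numpy and (optionally) the current CUDA
# device. A small helper that bundles this kind of setup, adding Python's own
# `random` module and all-GPU seeding, might look like the sketch below
# (`seed_everything` is not part of the script above):
import random
import numpy as np
import torch

def seed_everything(seed, use_cuda=False):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if use_cuda:
        # seeds every visible GPU, not just the current device
        torch.cuda.manual_seed_all(seed)

# usage: seed_everything(args.seed, use_cuda=args.use_cuda)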
        if model_answer in KNOWN_TIMEOUT_PS:
            runner.timed_out.append(model_answer)
            runner.timed_out_count += 1
            continue
        print(model_answer)
        synth.set_specs(examples)
        # synth.synthesize(examples, problem['prediction'])
        # runner.solve(synth, examples, problem['prediction'], model_answer)
    # return runner.statistics()
    return dict()


def run(p_size, data_set):
    runner = TimedSolver()
    annotated_problems = get_annotated_problems(p_size, data_set)
    dsl = Dsl()
    return solve_problems(dsl, annotated_problems, runner)


if __name__ == "__main__":
    FLAGS = parse_args()
    logger.logger = logger.initialize()
    if FLAGS.logfile:
        logger.logger = logger.set_logfile(get_log_filename(FLAGS.logfile))
    logger.logger.debug(
        'Starting script python -m run --program-size=%s --data-set=%s --synthesizer=%s'
        % (FLAGS.p_size, FLAGS.set, FLAGS.synthesizer))
    statistics = run(FLAGS.p_size, FLAGS.set)
    # save_run_statistics(statistics, synthesizer=FLAGS.synthesizer, p_size=FLAGS.p_size)
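# TimedSolver's internals are not shown in this excerpt. One common way to give
# each synthesis attempt a hard time budget is to run it on a worker and wait
# with a timeout, e.g. via concurrent.futures. This is a sketch only; the
# solve() callable and the 10-second budget are assumptions, not the project's API.
import concurrent.futures

_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)

def solve_with_timeout(solve, *args, timeout_s=10):
    """Return (result, timed_out). Note: on timeout the worker thread keeps
    running in the background; truly killing it needs a process-based runner."""
    future = _pool.submit(solve, *args)
    try:
        return future.result(timeout=timeout_s), False
    except concurrent.futures.TimeoutError:
        return None, True

# usage: result, timed_out = solve_with_timeout(runner.solve, synth, examples)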