def configure():
    args = parse_arguments()
    configure_dataset(args)
    configure_paths(args)

    # Build the list of spatial scales: start from initial_scale and
    # multiply by scale_step for each additional scale.
    args.scales = [args.initial_scale]
    for _ in range(1, args.n_scales):
        args.scales.append(args.scales[-1] * args.scale_step)
    args.arch = args.model

    # Dump the resolved options next to the results for reproducibility.
    with (args.result_path / 'opts.json').open('w') as opt_file:
        json.dump(vars(args), opt_file, default=json_serialize)

    # Mirror stdout to a log file and set up TensorBoard logging.
    args.tee = TeedStream(args.result_path / "output.log")
    args.time_suffix = datetime.now().strftime("%d%m%H%M")
    tb_path = args.result_path / "tb"
    args.writer = SummaryWriter(tb_path.as_posix())

    args.device = torch.device("cuda" if args.cuda else "cpu")

    # Archive the current source tree alongside the results.
    create_code_snapshot(Path(__file__).parent, args.result_path / "snapshot.tgz")

    torch.manual_seed(args.manual_seed)
    args.logger = setup_logging(args)
    return args
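# A minimal sketch of the kind of `default=` handler json_serialize could be
# (the real helper is defined elsewhere in this repo; the name
# _example_json_serialize and the Path special case below are illustrative
# assumptions): it converts values that json cannot encode natively, such as
# Path or torch.device objects inside vars(args), into strings.
def _example_json_serialize(value):
    if isinstance(value, Path):
        return value.as_posix()
    return str(value)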
def main():
    args = parse_arguments()
    net, _ = create_model(args, args.model)
    if net is None:
        return

    # Unwrap the (presumably DataParallel-wrapped) network and run it on CPU.
    net = net.module
    net.cpu()
    net.eval()

    # Feed a dummy clip through the network to collect per-layer statistics.
    h, w = args.sample_size, args.sample_size
    var = torch.randn(1, args.sample_duration, 3, h, w).to('cpu')
    net.apply(lambda m: m.register_forward_hook(compute_layer_statistics_hook))
    out = net(var)
    restore_module_names(net)
    print_statisctics()
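# A minimal sketch of a forward hook in the spirit of compute_layer_statistics_hook
# (the real hook and the printing helpers live elsewhere in this repo; the
# _EXAMPLE_LAYER_STATS store and the recorded fields are illustrative assumptions):
# each call records simple output statistics for the module it is attached to.
_EXAMPLE_LAYER_STATS = {}

def _example_layer_statistics_hook(module, inputs, output):
    # PyTorch forward hooks receive (module, inputs, output); only record
    # statistics when the output is a single tensor.
    if torch.is_tensor(output):
        stats = _EXAMPLE_LAYER_STATS.setdefault(module.__class__.__name__, [])
        stats.append({
            "shape": tuple(output.shape),
            "mean": output.detach().float().mean().item(),
            "std": output.detach().float().std().item(),
        })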