# Exemplo n.º 1 (score: 0)
def get_prev_models(prev_levels=0, index=20000):
    """Load frozen encoder/binarizer/unet models for earlier levels.

    Reads configuration from the module-level ``args``. For each previous
    level, the weights are loaded from
    ``{model_dir}/{save_model_name}_{net}_{index:08d}_level{level}.pth``,
    the network is moved to CPU, and all parameters are frozen
    (``requires_grad = False``) so they are excluded from training.

    Args:
        prev_levels: number of earlier levels to load (0 loads nothing).
        index: checkpoint iteration number embedded in the file names.

    Returns:
        Three parallel lists ``(encoders, binarizers, unets)``, one entry
        per previous level.
    """
    names = ['encoder', 'binarizer', 'unet']
    en, bn, un = [], [], []
    for level in range(prev_levels):
        prev_en, prev_bn, _, prev_un = get_models(
            args=args, v_compress=args.v_compress,
            bits=args.bits,
            encoder_fuse_level=args.encoder_fuse_level,
            decoder_fuse_level=args.decoder_fuse_level,
            level=level)

        # Load the per-level checkpoints; `names` is parallel to the nets.
        for name, net in zip(names, (prev_en, prev_bn, prev_un)):
            net.load_state_dict(torch.load(
                '{}/{}_{}_{:08d}_level{}.pth'.format(
                    args.model_dir, args.save_model_name,
                    name, index, level)))

        prev_en = prev_en.cpu()
        prev_bn = prev_bn.cpu()
        prev_un = prev_un.cpu()

        # Freeze previous levels: they serve as fixed feature providers.
        for net in (prev_en, prev_bn, prev_un):
            for param in net.parameters():
                param.requires_grad = False

        en.append(prev_en)
        bn.append(prev_bn)
        un.append(prev_un)

    return en, bn, un
# Exemplo n.º 2 (score: 0)
def index():
    """Render the index page with cleaned manufacturer and model names.

    Strips the ``manufacturer_name_`` / ``model_name_`` field markers from
    the raw values returned by ``util`` before passing them to the template.
    """
    # NOTE(review): str.replace removes the marker anywhere in the string,
    # not only as a prefix — presumably the raw names always start with it;
    # confirm against util.get_manufacturers()/get_models() output.
    vehicle = [car.replace("manufacturer_name_", "")
               for car in util.get_manufacturers()]
    models = [model.replace("model_name_", "")
              for model in util.get_models()]
    return render_template("index.html", vehicle=vehicle, models=models)
# Exemplo n.º 3 (score: 0)
def get_eval_loaders():
    """Build the evaluation data loaders, keyed by dataset name.

    Uses the module-level ``args`` for the dataset roots. Extend the
    returned mapping to evaluate on additional datasets.
    """
    loaders = {}
    loaders['TVL'] = get_loader(
        is_train=False,
        root=args.eval,
        mv_dir=args.eval_mv,
        args=args)
    return loaders


############### Model ###############
# Build the network stack; `unet` is optional depending on configuration.
encoder, binarizer, decoder, unet = get_models(
    args=args,
    v_compress=args.v_compress,
    bits=args.bits,
    encoder_fuse_level=args.encoder_fuse_level,
    decoder_fuse_level=args.decoder_fuse_level)

nets = [encoder, binarizer, decoder]
if unet is not None:
    nets.append(unet)

gpus = [int(gpu) for gpu in args.gpus.split(',')]
if len(gpus) > 1:
    print("Using GPUs {}.".format(gpus))
    # BUG FIX: the original rebound the loop variable only
    # (`net = nn.DataParallel(net, ...)`), which discarded every wrapper;
    # write the wrapped modules back into `nets` so they are actually used.
    # NOTE(review): state_dicts saved from / loaded into these nets will now
    # carry the DataParallel `module.` key prefix — confirm checkpoint code.
    nets = [nn.DataParallel(net, device_ids=gpus) for net in nets]

# One optimizer parameter group per network.
params = [{'params': net.parameters()} for net in nets]
# Exemplo n.º 4 (score: 0)
      unet_dict.update(pretrain_unet_updated)
      print("Keys not Loaded ",[i for i in unet_dict.keys() if i not in pretrain_unet_updated.keys()])
      net.load_state_dict(unet_dict)
  hypernet = nets[4]
  hypernet.load_state_dict(torch.load("vcii_model_params/demo_hypernet_00010000.pth"))s


# Parse command-line options; everything below reads its config from `args`.
args = parser.parse_args()
print(args)
#print(vid_count,"vid")
# `get_eval_loaders` here also reports how many videos were found.
eval_loader,vid_count = get_eval_loaders()
print(vid_count,"vid")
############### Model ###############
# NOTE(review): num_vids presumably sizes per-video components of the
# hypernet — confirm against get_models.
encoder, binarizer, decoder, unet,hypernet = get_models(
  args=args, v_compress=args.v_compress, 
  bits=args.bits,
  encoder_fuse_level=args.encoder_fuse_level,
  decoder_fuse_level=args.decoder_fuse_level,num_vids=vid_count)

nets = [encoder, binarizer, decoder,unet,hypernet]



# Optionally resume all networks from a saved checkpoint iteration.
just_resumed = False
if args.load_model_name:
    print('Loading %s@iter %d' % (args.load_model_name,
                                  args.load_iter))

    resume(args.load_model_name, args.load_iter)
    just_resumed = True
# Exemplo n.º 5 (score: 0)
def test():
    """Evaluate saved encoder/binarizer/decoder(/unet) checkpoints.

    Loads the networks named by ``--load_model_name`` at iteration
    ``--load_iter``, runs every configured evaluation loader, and prints
    per-dataset loss, MS-SSIM and PSNR.
    """
    args = parser.parse_args()
    print(args)
    print('Start evaluation...')

    # Build the network stack; `unet` is optional per configuration.
    encoder, binarizer, decoder, unet = get_models(
        args=args,
        v_compress=args.v_compress,
        bits=args.bits,
        encoder_fuse_level=args.encoder_fuse_level,
        decoder_fuse_level=args.decoder_fuse_level)

    nets = [encoder, binarizer, decoder]
    if unet is not None:
        nets.append(unet)

    # Checkpoint file-name components, parallel to `nets`.
    names = ['encoder', 'binarizer', 'decoder', 'unet']

    if args.load_model_name:
        print('Loading %s@iter %d' % (args.load_model_name, args.load_iter))
        index = args.load_iter
        train_iter = args.load_iter
    else:
        print("please specify the model and iteration for evaluation")
        exit(1)

    # Restore weights into the bare modules BEFORE any DataParallel
    # wrapping, so the checkpoint keys match (no `module.` prefix).
    for net_idx, net in enumerate(nets):
        if net is not None:
            name = names[net_idx]
            checkpoint_path = '{}/{}_{}_{:08d}.pth'.format(
                args.model_dir, args.load_model_name, name, index)

            print('Loading %s from %s...' % (name, checkpoint_path))
            net.load_state_dict(torch.load(checkpoint_path))

    # BUG FIX: the original rebound the loop variable only
    # (`net = nn.DataParallel(net, ...)`), which discarded every wrapper;
    # write the wrapped modules back into `nets` so they are actually used.
    gpus = [int(gpu) for gpu in args.gpus.split(',')]
    if len(gpus) > 1:
        print("Using GPUs {}.".format(gpus))
        nets = [nn.DataParallel(net, device_ids=gpus) for net in nets]

    set_eval(nets)

    eval_loaders = get_eval_loaders(args)
    for eval_name, eval_loader in eval_loaders.items():
        eval_begin = time.time()
        eval_loss, mssim, psnr = run_eval(nets,
                                          eval_loader,
                                          args,
                                          output_suffix='iter%d' % train_iter)

        print('Evaluation @iter %d done in %d secs' %
              (train_iter, time.time() - eval_begin))
        print('%s Loss   : ' % eval_name +
              '\t'.join(['%.5f' % el for el in eval_loss.tolist()]))
        print('%s MS-SSIM: ' % eval_name +
              '\t'.join(['%.5f' % el for el in mssim.tolist()]))
        print('%s PSNR   : ' % eval_name +
              '\t'.join(['%.5f' % el for el in psnr.tolist()]))