Example No. 1
def test(load_model_weight=False):
  # Relies on names defined in the enclosing scope of the original script:
  # cfg, model, modules_optims, model_w, TVT, test_sets and test_set_names.
  if load_model_weight:
    if cfg.model_weight_file != '':
      # Map every tensor to CPU while loading the raw weight file.
      map_location = (lambda storage, loc: storage)
      sd = torch.load(cfg.model_weight_file, map_location=map_location)
      load_state_dict(model, sd)
      print('Loaded model weights from {}'.format(cfg.model_weight_file))
    else:
      # Otherwise restore models and optimizers from a training checkpoint.
      load_ckpt(modules_optims, cfg.ckpt_file)

  for test_set, name in zip(test_sets, test_set_names):
    # Attach the feature-extraction function, then run the evaluation.
    feature_map = ExtractFeature(model_w, TVT)
    test_set.set_feat_func(feature_map)
    print('\n=========> Test on dataset: {} <=========\n'.format(name))
    test_set.eval(
      normalize_feat=cfg.normalize_feature,
      verbose=True)
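
Note: the map_location lambda keeps every tensor on CPU during torch.load, whatever device it was saved from. In current PyTorch the same thing can be written more explicitly as:

sd = torch.load(cfg.model_weight_file, map_location='cpu')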
Example No. 2
def main(config):
    state_dict = utils.load_state_dict(fpath=config['saved_model_path'])
    model_state = state_dict['model_state']
    mask = state_dict['mask']
    training_cfg = state_dict['config']

    utils.set_seed(training_cfg['seed'])
    # Instantiate the model and its attributes; the state dict is loaded below.
    model = getters.get_quant_model(config)

    # Note: the model may need to be wrapped in DataParallel for loading to work.
    init_attrs(model, training_cfg)

    # Load model weights and mask
    model = load_weights_and_mask(config, model, model_state, mask)
    print_size_of_model(model)
    # Switch to eval mode, move to CPU, and prepare for quantization.
    # Module fusion is currently disabled:
    # fuse_model(model)
    quant_model = prepare_model_for_quantization(model, config)

    # Grab all necessary objects
    loaders, sizes = getters.get_dataloaders(training_cfg)
    train_loader, _, test_loader = loaders
    train_size, _, test_size = sizes
    batches_per_train_epoch = math.ceil(train_size / training_cfg['batch_size'])
    batches_per_test_epoch = math.ceil(test_size / training_cfg['test_batch_size'])

    # Calibration pass over the training data (more epochs might help).
    calib_acc, calib_loss = evaluate(quant_model, train_loader, batches_per_train_epoch)

    torch.quantization.convert(quant_model, inplace=True)
    logger.info('Successfully quantized model!')

    print_size_of_model(quant_model)
    logger.info('Evaluating...')
    train_acc, train_loss = evaluate(quant_model, train_loader, batches_per_train_epoch)
    test_acc, test_loss = evaluate(quant_model, test_loader, batches_per_test_epoch)
    
    logger.info('train acc: {} train loss: {}'.format(train_acc, train_loss))
    logger.info('test acc: {} test loss: {}'.format(test_acc, test_loss))
    
    # Save model in same folder
    save_path = config['saved_model_path'].replace('model.pt', '')
    utils.save_run_quant(quant_model, save_path, config, train_acc, test_acc)
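
The evaluate() helper called above is not part of this excerpt. Below is a minimal sketch of a function matching its call sites, assuming a classification model and cross-entropy loss; the names and details are assumptions, not the original implementation:

import torch
import torch.nn.functional as F

def evaluate(model, loader, num_batches):
    # Sketch only: mean accuracy and loss over at most num_batches batches.
    model.eval()
    correct, total, loss_sum = 0, 0, 0.0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(loader):
            if batch_idx >= num_batches:
                break
            outputs = model(inputs)
            loss_sum += F.cross_entropy(outputs, targets, reduction='sum').item()
            correct += (outputs.argmax(dim=1) == targets).sum().item()
            total += targets.size(0)
    return correct / total, loss_sum / total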
Example No. 3
# Imports this snippet relies on (utils and model are project-local modules;
# opt comes from an argument parser defined earlier in the full script).
import numpy as np
import skimage.io as sio
import torch

import model
import utils

print(opt)

cuda = opt.cuda
device = torch.device('cuda' if cuda else 'cpu')

filepath = opt.test_hr_folder

filelist = utils.get_list(filepath, ext='.png') + utils.get_list(filepath,
                                                                 ext='.JPG')
psnr_list = np.zeros(len(filelist))
ssim_list = np.zeros(len(filelist))
time_list = np.zeros(len(filelist))

model = model.model_rtc(upscale=opt.upscale_factor)
model_dict = utils.load_state_dict(opt.checkpoint)
model.load_state_dict(model_dict, strict=True)

i = 0
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

for imname in filelist:
    # im_gt = cv2.imread(imname, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    im_gt = sio.imread(imname)  # RGB
    im_gt = utils.modcrop(im_gt, opt.upscale_factor)
    # im_l = cv2.imread(opt.test_lr_folder + imname.split('/')[-1].split('.')[0] + 'x' + str(opt.upscale_factor) + ext, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    # im_l = cv2.imread(opt.test_lr_folder + imname.split('/')[-1].split('.')[0] + ext, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    im_l = sio.imread(opt.test_lr_folder + '/' + imname.split('/')[-1])  # RGB
    if len(im_gt.shape) < 3:
        im_gt = im_gt[..., np.newaxis]
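
The listing is cut off here. As a hedged sketch, the rest of such a benchmark loop usually runs the model on the LR image and records PSNR and runtime; everything below is an assumption (it presumes 3-channel RGB inputs and that model.to(device).eval() was called beforehand), not the original code:

    # Assumed continuation of the loop body:
    im_input = torch.from_numpy(
        im_l.astype(np.float32).transpose(2, 0, 1) / 255.0).unsqueeze(0).to(device)
    with torch.no_grad():
        start.record()
        sr = model(im_input)
        end.record()
        torch.cuda.synchronize()
        time_list[i] = start.elapsed_time(end)  # milliseconds
    sr = sr.squeeze(0).clamp(0, 1).cpu().numpy().transpose(1, 2, 0) * 255.0
    mse = np.mean((sr - im_gt.astype(np.float32)) ** 2)
    psnr_list[i] = 10.0 * np.log10(255.0 ** 2 / mse)  # peak value 255
    i += 1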
Example No. 4
from data.mydataloader import Dataset
import time
import cv2
import os

# Additional imports this snippet relies on (model and utils are
# project-local modules):
import torch
from torch.utils.data import DataLoader
import model
import utils

cuda = 1  # set to 0 to run on CPU
device = torch.device('cuda' if cuda else 'cpu')

def tensor_to_np(tensor):
    # Convert a CxHxW float tensor in [0, 1] to an HxWxC uint8 numpy image.
    img = tensor.mul(255).byte()
    img = img.cpu().numpy().transpose((1, 2, 0))
    return img

# Load the pretrained model weights.
model = model.model().to(device)
model_dict = utils.load_state_dict("./pretrained-model/model.pth")
model.load_state_dict(model_dict, strict=True)

testset = Dataset("./datasets/same/HR", "./datasets/same/LR")
testing_data_loader = DataLoader(dataset=testset, num_workers=0, batch_size=1,
                                 shuffle=False)

model.eval()

avg_psnr, avg_ssim = 0, 0
start = time.time()


for _, batch in enumerate(testing_data_loader):
    lr_tensor, hr_tensor = batch[0], batch[1]
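
This loop is also truncated in the excerpt. A minimal sketch of a typical body, accumulating PSNR on normalized tensors (assumed, not the original code):

    # Assumed continuation of the loop body:
    lr_tensor = lr_tensor.to(device)
    with torch.no_grad():
        sr_tensor = model(lr_tensor).clamp(0.0, 1.0)
    mse = torch.mean((sr_tensor.cpu() - hr_tensor) ** 2).item()
    avg_psnr += 10.0 * torch.log10(torch.tensor(1.0 / mse)).item()

print('avg psnr: {:.2f} dB, time: {:.2f}s'.format(
    avg_psnr / len(testing_data_loader), time.time() - start))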
Example No. 5
def main():
    cfg = Config()

    # Redirect logs to both console and file.
    if cfg.log_to_file:
        ReDirectSTD(cfg.stdout_file, 'stdout', False)
        ReDirectSTD(cfg.stderr_file, 'stderr', False)

    TVT, TMO = set_devices(cfg.sys_device_ids)

    # Dump the configurations to log.
    import pprint
    print('-' * 60)
    print('cfg.__dict__')
    pprint.pprint(cfg.__dict__)
    print('-' * 60)

    ###########
    # Dataset #
    ###########

    test_set = create_dataset(**cfg.test_set_kwargs)

    #########
    # Model #
    #########

    model = Model(cfg.net,
                  path_to_predefined='',
                  pretrained=False,
                  last_conv_stride=cfg.last_conv_stride)
    model.cuda()
    r'''
    This optimizer is never used for training. apex's amp.initialize()
    requires an optimizer argument (and the optimization level affects each
    layer of the network differently), so a placeholder Adam is created here.
    '''
    optimizer = optim.Adam(model.parameters())

    model, optimizer = amp.initialize(
        model,
        optimizer,
        opt_level=cfg.opt_level,
        #loss_scale=cfg.loss_scale
    )

    print(model)

    # Model wrapper
    model_w = DataParallel(model)

    # May Transfer Model to Specified Device.
    TMO([model])

    #####################
    # Load Model Weight #
    #####################

    # Load the weights to CPU first.
    map_location = (lambda storage, loc: storage)
    used_file = cfg.model_weight_file or cfg.ckpt_file
    loaded = torch.load(used_file, map_location=map_location)
    if cfg.model_weight_file == '':
        loaded = loaded['state_dicts'][0]
    load_state_dict(model, loaded)
    print('Loaded model weights from {}'.format(used_file))

    ###################
    # Extract Feature #
    ###################

    test_set.set_feat_func(ExtractFeature(model_w, TVT))

    with measure_time('Extracting feature...', verbose=True):
        feat, ids, cams, im_names, marks = test_set.extract_feat(True,
                                                                 verbose=True)

    #######################
    # Select Query Images #
    #######################

    # Fix some query images, so that the visualization for different models can
    # be compared.

    # Sort in the order of image names
    inds = np.argsort(im_names)
    feat, ids, cams, im_names, marks = \
      feat[inds], ids[inds], cams[inds], im_names[inds], marks[inds]

    # query, gallery index mask
    is_q = marks == 0
    is_g = marks == 1

    prng = np.random.RandomState(2)
    # selected query indices
    sel_q_inds = prng.permutation(range(np.sum(is_q)))[:cfg.num_queries]

    q_ids = ids[is_q][sel_q_inds]
    q_cams = cams[is_q][sel_q_inds]
    q_feat = feat[is_q][sel_q_inds]
    q_im_names = im_names[is_q][sel_q_inds]

    ####################
    # Compute Distance #
    ####################

    # query-gallery distance
    q_g_dist = compute_dist(q_feat, feat[is_g], type='euclidean')

    ###########################
    # Save Rank List as Image #
    ###########################

    q_im_paths = list()
    for n in q_im_names:
        if isinstance(n, bytes):
            n = n.decode("utf-8")
        q_im_paths.append(ospj(test_set.im_dir, n))

    save_paths = list()
    for n in q_im_names:
        if isinstance(n, bytes):
            n = n.decode("utf-8")
        save_paths.append(ospj(cfg.exp_dir, 'rank_lists', n))

    g_im_paths = list()
    for n in im_names[is_g]:
        if isinstance(n, bytes):
            n = n.decode("utf-8")
        g_im_paths.append(ospj(test_set.im_dir, n))

    for dist_vec, q_id, q_cam, q_im_path, save_path in zip(
            q_g_dist, q_ids, q_cams, q_im_paths, save_paths):

        rank_list, same_id = get_rank_list(dist_vec, q_id, q_cam, ids[is_g],
                                           cams[is_g], cfg.rank_list_size)

        save_rank_list_to_im(rank_list, same_id, q_im_path, g_im_paths,
                             save_path)
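
For reference, compute_dist(..., type='euclidean') above produces a query-by-gallery distance matrix. A minimal numpy sketch with the same shape contract (an assumption; the project's own helper may differ, e.g. in how it handles normalization):

import numpy as np

def euclidean_dist(x, y):
    # x: [m, d] query features, y: [n, d] gallery features -> [m, n] distances.
    xx = np.sum(np.square(x), axis=1, keepdims=True)    # [m, 1]
    yy = np.sum(np.square(y), axis=1, keepdims=True).T  # [1, n]
    sq = xx + yy - 2.0 * np.dot(x, y.T)
    return np.sqrt(np.maximum(sq, 0.0))                 # clamp tiny negatives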