Пример #1
0
    # NOTE(review): fragment — the enclosing function's header is above this
    # view. `r`/`ri` arrive as 5-tuples and are extended to 7 entries by
    # appending per-sample averages of their last two elements (presumably
    # rank statistics such as medr/meanr — TODO confirm against the caller).
    r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
    # Same per-sample normalization for the text-to-image result tuple.
    ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4],
          ri[4] / n_samples)
    # Report summary metrics for both retrieval directions.
    print("rsum: %.2f" % rsum)
    print("Average i2t Recall: %.2f" % ar)
    print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % r)
    print("Average t2i Recall: %.2f" % ari)
    print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % ri)

    return mean_metrics


if __name__ == '__main__':
    # Wrap the model in DataParallel only when more than one CUDA device
    # is available.
    multi_gpu = torch.cuda.device_count() > 1

    # NOTE(review): the command line is parsed twice into two identical
    # namespaces; `opt` looks redundant with `args` — verify whether code
    # past this view actually needs both.
    args = verify_input_args(parser.parse_args())
    opt = verify_input_args(parser.parse_args())

    # Load the vocabulary used by the model; its size feeds the embedding
    # layer configuration.
    with open('./vocab/%s_vocab.pkl' % args.data_name, 'rb') as f:
        vocab = pickle.load(f)
    args.vocab_size = len(vocab)

    # Build the model and restore its weights from the checkpoint.
    # NOTE(review): `assert` is stripped under `python -O`; a missing
    # checkpoint would then fail later in torch.load instead.
    assert os.path.isfile(args.ckpt)
    model = PVSE(vocab.word2idx, args)
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda() if multi_gpu else model
        torch.backends.cudnn.benchmark = True
    model.load_state_dict(torch.load(args.ckpt))
Пример #2
0
def main():
    """Train the GRAD model on DIV2K, validating and checkpointing per epoch.

    Configuration comes from the module-level ``parser``; the parsed
    namespace and the model are stored in the module-level globals ``args``
    and ``model``. Checkpoints (latest + best) are written to
    ``args.checkpoint_dir``.

    Raises:
        Exception: when ``--gpu`` is requested but CUDA is unavailable.
    """
    global args, model

    args = parser.parse_args()
    print(args)

    if args.gpu and not torch.cuda.is_available():
        raise Exception("No GPU found!")

    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    # Fixed seed for reproducible training runs.
    torch.manual_seed(2020)

    cudnn.benchmark = True
    device = torch.device(('cuda:' + args.gpu_id) if args.gpu else 'cpu')

    model = Grad_concat.GRAD(feats=args.feats,
                             basic_conv=args.basic_conv,
                             tail_conv=args.tail_conv).to(device)
    # Optimize only parameters that require gradients.
    optimizer = optim.Adam(filter(lambda x: x.requires_grad,
                                  model.parameters()),
                           lr=args.lr)
    criterion = nn.L1Loss()

    if args.continue_train:
        # Resume model/optimizer state and best-score bookkeeping.
        checkpoint_file = torch.load(args.checkpoint_file)
        model.load_state_dict(checkpoint_file['model'])
        optimizer.load_state_dict(checkpoint_file['optimizer'])
        start_epoch = checkpoint_file['epoch']
        best_epoch = checkpoint_file['best_epoch']
        best_psnr = checkpoint_file['best_psnr']
        print("continue train {}.".format(start_epoch))
    else:
        start_epoch = 0
        best_epoch = 0
        best_psnr = 0

    print("Loading dataset ...")
    train_dataset = DIV2K(args, train=True)
    valid_dataset = DIV2K(args, train=False)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=1)
    valid_dataloader = DataLoader(dataset=valid_dataset, batch_size=1)

    checkpoint_name = "latest.pth"
    is_best = False
    for epoch in range(start_epoch + 1, args.epochs + 1):
        lr = adjust_lr(optimizer, args.lr, epoch, args.decay_step,
                       args.decay_gamma)
        print("[epoch:{}/{}]".format(epoch, args.epochs))

        train(train_dataset, train_dataloader, model, criterion, optimizer,
              device)
        # Validation is skipped for the first epochs (hard-coded warm-up
        # threshold of 90) to save time; `is_best` stays False until then.
        if epoch >= 90:
            valid_psnr = valid(valid_dataset, valid_dataloader, model,
                               criterion, device)

            is_best = valid_psnr > best_psnr
            if is_best:
                best_psnr = valid_psnr
                best_epoch = epoch

            print("learning rate: {}".format(lr))
            print("PSNR: {:.4f}".format(valid_psnr))
            # BUGFIX: format spec was "{:4f}" (field width 4, full float
            # precision); "{:.4f}" matches the 4-decimal style used above.
            print("best PSNR: {:.4f} in epoch: {}".format(
                best_psnr, best_epoch))

        save_checkpoint(
            {
                'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_psnr': best_psnr,
                'best_epoch': best_epoch,
            }, os.path.join(args.checkpoint_dir, checkpoint_name), is_best)
Пример #3
0
def main():
    """Evaluate a trained GRAD model on 100 DIV2K test images, report PSNR.

    Configuration comes from the module-level ``parser``; the parsed
    namespace and the model are stored in the module-level globals ``args``
    and ``model``. Optionally writes each denoised output image to
    ``args.test_result`` when ``--test_save`` is set.

    Raises:
        Exception: when ``--gpu`` is requested but CUDA is unavailable.
    """
    global args, model

    args = parser.parse_args()
    print(args)

    if args.gpu and not torch.cuda.is_available():
        raise Exception("No GPU found!")

    if not os.path.exists(args.test_result):
        os.makedirs(args.test_result)

    if not is_ready(args):
        prepare_data(args)

    cudnn.benchmark = True
    device = torch.device(('cuda:' + args.gpu_id) if args.gpu else 'cpu')

    # Restore the trained model and switch it to inference mode.
    model = Grad_none.GRAD(feats=args.feats,
                           basic_conv=args.basic_conv,
                           tail_conv=args.tail_conv)
    checkpoint_file = torch.load(args.test_checkpoint)
    model.load_state_dict(checkpoint_file['model'])
    model.eval()
    model = model.to(device)

    psnrs = AverageMeter()

    # PERF: open the HDF5 test set once; the original re-opened the same
    # file on every one of the 100 loop iterations.
    h5_path = "{}/DIV2K_np_test_{}.h5".format(args.h5file_dir,
                                              args.test_sigma)
    with h5py.File(h5_path, 'r') as h5, tqdm(total=100) as t:
        t.set_description("test")

        for idx in range(0, 100):
            l_image, h_image = h5['l'][str(idx)][()], h5['h'][str(idx)][()]
            l_image = np2tensor(l_image)
            h_image = np2tensor(h_image)

            # Add a batch dimension and move to the target device.
            l_image = l_image.unsqueeze(0).to(device)
            h_image = h_image.unsqueeze(0).to(device)

            with torch.no_grad():
                output = model(l_image)
                output = quantize(output, [0, 255])
                psnr = calc_psnr(output, h_image)
                psnrs.update(psnr.item(), 1)

            if args.test_save:
                save_image_path = "{}/{:04d}.png".format(
                    args.test_result, idx)
                # CHW -> HWC uint8 conversion for PIL.
                output = output.squeeze(0)
                output = output.data.permute(1, 2, 0)
                save_image = pil_image.fromarray(
                    output.byte().cpu().numpy())
                save_image.save(save_image_path)

            t.update(1)

    print("PSNR: {:.4f}".format(psnrs.avg))
Пример #4
0
import facebook
from option import parser

# ---------------------------------------------------------------------------
# Extra CLI option: the text to post on the wall.
parser.add_argument('--message', action='store', dest='message',
                    help='message to be sent')

# ---------------------------------------------------------------------------
args = parser.parse_args()

# ---------------------------------------------------------------------------
# Get an access token from https://developers.facebook.com/tools/explorer/
token = args.token
wallMessage = args.message

# ---------------------------------------------------------------------------
access_token = token
user = "******"

# Post the message to the user's feed via the Graph API.
graph = facebook.GraphAPI(access_token)
graph.put_object(user, "feed", message=wallMessage)