normalize,
    ])
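    # Contrastive loss that emphasizes the hardest negatives in each batch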
    criterion = HardNegativeContrastiveLoss().cuda()

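    # COCO caption dataset; note this loader is built from the "val" split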
    coco_data_train = CocoCaptionsRV(args, sset="val", transform=prepro)
    train_loader = DataLoader(coco_data_train,
                              batch_size=args.batch_size,
                              shuffle=False,
                              drop_last=True,
                              num_workers=args.workers,
                              collate_fn=collate_fn_padded,
                              pin_memory=True)

    print("Initializing network...")

    join_emb = joint_embedding(args).cuda()

    # Freeze the caption and image encoders (no gradient updates)
    for param in join_emb.cap_emb.parameters():
        param.requires_grad = False

    for param in join_emb.img_emb.parameters():
        param.requires_grad = False

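    # Optimize only the parameters left trainable; the frozen encoders are filtered out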
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  join_emb.parameters()),
                           lr=0.001)

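    # Run one training pass over the loader and time it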
    eTime = time.time()
    train_loss, batch_train, data_train = train(
        train_loader,
Example #2
                        help="The size of the batches",
                        type=int,
                        default=100)
    parser.add_argument("-ct",
                        "--ctresh",
                        help="Thresholding coeeficient to binarize heat maps",
                        type=float,
                        default=0.45)

    args = parser.parse_args()

    print("Loading model from:", args.model_path)
    checkpoint = torch.load(args.model_path,
                            map_location=lambda storage, loc: storage)

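    # Rebuild the model from the arguments stored in the checkpoint and restore its weights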
    join_emb = joint_embedding(checkpoint['args_dict'])
    join_emb.load_state_dict(checkpoint["state_dict"])

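    # Inference only: no parameter needs gradients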
    for param in join_emb.parameters():
        param.requires_grad = False

    join_emb = torch.nn.DataParallel(join_emb.cuda().eval())

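    # ImageNet channel statistics, as expected by torchvision's pretrained backbones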
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

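    # Deterministic validation preprocessing: fixed-size resize, no augmentation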
    in_dim = (400, 400)
    prepro_val = transforms.Compose([
        transforms.Resize(in_dim),
        transforms.ToTensor(),
        normalize,
Example #3
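        # Random crops and flips act as training-time augmentation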
        transforms.RandomResizedCrop(256),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    prepro_val = transforms.Compose([
        transforms.Resize((350, 350)),
        transforms.ToTensor(),
        normalize,
    ])

    end = time.time()
    print("Initializing network ...", end=" ")

    if args.resume:  # Resume from a previous checkpoint
        checkpoint = torch.load(args.resume,
                                map_location=lambda storage, loc: storage)
        join_emb = joint_embedding(checkpoint["args_dict"])
        join_emb.load_state_dict(checkpoint["state_dict"])
        join_emb = torch.nn.DataParallel(join_emb.cuda())

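        # Restore the training state saved alongside the weights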
        last_epoch = checkpoint["epoch"]
        opti = checkpoint["optimizer"]
        print("Load from epoch :", last_epoch)

        last_epoch += 1

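        # args.lrd packs the schedule as [gamma, milestone_1, milestone_2, ...]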
        lr_scheduler = MultiStepLR(opti, args.lrd[1:], gamma=args.lrd[0])
        lr_scheduler.step(last_epoch)
        best_rec = checkpoint["best_rec"]

    else:
        # Create new model