コード例 #1
0
def main_train(opt):
    """Jointly train the sketch and image embedding branches with a triplet loss.

    Builds one image branch (``Net_Basic``) and one sketch branch
    (``Vector_embedding``), optimizes both with a single Adam optimizer,
    and periodically evaluates top-1/top-10 retrieval accuracy, saving
    the best-performing weights to disk.

    Relies on module-level names: ``device``, ``evaluate``,
    ``CreateDataset_Sketchy``, ``Net_Basic``, ``Vector_embedding``.
    """
    model_image = Net_Basic()
    model_sketch = Vector_embedding()

    model_image.to(device)
    model_image.train()

    model_sketch.to(device)
    model_sketch.train()

    Triplet_Criterion = nn.TripletMarginLoss(margin=0.3).to(device)

    # One optimizer over both branches so they are updated together.
    optimizer = optim.Adam(list(model_image.parameters()) +
                           list(model_sketch.parameters()),
                           lr=opt.lr)
    dataset_sketchy_train = CreateDataset_Sketchy(opt)
    dataloader_sketchy_train = data.DataLoader(
        dataset_sketchy_train,
        batch_size=opt.batchsize,
        shuffle=opt.shuffle,
        num_workers=int(opt.nThreads),
        collate_fn=dataset_sketchy_train.collate_self)

    top1_buffer = 0
    top10_buffer = 0
    iteration = 0  # renamed from `iter`, which shadowed the builtin

    for epoch in range(opt.niter):
        for i, sampled_batch in enumerate(dataloader_sketchy_train, 0):
            model_image.train()
            model_sketch.train()
            iteration += 1
            start_time = time.time()
            optimizer.zero_grad()

            sketch_anchor_embedding = model_sketch(
                sampled_batch['sketch_coord'].to(device),
                sampled_batch['seq_len'].to(device))
            rgb_positive_embedding = model_image(
                sampled_batch['positive_img'].to(device))
            # NOTE: 'negetive_img' (sic) is the key produced by the dataset's
            # collate function, so the misspelled key must stay as-is.
            rgb_negative_embedding = model_image(
                sampled_batch['negetive_img'].to(device))

            Triplet_Loss = Triplet_Criterion(sketch_anchor_embedding,
                                             rgb_positive_embedding,
                                             rgb_negative_embedding)
            loss = Triplet_Loss

            loss.backward()
            optimizer.step()

            if i % 50 == 0:
                print(
                    'Epoch: {}, Iteration: {}, Time: {}, Total_Loss: {}, Top1: {}, Top10: {}'
                    .format(epoch, i, (time.time() - start_time), Triplet_Loss,
                            top1_buffer, top10_buffer))

            if i % opt.save_iter == 0:  # was `(i + 0) % ...`; the `+ 0` was noise
                # Evaluation must not track gradients.
                with torch.no_grad():
                    top1, top10 = evaluate(model_sketch, model_image)

                print(
                    'Epoch: {}, Iteration: {}, Top1_Accuracy: {}, Top10_Accuracy: {}'
                    .format(epoch, i, top1, top10))

                # Checkpoint only when top-1 retrieval accuracy improves.
                if top1 > top1_buffer:
                    torch.save(model_sketch.state_dict(),
                               'model_Best_sketch4h2l.pth')
                    torch.save(model_image.state_dict(),
                               'model_Best_image4h2l.pth')
                    top1_buffer, top10_buffer = top1, top10
                    print('Model Updated')
コード例 #2
0
def train(epoch):
    """Run one training epoch: classification loss plus an optional,
    weighted triplet loss drawn from a separate triplet loader.

    Depends on module-level globals: ``model``, ``optimizer``,
    ``train_loader``, ``triplet_loader``, ``GPU_ID``, ``TRIPLET_WEIGHT``,
    and the various ``*_INTERVAL`` / ``ENABLE_*`` flags.

    NOTE(review): ``Variable`` and ``loss.data[0]`` are pre-0.4 PyTorch
    idioms (``.data[0]`` raises on modern releases) — this code targets
    an old torch version; confirm the pinned version before modernizing.
    """
    model.train()
    criterion_c = nn.CrossEntropyLoss()
    if ENABLE_TRIPLET_WITH_COSINE:
        criterion_t = TripletMarginLossCosine()
    else:
        criterion_t = nn.TripletMarginLoss()
    triplet_loader_iter = iter(triplet_loader)
    triplet_type = 0  # 0 = regular triplet set, 1 = in-shop triplet set
    if ENABLE_INSHOP_DATASET:
        triplet_in_shop_loader_iter = iter(triplet_in_shop_loader)
    for batch_idx, (data, target) in enumerate(train_loader):
        if batch_idx % TEST_INTERVAL == 0:
            test()
        data, target = data.cuda(GPU_ID), target.cuda(GPU_ID)
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        outputs = model(data)[0]
        classification_loss = criterion_c(outputs, target)
        if TRIPLET_WEIGHT:
            # Occasionally sample from the in-shop triplet set instead of
            # the regular one; restart either iterator when exhausted.
            if ENABLE_INSHOP_DATASET and random.random() < INSHOP_DATASET_PRECENT:
                triplet_type = 1
                try:
                    data_tri_list = next(triplet_in_shop_loader_iter)
                except StopIteration:
                    triplet_in_shop_loader_iter = iter(triplet_in_shop_loader)
                    data_tri_list = next(triplet_in_shop_loader_iter)
            else:
                triplet_type = 0
                try:
                    data_tri_list = next(triplet_loader_iter)
                except StopIteration:
                    triplet_loader_iter = iter(triplet_loader)
                    data_tri_list = next(triplet_loader_iter)
            # data_tri_list holds [anchors, positives, negatives]; stack them
            # into one batch so the model runs a single forward pass, then
            # slice the features back into the three groups below.
            triplet_batch_size = data_tri_list[0].shape[0]
            data_tri = torch.cat(data_tri_list, 0)
            data_tri = data_tri.cuda(GPU_ID)
            data_tri = Variable(data_tri, requires_grad=True)
            feats = model(data_tri)[1]
            triplet_loss = criterion_t(
                feats[:triplet_batch_size],
                feats[triplet_batch_size:2 * triplet_batch_size],
                feats[2 * triplet_batch_size:]
            )
            loss = classification_loss + triplet_loss * TRIPLET_WEIGHT
        else:
            loss = classification_loss
        loss.backward()
        optimizer.step()
        if batch_idx % LOG_INTERVAL == 0:
            if TRIPLET_WEIGHT:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAll Loss: {:.4f}\t'
                      'Triple Loss({}): {:.4f}\tClassification Loss: {:.4f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.data[0], triplet_type,
                    triplet_loss.data[0], classification_loss.data[0]))
            else:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tClassification Loss: {:.4f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                           100. * batch_idx / len(train_loader), loss.data[0]))
        if batch_idx and batch_idx % DUMP_INTERVAL == 0:
            print('Model saved to {}'.format(dump_model(model, epoch, batch_idx)))

    print('Model saved to {}'.format(dump_model(model, epoch)))
コード例 #3
0
from torch import nn

# Registry of torch.nn loss modules keyed by short name.  Every entry's
# class name is simply the key plus the "Loss" suffix, so the table is
# derived from the names alone (insertion order matches the key list).
_LOSS_NAMES = (
    "MSE", "L1", "CrossEntropy", "CTC", "NLL", "PoissonNLL", "KLDiv",
    "BCE", "BCEWithLogits", "MarginRanking", "HingeEmbedding",
    "MultiLabelMargin", "SmoothL1", "SoftMargin", "MultiLabelSoftMargin",
    "CosineEmbedding", "MultiMargin", "TripletMargin",
)

loss_functions = {name: getattr(nn, name + "Loss")() for name in _LOSS_NAMES}
コード例 #4
0
# opt = torch.optim.SGD(MODEL.parameters(), lr = 0.0001, momentum=0.9)
opt = torch.optim.Adam(res_model.parameters(), lr=cfg.LEARNING_RATE)

TOTAL_STEP = len(data_loader)
CURR_LR = cfg.LEARNING_RATE

# Build the loss once, outside the loop: the module is stateless, so
# re-instantiating it every batch (as the original did) was pure waste.
# https://pytorch.org/docs/stable/generated/torch.nn.TripletMarginLoss.html
triplet_loss = nn.TripletMarginLoss(margin=cfg.MARGIN, p=2)

for epoch in range(cfg.NUM_EPOCHS):
    for i, (query, pos, neg) in enumerate(data_loader):
        gc.collect()  # probably not needed, but security
        print(i, end='\r')
        # BUG FIX: `forward` takes (batch, model, device) — see the query
        # branch; the positive and negative branches were called with only
        # one argument and would raise a TypeError.
        query = forward(query, res_model, device)
        pos = forward(pos, res_model, device)
        neg = forward(neg, res_model, device)

        loss = triplet_loss(query, pos, neg)

        opt.zero_grad()
        loss.backward()
        opt.step()
        # scheduler.step(loss.item())
        # The original guard `(i + 1) % 1 == 0` is always true, i.e. log
        # every step — made unconditional for clarity.
        print("Epoch [{}/{}], Step [{}/{}] Loss: {:.8e}".format(epoch + 1,
                                                                cfg.NUM_EPOCHS,
                                                                i + 1,
                                                                TOTAL_STEP,
                                                                loss.item()))

        # probably unessisary but I was having memory overflow during training
        # I fixed the issue by not augmenting the images, and i'm just keeping
コード例 #5
0
ファイル: train.py プロジェクト: xheon/JointEmbedding
def main(opt: argparse.Namespace) -> None:
    """Jointly train the separation, completion and triplet (embedding)
    networks on Scan2CAD data.

    Creates a timestamped run directory, dumps the config, builds the
    train/val dataloaders and the three models, optimizes them with a
    single shared Adam optimizer, and runs the per-iteration
    train / log / validate / checkpoint cycle.
    """
    utils.set_gpu(opt.gpu)
    device = torch.device("cuda")
    run_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    run_path = os.path.join(opt.output_root, run_name)
    print(f"Start training {run_path}")
    print(vars(opt))

    # Save config
    os.makedirs(run_path, exist_ok=True)
    with open(os.path.join(run_path, "config.json"), "w") as f:
        json.dump(vars(opt), f, indent=4)

    # Data
    train_dataset: Dataset = data.Scan2Cad(
        opt.scan2cad_file,
        opt.scannet_path,
        opt.shapenet_path,
        "train", ["train"],
        rotation=opt.rotation_augmentation,
        flip=opt.flip_augmentation,
        jitter=opt.jitter_augmentation,
        transformation=data.to_occupancy_grid,
        scan_rep="sdf",
        load_mask=True,
        add_negatives=True)
    train_dataloader: DataLoader = DataLoader(train_dataset,
                                              shuffle=True,
                                              batch_size=opt.batch_size,
                                              num_workers=opt.num_workers,
                                              pin_memory=True)

    # Validation set mirrors the train setup (same augmentations) but is
    # iterated without shuffling.
    val_dataset: Dataset = data.Scan2Cad(opt.scan2cad_file,
                                         opt.scannet_path,
                                         opt.shapenet_path,
                                         "validation", ["validation"],
                                         rotation=opt.rotation_augmentation,
                                         flip=opt.flip_augmentation,
                                         jitter=opt.jitter_augmentation,
                                         transformation=data.to_occupancy_grid,
                                         scan_rep="sdf",
                                         load_mask=True,
                                         add_negatives=True)
    val_dataloader: DataLoader = DataLoader(val_dataset,
                                            shuffle=False,
                                            batch_size=opt.batch_size,
                                            num_workers=opt.num_workers,
                                            pin_memory=True)

    # Models
    separation_model: nn.Module = SeparationNet(
        ResNetEncoder(1, [16, 32, 64, 128, 512]), ResNetDecoder(1),
        ResNetDecoder(1))
    completion_model: nn.Module = HourGlass(ResNetEncoder(1), ResNetDecoder(1))

    triplet_model: nn.Module = TripletNet(ResNetEncoder(1))

    separation_model = separation_model.to(device)
    completion_model = completion_model.to(device)
    triplet_model = triplet_model.to(device)

    # Single optimizer over all three networks so they train jointly.
    model_parameters = list(separation_model.parameters()) + \
                       list(completion_model.parameters()) + \
                       list(triplet_model.parameters())

    optimizer = optim.Adam(model_parameters,
                           lr=opt.learning_rate,
                           weight_decay=opt.weight_decay)

    # reduction="none" keeps per-element losses; presumably `forward`
    # applies masking/aggregation before backward — confirm in `forward`.
    criterion_separation = nn.BCEWithLogitsLoss(reduction="none")
    criterion_completion = nn.BCEWithLogitsLoss(reduction="none")
    criterion_triplet = nn.TripletMarginLoss(reduction="none",
                                             margin=opt.triplet_margin)

    # Main loop
    iteration_number = 0

    for epoch in range(opt.num_epochs):
        # Draw a fresh set of negative samples each epoch (project-defined).
        train_dataloader.dataset.regenerate_negatives()

        for _, (scan, cad, negative) in enumerate(train_dataloader):
            # Step-wise LR decay at fixed iteration milestones.
            utils.stepwise_learning_rate_decay(optimizer, opt.learning_rate,
                                               iteration_number,
                                               [40000, 80000, 120000])

            separation_model.train()
            completion_model.train()
            triplet_model.train()

            losses = forward(scan, cad, negative, separation_model,
                             completion_model, triplet_model,
                             criterion_separation, criterion_completion,
                             criterion_triplet, device)

            loss_foreground, loss_background, loss_completion, loss_triplet = losses
            loss_total = loss_foreground + loss_background + loss_completion + loss_triplet

            # Train step
            optimizer.zero_grad()
            loss_total.backward()
            optimizer.step()

            # Log to console
            # (`% f == f - 1` fires every f iterations, first at iteration f-1)
            if iteration_number % opt.log_frequency == opt.log_frequency - 1:
                print(
                    f"[E{epoch:04d}, I{iteration_number:05d}]\tTotal: {loss_total: 05.3f}",
                    f"\tFG: {loss_foreground: 05.3f}\tBG: {loss_background: 05.3f}",
                    f"\tCompletion: {loss_completion: 05.3f} \tTriplet: {loss_triplet: 05.3f}"
                )

            # Validate
            if iteration_number % opt.validate_frequency == opt.validate_frequency - 1:
                with torch.no_grad():
                    separation_model.eval()
                    completion_model.eval()
                    triplet_model.eval()

                    val_losses = defaultdict(list)

                    # Go through entire validation set
                    for _, (scan_v, cad_v,
                            negative_v) in tqdm(enumerate(val_dataloader),
                                                total=len(val_dataloader),
                                                leave=False):
                        losses = forward(scan_v, cad_v, negative_v,
                                         separation_model, completion_model,
                                         triplet_model, criterion_separation,
                                         criterion_completion,
                                         criterion_triplet, device)

                        loss_foreground, loss_background, loss_completion, loss_triplet = losses
                        loss_total = loss_foreground + loss_background + loss_completion + loss_triplet
                        val_losses["FG"].append(loss_foreground.item())
                        val_losses["BG"].append(loss_background.item())
                        val_losses["Completion"].append(loss_completion.item())
                        val_losses["Triplet"].append(loss_triplet.item())
                        val_losses["Total"].append(loss_total.item())

                    # Aggregate losses
                    val_losses_summary = {
                        k: torch.mean(torch.tensor(v))
                        for k, v in val_losses.items()
                    }
                    print(
                        f"-Val [E{epoch:04d}, I{iteration_number:05d}]\tTotal: {val_losses_summary['Total']:05.3f}",
                        f"\tFG: {val_losses_summary['FG']:05.3f} \tBG: {val_losses_summary['BG']:05.3f}",
                        f"\tCompletion: {val_losses_summary['Completion']:05.3f}",
                        f"\tTriplet: {val_losses_summary['Triplet']:05.3f}")

            # Save checkpoint
            # NOTE(review): models are left in eval() mode after a validation
            # pass until the next loop iteration calls train() again.
            if iteration_number % opt.checkpoint_frequency == opt.checkpoint_frequency - 1:
                checkpoint_name = f"{run_name}_{iteration_number:05d}"

                torch.save(
                    separation_model.state_dict(),
                    os.path.join(run_path, f"{checkpoint_name}_separation.pt"))
                torch.save(
                    completion_model.state_dict(),
                    os.path.join(run_path, f"{checkpoint_name}_completion.pt"))
                torch.save(
                    triplet_model.state_dict(),
                    os.path.join(run_path, f"{checkpoint_name}_triplet.pt"))
                print(f"Saved model at {run_path}/{checkpoint_name}")

            iteration_number += 1
コード例 #6
0
    netE.load_state_dict(checkpoint['netE'])
    netD.load_state_dict(checkpoint['netD'])
    lstm.load_state_dict(checkpoint['lstm'])
    print('model loadinged successfully')
# One Adam optimizer per network (netE, netD, lstm), all with the same
# learning rate and a small weight decay for regularization.
optimizerE = optim.Adam(netE.parameters(), lr=opt.lr, betas=(0.9, 0.999),weight_decay=0.001)
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.9, 0.999),weight_decay=0.001)
optimizerLstm = optim.Adam(lstm.parameters(), lr=opt.lr, betas=(0.9, 0.999),weight_decay=0.001)

# Same optimizers without weight decay, kept for reference:
# optimizerE = optim.Adam(netE.parameters(), lr=opt.lr, betas=(0.9, 0.999))
# optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.9, 0.999))
# optimizerLstm = optim.Adam(lstm.parameters(), lr=opt.lr, betas=(0.9, 0.999))

# Loss zoo; everything (models and loss modules) is moved to the GPU.
mse_loss = nn.MSELoss()
bce_loss = nn.BCELoss()
cse_loss = nn.CrossEntropyLoss()
trp_loss = nn.TripletMarginLoss(margin=2.0)
netE.cuda()
netD.cuda()
lstm.cuda()
mse_loss.cuda()
bce_loss.cuda()
cse_loss.cuda()
trp_loss.cuda()

# Disabled L1 regularization sketch, kept for reference:
# l1_crit = nn.L1Loss(size_average=False)
# reg_loss = 0
# for param in netE.parameters():
#     reg_loss += l1_crit(param)
#
# factor = 0.0005
# loss = factor * reg_loss
コード例 #7
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point: build the Osteo-Siamese model (optionally
    distributed / curriculum-initialized), its losses, optimizer and
    dataloaders, then run the train/validate/checkpoint loop.

    BUG FIX: `triplet_loss` used to be created only on the
    (label_smoothing and not multi) path, yet it is passed to train() and
    validate() unconditionally — every other configuration raised a
    NameError. It is now constructed once, up front.
    """
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.curriculum:
        if os.path.isfile(args.curriculum):
            print("=> using pre-trained model '{}'".format(args.curriculum))
            try:
                # binary -> multi
                model = OsteoSiameseNet(32,
                                        0.3,
                                        2,
                                        True,
                                        args.acm,
                                        use_clinic=args.clinic,
                                        use_acm=False,
                                        return_fc=True)
                loaded_model = load_model(args.curriculum, model)
                model = tf_learning(loaded_model, 3)
            except Exception:  # was a bare `except:`; don't swallow SystemExit
                # multi -> binary
                model = OsteoSiameseNet(32,
                                        0.3,
                                        3,
                                        True,
                                        args.clinic,
                                        args.acm,
                                        use_clinic=args.clinic,
                                        use_acm=False,
                                        return_fc=True)
                loaded_model = load_model(args.curriculum, model)
                model = tf_learning(loaded_model, 2)
        else:
            print("=> There is no file called {}".format(args.curriculum))
    else:
        print("=> creating model '{}'".format('Osteo-Siamese Network'))
        # model = models.__dict__[args.arch]()
        if args.multi:
            model = OsteoSiameseNet(32,
                                    0.3,
                                    3,
                                    use_w_init=True,
                                    use_clinic=args.clinic,
                                    use_acm=args.acm,
                                    return_fc=True)
        else:
            # most general
            model = OsteoSiameseNet(32,
                                    0.3,
                                    2,
                                    use_w_init=True,
                                    use_clinic=args.clinic,
                                    use_acm=args.acm,
                                    return_fc=True)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(
                (args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    print(model)

    ################################################################
    ###### Loss Setting
    ################################################################
    # Triplet loss must exist on every configuration path because
    # train()/validate() below receive it unconditionally.
    triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    if args.label_smoothing:
        from Siamese import LabelSmoothingLoss
        if args.multi:
            criterion = LabelSmoothingLoss(classes=3,
                                           smoothing=args.smoothing_param)
        else:
            criterion = LabelSmoothingLoss(classes=2,
                                           smoothing=args.smoothing_param)
    else:
        # define loss function (criterion) and optimizer
        criterion = nn.BCEWithLogitsLoss().cuda(args.gpu)

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.weight_decay)

    if args.lookahead:
        optimizer = Lookahead(optimizer)

    ################################################################
    ###### Resume Training or Evaluation
    ################################################################
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Tensorboard Setting
    TB = TensorBoard(logdir=os.path.join('./runs/', '{}'.format(args.version)))

    # Data loading code
    image_dir = os.path.join(args.data)

    if args.ext:
        pair_list = getExternalPairList(image_dir)
    else:
        pair_list = getPairList(image_dir, duplicate=args.duplicate)

    print(len(pair_list))

    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((args.height_size, args.width_size)),
        transforms.ToTensor()
    ])

    if args.save_fc_only:
        whole_dataset = OsteoSiameseDataset(pair_list, transform)

        whole_loader = torch.utils.data.DataLoader(whole_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=False,
                                                   num_workers=args.workers,
                                                   pin_memory=True)
        validate(whole_loader, model.module.cuda(), criterion, 1, args, TB)
        return

    val_list, train_list = train_test_split(pair_list,
                                            ratio=args.val_ratio,
                                            multi=args.multi)
    validation_dataset = OsteoSiameseDataset(val_list,
                                             transform,
                                             oai=False,
                                             multi=args.multi)
    print(len(train_list), len(val_list))

    train_list = random_over_sampling(train_list, multi=args.multi)
    train_dataset = OsteoSiameseDataset(train_list,
                                        transform,
                                        oai=False,
                                        multi=args.multi,
                                        triplet=True)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(validation_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model.module.cuda(), criterion, 1, args, TB)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, TB,
              triplet_loss)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, epoch, args, TB,
                        triplet_loss)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.version)
コード例 #8
0
# Triplet dataset over the merged Open3D point-cloud renderings;
# positive_overlap=0.5 — presumably the minimum view overlap for a pair
# to count as positive; confirm in Semantic3dDatasetIdTriplets.
data_set=Semantic3dDatasetIdTriplets('data/pointcloud_images_o3d_merged_occ','train', transform=transform, positive_overlap=0.5, image_limit=IMAGE_LIMIT, return_graph_data=True)
#Option: shuffle, pin_memory crashes on my system, 
data_loader=DataLoader(data_set, batch_size=BATCH_SIZE, num_workers=2, pin_memory=False, shuffle=SHUFFLE) 

# Best-model bookkeeping for the learning-rate sweep that follows.
loss_dict={}
best_loss=np.inf
best_model=None

#for lr in (1e-4,5e-5):
for lr in (LR,):
    print('\n\nlr: ',lr)

    vgg=create_image_model_vgg11()
    model=VisualGraphEmbeddingCombined(vgg, EMBED_DIM).cuda()

    criterion=nn.TripletMarginLoss(margin=MARGIN)

    optimizer=optim.Adam(model.parameters(), lr=lr) #Adam is ok for PyG
    scheduler=optim.lr_scheduler.ExponentialLR(optimizer,LR_GAMMA)   

    loss_dict[lr]=[]
    for epoch in range(10):
        epoch_loss_sum=0.0
        for i_batch, batch in enumerate(data_loader):
            
            optimizer.zero_grad()
            #print(batch)
            
            a_out=model(batch['images_anchor'].to('cuda'), batch['graphs_anchor'].to('cuda'))
            p_out=model(batch['images_positive'].to('cuda'), batch['graphs_positive'].to('cuda'))
            n_out=model(batch['images_negative'].to('cuda'), batch['graphs_negative'].to('cuda'))
コード例 #9
0
        x = torch.cat((vgg_x, p_x), dim=1)
        x = self.dropout(x)

        x = self.encoder(x)
        return self._normalize(x)

    def parameters(self):
        """Trainable parameters of the encoder and both path sub-networks,
        collected into a single list."""
        return [*self.encoder.parameters(),
                *self.path1.parameters(),
                *self.path2.parameters()]


running_loss = []

# Model, loss and optimizer; margin/lr come from the CLI arguments.
model = Model().to(device)
triplet_loss = nn.TripletMarginLoss(margin=args.margin, p=2)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
print('margin = %f' % args.margin)
print('learning rate = %f' % args.lr)

for epoch in range(args.epochs):
    for batch_index, triplet in enumerate(trainloader):

        optimizer.zero_grad()

        # Embed each element of the (anchor, positive, negative) batch.
        embeddings = [sample.to(device) for sample in triplet]
        embeddings = [model(sample) for sample in embeddings]

        loss = triplet_loss(*embeddings)

        loss.backward()
        optimizer.step()
コード例 #10
0
    def __init__(self, cfg, writer, logger):
        """Build the frozen prediction DeepLab (PredNet), the trainable
        base DeepLab (BaseNet) with its SGD optimizer and LR scheduler,
        and the collection of losses used during adaptation training.

        Args:
            cfg: config mapping; reads the 'model' and 'training' sections.
            writer: summary writer used for logging.
            logger: logger instance.
        """
        # super(CustomModel, self).__init__()
        self.cfg = cfg
        self.writer = writer
        self.class_numbers = 19
        self.logger = logger
        cfg_model = cfg['model']
        self.cfg_model = cfg_model
        self.best_iou = -100
        self.iter = 0
        self.nets = []
        self.split_gpu = 0
        self.default_gpu = cfg['model']['default_gpu']
        self.PredNet_Dir = None
        self.valid_classes = cfg['training']['valid_classes']
        self.G_train = True
        # Per-class statistics over 256-dim features (19 classes —
        # presumably the Cityscapes label set; confirm upstream).
        self.objective_vectors = np.zeros([19, 256])
        self.objective_vectors_num = np.zeros([19])
        self.objective_vectors_dis = np.zeros([19, 19])
        self.class_threshold = np.zeros(self.class_numbers)
        # NOTE(review): immediately overwrites the zeros above — the
        # previous assignment is dead; effective threshold is 0.95.
        self.class_threshold = np.full([19], 0.95)
        self.metrics = CustomMetrics(self.class_numbers)
        self.cls_feature_weight = cfg['training']['cls_feature_weight']

        # Select the batch-norm flavor from config; disabled variants are
        # kept commented out.
        bn = cfg_model['bn']
        if bn == 'sync_bn':
            BatchNorm = SynchronizedBatchNorm2d
        # elif bn == 'sync_abn':
        #     BatchNorm = InPlaceABNSync
        elif bn == 'bn':
            BatchNorm = nn.BatchNorm2d
        # elif bn == 'abn':
        #     BatchNorm = InPlaceABN
        elif bn == 'gn':
            BatchNorm = nn.GroupNorm
        else:
            raise NotImplementedError(
                'batch norm choice {} is not implemented'.format(bn))
        # PredNet: loaded from checkpoint, BN frozen, kept in eval mode —
        # used for predictions only, never trained here.
        self.PredNet = DeepLab(
            num_classes=19,
            backbone=cfg_model['basenet']['version'],
            output_stride=16,
            bn=cfg_model['bn'],
            freeze_bn=True,
        ).cuda()
        self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
        self.PredNet_DP = self.init_device(self.PredNet,
                                           gpu_id=self.default_gpu,
                                           whether_DP=True)
        self.PredNet.eval()
        self.PredNet_num = 0

        # BaseNet: the trainable network (BN not frozen).
        self.BaseNet = DeepLab(
            num_classes=19,
            backbone=cfg_model['basenet']['version'],
            output_stride=16,
            bn=cfg_model['bn'],
            freeze_bn=False,
        )

        logger.info('the backbone is {}'.format(
            cfg_model['basenet']['version']))

        self.BaseNet_DP = self.init_device(self.BaseNet,
                                           gpu_id=self.default_gpu,
                                           whether_DP=True)
        self.nets.extend([self.BaseNet])
        self.nets_DP = [self.BaseNet_DP]

        # Optimizer/scheduler for BaseNet, configured from cfg['training'].
        self.optimizers = []
        self.schedulers = []
        # optimizer_cls = get_optimizer(cfg)
        optimizer_cls = torch.optim.SGD
        optimizer_params = {
            k: v
            for k, v in cfg['training']['optimizer'].items() if k != 'name'
        }
        # optimizer_cls_D = torch.optim.SGD
        # optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
        #                     if k != 'name'}
        self.BaseOpti = optimizer_cls(self.BaseNet.parameters(),
                                      **optimizer_params)
        self.optimizers.extend([self.BaseOpti])

        self.BaseSchedule = get_scheduler(self.BaseOpti,
                                          cfg['training']['lr_schedule'])
        self.schedulers.extend([self.BaseSchedule])
        self.setup(cfg, writer, logger)

        # Adversarial labels and the set of loss modules used in training.
        self.adv_source_label = 0
        self.adv_target_label = 1
        self.bceloss = nn.BCEWithLogitsLoss(size_average=True)
        self.loss_fn = get_loss_function(cfg)
        self.mseloss = nn.MSELoss()
        self.l1loss = nn.L1Loss()
        self.smoothloss = nn.SmoothL1Loss()
        self.triplet_loss = nn.TripletMarginLoss()
コード例 #11
0
def test(dataset):
    """Evaluate a saved embedding network on randomly chosen same/different pairs.

    Python 2 code (bare ``print`` statements).  For every sample a coin flip
    selects either the positive (same identity) or the negative (different
    identity) partner for the anchor image.  A pair is predicted "different"
    when the embedding distance exceeds a fixed threshold, and accuracy is
    reported overall and per pair type.  Compared path pairs are logged to
    ``testData.txt``.
    """

    f = open('testData.txt', 'w')
    # Loads a full serialized model (not just a state_dict) from file 'model'.
    neural_net = torch.load('model')
    # NOTE(review): created but never used below — evaluation relies on
    # PairwiseDistance plus a hand-tuned threshold instead.
    triplet_loss = nn.TripletMarginLoss(margin=1.0)
    neural_net.cuda()

    correct = 0
    total = 0
    similar_total = 0
    different_total = 0
    similar_correct = 0
    different_correct = 0
    # Misclassified pairs collected as [path_a, path_b, distance] for inspection.
    similar_wrong = []
    different_wrong = []
    for data, paths in dataset:
        #print data[0], data[1], data[2]

        # 1 -> compare anchor with positive, 2 -> compare anchor with negative.
        randomInteger = random.randint(1, 2)

        #print paths
        image1 = Variable(data['anchor']).cuda()
        if randomInteger == 1:
            image2 = Variable(data['positive']).cuda()
            f.write("" + paths[0][0] + ', ' + paths[1][0] + '\n')
        elif randomInteger == 2:
            image2 = Variable(data['negative']).cuda()
            f.write("" + paths[0][0] + ', ' + paths[2][0] + '\n')
        #n = Variable(data['negative']).cuda()

        #print a.size(), a.size()[1]
        # Swap dims 1 and 3 before the forward pass — presumably the loader
        # yields channels-last images; TODO confirm against the dataset class.
        image1 = image1.transpose(1, 3)
        image2 = image2.transpose(1, 3)
        #n = n.transpose(1,3)

        #image1 = Variable(loader(Image.open(data[0]))).cuda().unsqueeze(0)
        #image2 = Variable(loader(Image.open(data[randomInteger]))).cuda().unsqueeze(0)

        output1 = neural_net(image1)
        output2 = neural_net(image2)

        # Drop the two trailing singleton spatial dims of the embedding.
        output1 = output1.squeeze(2)
        output1 = output1.squeeze(2)
        output2 = output2.squeeze(2)
        output2 = output2.squeeze(2)

        #print total

        pdist = nn.PairwiseDistance(p=2)
        distance = pdist(output1, output2)

        #if randomInteger == 1:
        #    print 'similar', distance
        #elif randomInteger == 2:
        #    print 'different', distance

        total += 1

        #print 'DISTANCE', distance.data[0], 'DIST'
        # Hand-tuned decision threshold: above it the pair is judged "different".
        if distance.data.cpu().numpy()[0] > 0.00052:
            if randomInteger == 1:
                # Similar pair judged different -> wrong.
                similar_total += 1
                similar_wrong.append(
                    [paths[0], paths[1],
                     distance.data.cpu().numpy()[0]])
            else:
                different_total += 1
                different_correct += 1
                correct += 1
        else:
            if randomInteger == 1:
                similar_total += 1
                similar_correct += 1
                correct += 1
            else:
                # Different pair judged similar -> wrong.
                different_total += 1
                different_wrong.append(
                    [paths[0], paths[2],
                     distance.data.cpu().numpy()[0]])

    print 'Accuracy', correct * 100.0 / total
    print 'Similar Accuracy', similar_correct * 100.0 / similar_total
    #print 'Similar wrong distance: '
    #for sim in similar_wrong:
    #print sim
    print 'Different Accuracy', different_correct * 100.0 / different_total
コード例 #12
0
# Training-time augmentation: random 64x64 crop (4px padding), horizontal
# flip, then tensor conversion.
transform_train = [
    transforms.RandomCrop(64, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
]

# Backbone: a ResNet-50 trained from scratch whose classifier head is
# replaced by a 2048-d embedding projection.
model = resnet50(pretrained=False)
model.fc = nn.Linear(in_features=2048, out_features=2048)
model.name = 'ResNet50'

# Hyperparameters
batch_size = 1
# no_epoch = 10
# LR = 0.0001
# optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=1e-5)
# Only change the params, do not change the criterion.
criterion = nn.TripletMarginLoss(margin=1.0)

# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.5)
upsample = nn.Upsample(mode='bilinear', scale_factor=3.5)

data = Data(
    batch_size,
    criterion,
    "../../data/tiny-imagenet-200",
    transform_train=transform_train,
    transform_test=transform_test,
    upsample=upsample,
    #     scheduler=scheduler,
)

start_epoch = 9  # Change me!
コード例 #13
0
def triplet_loss_func(a, p, n, margin=1.0):
    """Compute the triplet margin loss for a batch of embedding triplets.

    Args:
        a: anchor embeddings, shape (N, D).
        p: positive embeddings, same shape as ``a``.
        n: negative embeddings, same shape as ``a``.
        margin: separation margin; defaults to 1.0, matching the default of
            the previously hard-coded ``nn.TripletMarginLoss()``.

    Returns:
        Scalar tensor: mean over the batch of
        ``max(d(a, p) - d(a, n) + margin, 0)`` with Euclidean distance.
    """
    triplet_func = nn.TripletMarginLoss(margin=margin)
    return triplet_func(a, p, n)
コード例 #14
0
def train(dm_train_set, dm_test_set):
    """Train the end-to-end RNN model with a joint triplet + masked-NLL loss.

    Each batch supplies (anchor, pos, neg) token-id sequences plus per-item
    labels and a mask.  The objective is a dynamically weighted sum of a
    triplet margin loss on the raw embeddings and a masked NLL
    classification loss; test-set metrics are pushed to TensorBoard after
    every epoch.

    Args:
        dm_train_set: training dataset (must expose ``vocab_size()``).
        dm_test_set: held-out dataset used for per-epoch validation.
    """

    EMBEDDING_DIM = 200
    hidden_size = 100
    batch_size = 128
    epoch_num = 30
    RNN_type = 'GRU'
    max_acc = 0
    # NOTE(review): '.tmp/...' here differs from the './tmp/...' prefix used
    # below — looks like a typo, but the torch.save that would consume it is
    # commented out, so the value is left unchanged.
    model_save_path = '.tmp/model_save/' + RNN_type + '.model'

    dm_dataloader = data.DataLoader(dataset=dm_train_set,
                                    batch_size=batch_size,
                                    shuffle=True,
                                    drop_last=True,
                                    num_workers=8)

    dm_test_dataloader = data.DataLoader(dataset=dm_test_set,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         drop_last=False,
                                         num_workers=8)

    model = E2ERNNModeler(dm_train_set.vocab_size(), EMBEDDING_DIM,
                          hidden_size, RNN_type)
    print(model)
    # Warm-start the embedding table from precomputed word-embedding weights.
    init_weight = np.loadtxt("./tmp/24581_we_weights.txt")
    model.init_emb(init_weight)
    if torch.cuda.is_available():
        print("CUDA : On")
        model.cuda()
    else:
        print("CUDA : Off")

    # Give the pre-initialized embedding table a smaller learning rate than
    # the rest of the network.
    embedding_params = list(map(id, model.embedding.parameters()))
    other_params = filter(lambda p: id(p) not in embedding_params,
                          model.parameters())

    optimizer = optim.Adam([{
        'params': other_params
    }, {
        'params': model.embedding.parameters(),
        'lr': 1e-4
    }],
                           lr=1e-3,
                           betas=(0.9, 0.99))

    # Loss modules are stateless — construct them once instead of once per
    # batch (previously rebuilt inside the inner loop).
    triplet_loss = nn.TripletMarginLoss(margin=10, p=2)
    cross_entropy = nn.NLLLoss(reduction='none')

    logging = True
    if logging:
        writer = SummaryWriter()

    history = None

    for epoch in range(epoch_num):
        for batch_idx, sample_dict in enumerate(dm_dataloader):
            anchor = Variable(torch.LongTensor(sample_dict['anchor']))
            pos = Variable(torch.LongTensor(sample_dict['pos']))
            neg = Variable(torch.LongTensor(sample_dict['neg']))
            label = Variable(torch.LongTensor(sample_dict['label']))
            mask = Variable(torch.LongTensor(sample_dict['mask']))
            mask_ = mask.type(torch.FloatTensor).view(-1)
            if torch.cuda.is_available():
                anchor = anchor.cuda()
                pos = pos.cuda()
                neg = neg.cuda()
                label = label.cuda()
                mask = mask.cuda()
                mask_ = mask_.cuda()

            optimizer.zero_grad()
            # Metric-learning term on the raw embeddings.
            anchor_embed = model.embed(anchor)
            pos_embed = model.embed(pos)
            neg_embed = model.embed(neg)
            embedding_loss = triplet_loss(anchor_embed, pos_embed, neg_embed)
            # Classification term: stack the three predictions and flatten to
            # (3 * batch, 2) so each item is scored independently.
            anchor_pred = model.forward(anchor).unsqueeze(1)
            pos_pred = model.forward(pos).unsqueeze(1)
            neg_pred = model.forward(neg).unsqueeze(1)
            final_pred = torch.cat((anchor_pred, pos_pred, neg_pred), dim=1)
            final_pred = final_pred.view(1, -1, 2)
            final_pred = final_pred.squeeze()

            label = label.mul(mask)
            label = label.view(-1)
            classify_loss = cross_entropy(F.log_softmax(final_pred, dim=1),
                                          label)
            # Average the NLL over unmasked items only (guard the empty-mask
            # case to avoid a 0/0 NaN).
            classify_loss = classify_loss.mul(mask_)
            if mask_.sum() > 0:
                classify_loss = classify_loss.sum() / mask_.sum()
            else:
                classify_loss = classify_loss.sum()

            # Dynamically balance the two terms.
            alpha = stg.dynamic_alpha(embedding_loss, classify_loss)
            loss = alpha * embedding_loss + (1 - alpha) * classify_loss

            if batch_idx % 1000 == 0:
                accuracy = valid_util.running_accuracy(final_pred, label,
                                                       mask_)
                print(
                    'epoch: %d batch %d : loss: %4.6f embed-loss: %4.6f class-loss: %4.6f accuracy: %4.6f'
                    % (epoch, batch_idx, loss.item(), embedding_loss.item(),
                       classify_loss.item(), accuracy))
                if logging:
                    writer.add_scalars(
                        RNN_type + '_data/loss', {
                            'Total Loss': loss,
                            'Embedding Loss': embedding_loss,
                            'Classify Loss': classify_loss
                        }, epoch * 10 + batch_idx // 1000)
            loss.backward()
            optimizer.step()

        # Per-epoch evaluation: per-class precision/recall/F1 plus accuracy.
        if logging:
            result_dict = valid_util.validate(model,
                                              dm_test_set,
                                              dm_test_dataloader,
                                              mode='report')
            writer.add_scalars(
                RNN_type + '_data/0-PRF', {
                    '0-Precision': result_dict['0']['precision'],
                    '0-Recall': result_dict['0']['recall'],
                    '0-F1-score': result_dict['0']['f1-score']
                }, epoch)
            writer.add_scalars(
                RNN_type + '_data/1-PRF', {
                    '1-Precision': result_dict['1']['precision'],
                    '1-Recall': result_dict['1']['recall'],
                    '1-F1-score': result_dict['1']['f1-score']
                }, epoch)
            writer.add_scalar(RNN_type + '_data/accuracy',
                              result_dict['accuracy'], epoch)
        accuracy, history = valid_util.validate(model,
                                                dm_test_set,
                                                dm_test_dataloader,
                                                mode='detail',
                                                pred_history=history)
        # pickle.dump(history, open('./tmp/e2e_'+RNN_type+'_history.pkl', 'wb'))
        if accuracy > max_acc:
            max_acc = accuracy
            # torch.save(model.state_dict(), model_save_path)

        # Extra validation pass on a pickled triplet validation set.
        dm_valid_set = pickle.load(
            open('./tmp/triplet_valid_dataset.pkl', 'rb'))
        valid_util.validate(model, dm_valid_set, mode='output')

    if logging:
        writer.close()
    print("Max Accuracy: %4.6f" % max_acc)
    return
コード例 #15
0
ファイル: ranking_base.py プロジェクト: patelrajnath/COMET
 def _build_loss(self):
     """Create the training criterion.

     Installs a triplet margin loss (Euclidean distance, margin 1.0) on
     ``self.loss`` for the training loop to consume.
     """
     criterion = nn.TripletMarginLoss(p=2, margin=1.0)
     self.loss = criterion
コード例 #16
0
    def __init__(self, hyperparameters):
        """Build the cross-modal model from a hyperparameter dictionary.

        Creates per-modality encoders/decoders (resnet features and class
        attributes), feature/attribute projection layers with batch norm, a
        single Adam optimizer over all trainable parts, and the
        reconstruction and triplet criteria.

        Args:
            hyperparameters: dict carrying device, dataset, architecture and
                training settings (see the keys read below).
        """
        super(Model, self).__init__()

        self.device = hyperparameters['device']
        self.auxiliary_data_source = hyperparameters['auxiliary_data_source']
        self.attr = hyperparameters['attr']
        self.all_data_sources = ['resnet_features', 'attributes']
        self.DATASET = hyperparameters['dataset']
        self.num_shots = hyperparameters['num_shots']
        self.latent_size = hyperparameters['latent_size']
        self.batch_size = hyperparameters['batch_size']
        self.hidden_size_rule = hyperparameters['hidden_size_rule']
        self.warmup = hyperparameters['model_specifics']['warmup']
        self.generalized = hyperparameters['generalized']
        self.classifier_batch_size = 32
        self.reco_loss_function = hyperparameters['loss']
        self.margin = hyperparameters['margin_loss']
        self.nepoch = hyperparameters['epochs']
        self.lr_cls = hyperparameters['lr_cls']
        self.cross_reconstruction = hyperparameters['model_specifics']['cross_reconstruction']
        self.cls_train_epochs = hyperparameters['cls_train_steps']
        self.dataset = dataLoader(copy.deepcopy(self.auxiliary_data_source), device='cuda', attr=self.attr)

        # Dataset-specific class counts (total / novel).
        if self.DATASET == 'CUB':
            self.num_classes = 200
            self.num_novel_classes = 50
        elif self.DATASET == 'SUN':
            self.num_classes = 717
            self.num_novel_classes = 72
        elif self.DATASET == 'AWA1' or self.DATASET == 'AWA2':
            self.num_classes = 50
            self.num_novel_classes = 10
        # NOTE(review): any other dataset name leaves num_classes unset;
        # callers are expected to pass one of CUB / SUN / AWA1 / AWA2.

        # Per-modality feature dimensions: [image features, side information].
        if self.attr == 'attributes':
            feature_dimensions = [2048, self.dataset.K]
        elif self.attr == 'bert':
            feature_dimensions = [2048, 768]
        else:
            # Previously this fell through and raised a confusing NameError
            # at first use of feature_dimensions; fail fast instead.
            raise ValueError('unsupported attr: {!r}'.format(self.attr))

        # Projection + batch-norm layers for image features and attributes.
        self.fc_ft = nn.Linear(2048, 2048)
        self.fc_ft.to(self.device)

        self.ft_bn = nn.BatchNorm1d(2048).to(self.device)

        self.fc_at = nn.Linear(self.dataset.K, self.dataset.K)
        self.fc_at.to(self.device)
        self.at_bn = nn.BatchNorm1d(self.dataset.K).to(self.device)

        # Encoders and decoders for all modalities, keyed by data source.
        self.encoder = {}

        for datatype, dim in zip(self.all_data_sources, feature_dimensions):
            self.encoder[datatype] = models.encoder_template(dim, self.latent_size, self.hidden_size_rule[datatype], self.device)
            print(str(datatype) + ' ' + str(dim))

        self.decoder = {}
        for datatype, dim in zip(self.all_data_sources, feature_dimensions):
            self.decoder[datatype] = models.decoder_template(self.latent_size, dim, self.hidden_size_rule[datatype], self.device)

        # One Adam optimizer over everything.  The encoder/decoder dicts are
        # not registered as submodules, so their parameters (and those of the
        # projection/BN layers) must be collected explicitly.
        parameters_to_optimize = list(self.parameters())
        for datatype in self.all_data_sources:
            parameters_to_optimize += list(self.encoder[datatype].parameters())
            parameters_to_optimize += list(self.decoder[datatype].parameters())
        parameters_to_optimize += list(self.fc_ft.parameters())
        parameters_to_optimize += list(self.fc_at.parameters())
        parameters_to_optimize += list(self.ft_bn.parameters())
        parameters_to_optimize += list(self.at_bn.parameters())

        self.optimizer = optim.Adam(parameters_to_optimize, lr=hyperparameters['lr_gen_model'], betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)

        # reduction='sum' replaces the deprecated size_average=False and is
        # numerically equivalent.  NOTE(review): other values of
        # hyperparameters['loss'] leave the criterion unset, as before.
        if self.reco_loss_function == 'l2':
            self.reconstruction_criterion = nn.MSELoss(reduction='sum')
        elif self.reco_loss_function == 'l1':
            self.reconstruction_criterion = nn.L1Loss(reduction='sum')

        self.triplet_loss = nn.TripletMarginLoss(margin=self.margin)
コード例 #17
0
def train():
    """Train a face-recognition embedding net with a triplet margin loss.

    Builds the train/valid triplet loaders, optionally mines hard negatives
    online, validates after every epoch, and checkpoints whenever validation
    loss or accuracy improves.  Plots loss/accuracy curves and the
    anchor-positive / anchor-negative distance distributions at the end.
    Returns early (no training) when CUDA is unavailable.
    """

    # Show system info; this trainer requires CUDA.
    print('\n')
    flag = torch.cuda.is_available()
    print('cuda is available on this system ?', flag)
    if not flag:
        return

    # ------------------------------------  print the configured options  -------------------------------------

    cuda_opt._print_opt()

    # ------------------------------------ step 1/5 : load data ------------------------------------

    data_pool = DataLoaderPool(cuda_opt)
    train_loader = data_pool.select_dataloader(data_type='train')
    valid_loader = data_pool.select_dataloader(data_type='valid')
    print('load data done !')

    # ------------------------------------ step 2/5 : build the network ------------------------------------

    net_name = cuda_opt.train_net_name
    net_pool = FaceRecognitionNetPool(cuda_opt)  # model-selection helper
    net = net_pool.select_model(net_name)  # instantiate the chosen net
    net = net.cuda()
    print('load net done !')

    # ------------------------------------ step 3/5 : loss function and optimizer ------------------------------------

    criterion = nn.TripletMarginLoss(margin=cuda_opt.margin).cuda()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    # LR policy: multiply by 0.1 when the epoch loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     factor=0.1,
                                                     patience=5,
                                                     verbose=True,
                                                     threshold=0.005,
                                                     threshold_mode='rel',
                                                     cooldown=0,
                                                     min_lr=0,
                                                     eps=1e-08)

    # ------------------------------------ step 4/5 : training --------------------------------------------------
    print('train start ------------------------------------------------')
    time_str = time.strftime('%H时%M分%S秒')
    print(time_str)
    train_iter_index = []
    train_loss_list = []
    val_iter_index = []
    val_loss_list = []
    val_acc_list = []
    iteration_number = 0
    best_val_loss = 100
    best_val_acc = 0

    for epoch in range(cuda_opt.max_epoch):
        loss_sigma = 0
        net.train()  # training mode

        for i, data in tqdm((enumerate(train_loader))):

            # Fetch the (anchor, positive, negative) image triplet.
            inputs0, inputs1, inputs2 = data
            inputs0, inputs1, inputs2 = Variable(inputs0).cuda(), Variable(
                inputs1).cuda(), Variable(inputs2).cuda()
            outputs0, outputs1, outputs2 = net(inputs0), net(inputs1), net(
                inputs2)

            if cuda_opt.train_harder:

                # Online triplet selection: keep only the hard triplets,
                # i.e. those that still violate the margin.
                d_p = F.pairwise_distance(outputs0, outputs1, 2)
                d_n = F.pairwise_distance(outputs0, outputs2, 2)
                hard_negatives = (
                    d_n - d_p < cuda_opt.margin).data.cpu().numpy().flatten()
                hard_triplets = np.where(hard_negatives == 1)
                if len(hard_triplets[0]) == 0:
                    continue
                outputs0 = outputs0[hard_triplets]
                outputs1 = outputs1[hard_triplets]
                outputs2 = outputs2[hard_triplets]

            loss = criterion(outputs0, outputs1, outputs2)

            # forward, backward, update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iteration_number += 1
            loss_sigma += loss.item()

            # Sample the training loss every 10 batches for the final plot.
            if i % 10 == 0:
                train_iter_index.append(iteration_number)
                train_loss_list.append(loss.item())

        # Per-epoch average loss and current learning rate.
        lr_now = [group['lr'] for group in optimizer.param_groups][0]
        loss_avg_epoch = loss_sigma / len(train_loader)
        print(
            "Training: Epoch[{:0>3}/{:0>3}] Loss_Avg_Epoch: {:.4f}       Lr: {:.8f}"
            .format(epoch + 1, cuda_opt.max_epoch, loss_avg_epoch, lr_now))

        scheduler.step(loss_avg_epoch)  # update the learning rate

        # ------------------------------------ evaluate on the validation set ------------------------------------
        if epoch % 1 == 0:
            loss_sigma = 0
            distance_AP_list = []
            distance_AN_list = []
            predicted = []
            net.eval()  # evaluation mode
            for i, data in tqdm((enumerate(valid_loader))):
                inputs0, inputs1, inputs2 = data
                inputs0, inputs1, inputs2 = Variable(inputs0).cuda(), Variable(
                    inputs1).cuda(), Variable(inputs2).cuda()

                # forward
                outputs0, outputs1, outputs2 = net(inputs0), net(inputs1), net(
                    inputs2)

                loss = criterion(outputs0, outputs1, outputs2)

                loss_sigma += loss.item()

                # Statistics: a triplet is correct when the negative is
                # farther from the anchor than the positive.
                distance_AP = F.pairwise_distance(outputs0, outputs1)
                distance_AN = F.pairwise_distance(outputs0, outputs2)
                # Loop-invariant; previously recomputed on every iteration.
                distance_N_P = distance_AN - distance_AP
                for tt in range(len(distance_AP)):
                    distance_AP_list.append(distance_AP[tt].item())
                    distance_AN_list.append(distance_AN[tt].item())
                    if distance_N_P[tt] > 0:
                        predicted.append(0)  # positive pairs
                    else:
                        predicted.append(1)  # negative pairs

            val_acc_avg = (len(predicted) - sum(predicted)) / len(predicted)
            val_loss_avg = loss_sigma / len(valid_loader)
            val_iter_index.append(iteration_number)
            val_loss_list.append(val_loss_avg)
            val_acc_list.append(val_acc_avg)
            print(
                "Validating: Epoch[{:0>3}/{:0>3}] Loss_Avg_Epoch: {:.4f} Accuracy:{:.4f}"
                .format(epoch + 1, cuda_opt.max_epoch, val_loss_avg,
                        val_acc_avg))
            show_distance(distance_AP_list, distance_AN_list,
                          cuda_opt.show_plot_epoch, epoch)
            # Checkpoint whenever either validation metric improves.
            if (val_loss_avg < best_val_loss) or (val_acc_avg > best_val_acc):
                best_val_loss = np.min((val_loss_avg, best_val_loss))
                best_val_acc = np.max((val_acc_avg, best_val_acc))
                # Save the weights.
                time_str = time.strftime('%m%d%H%M%S')
                save_name = '%s_%s_%s_%s_net_params.pkl' % (
                    time_str, best_val_acc, '{:.4f}'.format(best_val_loss),
                    net_name)
                net_save_path = os.path.join(cuda_opt.log_dir, save_name)
                torch.save(net.state_dict(), net_save_path)

    print('Finished Training')
    time_str = time.strftime('%H时%M分%S秒')
    print(time_str)
    # ------------------------------------ step 5/5 : plot the curves ------------------------------------
    show_plot(train_iter_index, train_loss_list, val_iter_index, val_loss_list,
              val_acc_list)
    # show_distance(distance_AP_list, distance_AN_list, cuda_opt.show_plot_epoch, epoch)
    # plt.figure(3)
    # show_pairs(net, valid_loader)

    # time_str = time.strftime('%m%d%H%M')
    # save_name = '%s_%s_%s_net_params.pkl' % (time_str, val_acc_avg, net_name)
    # net_save_path = os.path.join(cuda_opt.log_dir, save_name)
    # torch.save(net.state_dict(), net_save_path)
コード例 #18
0
def train_dsm_triplet(epoch, train_loader, model, optimizer, opt, pos_aug,
                      neg_aug, recorder):
    """One epoch of DSM triplet training (instance discrimination).

    For each (anchor, positive, negative) triplet the anchor is augmented
    with ``pos_aug``, features are extracted, and the loss is the sum of an
    intra-sample triplet term and an inter-sample term built by flipping the
    negative feature batch along dim 0.

    Args:
        epoch: current epoch index (for logging only).
        train_loader: yields (inputs, _, index) with inputs a 3-list of
            anchor/positive/negative batches.
        model: feature extractor.
        optimizer: optimizer over ``model``'s parameters.
        opt: options; reads ``pt_batch_size`` and ``pt_print_freq``.
        pos_aug: augmentation applied to the anchor batch.
        neg_aug: accepted for signature compatibility; unused here.
        recorder: logging sink with ``record_message(tag, msg)``.

    Returns:
        The epoch's average loss.
    """
    print("==> (DSM triplet) training...")
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    triplet_loss = nn.TripletMarginLoss(margin=0.5, p=2)
    end = time.time()
    for idx, (inputs, _, index) in enumerate(train_loader):
        data_time.update(time.time() - end)

        bsz = inputs[0].size(0)
        # Skip ragged final batches smaller than the configured batch size.
        if bsz < opt.pt_batch_size:
            print("batch less than 16, continue")
            continue
        for i in range(len(inputs)):
            inputs[i] = inputs[i].float()
            inputs[i] = inputs[i].cuda()
        # ===================forward=====================
        anchor_old, positive, negative = inputs

        # Positive-style augmentation is applied to the anchor only.
        anchor = pos_aug(anchor_old)
        feat_k = model(positive)
        feat_n = model(negative)
        feat_q = model(anchor)
        if feat_k.size(0) > feat_q.size(0):
            print("wrong bsz")
        intra_loss = triplet_loss(feat_q, feat_k, feat_n)
        # Inter term: pair each anchor with another sample's negative by
        # flipping the negative batch along dim 0.
        inter_loss = triplet_loss(feat_q, feat_k, flip(feat_n, 0))
        alpha_1 = 1
        alpha_2 = 1
        loss = alpha_1 * intra_loss + alpha_2 * inter_loss
        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        loss_meter.update(loss.item(), bsz)
        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()
        # Build the status line only when it is actually printed (previously
        # it was formatted on every iteration).
        if (idx + 1) % opt.pt_print_freq == 0:
            message = ('DSM triplet Train: [{0}][{1}/{2}]\t'
                       'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                       'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                       'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
                           epoch,
                           idx + 1,
                           len(train_loader),
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=loss_meter))
            print(message)
            recorder.record_message('a', message)
            sys.stdout.flush()
    return loss_meter.avg
コード例 #19
0
    if opt.mode.lower() == 'train':
        if opt.optim.upper() == 'ADAM':
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, 
                model.parameters()), lr=opt.lr)#, betas=(0,0.9))
        elif opt.optim.upper() == 'SGD':
            optimizer = optim.SGD(filter(lambda p: p.requires_grad, 
                model.parameters()), lr=opt.lr,
                momentum=opt.momentum,
                weight_decay=opt.weightDecay)

            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=opt.lrStep, gamma=opt.lrGamma)
        else:
            raise ValueError('Unknown optimizer: ' + opt.optim)

        # original paper/code doesn't sqrt() the distances, we do, so sqrt() the margin, I think :D
        criterion = nn.TripletMarginLoss(margin=opt.margin**0.5, 
                p=2, reduction='sum').to(device)

    if opt.resume:
        if opt.ckpt.lower() == 'latest':
            resume_ckpt = join(opt.resume, 'checkpoints', 'checkpoint.pth.tar')
        elif opt.ckpt.lower() == 'best':
            resume_ckpt = join(opt.resume, 'checkpoints', 'model_best.pth.tar')

        if isfile(resume_ckpt):
            print("=> loading checkpoint '{}'".format(resume_ckpt))
            checkpoint = torch.load(resume_ckpt, map_location=lambda storage, loc: storage)
            opt.start_epoch = checkpoint['epoch']
            best_metric = checkpoint['best_score']
            model.load_state_dict(checkpoint['state_dict'])
            model = model.to(device)
            if opt.mode == 'train':
コード例 #20
0
        identities = identities_per_batch,
        samples = samples_per_class,
        batches_per_epoch = batches_per_epoch,
        face_dict_dir = face_dict_directory,
        data_dir = data_directory,
        transforms = data_transforms
    ),
    batch_size = 1,
    shuffle = False
)


net = Recog_Net()
net.cuda()

Triplet_Loss = nn.TripletMarginLoss(margin=margin, p=2)
optimizer = SGD(net.parameters(), lr=learning_rate)


checkpoint = torch.load(chkpoint_path)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
old_epoch = checkpoint['epoch']



for epoch in range(old_epoch, epochs):
	triplet_loss_sum = 0
	batches = enumerate(tqdm(train_dataloader))

	for batch_idx, (batch_sample) in batches:
コード例 #21
0
    # # Test : figure anchor, positive, negative images
    # img = triplet_image_datasets['val'][np.random.randint(10)]
    # tsf = transforms.ToPILImage()
    # for i in range(3):
    #     tsf(img[i]).show()
    #     print(img[i].size())
    # # Test END

    use_gpu = torch.cuda.is_available()
    batch_size = 8
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_gpu else {}
    triplet_image_loaders = {x: DataLoader(triplet_image_datasets[x], batch_size=batch_size,
                                           shuffle=True if x == 'train' else False, **kwargs)
                             for x in ['train', 'val']}

    # set up the network and training parameters

    margin = 1.
    lr = 1e-3
    model = TripletNet()
    if use_gpu:
        model.cuda()

    loss_fn = nn.TripletMarginLoss(margin=margin)
    optimizer = optims.Adam(model.parameters(), lr=lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.1, last_epoch=-1)
    num_epoch = 25
    log_interval = 5

    fit(triplet_image_loaders['train'], triplet_image_loaders['val'], model,
        loss_fn, optimizer, scheduler, num_epoch, use_gpu, log_interval)
コード例 #22
0
# Evaluation-order loader for the K562 set; shuffle is off so batches stay
# aligned index-for-index with the anchor/positive/negative loaders it is
# zipped with further below.
train_loader_K562 = torch.utils.data.DataLoader(training_set,
                                                batch_size=batch_size,
                                                shuffle=False)


def d(M1, M2):
    """Euclidean (Frobenius) distance between two equally-shaped arrays."""
    return np.linalg.norm(M1 - M2)


Net = model.TripletNetwork()

if use_gpu:
    Net = Net.cuda()

# NOTE(review): the optimizer and criterion are created but no training step
# appears in this snippet — the network is put in eval mode and restored from
# a checkpoint below.
optimizer = optim.SGD(Net.parameters(), lr=0.001, momentum=0.9)
triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
Net.eval()
#Net.load_state_dict(torch.load(path + '/triplet_losstriplet_chr1_8_epoch_330'))
#Net.load_state_dict(torch.load(path + '/tripletMarginLoss_reg_chr18_epoch_size100_350'))
# Restore pretrained weights from the selected checkpoint file.
Net.load_state_dict(
    torch.load(
        path +
        '/tripletMarginLoss_dropout_9_3_3_L2_7latent_chr1_8_epoch_size100_100')
)

# Running loss accumulators for the evaluation loop below.
running_loss1 = 0.0
running_loss2 = 0.0
for i, (v1, v2, v3, v4) in enumerate(
        zip(train_loader_anchor, train_loader_positive, train_loader_negative,
            train_loader_K562)):
コード例 #23
0

# Python 2 code (bare ``print`` statements).
# Report the model's parameter counts: total, then trainable only.
print get_n_params(tripletRNNRGB)

model_parameters = filter(lambda p: p.requires_grad,
                          tripletRNNRGB.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print params

# Cosine similarity over the feature dimension (dim=2).
cos = nn.CosineSimilarity(dim=2, eps=1e-6)

# def triplet_loss(H, Hp, Hn):
#     zero = Variable(torch.zeros(1).cuda()) if torch.cuda.is_available() else Variable(torch.zeros(1))
#     return torch.mean(torch.mean(torch.max(zero, alpha - cos(H, Hp) + cos(Hp, Hn)), dim=1))

# Standard L2 triplet margin loss replaces the cosine-based variant above.
triplet_loss = nn.TripletMarginLoss(margin=0.4, p=2)

optimizerRGB = torch.optim.Adam(tripletRNNRGB.parameters(),
                                lr=learning_rate)  #, momentum = momentum)

# Let cuDNN auto-tune convolution algorithms for fixed-size inputs.
if torch.cuda.is_available():
    torch.backends.cudnn.benchmark = True


def input_creator(triplet):
    anchorFrames = torch.load(features_dir + 'cam1/' + str(triplet[0]) + '.pt')
    positiveFrames = torch.load(features_dir + 'cam2/' + str(triplet[1]) +
                                '.pt')
    negativeFrames = torch.load(features_dir + 'cam2/' + str(triplet[2]) +
                                '.pt')
    anchorFC = anchorFrames.size(0)
コード例 #24
0
    # i_1 = margin - (x[1] - x[1])
    i_2 = margin - (x[1] - x[2])

    loss_h = (i_0 + i_2) / x.shape[0]

    print(loss_h)

# ---------------------------------------------- 15 Triplet Margin Loss -----------------------------------------
# Demo gate: set flag = 1 to run this section.
flag = 0
# flag = 1
if flag:
    # One-element embeddings: anchor 1.0, positive 2.0, negative 0.5.
    anchor = torch.tensor([[1.]])
    pos = torch.tensor([[2.]])
    neg = torch.tensor([[0.5]])

    # p=1 -> L1 distance; loss = max(d(a, p) - d(a, n) + margin, 0).
    loss_f = nn.TripletMarginLoss(margin=1.0, p=1)

    loss = loss_f(anchor, pos, neg)

    print("Triplet Margin Loss", loss)

# --------------------------------- compute by hand
flag = 0
# flag = 1
if flag:
    margin = 1
    # NOTE(review): relies on anchor/pos/neg from the previous section, which
    # only exist when its flag was enabled — both flags must be 1 together.
    a, p, n = anchor[0], pos[0], neg[0]

    # Per-element L1 distances anchor-positive and anchor-negative.
    d_ap = torch.abs(a - p)
    d_an = torch.abs(a - n)
コード例 #25
0
        'aid2cate': aid2titlevec,
        'cate': 'title',
        # 'aid2cate': aid2abstractvec,
        # 'cate': 'abstract'
    }
    # Which text field the author-profile vectors were built from ('title' here).
    print(keyarg['cate'])
    # Positive/negative pair datasets built from the author-profile publications.
    train_dataset = ReadData(train_posi_pair_path, train_neg_pair_path, whole_author_profile_pub, **keyarg)
    test_dataset = ReadData(test_posi_pair_path, test_neg_pair_path, whole_author_profile_pub, **keyarg)

    # all_dataset = ReadData(all_posi_pair_path, all_neg_pair_path, whole_author_profile_pub, **keyarg)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=2)
    # The test set is evaluated in a single full-size batch.
    test_loader = DataLoader(test_dataset, batch_size=len(test_dataset))
    # loader = DataLoader(train_loader, batch_size=BATCH_SIZE, num_workers=2)
    triplet_model = TripletModel().to(device)
    # Default TripletMarginLoss: margin=1.0, p=2.
    criterion = nn.TripletMarginLoss()
    optimizer = torch.optim.Adam(triplet_model.parameters(), lr=LR)
    # Step-wise LR schedule keyed on epoch count. (Original comment, translated:
    # "adjust LR based on epochs trained; every 30 epochs multiply LR by factor"
    # — NOTE(review): step_size is actually 10 here, not 30.)
    lr_schedule = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10) #基于epoch训练次数进行学习率调整,#每过30个epoch训练,学习率就乘factor


    ind = 0
    for epoch in range(EPOCHS):
        triplet_model.train()
        train_loss = []
        for anchor, posi, neg in train_loader:
            anchor, posi, neg = anchor.to(device), posi.to(device), neg.to(device)
            if ind ==0:
                # Log the tensor shape/content once, on the very first batch.
                print("anchor:",anchor.size(),'\n',anchor) #torch.Size([512, 1, 300])
            optimizer.zero_grad()
            # Model returns (anchor_emb, positive_emb, negative_emb).
            embs = triplet_model(anchor, posi, neg)
            loss = criterion(*embs)
コード例 #26
0
#
# It should take around 1-2 hours on GPU.
#
# Output directory ./model/<name>; only the parent 'model' dir is created here.
# NOTE(review): dir_name itself is never created in this fragment — confirm it
# is made later before anything writes into it.
dir_name = os.path.join('./model', name)
if not os.path.exists('model'):
    os.mkdir('model')

print('class_num = %d' % (class_num))
model = ft_net(class_num)
if use_gpu:
    model.cuda()

# print('model structure')
# print(model)

criterion_triplet = nn.TripletMarginLoss(margin=0.5)

# Split parameters into the final classifier head (model.fc) and the backbone
# by comparing object ids against model.fc's parameters; the optimizer below
# gives the head the full LR and the backbone a 0.1x LR.
classifier_id = list(map(id, model.fc.parameters()))
classifier_params = filter(lambda p: id(p) in classifier_id,
                           model.parameters())
base_params = filter(lambda p: id(p) not in classifier_id, model.parameters())

optimizer_ft = optim.SGD([
    {
        'params': classifier_params,
        'lr': 1 * opt.lr
    },
    {
        'params': base_params,
        'lr': 0.1 * opt.lr
    },
コード例 #27
0
def main():
    """Train a VGG-based triplet-loss speaker-embedding model.

    Parses CLI arguments, builds train/test triplet data loaders, sets up a
    VGGVox model with RMSprop and nn.TripletMarginLoss, runs the train/test
    loop for the configured number of epochs, and optionally saves the final
    state dict. Experiment tracking goes to Weights & Biases.
    """
    #Train settings
    wandb.init(project="vgg_triplet")
    global args, best_acc
    parser = argparse.ArgumentParser(
        description='VGG Triplet-Loss Speaker Embedding')
    parser.add_argument('--batch-size',
                        type=int,
                        default=32,
                        metavar='N',
                        help='input batch size for training')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=32,
                        metavar='N',
                        help='input batch size for testing')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        metavar='N',
                        help='number of epochs for training')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=2,
                        metavar='S',
                        help='random seed')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=20,
        metavar='N',
        help='how many batches to wait before logging training score')
    # NOTE(review): default is 2 but the help text claims 0.2 — confirm which
    # margin is actually intended.
    parser.add_argument('--margin',
                        type=float,
                        default=2,
                        metavar='M',
                        help='margin for triplet loss (default: 0.2)')
    parser.add_argument('--resume',
                        default='',
                        type=str,
                        help='path to latest checkpoint (default: None)')
    parser.add_argument('--name',
                        default='TripletNet_RMSprop',
                        type=str,
                        help='name of experiment')
    parser.add_argument(
        '--base-path',
        type=str,
        default=
        '/home/lucas/PycharmProjects/Papers_with_code/data/AMI/amicorpus_individual/Extracted_Speech',
        help='string to triplets')
    parser.add_argument('--ap-file',
                        default='anchor_pairs.txt',
                        type=str,
                        help='name of file with anchor-positive pairs')
    parser.add_argument('--s-file',
                        default='trimmed_sample_list.txt',
                        type=str,
                        help='name of sample list')
    parser.add_argument(
        '--save-path',
        default=
        '/home/lucas/PycharmProjects/Papers_with_code/data/models/VGG_Triplet',
        type=str,
        help='path to save models to')
    # NOTE(review): argparse `type=bool` is a known pitfall — any non-empty
    # string (including 'False') parses as True. An action='store_true' flag
    # would be safer, but changing it alters the CLI, so only flagged here.
    parser.add_argument('--save', type=bool, default=True, help='save model?')
    parser.add_argument('--load',
                        type=bool,
                        default=False,
                        help='load model from latest checkpoint')
    args = parser.parse_args()

    # Name and record this run in Weights & Biases.
    wandb.run.name = args.name
    wandb.run.save()
    wandb.config.update(args)

    # Use CUDA only when available and not explicitly disabled.
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    print(args.no_cuda)
    print(torch.cuda.is_available())
    if args.cuda:
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    torch.manual_seed(args.seed)
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}

    #train_loader = torch.utils.data.DataLoader(TripletLoader(base_path=args.base_path,anchor_positive_pairs=args.ap_file,sample_list=args.s_file,train=True),
    #                                           batch_size=args.batch_size,shuffle=True,**kwargs)
    #test_loader = torch.utils.data.DataLoader(TripletLoader(base_path=args.base_path,anchor_positive_pairs=args.ap_file,sample_list=args.s_file,train=False),
    #                                          batch_size=args.test_batch_size,shuffle=True,**kwargs)

    #single_train_loader = torch.utils.data.DataLoader(Spectrogram_Loader(base_path=args.base_path, anchor_positive_pairs=args.ap_file, sample_list=args.s_file, train=True), batch_size=args.batch_size, shuffle=True, **kwargs)
    #single_test_loader = torch.utils.data.DataLoader(Spectrogram_Loader(base_path=args.base_path, anchor_positive_pairs=args.ap_file, sample_list=args.s_file, train=False), batch_size=args.test_batch_size, shuffle=True, **kwargs)

    # Time-domain triplet loaders built from the sample list file.
    train_time_loader = torch.utils.data.DataLoader(Triplet_Time_Loader(
        path=os.path.join(args.base_path, args.s_file), train=True),
                                                    batch_size=args.batch_size,
                                                    shuffle=True,
                                                    **kwargs)
    test_time_loader = torch.utils.data.DataLoader(
        Triplet_Time_Loader(path=os.path.join(args.base_path, args.s_file),
                            train=False),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    #global plotter
    #plotter = VisdomLinePlotter(env_name=args.name)

    class Net(nn.Module):
        """Small 4-conv CNN producing a 256-d embedding.

        NOTE(review): defined but never instantiated in this function —
        the model actually trained below is VGGVox. Possibly dead code.
        """
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(1, 16, kernel_size=7)
            self.conv2 = nn.Conv2d(16, 16, kernel_size=7)
            self.bn_1 = nn.BatchNorm2d(16)
            self.conv3 = nn.Conv2d(16, 32, kernel_size=7)
            self.conv4 = nn.Conv2d(32, 32, kernel_size=7)
            self.bn_2 = nn.BatchNorm2d(32)
            self.conv2_drop = nn.Dropout2d(p=0.2)
            self.fc1 = nn.Linear(448, 256)
            self.fc2 = nn.Linear(256, 256)

        def forward(self, x):
            # conv -> conv+BN -> maxpool(7), twice, with dropout on the
            # second stage; then flatten and two FC layers.
            x = F.relu(self.conv1(x))
            x = F.relu(F.max_pool2d(self.bn_1(self.conv2(x)), 7))
            x = F.relu(self.conv3(x))
            x = F.relu(
                F.max_pool2d(self.conv2_drop(self.bn_2(self.conv4(x))), 7))
            #print("SIZE  ",x.size())
            x = x.view(x.size(0), -1)
            x = F.relu(self.fc1(x))
            x = F.dropout(x, training=self.training)
            return self.fc2(x)

    model = VGGVox()
    if args.cuda:
        model.to(device)
    if args.load:
        model.load_state_dict(torch.load(args.save_path))
        print("Model loaded from state dict")
    #tnet = TripletNet(model)
    #if args.cuda:
    #    tnet.to(device)
    wandb.watch(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            # NOTE(review): best_prec1 is restored but never used afterwards
            # in this function.
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #criterion = torch.nn.MarginRankingLoss(margin = args.margin)
    criterion = nn.TripletMarginLoss(margin=args.margin, p=2)

    #optimizer = optim.Adam(tnet.parameters(),lr=args.lr)
    #optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.lr,
                              alpha=0.8,
                              momentum=args.momentum)
    #n_parameters = sum([p.data.nelement() for p in tnet.parameters()])
    #print('  + NUmber of params: {}'.format(n_parameters))

    # Main train/test loop with per-epoch timing.
    for epoch in range(1, args.epochs + 1):
        start_time = time.time()
        train_batch(train_time_loader, model, optimizer, epoch)
        test_batch(test_time_loader, model, epoch)
        duration = time.time() - start_time
        print("Done training epoch {} in {:.4f}".format(epoch, duration))

    #for epoch in range(1, args.epochs + 1):
    #    test_batch(single_train_loader, model, epoch)

    # Persist only the final weights (state dict), not the full module.
    if args.save:
        torch.save(model.state_dict(), args.save_path)
        print("Model Saved")
コード例 #28
0
def eval_batch(fts, captioner, retriever, args):
    """Evaluate dialog-based image retrieval on the test set.

    For each batch of target images, runs `args.num_dialog_turns` dialog
    turns: the captioner describes the target relative to the current
    candidate, the retriever folds that feedback into its dialog state, and
    the ranker re-ranks all targets. Triplet loss against random negatives
    and per-turn ranking statistics are accumulated along the way.

    Returns:
        (metrics, ret_results) — metrics holds loss/score/per-turn trackers
        and average retrieval time per target; ret_results maps target index
        to the per-turn captions, candidate ids and rankings.
    """
    criterion = nn.TripletMarginLoss(reduction='mean',
                                     margin=args.margin).to(device)
    # generate a mapping for dev, to ensure sampling bias is reduced
    num_target = len(fts['asins'])

    batch_size = args.batch_size
    ranker = Ranker.Ranker(device)
    # Partial trailing batch (if any) is dropped by floor().
    total_step = math.floor(num_target / batch_size)

    # One loss / ranking accumulator slot per dialog turn.
    ranking_tracker = [0] * args.num_dialog_turns
    loss_tracker = [0] * args.num_dialog_turns

    # Pre-shuffled first-turn candidate assignment, fixed per data set.
    with open('data/shuffled.{}.{}.json'.format(args.data_set, 'test')) as f:
        first_candidate_set = json.load(f)

    # Pre-compute embeddings for the whole gallery once.
    with torch.no_grad():
        retriever.eval()
        ranker.update_emb(fts, args.batch_size, retriever)

    retriever.eval()
    ret_results = {}
    total_time = 0

    for step in tqdm.tqdm(range(total_step)):
        # sample target
        target_ids = torch.tensor([
            i for i in range(step * batch_size, (step + 1) * batch_size)
        ]).to(device=device, dtype=torch.long)

        # sample first batch of candidates
        candidate_ids = torch.tensor([
            first_candidate_set[i]
            for i in range(step * batch_size, (step + 1) * batch_size)
        ],
                                     device=device,
                                     dtype=torch.long)

        # keep track of results
        ret_result = {}
        for batch_id in range(target_ids.size(0)):
            idx = target_ids[batch_id].cpu().item()
            ret_result[idx] = {}
            ret_result[idx]['candidate'] = []
            ret_result[idx]['ranking'] = []
            ret_result[idx]['caption'] = []

        # Target image features/embeddings stay fixed across dialog turns.
        target_img_ft = utils.get_image_batch(fts, target_ids)
        target_img_ft = target_img_ft.to(device)
        target_img_emb = retriever.encode_image(target_img_ft)

        target_attr = utils.get_attribute_batch(fts, target_ids)
        target_attr = target_attr.to(device)

        # clean up dialog history tracker
        retriever.init_hist()
        # history_hidden = history_hidden.expand_as(target_img_emb)

        loss = 0

        for d_turn in range(args.num_dialog_turns):
            # Timing excludes caption generation (see the gap below).
            last_timer = int(round(time.time() * 1000))
            # get candidate image features
            candidate_img_ft = utils.get_image_batch(fts, candidate_ids)
            candidate_img_ft = candidate_img_ft.to(device)

            candidate_attr = utils.get_attribute_batch(fts, candidate_ids)
            candidate_attr = candidate_attr.to(device)
            # generate captions from model
            total_time += (int(round(time.time() * 1000)) - last_timer)
            with torch.no_grad():
                sentence_ids, caps = captioner.get_caption(target_img_ft,
                                                           candidate_img_ft,
                                                           target_attr,
                                                           candidate_attr,
                                                           return_cap=True)
            last_timer = int(round(time.time() * 1000))
            sentence_ids = sentence_ids.to(device)

            candidate_img_ft = candidate_img_ft.to(device)

            # Fold this turn's caption + candidate into the dialog state.
            history_hidden = retriever.forward(text=sentence_ids,
                                               image=candidate_img_ft,
                                               attribute=candidate_attr)

            # sample negatives, update tracker's output to
            # match targets via triplet loss
            negative_ids = torch.tensor([0] * args.batch_size,
                                        device=device,
                                        dtype=torch.long)
            negative_ids.random_(0, num_target)

            negative_img_ft = utils.get_image_batch(fts, negative_ids)
            negative_img_ft = negative_img_ft.to(device)
            negative_img_emb = retriever.encode_image(negative_img_ft)

            # accumulate loss
            loss_tmp = criterion(history_hidden, target_img_emb,
                                 negative_img_emb)
            loss += loss_tmp
            loss_tracker[d_turn] += loss_tmp.item()

            # generate new candidates, compute ranking information
            with torch.no_grad():
                candidate_ids = ranker.nearest_neighbors(history_hidden)
                ranking = ranker.compute_rank(history_hidden, target_ids)
            # Mean rank normalized by gallery size.
            ranking_tracker[d_turn] += (ranking.mean().item() /
                                        (num_target * 1.0))

            for batch_id in range(target_ids.size(0)):
                idx = target_ids[batch_id].cpu().item()
                ret_result[idx]['caption'].append(caps[batch_id])
                ret_result[idx]['candidate'].append(
                    candidate_ids[batch_id].item())
                ret_result[idx]['ranking'].append(ranking[batch_id].item())

            total_time += (int(round(time.time() * 1000)) - last_timer)

        ret_results.update(ret_result)

    # NOTE(review): if total_step == 0 (fewer targets than one batch) the loop
    # never runs and `loss` is still the int 0, so .item() would raise —
    # this assumes at least one full batch exists.
    loss = loss.item() / total_step
    for i in range(args.num_dialog_turns):
        ranking_tracker[i] /= total_step
        loss_tracker[i] /= total_step

    metrics = {
        'loss': loss,
        # Higher is better; the constant 5 presumably assumes
        # num_dialog_turns == 5 — TODO confirm.
        'score': 5 - sum(ranking_tracker),
        'loss_tracker': loss_tracker,
        'ranking_tracker': ranking_tracker,
        'retrieve_time': total_time / float(num_target)
    }
    return metrics, ret_results
コード例 #29
0
# Initialize generator / discriminator / encoder weights with the shared
# weights_init scheme.
G.apply(weights_init)
D.apply(weights_init)
E.apply(weights_init)
""" ===================== TRAINING ======================== """

# One Adam optimizer per sub-network.
G_solver = optim.Adam(G.parameters(), lr=1e-4)
D_solver = optim.Adam(D.parameters(), lr=1e-4)
E_solver = optim.Adam(E.parameters(), lr=1e-4)

# Target labels for the BCE loss: real=1, fake=0, and a 0.5 soft target.
# NOTE(review): Variable is the legacy pre-PyTorch-0.4 API; plain tensors
# would suffice on modern versions.
ones_label = Variable(torch.ones(mb_size)).cuda()
zeros_label = Variable(torch.zeros(mb_size)).cuda()
half_label = Variable(torch.ones(mb_size) * 0.5).cuda()

criterion = nn.BCELoss()
# criterion_r = nn.MarginRankingLoss(margin=0.1,size_average=False)
# Triplet loss with L1 distance (p=1) and the default margin of 1.0.
criterion_t = nn.TripletMarginLoss(p=1)
criterion_mse = nn.MSELoss()

for it in range(100000):

    # Sample data
    z = Variable(torch.randn(mb_size, Z_dim)).cuda()
    X, c = mm.batch_next(mb_size)
    X = Variable(torch.from_numpy(X)).cuda()

    # Labels converted via model.set_label_ve_ma — presumably a vector/matrix
    # encoding used as the generator condition; confirm against its definition.
    c_v = Variable(torch.from_numpy(
        model.set_label_ve_ma(c).astype('float32'))).cuda()  # label for g c
    # c_t = Variable(torch.from_numpy(model.set_label_ve(c).astype('float32'))).cuda() # label for d c(true)

    # Dicriminator forward-loss-backward-update
    D.zero_grad()
コード例 #30
0
    ['kldiv', nn.KLDivLoss()],
    ['mse', nn.MSELoss()],
    ['bce', nn.BCELoss()],
    ['bce_with_logits', nn.BCEWithLogitsLoss()],
    ['cosine_embedding', nn.CosineEmbeddingLoss()],
    # ['ctc', nn.CTCLoss()],
    ['hinge_embedding', nn.HingeEmbeddingLoss()],
    ['margin_ranking', nn.MarginRankingLoss()],
    ['multi_label_margin', nn.MultiLabelMarginLoss()],
    ['multi_label_soft_margin',
     nn.MultiLabelSoftMarginLoss()],
    ['multi_margin', nn.MultiMarginLoss()],
    ['smooth_l1', nn.SmoothL1Loss()],
    ['soft_margin', nn.SoftMarginLoss()],
    ['cross_entropy', nn.CrossEntropyLoss()],
    ['triplet_margin', nn.TripletMarginLoss()],
    ['poisson_nll', nn.PoissonNLLLoss()]
])

# Registry mapping lowercase optimizer names to their torch.optim classes.
# The previous `dict({...})` copied a dict literal through the dict()
# constructor for no benefit (flake8-comprehensions C418); a plain literal
# builds the identical mapping.
optimizer = {
    'adadelta': optim.Adadelta,
    'adagrad': optim.Adagrad,
    'adam': optim.Adam,
    'sparse_adam': optim.SparseAdam,
    'adamax': optim.Adamax,
    'asgd': optim.ASGD,
    'lbfgs': optim.LBFGS,
    'rmsprop': optim.RMSprop,
    'rprop': optim.Rprop,
    'sgd': optim.SGD
}