Example #1
0
def evalu():
    """Run one evaluation pass over the global ``dev`` set and print the loss.

    Relies on module-level globals: ``net``, ``dev``, ``batch_size``,
    ``hidden_size``, ``cudanum``, and the flag ``first`` (cleared after the
    first call so the raw sentence pairs are only printed once).
    """
    global first
    dev_loss = 0
    criterion = ContrastiveLoss()
    #criterion = nn.MSELoss(size_average = False)

    # Iterate over the dev set in mini-batches.
    for i_batch in range(0, math.ceil(len(dev) / batch_size)):
        loss = 0
        batch, i1, i2, score = get_batch(i_batch, dev)

        label = Variable(torch.FloatTensor(score)).cuda(cudanum)
        input1 = make_padding(i1)
        input2 = make_padding(i2)

        # Fresh random initial hidden states for both encoder branches.
        # NOTE(review): legacy Variable API — this code targets PyTorch <= 0.3.
        hidden1 = Variable(torch.randn(1, len(input1),
                                       hidden_size)).cuda(cudanum)
        cont = Variable(torch.randn(1, len(input2), hidden_size)).cuda(cudanum)

        out1, out2 = net(input1, input2, hidden1, cont)

        loss = criterion(out1, out2, label)
        dev_loss += loss.data[0]  # 0.3-era scalar access (loss.item() on >= 0.4)

        # Print per-pair predictions for the first batch only.
        if i_batch < 1:
            out = F.pairwise_distance(out1, out2)
            print('dev data first batch result')
            for i in range(len(input1)):
                if first:
                    print('sent1 :', ' '.join(batch[i]['sent1']))
                    print('sent2 :', ' '.join(batch[i]['sent2']))
                print('predict : ', out.data[i], ', ', 'label : ',
                      label.data[i])
        if first:
            first = False
    print('total dev loss : ', dev_loss)
def train_siamese_network(nclasses, fp16, transform, batch_size, num_epochs):
    """Train a SiameseNetwork with contrastive loss and plot the loss curve.

    Args:
        nclasses: unused in this body — kept for interface compatibility
            (TODO confirm against callers).
        fp16: if truthy, wrap model/optimizer with NVIDIA apex AMP (O1).
        transform: torchvision transform handed to ``get_dataloader``.
        batch_size: mini-batch size for the training dataloader.
        num_epochs: number of passes over the training set.
    """
    since = time.time()
    net = SiameseNetwork().cuda()
    # net.classifier.classifier = nn.Sequential()

    print(net)
    print("Start time: ", since)

    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)

    if fp16:
        # model = network_to_half(model)
        # optimizer_ft = FP16_Optimizer(optimizer_ft, static_loss_scale = 128.0)
        print("Memory saving is on using fp16")
        net, optimizer = amp.initialize(net, optimizer, opt_level="O1")

    counter = []  # iteration counts: x-axis of the loss plot
    loss_history = []  # loss values sampled every 10 batches
    iteration_number = 0
    train_dataloader = get_dataloader(transform, batch_size)
    print("Started training siamese network")

    for epoch in range(0, num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        for i, data in enumerate(train_dataloader, 0):
            img0, img1, label = data
            img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()

            optimizer.zero_grad()

            output1, output2 = net(img0, img1)

            loss_contrastive = criterion(output1, output2, label)
            # loss_contrastive.backward()
            # optimizer.step()
            # Under AMP the loss must be scaled before backward.
            if fp16:  # we use optimier to backward loss
                with amp.scale_loss(loss_contrastive,
                                    optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_contrastive.backward()
            optimizer.step()

            # Sample the loss every 10 batches for the plot.
            if i % 10 == 0:
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(loss_contrastive.item())

        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print("Epoch number {} finished, Current loss {}\n".format(
            epoch, loss_contrastive.item()))

        # Checkpoint on every 10th epoch (epochs 9, 19, ...).
        if epoch % 10 == 9:
            save_model(epoch, net, loss_contrastive, optimizer)
    show_plot(counter, loss_history)
Example #3
0
def main():
    device = torch.device('cuda:9' if torch.cuda.is_available else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = SiameseNetworkDataset(imageFolderDataset = train_dataset_dir, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                        shuffle=False,
                        num_workers=0,
                        batch_size=10)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(),lr = 0.0005 )
    loss_vals = []
    '''
    Training Starts
    '''
    print('Training started')
    for epoch in range(10):
       loss_epoch = 0
       for i, data in enumerate(vis_dataloader,0):
           img_0, img_1, label = data
           print(i, label)
           # img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
           optimizer.zero_grad()
           out_0, out_1 = net(img_0, img_1)
           loss = criterion(out_0, out_1, label)
           loss_epoch += loss.item()
           loss.backward()
           optimizer.step()
       loss_vals.append(loss_epoch)
       print('Epoch',str(epoch+1), str(loss_epoch))
       print('Epoch done')
       torch.save(net.state_dict(), 'siamese.pt')
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')
   

    # ****************************** Training ends ***************************************


    '''
Example #4
0
def train(ep):
    """Train the global ``net`` for ``ep`` epochs on the global ``dt`` split.

    Shuffles both train (``dt``) and dev (``dev``) splits once up front, then
    runs Adadelta + ContrastiveLoss mini-batch training and calls ``evalu()``
    after every epoch. Uses globals: ``net``, ``dt``, ``dev``, ``batch_size``,
    ``hidden_size``, ``cudanum``.
    """
    random.shuffle(dt)
    random.shuffle(dev)

    #criterion = nn.MSELoss(size_average=False)
    criterion = ContrastiveLoss()

    net_optim = optim.Adadelta(net.parameters())

    for i_epoch in range(ep):
        print(str(i_epoch) + ' epoch')
        total_loss = 0
        for i_batch in range(0, math.ceil(len(dt) / batch_size)):
            net_optim.zero_grad()

            loss = 0
            batch, i1, i2, score = get_batch(i_batch, dt)

            label = Variable(torch.FloatTensor(score)).cuda(cudanum)

            input1 = make_padding(i1)
            input2 = make_padding(i2)

            # Random initial hidden states for the two RNN branches.
            # NOTE(review): legacy Variable API — targets PyTorch <= 0.3.
            hidden1 = Variable(torch.randn(1, len(input1),
                                           hidden_size)).cuda(cudanum)
            cont = Variable(torch.randn(1, len(input2),
                                        hidden_size)).cuda(cudanum)

            out1, out2 = net(input1, input2, hidden1, cont)
            #print(out1)
            #print(out2)

            loss = criterion(out1, out2, label)

            total_loss += loss.data[0]  # 0.3-era scalar access (loss.item() on >= 0.4)

            loss.backward()
            net_optim.step()

        print('train total loss : ', total_loss)
        evalu()
def main():
    """Train a SCAN image-text matching model with contrastive loss.

    Parses options, restricts CUDA devices, builds loaders/model/optimizer,
    optionally resumes from a checkpoint, then trains with periodic
    validation and per-epoch checkpointing of the best R@sum.
    """
    # Hyper Parameters
    opt = opts.parse_opt()

    device_id = opt.gpuid
    device_count = len(str(device_id).split(","))
    #assert device_count == 1 or device_count == 2
    print("use GPU:", device_id, "GPUs_count", device_count, flush=True)
    # After CUDA_VISIBLE_DEVICES is set, visible devices renumber from 0.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)
    device_id = 0
    torch.cuda.set_device(0)

    # Load Vocabulary Wrapper
    vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
    opt.vocab_size = len(vocab)

    # Load data loaders
    train_loader, val_loader = data.get_loaders(
        opt.data_name, vocab, opt.batch_size, opt.workers, opt)

    # Construct the model
    model = SCAN(opt)
    model.cuda()
    model = nn.DataParallel(model)

    # Loss and Optimizer
    criterion = ContrastiveLoss(opt=opt, margin=opt.margin, max_violation=opt.max_violation)
    # BUGFIX: 'batchmean' is only a valid reduction for nn.KLDivLoss;
    # nn.MSELoss accepts 'none' | 'mean' | 'sum' and rejects 'batchmean'.
    mse_criterion = nn.MSELoss(reduction="mean")
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)

    # optionally resume from a checkpoint
    if not os.path.exists(opt.model_name):
        os.makedirs(opt.model_name)
    start_epoch = 0
    best_rsum = 0

    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
                  .format(opt.resume, start_epoch, best_rsum))
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    # Baseline validation before training starts (on the unwrapped module).
    evalrank(model.module, val_loader, opt)

    print(opt, flush=True)

    # Train the Model
    for epoch in range(start_epoch, opt.num_epochs):
        message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
        log_file = os.path.join(opt.logger_name, "performance.log")
        logging_func(log_file, message)
        print("model name: ", opt.model_name, flush=True)
        adjust_learning_rate(opt, optimizer, epoch)
        run_time = 0
        for i, (images, captions, lengths, masks, ids, _) in enumerate(train_loader):
            start_time = time.time()
            model.train()

            optimizer.zero_grad()

            # Replicate the image batch so each DataParallel replica gets one.
            if device_count != 1:
                images = images.repeat(device_count, 1, 1)

            score = model(images, captions, lengths, masks, ids)
            loss = criterion(score)

            loss.backward()
            if opt.grad_clip > 0:
                clip_grad_norm_(model.parameters(), opt.grad_clip)
            optimizer.step()
            run_time += time.time() - start_time
            # validate at every val_step
            if i % 100 == 0:
                log = "epoch: %d; batch: %d/%d; loss: %.4f; time: %.4f" % (
                    epoch, i, len(train_loader), loss.data.item(), run_time / 100)
                print(log, flush=True)
                run_time = 0
            if (i + 1) % opt.val_step == 0:
                evalrank(model.module, val_loader, opt)

        print("-------- performance at epoch: %d --------" % (epoch))
        # evaluate on validation set
        rsum = evalrank(model.module, val_loader, opt)
        #rsum = -100
        filename = 'model_' + str(epoch) + '.pth.tar'
        # remember best R@ sum and save checkpoint
        is_best = rsum > best_rsum
        best_rsum = max(rsum, best_rsum)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'best_rsum': best_rsum,
            'opt': opt,
        }, is_best, filename=filename, prefix=opt.model_name + '/')
Example #6
0
    return running_loss / number_samples, max_accuracy


if __name__ == "__main__":
    # Command-line configuration for SigNet training.
    parser = ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-5)
    parser.add_argument('--dataset',
                        type=str,
                        choices=['cedar', 'bengali', 'hindi'],
                        default='cedar')
    args = parser.parse_args()
    print(args)

    # Model, loss and optimizer.
    model = SigNet().to(device)
    criterion = ContrastiveLoss(alpha=1, beta=1, margin=1).to(device)
    # BUGFIX: honour the --lr command-line flag instead of hard-coding 1e-5
    # (the default matches the old constant, so behaviour without the flag
    # is unchanged).
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.lr,
                              eps=1e-8,
                              weight_decay=5e-4,
                              momentum=0.9)
    # Decay the learning rate by 10x every 5 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, 5, 0.1)
    num_epochs = 20

    # Signature images are resized and inverted (pen strokes become bright)
    # before conversion to tensors.
    image_transform = transforms.Compose([
        transforms.Resize((155, 220)),
        ImageOps.invert,
        transforms.ToTensor(),
        # TODO: add normalize
    ])
Example #7
0
     text_encoder,
     find_unused_parameters=True,
     device_ids=[local_rank],
     output_device=local_rank)
 image_encoder = nn.parallel.DistributedDataParallel(
     image_encoder,
     find_unused_parameters=True,
     device_ids=[local_rank],
     output_device=local_rank)
 score_model = nn.parallel.DistributedDataParallel(
     score_model,
     find_unused_parameters=True,
     device_ids=[local_rank],
     output_device=local_rank)
 contrastive_loss = ContrastiveLoss(0.9,
                                    max_violation=True,
                                    reduction='mean')
 # contrastive_loss = ExponentialLoss()
 for epoch in range(start_epoch, 30):
     # tbar = tqdm(loader)
     if local_rank == 0:
         tbar = tqdm(loader)
     else:
         tbar = loader
     losses_manual_mining = 0.
     losses_hard_mining = 0.
     # losses_gen = 0.
     # losses3 = 0.
     # losses_classify = 0.
     text_encoder.train()
     image_encoder.train()
import torchvision
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms

import config
from model import SiameseNetwork, ContrastiveLoss, train
from utils import SiameseNetworkDataset

# Script: build the paired-image dataset and train a SiameseNetwork
# with contrastive loss, using settings from config.args().
opt = config.args()
folder_dataset = torchvision.datasets.ImageFolder(root=opt.training_dir)

# Define the image transform for the dataset.
transform = transforms.Compose([transforms.Resize((100, 100)),  # gotcha: passing an int vs a tuple to Resize behaves differently
                                transforms.ToTensor()])
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
                                        transform=transform,
                                        should_invert=False)

# DataLoader over the paired-image dataset.
train_dataloader = DataLoader(siamese_dataset,
                              shuffle=True,
                              batch_size=opt.batch_size)

net = SiameseNetwork().cuda() # build the model and move it to the GPU
print(net)
criterion = ContrastiveLoss(margin=2.0) # contrastive loss with margin 2.0
optimizer = optim.Adam(net.parameters(), lr=opt.lr) # Adam optimizer

train(net, optimizer, criterion, train_dataloader, opt)
Example #9
0
def train(args):
    """Train SiameseNetwork on the CUB dataset with contrastive loss.

    Evaluates retrieval performance before training and every
    ``args.eval_step`` epochs, saving a checkpoint after each evaluation.
    """
    # basic arguments.
    ngpu = args.ngpu
    margin = args.margin
    num_epochs = args.num_epochs
    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    gamma = args.gamma  # for learning rate decay

    root_dir = args.root_dir
    image_txt = args.image_txt
    train_test_split_txt = args.train_test_split_txt
    label_txt = args.label_txt
    ckpt_dir = args.ckpt_dir
    eval_step = args.eval_step

    pretrained = args.pretrained
    aux_logits = args.aux_logits
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits': aux_logits}

    # network and loss
    siamese_network = SiameseNetwork(**kargs)
    gpu_number = torch.cuda.device_count()
    # Single flag for "wrapped in DataParallel", used consistently below so
    # `.module` is only dereferenced when the wrapper actually exists.
    # BUGFIX: the checkpoint save unconditionally used `.module` and crashed
    # on single-GPU/CPU runs; the optimizer guard (`gpu_number > 1`) also
    # disagreed with the wrap condition (`device.type == 'cuda' and ...`).
    use_parallel = device.type == 'cuda' and gpu_number > 1
    if use_parallel:
        siamese_network = nn.DataParallel(siamese_network, list(range(torch.cuda.device_count())))
    siamese_network.to(device)
    contrastive_loss = ContrastiveLoss(margin=margin)
    # The underlying model, whether or not it is wrapped in DataParallel.
    base_network = siamese_network.module if use_parallel else siamese_network

    # params = siamese_network.parameters()
    # optimizer = optim.Adam(params, lr=0.0005)
    # optimizer = optim.SGD(params, lr=0.01, momentum=0.9)

    # Different learning rates: fine-tune the Inception backbone slowly,
    # train the new head faster.
    optimizer = optim.SGD([
                       {'params': base_network.inception_v3.parameters()},
                       {'params': base_network.main.parameters(), 'lr': 1e-2}
                      ], lr=0.00001, momentum=0.9)

    # NOTE(review): the scheduler is created but never stepped — confirm
    # whether a scheduler.step() per epoch was intended.
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma, last_epoch=-1)

    transform = transforms.Compose([transforms.Resize((299, 299)),
                                    transforms.CenterCrop(299),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
                                  )
    cub_dataset = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=True, offset=1)
    dataloader = DataLoader(dataset=cub_dataset, batch_size=train_batch_size, shuffle=True, num_workers=4)

    cub_dataset_eval = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=False, offset=1)
    dataloader_eval = DataLoader(dataset=cub_dataset_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)

    for epoch in range(num_epochs):
        # Baseline evaluation before any training.
        if epoch == 0:
            feature_set, label_set = get_feature_and_label(siamese_network, dataloader_eval, device)
            evaluation(feature_set, label_set)
        siamese_network.train()
        for i, data in enumerate(dataloader, 0):
            img_1, img_2, sim_label = data['img_1'].to(device), data['img_2'].to(device), data['sim_label'].type(torch.FloatTensor).to(device)
            optimizer.zero_grad()
            output_1, output_2 = siamese_network(img_1, img_2)
            loss = contrastive_loss(output_1, output_2, sim_label)
            loss.backward()
            optimizer.step()

            if i % 20 == 0 and i > 0:
                print("{}, Epoch [{:3d}/{:3d}], Iter [{:3d}/{:3d}], Current loss: {}".format(
                      datetime.datetime.now(), epoch, num_epochs, i, len(dataloader), loss.item()))
        if epoch % eval_step == 0:
            print("Start evalution")
            feature_set, label_set = get_feature_and_label(siamese_network, dataloader_eval, device)
            evaluation(feature_set, label_set)
            # Save the unwrapped model's weights (works with or without DataParallel).
            torch.save(base_network.state_dict(), os.path.join(ckpt_dir, 'model_' + str(epoch) + '_.pth'))
Example #10
0
from flow import ROIFlowBatch
from model import SiameseCNN, ContrastiveLoss

from torch.nn import functional as F

# Script: train SiameseCNN on worm-video ROI pairs with contrastive loss.
if __name__ == '__main__':
    #mask_file = '/Users/ajaver/OneDrive - Imperial College London/aggregation/N2_1_Ch1_29062017_182108_comp3.hdf5'
    # Paths to the worm-video mask file and its matching features file.
    data_dir = '/Users/ajaver/OneDrive - Imperial College London/classify_strains/train_data/videos'
    fname = 'BRC20067_worms10_food1-10_Set10_Pos5_Ch6_16052017_165021.hdf5'
    mask_file = os.path.join(data_dir,fname)
    feat_file = os.path.join(data_dir,fname.replace('.hdf5', '_featuresN.hdf5'))
    
    n_epochs = 1
    
    model = SiameseCNN()
    criterion = ContrastiveLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    
    # ROIFlowBatch yields (input_var, target) batches from the HDF5 files.
    gen = ROIFlowBatch(mask_file, feat_file)
    model.train()
    for epoch in range(n_epochs):
        pbar = tqdm.tqdm(gen)
        for input_var, target in pbar:
            out1, out2 = model.forward(input_var)
            
            loss = criterion(out1, out2, target)
            optimizer.zero_grad()               # clear gradients for this training step
            loss.backward()                     # backpropagation, compute gradients
            optimizer.step() 
            
            # Pairs farther than 1.0 apart in embedding space are predicted dissimilar.
            pred = (F.pairwise_distance(out1, out2)> 1).long().squeeze()
Example #11
0
# Hyper-parameters for CUB siamese training (constants; no CLI parsing here).
ngpu = 2
margin = 1.
num_epochs = 1000
train_batch_size = 64
test_batch_size = 32
gamma = 0.98  # for learning rate decay
pretrained = False
aux_logits = False
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits': aux_logits}
siamese_network = SiameseNetwork(**kargs)
# Wrap in DataParallel only when several CUDA devices are visible.
if device.type == 'cuda' and torch.cuda.device_count() > 1:
    siamese_network = nn.DataParallel(siamese_network,
                                      list(range(torch.cuda.device_count())))
siamese_network.to(device)
contrastive_loss = ContrastiveLoss(margin=margin)

# params = siamese_network.parameters()

# optimizer = optim.Adam(params, lr=0.0005)
# optimizer = optim.SGD(params, lr=0.01, momentum=0.9)

# using different lr
optimizer = optim.SGD(
    [{
        'params': siamese_network.module.inception_v3.parameters()
    }, {
        'params': siamese_network.module.main.parameters(),
        'lr': 1e-3
    }],
    lr=0.00001,
Example #12
0
def main():
    """Train a Siamese network on image pairs, then score test-pair similarity.

    Trains for 1000 epochs on 'images/train', plots the per-epoch loss, then
    reloads weights from 'siamese.pt' and labels a test pair "similar" when
    the embedding distance is <= 0.5.

    NOTE(review): 'siamese.pt' is never written in this function — confirm it
    is produced by an earlier run, otherwise the freshly trained weights are
    discarded before testing.
    """
    # BUGFIX: torch.cuda.is_available is a function; the bare attribute is
    # always truthy, so 'cuda:9' was selected even on CPU-only machines.
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/train')
    train_dataset = SiameseNetworkDataset(
        imageFolderDataset=train_dataset_dir,
        transform=transforms.Compose([transforms.Resize((100, 100)),
                                      transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                                shuffle=False,
                                num_workers=0,
                                batch_size=1)

    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    net.to(device)

    # ------------------------------ Training ------------------------------
    print('Training started')
    for epoch in range(1000):
        loss_epoch = 0
        for i, data in enumerate(vis_dataloader, 0):
            img_0, img_1, label = data
            img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
            optimizer.zero_grad()
            out_0, out_1 = net(img_0, img_1)
            loss = criterion(out_0, out_1, label)
            loss_epoch += loss.item()
            loss.backward()
            optimizer.step()
        loss_vals.append(loss_epoch)
        print('Epoch', str(epoch + 1), str(loss_epoch))
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')

    # ------------------------------ Testing -------------------------------
    test_dataset_dir = tdatasets.ImageFolder('images/test')
    net.load_state_dict(torch.load('siamese.pt'))
    test_dataset = SiameseNetworkDataset(
        imageFolderDataset=test_dataset_dir,
        transform=transforms.Compose([transforms.Resize((100, 100)),
                                      transforms.ToTensor()]))

    test_dataloader = DataLoader(test_dataset,
                                 shuffle=True,
                                 num_workers=2,
                                 batch_size=1)
    print('Testing starts')
    correct = 0
    total = 0
    test_img_sub = None  # first test image, kept only for visualization
    for i, data in enumerate(test_dataloader, 0):
        img_0, img_1, label = data
        if test_img_sub is None:
            test_img_sub = img_0
        concat = torch.cat((img_0, img_1), 0)
        test_img_sub, img_1, label = test_img_sub.to(device), img_1.to(device), label.to(device)
        img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
        out_0, out_1 = net(img_0, img_1)
        dist = F.pairwise_distance(out_0, out_1)
        # A pair is correct when the distance agrees with its label:
        # label 0 (similar) needs dist <= 0.5.
        # BUGFIX: label 1 (dissimilar) previously counted as correct
        # unconditionally; it must require dist > 0.5.
        if dist <= 0.5 and label == 0:
            correct = correct + 1
        elif dist > 0.5 and label == 1:
            correct = correct + 1
        total = total + 1
        imshow(torchvision.utils.make_grid(concat), i,
               'Dissimilarity: {:.2f}'.format(dist.item()), True)
        test_img_sub = test_img_sub.cpu()
    print(correct / total)

    print('Testing complete')

    torch.save(net.state_dict(), 'siamese_blog.pt')
            open(
                os.path.join(meta.model_path,
                             "deform_{}".format(str(load_epoch))), "rb"))
        discriminator.load_state_dict(
            torch.load(
                os.path.join(meta.model_path,
                             "discriminator_{}".format(str(load_epoch)))))
    else:
        deform_verts = [
            torch.full(mesh['male'][0].verts_packed().shape,
                       0.0,
                       device=meta.device,
                       requires_grad=True) for _ in range(len(dataloader))
        ]

    criterion = ContrastiveLoss().to(meta.device)
    optimizer = torch.optim.Adam(list(discriminator.parameters()) +
                                 deform_verts,
                                 lr=meta.d_lr)

    for epoch in tqdm(range(meta.epochs)):
        epoch_loss = 0
        for i, sample in enumerate(dataloader):
            for n, angle in enumerate([0, 90, 180, 270]):

                optimizer.zero_grad()
                new_mesh = mesh[sample['gender'][0]].offset_verts(
                    deform_verts[i])
                projection = project_mesh_silhouette(new_mesh, angle)
                proj_img = projection.clone()
                # plt.imshow(proj_img.squeeze().detach().cpu().numpy())
Example #14
0
def train(model, loss, graph_data, model_output_dir, epochs=100, lr=0.01, ntp_ratio=2, valid_criterion='loss'):
    """Train a graph-matching model on every ordered pair of graphs.

    Args:
        model: the siamese/graph model to optimise.
        loss: one of 'contrastive', 'BCE' or 'cosine', selecting the loss fn.
        graph_data: sequence of graph objects carrying ``anchor_data``.
        model_output_dir: directory used by EarlyStopping for checkpoints.
        epochs: maximum number of epochs.
        lr: Adam learning rate.
        ntp_ratio: negative-to-positive sampling ratio for anchor edges.
        valid_criterion: metric name passed through to ``validate``.

    Raises:
        NotImplementedError: if ``loss`` names an unknown loss function.
    """

    print("> Training the model...")

    early_stopping = EarlyStopping(model_save_dir=model_output_dir,
                                   patience=max(10, int((epochs - (0.5*len(graph_data)**2))/3)),
                                   verbose=True)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)

    neg_label = 0.0

    if loss == 'contrastive':
        loss_func = ContrastiveLoss()
    elif loss == 'BCE':
        loss_func = SiameseBCELoss()
    elif loss == 'cosine':
        # CosineEmbeddingLoss expects -1 (not 0) for negative pairs.
        loss_func = torch.nn.CosineEmbeddingLoss(margin=3.0)
        neg_label = -1.0
    else:
        raise NotImplementedError("Loss function not implemented")

    for ep in range(epochs):

        training_loss = []
        valid_perf = []

        # Train on every ordered pair (i, j), i != j.
        for i in range(len(graph_data)):
            for j in range(len(graph_data)):

                if i == j:
                    continue

                model.train()
                optimizer.zero_grad()

                g1, g2 = graph_data[i], graph_data[j]

                # Resample negatives each epoch at ntp_ratio times the number
                # of positive (train) anchor edges.
                g1.anchor_data[g2.gidx]['train_negative_anchor_edge_index'] = sample_negative_anchors(g1.anchor_data[g2.gidx]['negative_anchor_edge_index'],
                                                                                                      g1.anchor_data[g2.gidx]['train_anchor_edge_index'].size(1)*ntp_ratio)

                x1, x2 = model(g1, g2)
                anchor_link_labels = get_anchor_link_labels(g1.anchor_data[g2.gidx]['train_anchor_edge_index'],
                                                            g1.anchor_data[g2.gidx]['train_negative_anchor_edge_index'],
                                                            neg_label)
                loss = loss_func(x1, x2, anchor_link_labels)

                training_loss.append(loss.item())

                loss.backward()
                optimizer.step()

                valid_perf.append(validate(model, loss_func, g1, g2, valid_criterion))


        print("---> Epoch", ep,
              "| Training Loss:", np.nanmean(training_loss),
              "| Validation criterion:", np.nanmean(valid_perf))

        # Early stopping tracks the mean validation criterion across pairs.
        early_stopping(np.nanmean(valid_perf), model)

        if early_stopping.early_stop:
            print("Early stopping")
            break
Example #15
0
def main():
    """Classify each test face by nearest neighbour in the learned embedding.

    Loads pretrained weights from 'siamese.pt', then for every image in the
    held-out split finds the reference image (from SiameseTestDataset over
    'images/all') with the smallest pairwise embedding distance and checks
    the match against the image's label.
    """
    # BUGFIX: torch.cuda.is_available must be called; the bare attribute is
    # always truthy, so 'cuda:9' was chosen even without a GPU.
    # NOTE(review): `device` is unused below — the model and tensors stay on
    # the CPU; confirm whether .to(device) calls were intended.
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = SiameseNetworkDataset(imageFolderDataset=train_dataset_dir,
                                          transform=transforms.Compose([
                                              transforms.Resize((100, 100)),
                                              transforms.ToTensor()
                                          ]))
    vis_dataloader = DataLoader(train_dataset,
                                shuffle=False,
                                num_workers=0,
                                batch_size=1)

    net = SiameseNetwork()
    criterion = ContrastiveLoss()  # NOTE(review): unused in this evaluation-only path
    optimizer = optim.Adam(net.parameters(), lr=0.0005)  # NOTE(review): unused
    loss_vals = []
    print('Training started')

    # Training is skipped in this variant; pretrained weights are loaded below.
    '''
    Testing starts
    '''
    net.load_state_dict(torch.load('siamese.pt'))
    # Reference gallery: one loader over the full dataset.
    test_dataset = SiameseTestDataset(train_dataset_dir, \
                                 transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))
    test_vis_dataloader = DataLoader(test_dataset,
                                     shuffle=False,
                                     num_workers=0,
                                     batch_size=1)

    # Query set: the validation half of the labelled faces.
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = FacesDataset(train_dataset_dir, \
                                 transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))

    _, test = split_train_val(train_dataset)
    test_dataloader = DataLoader(test,
                                 shuffle=False,
                                 num_workers=0,
                                 batch_size=1)
    correct = 0
    total = 0
    for i, data in enumerate(test_dataloader, 0):
        total += 1
        img_1, labels = data
        min_dist = float("inf")
        pred = -1
        print('Testing begins', i)
        # Exhaustive nearest-neighbour scan over the reference gallery;
        # the predicted identity is the gallery index of the closest image.
        for j, data_test_vis in enumerate(test_vis_dataloader, 0):
            img_0 = data_test_vis
            out_0, out_1 = net(img_0, img_1)
            dist = F.pairwise_distance(out_0, out_1)
            if min_dist > dist:
                min_dist = dist
                pred = j
        if pred == labels.item():
            correct += 1
        print('Testing ends', i, pred)

    print('Accuracy: ', str(correct / total))
Example #16
0
def train(opt, train_loader, adapt_loader, model, model_ema, epoch, val_loader,
          tb_writer):
    """Run one training epoch with mean-teacher consistency regularization.

    Combines the supervised loss on ``train_loader`` batches with a
    consistency loss between the student (``model``) and the EMA teacher
    (``model_ema``) computed on ``adapt_loader`` batches, then steps the
    optimizer and updates the teacher via an exponential moving average.

    Args:
        opt: options namespace (adapt_loss choice, lr schedule, consistency
            weight/rampup, logging intervals, ...).
        train_loader: labeled batches consumed by ``model.run_emb``.
        adapt_loader: adaptation batches yielding two image views, a caption,
            caption lengths, and an ignored field.
        model: student model; receives gradient updates.
        model_ema: teacher model; updated only through EMA of the student.
        epoch: current epoch index (drives lr/gamma/consistency schedules).
        val_loader: loader passed to ``validate`` every ``opt.val_step`` iters.
        tb_writer: tensorboard writer for scalar/image logging.
    """
    # average meters to record the training statistics
    from model import ContrastiveLoss
    batch_time = AverageMeter()
    data_time = AverageMeter()
    train_logger = LogCollector()

    end = time.time()
    adapt_iter = iter(adapt_loader)
    # NOTE(review): if opt.adapt_loss is neither 'mse' nor 'contrastive',
    # `adapt_loss` is never bound and its use further down raises NameError —
    # confirm the option is validated upstream.
    if opt.adapt_loss == 'mse':
        adapt_loss = torch.nn.MSELoss()
    if opt.adapt_loss == 'contrastive':
        adapt_loss = ContrastiveLoss(margin=opt.margin, measure=opt.measure)

    # Two alternative lr schedules: a rampup variant for mean-teacher runs,
    # or the plain per-epoch adjustment.
    if opt.ramp_lr:
        adjust_learning_rate_mean_teacher(model.optimizer, epoch,
                                          opt.num_epochs,
                                          opt.initial_lr_rampup,
                                          opt.initial_lr)
    else:
        adjust_learning_rate(opt, model.optimizer, epoch)

    # Consistency weight follows its own rampup schedule over epochs.
    consistency_weight = get_current_consistency_weight(
        opt.consistency_weight, epoch, opt.consistency_rampup)

    # gamma controls the hardness of the contrastive loss: fixed at 1 when
    # max-violation mining is on, otherwise ramped up by 0.2 per epoch.
    if opt.max_violation:
        gamma = 1.
    else:
        gamma = adjust_gamma(init_gamma=0.0, epoch=epoch, increase=0.2)
        train_logger.update('hard_contr_gamma', gamma, n=0)

    for i, train_data in enumerate(train_loader):
        # measure data loading time

        data_time.update(time.time() - end)
        model.Eiters += 1

        # switch to train mode
        model.train_start()
        model_ema.train_start()

        # make sure train logger is used
        model.logger = train_logger

        # Cycle the adapt loader indefinitely: restart it when exhausted.
        # NOTE(review): the bare `except:` also swallows errors other than
        # StopIteration — consider narrowing to `except StopIteration:`.
        try:
            adapt_data = next(adapt_iter)
        except:
            adapt_iter = iter(adapt_loader)
            adapt_data = next(adapt_iter)

        # Get embeddings
        img_emb, cap_emb = model.run_emb(*train_data)

        # Data for Domain Adaptation or SS Learning
        # Adapt loader returns different features for the same images
        adapt_imgs_ema, adapt_imgs, adapt_caption, adapt_lens, _ = adapt_data

        adapt_imgs = adapt_imgs.float().cuda()
        adapt_imgs_ema = adapt_imgs_ema.float().cuda()

        # Caption-side consistency only when captions are available
        # (i.e. the adapt split is not purely unlabeled).
        consistency_loss_cap = 0.
        if opt.adapt_split != 'unlabeled':
            with torch.no_grad():
                adapt_caption = adapt_caption.cuda()
                ema_adapt_cap_emb = model_ema.txt_enc(
                    adapt_caption, adapt_lens, dropout=opt.dropout_noise)
                adapt_cap_mb = model.txt_enc(adapt_caption,
                                             adapt_lens,
                                             dropout=opt.dropout_noise)
                consistency_loss_cap = adapt_loss(ema_adapt_cap_emb,
                                                  adapt_cap_mb)

        # Teacher embeddings are computed without gradients; only the
        # student's image encoder receives gradient from the consistency term.
        with torch.no_grad():
            ema_adapt_imgs_emb = model_ema.img_enc(adapt_imgs_ema)

        adapt_imgs_emb = model.img_enc(adapt_imgs)

        # Average the image and caption consistency terms, scaled by the
        # ramped consistency weight.
        consistency_loss_img = adapt_loss(ema_adapt_imgs_emb, adapt_imgs_emb)
        consistency_loss = (consistency_loss_img / 2. +
                            consistency_loss_cap / 2.) * consistency_weight

        # measure accuracy and record loss
        model.optimizer.zero_grad()
        loss = model.forward_loss(img_emb, cap_emb, gamma=gamma)
        total_loss = loss + consistency_loss

        # compute gradient and do SGD step
        total_loss.backward()
        if model.grad_clip > 0:
            clip_grad_norm(model.params, model.grad_clip)

        model.optimizer.step()

        # EMA update of the teacher; a different (late) alpha is used after
        # opt.ema_late_epoch.
        if epoch <= opt.ema_late_epoch:
            update_ema_variables(
                model=model,
                ema_model=model_ema,
                alpha=opt.consistency_alpha,
                global_step=model.Eiters,
            )
        else:
            update_ema_variables(
                model=model,
                ema_model=model_ema,
                alpha=opt.consistency_alpha_late,
                global_step=model.Eiters,
            )

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # n=0 updates record the value without weighting the running average
        # — presumably; verify against AverageMeter/LogCollector semantics.
        model.logger.update('Iter', model.Eiters, 0)
        model.logger.update('Lr', model.optimizer.param_groups[0]['lr'], 0)
        model.logger.update('Consistency weight', consistency_weight, 0)

        model.logger.update(
            'Contr Loss',
            loss.item(),
        )
        model.logger.update(
            'Adapt Loss',
            consistency_loss.item(),
        )
        model.logger.update(
            'Total Loss',
            total_loss.item(),
        )

        # Print log info
        if model.Eiters % opt.log_step == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  '{e_log}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      e_log=str(model.logger)))
            # print(model.logger)
            pass

        # Record logs in tensorboard
        tb_writer.add_scalar('epoch', epoch, model.Eiters)
        tb_writer.add_scalar('step', i, model.Eiters)
        tb_writer.add_scalar('batch_time', batch_time.val, model.Eiters)
        tb_writer.add_scalar('data_time', data_time.val, model.Eiters)

        model.logger.tb_log(tb_writer, model.Eiters)

        # validate at every val_step
        if model.Eiters % opt.val_step == 0:
            # print('Validate normal')
            # Validation runs on the EMA teacher, not the student.
            print('Validate EMA')
            validate(opt, val_loader, model_ema, tb_writer)
            # validate(opt, val_loader, model, tb_writer)

            if opt.log_images:
                plot_img = vutils.make_grid(train_data[0],
                                            normalize=True,
                                            scale_each=True)
                tb_writer.add_image('Labeled Images', plot_img, model.Eiters)

                plot_img = vutils.make_grid(adapt_imgs,
                                            normalize=True,
                                            scale_each=True)
                tb_writer.add_image('Adapt Images', plot_img, model.Eiters)