Example #1
    def test_contrastive_loss_value(self):
        count = 0
        for trial in range(100):
            x0_val = Variable(self.x0)
            x1_val = Variable(self.x1)
            t_val = Variable(self.t)
            tml = ContrastiveLoss(margin=self.margin)
            loss = tml.forward(x0_val, x1_val, t_val)
            self.assertEqual(loss.data.numpy().shape, (1, ))
            self.assertEqual(loss.data.numpy().dtype, np.float32)
            loss_value = float(loss.data.numpy())

            # Compute expected value
            loss_expect = 0
            for i in range(self.x0.size()[0]):
                x0d, x1d, td = self.x0[i], self.x1[i], self.t[i]
                d = float(torch.sum(torch.pow(x0d - x1d, 2)))
                if td == 1:  # similar pair
                    loss_expect += d
                elif td == 0:  # dissimilar pair
                    loss_expect += max(self.margin - np.sqrt(d), 0) ** 2
            loss_expect /= 2.0 * self.t.size()[0]
            #print("expected %s got %s" % (loss_expect, loss_value))
            if (round(loss_expect, 6) == round(loss_value, 6)):
                count += 1
            #print (self.assertAlmostEqual(loss_expect, loss_value, places=5))
        print(count / 100.)  # fraction of trials where the computed loss matched the expected value
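
Every snippet in this listing relies on a ContrastiveLoss class that the listing itself never defines. The expected-value loop in Example #1 pins down the formula (Hadsell et al., 2006): for a similar pair (t = 1) the squared Euclidean distance d is penalized, and for a dissimilar pair (t = 0) the penalty is max(margin - sqrt(d), 0)^2, with the sum averaged over 2N pairs. Below is a minimal sketch consistent with that test; it is an illustrative reconstruction, not the original source.

import torch
import torch.nn as nn


class ContrastiveLoss(nn.Module):
    """Contrastive loss: t = 1 marks a similar pair, t = 0 a dissimilar pair."""

    def __init__(self, margin=1.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, x0, x1, t):
        # squared Euclidean distance for each pair in the batch
        d = torch.sum(torch.pow(x0 - x1, 2), dim=1)
        similar_term = t * d
        dissimilar_term = (1 - t) * torch.pow(
            torch.clamp(self.margin - torch.sqrt(d), min=0.0), 2)
        # average over 2N, matching the expected-value loop in the test above;
        # .view(1) keeps the (1,) shape that the test asserts
        return (torch.sum(similar_term + dissimilar_term) / (2.0 * x0.size(0))).view(1)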
Example #2
    def test_contrastive_loss(self):
        # gradcheck needs double precision to pass its numerical tolerances
        input1 = Variable(torch.randn(4, 4).double(), requires_grad=True)
        input2 = Variable(torch.randn(4, 4).double(), requires_grad=True)
        target = Variable(torch.randn(4).double(), requires_grad=True)
        tml = ContrastiveLoss(margin=self.margin)
        self.assertTrue(
            gradcheck(lambda x1, x2, t: tml.forward(x1, x2, t),
                      (input1, input2, target)))
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True, help='model path')
    parser.add_argument('--batch_size',
                        type=int,
                        default=1000,
                        help='batch size (default: 1000)')
    opt = parser.parse_args()
    opt.use_gpu = torch.cuda.is_available()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    test_dataset = datasets.MNIST(root='./data',
                                  train=False,
                                  download=True,
                                  transform=transform)
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.batch_size,
                             shuffle=False)

    siamese_net = SiameseNetwork()
    siamese_net.load_state_dict(torch.load(opt.model))
    if opt.use_gpu:
        siamese_net = siamese_net.cuda()

    criterion = ContrastiveLoss()

    siamese_net.eval()  # evaluation mode (affects dropout/batchnorm, if any)
    running_loss = 0
    num_itrs = len(test_loader)
    with torch.no_grad():  # inference only; no gradients needed
        for inputs, labels in test_loader:
            x1, x2, t = create_pairs(inputs, labels)
            if opt.use_gpu:
                x1, x2, t = x1.cuda(), x2.cuda(), t.cuda()

            y1, y2 = siamese_net(x1, x2)
            loss = criterion(y1, y2, t)

            running_loss += loss.item()

    print('loss: {:.4f}'.format(running_loss / num_itrs))
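
create_pairs, called here and again in Example #9, is not shown anywhere in the listing. Below is a minimal sketch under one plausible reading: split the batch in half, pair elements index-by-index, and label a pair 1 when the two class labels agree. The original pairing strategy may well differ.

import torch


def create_pairs(inputs, labels):
    # pair element i of the first half with element i of the second half;
    # target is 1 for a same-class (similar) pair, 0 otherwise
    half = inputs.size(0) // 2
    x1, x2 = inputs[:half], inputs[half:2 * half]
    t = (labels[:half] == labels[half:2 * half]).float()
    return x1, x2, t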
Example #4
def test_fun():
    # TODO: load the saved model parameters and test the model
    test_model_para = torch.load("model-epoch-5.pth")
    test_model = SiameseNetwork()
    test_model.load_state_dict(test_model_para)

    testdata = dsets.MNIST(
        root='../data/',
        train=False,
        # NOTE: ToTensor scales pixel values to [0, 1]
        transform=transforms.ToTensor(),
        #     transforms.Normalize((0.1307,), (0.3081,))
    )
    # .data/.targets supersede the deprecated .test_data/.test_labels attributes
    tsdata = testdata.data.reshape(-1, 1, 28, 28).type(torch.FloatTensor)
    labels = testdata.targets.reshape(-1, 1).type(torch.FloatTensor)
    loss = ContrastiveLoss()
    out1, out2 = test_model(tsdata[99].unsqueeze(0)/255., tsdata[16].unsqueeze(0)/255.)
    a = out1.detach()
    b = out2.detach()
    print(a, b)
    print(labels[99], labels[16])  # both are the digit 9: the loss is low and the two embeddings lie close together
    c = loss(a, b, torch.tensor([1.]))
    print(c.item())
Example #5
    param.requires_grad = False
for param in netS.conv53.parameters():
    param.requires_grad = False
for param in netS.convfc.parameters():
    param.requires_grad = False


#-----------------params freeze-----------------
if opt.cuda:
    netS.cuda()

#-------------------Loss & Optimization

optimizerS = torch.optim.Adam(filter(lambda p: p.requires_grad, netS.parameters()),lr=opt.lr, betas=(opt.beta1, 0.999))

pose_contrastive_loss = ContrastiveLoss()  # loaded from the beginning
light_contrastive_loss = ContrastiveLoss()
identity_contrastive_loss = ContrastiveLoss()
reconstruction_loss = nn.MSELoss()
pose_class_loss = nn.CrossEntropyLoss()
light_class_loss = nn.CrossEntropyLoss()

#------------------ Global Variables------------------
input_pose_1 = torch.LongTensor(opt.batchSize)
input_light_1 = torch.LongTensor(opt.batchSize)
# input_pose_2 = torch.LongTensor(opt.batchSize)
# input_light_2 = torch.LongTensor(opt.batchSize)

inputImg_1 = torch.FloatTensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)
inputImg_2 = torch.FloatTensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)
GT = torch.FloatTensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)
Example #6
    def forward(self, input1, input2):
        # forward pass of input 1
        output1 = self.forward_once(input1)
        # forward pass of input 2
        output2 = self.forward_once(input2)
        return output1, output2


# Load the dataset as pytorch tensors using dataloader
train_dataloader = DataLoader(siamese_dataset,
                              shuffle=True,
                              num_workers=8,
                              batch_size=config.batch_size)

# Declare Siamese Network
net = SiameseNetwork().cuda()
# Declare Loss Function
criterion = ContrastiveLoss()
# Declare Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3, weight_decay=0.0005)


# train the model
def train():
    loss = []
    counter = []
    iteration_number = 0

    for epoch in range(1, config.epochs + 1):  # inclusive of the final epoch
        for i, data in enumerate(train_dataloader, 0):
            img0, img1, label = data
            img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
            optimizer.zero_grad()
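
Example #6 shows only the tail of the network's forward pass, and several other examples call SiameseNetwork() and model.forward_once(x) without defining them. Below is a minimal sketch for MNIST, assuming a small shared-weight conv net that maps each 1x28x28 image to a 2-D embedding (2-D so that plot_mnist in Example #8 can scatter the first two coordinates directly); the layer sizes are illustrative, not the original architecture.

import torch.nn as nn


class SiameseNetwork(nn.Module):
    def __init__(self):
        super(SiameseNetwork, self).__init__()
        self.embed = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),   # 28 -> 24 -> 12
            nn.Conv2d(32, 64, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),  # 12 -> 8 -> 4
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 2),  # 2-D embedding
        )

    def forward_once(self, x):
        # embed one branch; both branches share these weights
        return self.embed(x)

    def forward(self, input1, input2):
        # forward pass of input 1
        output1 = self.forward_once(input1)
        # forward pass of input 2
        output2 = self.forward_once(input2)
        return output1, output2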
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=5,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--model',
                        '-m',
                        default='',
                        help='Give a model to test')
    parser.add_argument('--train-plot',
                        action='store_true',
                        default=False,
                        help='Plot train loss')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    print("Args: %s" % args)
    # create pair dataset iterator
    train = dsets.MNIST(root='../data/', train=True, download=True)
    test = dsets.MNIST(root='../data/',
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                       ]))

    train_iter = create_iterator(train.train_data.numpy(),
                                 train.train_labels.numpy(), args.batchsize)

    # model
    model = SiameseNetwork()
    if args.cuda:
        model.cuda()

    learning_rate = 0.01
    momentum = 0.9
    # Loss and Optimizer
    criterion = ContrastiveLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=momentum)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    train_loader = torch.utils.data.DataLoader(train_iter,
                                               batch_size=args.batchsize,
                                               shuffle=True,
                                               **kwargs)

    test_loader = torch.utils.data.DataLoader(test,
                                              batch_size=args.batchsize,
                                              shuffle=True,
                                              **kwargs)

    def train_model(epochs):
        train_loss = []
        for epoch in range(epochs):
            print('Train Epoch:' + str(epoch) + "------------------")
            for batch_idx, (x0, x1, labels) in enumerate(train_loader):
                labels = labels.float()
                if args.cuda:
                    x0, x1, labels = x0.cuda(), x1.cuda(), labels.cuda()
                x0, x1, labels = Variable(x0), Variable(x1), Variable(labels)
                output1, output2 = model(x0, x1)
                loss = criterion(output1, output2, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_loss.append(loss.item())
                if batch_idx % args.batchsize == 0:
                    print(
                        'Batch id: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                            batch_idx, batch_idx * len(labels),
                            len(train_loader.dataset),
                            100. * batch_idx / len(train_loader), loss.item()))
                    torch.save(model.state_dict(),
                               './weights/model-epoch-%s.pth' % epoch)
        print("finished_training")
        return train_loss

    def test_model(model):
        model.eval()
        embeddings = []  # avoid shadowing the builtin `all`
        all_labels = []
        with torch.no_grad():  # inference only; no autograd state needed
            for batch_idx, (x, labels) in enumerate(test_loader):
                if args.cuda:
                    x, labels = x.cuda(), labels.cuda()
                output = model.forward_once(x)
                embeddings.extend(output.cpu().numpy().tolist())
                all_labels.extend(labels.cpu().numpy().tolist())

        numpy_all = np.array(embeddings)
        numpy_labels = np.array(all_labels)
        # numpy_all holds the embeddings; numpy_labels the ground-truth labels
        return numpy_all, numpy_labels

    def testing_plots(name_file, model):
        numpy_all, numpy_labels = test_model(model)
        dict_pickle = {"numpy_all": numpy_all, "numpy_labels": numpy_labels}
        with open(name_file, "wb") as filehandler:  # closes the file even on error
            pickle.dump(dict_pickle, filehandler)
        plot_mnist(numpy_all, numpy_labels)

    if len(args.model) == 0:
        train_loss = train_model(args.epoch)
        if args.train_plot:
            plot_loss(train_loss)

    else:
        saved_model = torch.load(args.model)
        model = SiameseNetwork()
        model.load_state_dict(saved_model)
        if args.cuda:
            model.cuda()
        testing_plots("./../Results/embeddings.pickle", model)
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', '-e', type=int, default=5,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--batchsize', '-b', type=int, default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--model', '-m', default='',
                        help='Give a model to test')
    parser.add_argument('--train-plot', action='store_true', default=False,
                        help='Plot train loss')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    print("Args: %s" % args)

    # create pair dataset iterator
    train = dsets.MNIST(
        root='../data/',
        train=True,
        # transform=transforms.Compose([
        #     transforms.ToTensor(),
        #     transforms.Normalize((0.1307,), (0.3081,))
        # ]),
        download=True
    )
    test = dsets.MNIST(
        root='../data/',
        train=False,
        # NOTE: ToTensor scales pixel values to [0, 1]
        transform=transforms.Compose([
            transforms.ToTensor(),
        #     transforms.Normalize((0.1307,), (0.3081,))
        ])
    )

    train_iter = create_iterator(
        train.train_data.numpy(),
        train.train_labels.numpy(),
        args.batchsize)

    # model
    model = SiameseNetwork()
    if args.cuda:
        model.cuda()

    learning_rate = 0.01
    momentum = 0.9
    # Loss and Optimizer
    criterion = ContrastiveLoss()
    # optimizer = torch.optim.Adam(
    #     [p for p in model.parameters() if p.requires_grad],
    #     lr=learning_rate
    # )

    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate,
                                momentum=momentum)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    train_loader = torch.utils.data.DataLoader(
        train_iter,
        batch_size=args.batchsize, shuffle=True, **kwargs)

    test_loader = torch.utils.data.DataLoader(
        test,
        batch_size=args.batchsize, shuffle=True, **kwargs)

    def train(epoch):
        train_loss = []
        model.train()
        start = time.time()
        start_epoch = time.time()
        for batch_idx, (x0, x1, labels) in enumerate(train_loader):
            labels = labels.float()
            if args.cuda:
                x0, x1, labels = x0.cuda(), x1.cuda(), labels.cuda()
            x0, x1, labels = Variable(x0), Variable(x1), Variable(labels)
            output1, output2 = model(x0, x1)
            loss = criterion(output1, output2, labels)
            train_loss.append(loss.item())  # loss.data[0] is removed in modern PyTorch
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            accuracy = []

            for idx, logit in enumerate([output1, output2]):
                corrects = (torch.max(logit, 1)[1].data == labels.long().data).sum()
                accu = float(corrects) / float(labels.size()[0])
                accuracy.append(accu)

            if batch_idx % args.batchsize == 0:
                end = time.time()
                took = end - start
                for idx, accu in enumerate(accuracy):
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss:{:.6f}\tTook: {:.2f}\tOut: {}\tAccu: {:.2f}'.format(
                        epoch, batch_idx * len(labels), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item(),
                        took, idx, accu * 100.))
                start = time.time()
        torch.save(model.state_dict(), './model-epoch-%s.pth' % epoch)
        end = time.time()
        took = end - start_epoch
        print('Train epoch: {} \tTook:{:.2f}'.format(epoch, took))
        return train_loss

    def test(model):
        model.eval()
        embeddings = []  # avoid shadowing the builtin `all`
        all_labels = []

        with torch.no_grad():  # inference only; no autograd state needed
            for batch_idx, (x, labels) in enumerate(test_loader):
                if args.cuda:
                    x, labels = x.cuda(), labels.cuda()
                output = model.forward_once(x)
                embeddings.extend(output.cpu().numpy().tolist())
                all_labels.extend(labels.cpu().numpy().tolist())

        numpy_all = np.array(embeddings)
        numpy_labels = np.array(all_labels)
        return numpy_all, numpy_labels

    def plot_mnist(numpy_all, numpy_labels):
        c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
             '#ff00ff', '#990000', '#999900', '#009900', '#009999']

        for i in range(10):
            f = numpy_all[np.where(numpy_labels == i)]
            plt.plot(f[:, 0], f[:, 1], '.', c=c[i])
        plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
        plt.savefig('result.png')

    if len(args.model) == 0:
        train_loss = []
        for epoch in range(1, args.epoch + 1):
            train_loss.extend(train(epoch))

        if args.train_plot:
            plt.gca().cla()
            plt.plot(train_loss, label="train loss")
            plt.legend()
            plt.draw()
            plt.savefig('train_loss.png')
            plt.gca().clear()

    else:
        saved_model = torch.load(args.model)
        model = SiameseNetwork()
        model.load_state_dict(saved_model)
        if args.cuda:
            model.cuda()

    numpy_all, numpy_labels = test(model)
    plot_mnist(numpy_all, numpy_labels)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--margin',
                        type=float,
                        default=1.0,
                        help='margin for contrastive loss')
    parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--num_epochs',
                        type=int,
                        default=100,
                        help='number of epochs')
    parser.add_argument('--batch_size',
                        type=int,
                        default=1000,
                        help='batch size')
    parser.add_argument('--log_dir', required=True, help='log directory')
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help='number of workers for data loading')
    opt = parser.parse_args()
    opt.use_gpu = torch.cuda.is_available()

    if not os.path.exists(opt.log_dir):
        os.makedirs(opt.log_dir)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    train_dataset = datasets.MNIST(root='./data',
                                   train=True,
                                   download=True,
                                   transform=transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers)

    siamese_net = SiameseNetwork()
    if opt.use_gpu:
        siamese_net = siamese_net.cuda()

    criterion = ContrastiveLoss()
    optimizer = torch.optim.SGD(siamese_net.parameters(),
                                lr=opt.lr,
                                momentum=opt.momentum)

    history = {'loss': []}

    for epoch in range(opt.num_epochs):
        num_itrs = len(train_loader)
        running_loss = 0
        for itr, (inputs, labels) in enumerate(train_loader):
            optimizer.zero_grad()

            x1, x2, t = create_pairs(inputs, labels)
            x1, x2, t = Variable(x1), Variable(x2), Variable(t)
            if opt.use_gpu:
                x1, x2, t = x1.cuda(), x2.cuda(), t.cuda()

            y1, y2 = siamese_net(x1, x2)
            loss = criterion(y1, y2, t)

            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            sys.stdout.write('\r\033[Kitr [{}/{}], loss: {:.4f}'.format(
                itr + 1, num_itrs, loss.item()))
            sys.stdout.flush()

        history['loss'].append(running_loss / num_itrs)
        sys.stdout.write('\r\033[Kepoch [{}/{}], loss: {:.4f}'.format(
            epoch + 1, opt.num_epochs, running_loss / num_itrs))
        sys.stdout.write('\n')

    torch.save(siamese_net.state_dict(), os.path.join(opt.log_dir,
                                                      'model.pth'))

    with open(os.path.join(opt.log_dir, 'history.pkl'), 'wb') as f:
        pickle.dump(history, f)

    plt.plot(history['loss'])
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid()
    plt.savefig(os.path.join(opt.log_dir, 'loss.png'))
Example #10
def main(options):
    # Path configuration
    TRAINING_PATH = '/home/ye/Works/googleCloud-SDK/train.txt'
    TESTING_PATH = '/home/ye/Works/googleCloud-SDK/test.txt'
    IMG_PATH = '/home/ye/Works/googleCloud-SDK/lfw'

    transformations = transforms.Compose([transforms.Resize((128, 128)),  # Scale was renamed to Resize
                                          transforms.ToTensor()
                                          ])
    
    dset_train = DatasetProcessing(IMG_PATH, TRAINING_PATH, transformations)

    dset_test = DatasetProcessing(IMG_PATH, TESTING_PATH, transformations)

    train_loader = DataLoader(dset_train,
                              batch_size = options.batch_size,
                              shuffle = True,
                              num_workers = 4
                             )

    test_loader = DataLoader(dset_test,
                             batch_size = options.batch_size,
                             shuffle = False,
                             num_workers = 4
                             )

    use_cuda = (len(options.gpuid) >= 1)
    if options.gpuid:
        cuda.set_device(options.gpuid[0])
    
    # Initial the model
    cnn_model = SIAMESE2()
    if use_cuda:
        cnn_model.cuda()
    else:
        cnn_model.cpu()

    # Contrastive loss
    criterion = ContrastiveLoss()
    optimizer = getattr(torch.optim, options.optimizer)(cnn_model.parameters())

    # main training loop
    last_dev_avg_loss = float("inf")
    for epoch_i in range(options.epochs):
        logging.info("At {0}-th epoch.".format(epoch_i))
        train_loss = 0.0
        correct_prediction = 0.0
        for it, train_data in enumerate(train_loader):
            img0, img1, labels = train_data
            if use_cuda:
                img0, img1 , labels = Variable(img0).cuda(), Variable(img1).cuda() , Variable(labels).cuda()
            else:
                img0, img1 , labels = Variable(img0), Variable(img1), Variable(labels)

            print('##########', img0.size())
            output1, output2 = cnn_model(img0,img1)
            
            loss = criterion(output1, output2, labels)
            train_loss += loss.item()  # accumulate a plain float, not a graph-attached tensor
            # predict = torch.round(train_output)
            # correct_prediction += (predict.view(-1) == labels.view(-1)).sum().float()
            logging.debug("loss at batch {0}: {1}".format(it, loss.item()))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            

        train_avg_loss = train_loss / (len(dset_train) / options.batch_size)
        # training_accuracy = (correct_prediction / len(dset_train)).data.cpu().numpy()[0]
        logging.info("Average training loss value per instance is {0} at the end of epoch {1}".format(train_avg_loss.data[0], epoch_i))