Code Example #1
def train_siamese_network(nclasses, fp16, transform, batch_size, num_epochs):
    since = time.time()
    net = SiameseNetwork().cuda()
    # net.classifier.classifier = nn.Sequential()

    print(net)
    print("Start time: ", since)

    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)

    if fp16:
        # model = network_to_half(model)
        # optimizer_ft = FP16_Optimizer(optimizer_ft, static_loss_scale = 128.0)
        print("Memory saving is on using fp16")
        net, optimizer = amp.initialize(net, optimizer, opt_level="O1")

    counter = []
    loss_history = []
    iteration_number = 0
    train_dataloader = get_dataloader(transform, batch_size)
    print("Started training siamese network")

    for epoch in range(0, num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        for i, data in enumerate(train_dataloader, 0):
            img0, img1, label = data
            img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()

            optimizer.zero_grad()

            output1, output2 = net(img0, img1)

            loss_contrastive = criterion(output1, output2, label)
            # loss_contrastive.backward()
            # optimizer.step()
            if fp16:  # scale the loss through apex amp before the backward pass
                with amp.scale_loss(loss_contrastive,
                                    optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_contrastive.backward()
            optimizer.step()

            if i % 10 == 0:
                iteration_number += 10
                counter.append(iteration_number)
                loss_history.append(loss_contrastive.item())

        time_elapsed = time.time() - since
        print('Time elapsed: {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print("Epoch number {} finished, Current loss {}\n".format(
            epoch, loss_contrastive.item()))

        if epoch % 10 == 9:
            save_model(epoch, net, loss_contrastive, optimizer)
    show_plot(counter, loss_history)
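
Each listing in this collection instantiates a SiameseNetwork defined elsewhere in its own project, and those architectures differ from one another. For orientation only, here is a minimal PyTorch sketch of the shared-weight, two-branch pattern these snippets rely on; the layer sizes, the single-channel 100x100 input, and the embedding dimension are illustrative assumptions, not any one project's actual model.

import torch.nn as nn


class SiameseNetwork(nn.Module):
    """Two-branch embedding network; both inputs pass through the same weights."""

    def __init__(self, in_channels=1, embedding_dim=128):
        super().__init__()
        # assumes 100x100 inputs, matching the Resize((100, 100)) transforms used in these examples
        self.backbone = nn.Sequential(
            nn.Conv2d(in_channels, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 25 * 25, embedding_dim),
        )

    def forward_once(self, x):
        return self.backbone(x)

    def forward(self, x0, x1):
        # the same weights embed both images, so distances between the
        # returned vectors are directly comparable
        return self.forward_once(x0), self.forward_once(x1)

A contrastive loss such as the ones used in these examples then acts on the distance between the two returned embeddings.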
Code Example #2
def siam_train(vectors, seq2seq_model, batch_size, layers, directory):
    first, sec, answ = readBcb(vectors + '/train')

    first_enc = seq2seq_model.get_encoder_status(first)
    sec_enc = seq2seq_model.get_encoder_status(sec)

    siam_model = SiameseNetwork(first_enc[0].shape[1], batch_size, layers)
    siam_model.train(first_enc, sec_enc, answ, directory)
    return siam_model
Code Example #3
File: facerec.py  Project: wmylxmj/Face-Recognition
 def __init__(self, input_shape=(240, 180, 1), name=None):
     self.input_shape = input_shape
     self.image_height, self.image_width, _ = input_shape
     self.data_loader = CASPEALR1DataLoader(image_height=self.image_height, \
                                            image_width=self.image_width)
     self.siamese_network = SiameseNetwork(input_shape)
     self.siamese_network.compile(loss='binary_crossentropy', \
                                  optimizer=Adam(lr=0.001), metrics=['accuracy'])
     self.name = name
     pass
Code Example #4
def main():
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = SiameseNetworkDataset(imageFolderDataset=train_dataset_dir,
                                          transform=transforms.Compose([transforms.Resize((100, 100)),
                                                                        transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                        shuffle=False,
                        num_workers=0,
                        batch_size=10)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    '''
    Training Starts
    '''
    print('Training started')
    for epoch in range(10):
       loss_epoch = 0
       for i, data in enumerate(vis_dataloader,0):
           img_0, img_1, label = data
           print(i, label)
           # img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
           optimizer.zero_grad()
           out_0, out_1 = net(img_0, img_1)
           loss = criterion(out_0, out_1, label)
           loss_epoch += loss.item()
           loss.backward()
           optimizer.step()
       loss_vals.append(loss_epoch)
       print('Epoch',str(epoch+1), str(loss_epoch))
       print('Epoch done')
       torch.save(net.state_dict(), 'siamese.pt')
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')
   

    # ****************************** Training ends ***************************************


    '''
Code Example #5
    def __init__(self):
        data_src = './data_repository/geological_similarity/'
        self.model_path = './trained_model/model_triplet/'

        self.dataset = PreProcessing(data_src)
        self.model = SiameseNetwork()

        # Define Tensor
        self.img_placeholder = tf.placeholder(tf.float32, [None, 28, 28, 3],
                                              name='img')
        self.net = self.model.conv_net(self.img_placeholder, reuse=False)
        self.normalized_training_vectors = self.generate_db_normed_vectors()
        print('Prediction object loaded successfully.')
Code Example #6
def get_siamese_model(nclasses):
    device = torch.device("cuda")
    model = SiameseNetwork()
    # model.classifier.classifier = nn.Sequential()
    checkpoint = torch.load('./model/siamese/net_59.pth')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()
    return model
Code Example #7
def mlnet_method(X, Y, splits=5):
    accuracy, cnt = 0, 0
    mskf = my_SKF(X, Y, splits)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    for X_train, Y_train, X_test, Y_test in mskf:
        new_X = transform(X_train, Y_train)
        dataset = ImageDataset(new_X)
        dataloader = DataLoader(dataset,
                                batch_size=128,
                                shuffle=True,
                                num_workers=4)
        net = SiameseNetwork().to(device)
        net.fit(dataloader)

        net.eval()
        hit = 0
        for i, x in enumerate(X_test):
            y_pred = knn_pred(net, X_train, Y_train, x, batch_num=1)
            y_real = Y_test[i]
            if y_pred == y_real:
                hit += 1
        acc = hit / X_test.shape[0]
        accuracy += acc
        cnt += 1
        print('Split:{}, Acc:{:.4f}'.format(cnt, acc))
    return accuracy / splits
Code Example #8
def main():
    global plotter
    plotter = VisdomLinePlotter(env_name=config.visdom_name)

    # instantiate model and initialize weights
    model = SiameseNetwork()
    if config.cuda:
        model.cuda()

    optimizer = create_optimizer(model, config.lr)

    # optionally resume from a checkpoint
    if config.resume:
        if os.path.isfile(config.resume):
            print('=> loading checkpoint {}'.format(config.resume))
            checkpoint = torch.load(config.resume)
            config.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(config.resume))

    start = config.start_epoch
    end = start + config.epochs

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
Code Example #9
def checkOverfit(num_samples, input_dims, hidden_dims, labels, num_tries=3):
    for _ in range(num_tries):
        random_dataset = RandomDataset(num_samples=num_samples, input_dims=input_dims, useLabels=True, labels=labels)
        
        model = SiameseNetwork(input_dims=input_dims, hidden_dims=hidden_dims, doConv=False)
        
        trainer = Trainer(random_dataset, model=model, model_parameters=model.parameters,
                            batch_size=8, lr=1, shuffle=True, doValidation=False)
        
        trainer.train(num_epochs=30)  # This should print status
        
        if trainer.training_error_plot[-1] == 0:
            return True
    return False
Code Example #10
class Predict:
    def __init__(self):
        data_src = './data_repository/geological_similarity/'
        self.model_path = './trained_model/model_triplet/'

        self.dataset = PreProcessing(data_src)
        self.model = SiameseNetwork()

        # Define Tensor
        self.img_placeholder = tf.placeholder(tf.float32, [None, 28, 28, 3],
                                              name='img')
        self.net = self.model.conv_net(self.img_placeholder, reuse=False)
        self.normalized_training_vectors = self.generate_db_normed_vectors()
        print('Prediction object loaded successfully.')

    # Compute Vector representation for each training images and normalize those
    def generate_db_normed_vectors(self):
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(self.model_path)
            saver.restore(sess, self.model_path + "model.ckpt")
            train_vectors = sess.run(
                self.net,
                feed_dict={self.img_placeholder: self.dataset.images_train})
        del self.dataset.images_train
        gc.collect()
        normalized_train_vectors = train_vectors / np.linalg.norm(
            train_vectors, axis=1).reshape(-1, 1)
        return normalized_train_vectors

    # Find k nearest neighbour using cosine similarity
    def find_k_nn(self, normalized_train_vectors, vec, k):
        dist_arr = np.matmul(normalized_train_vectors, vec.T)
        return np.argsort(-dist_arr.flatten())[:k]

    def predict(self, im, k=10):
        # run the test image through the network to get the test features
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(self.model_path)
            saver.restore(sess, self.model_path + "model.ckpt")
            search_vector = sess.run(self.net,
                                     feed_dict={self.img_placeholder: [im]})
        normalized_search_vec = search_vector / np.linalg.norm(search_vector)
        candidate_index = self.find_k_nn(self.normalized_training_vectors,
                                         normalized_search_vec, k)
        return list(candidate_index)
Code Example #11
def main():
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/train')
    train_dataset = SiameseNetworkDataset(imageFolderDataset=train_dataset_dir,
                                          transform=transforms.Compose([transforms.Resize((100, 100)),
                                                                        transforms.ToTensor()]))
    vis_dataloader = DataLoader(train_dataset,
                        shuffle=False,
                        num_workers=0,
                        batch_size=1)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    net.to(device)
    '''
    Training Starts
    '''
    print('Training started')
    for epoch in range(1000):
       loss_epoch = 0
       for i, data in enumerate(vis_dataloader,0):
           img_0, img_1, label = data
           img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
           optimizer.zero_grad()
           out_0, out_1 = net(img_0, img_1)
           loss = criterion(out_0, out_1, label)
           loss_epoch += loss.item()
           loss.backward()
           optimizer.step()
       loss_vals.append(loss_epoch)
       print('Epoch',str(epoch+1), str(loss_epoch))
    print('Training completed')
    plt.plot(loss_vals)
    plt.savefig('loss_siamese.png')
    
    

    # ****************************** Training ends ***************************************


    '''
    Testing starts
    '''
    

    test_dataset_dir = tdatasets.ImageFolder('images/test')
    net.load_state_dict(torch.load('siamese.pt'))
    test_dataset = SiameseNetworkDataset(imageFolderDataset=test_dataset_dir,
                                         transform=transforms.Compose([transforms.Resize((100, 100)),
                                                                       transforms.ToTensor()]))

    test_dataloader = DataLoader(test_dataset,
                        shuffle=True,
                        num_workers=2,
                        batch_size=1)
    print('Testing starts')
    correct = 0
    total = 0
    test_img_sub = None
    for i, data in enumerate(test_dataloader, 0):
        img_0, img_1, label = data
        if test_img_sub is None:
            test_img_sub = img_0
        #concat = torch.cat((test_img_sub, img_1), 0)
        concat = torch.cat((img_0, img_1), 0)
        test_img_sub, img_1, label = test_img_sub.to(device), img_1.to(device), label.to(device)
        img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
        out_0, out_1 = net(img_0, img_1)
        dist = F.pairwise_distance(out_0, out_1)
        if dist <= 0.5 and label == 0:
            correct = correct + 1
        elif dist > 0.5 and label == 1:
            correct = correct + 1
        else:
            pass
        total = total + 1
        imshow(torchvision.utils.make_grid(concat),i,'Dissimilarity: {:.2f}'.format(dist.item()), True)
        test_img_sub = test_img_sub.cpu()
#        dist = dist.cpu().detach()
#        print(dist.numpy())
#        dist = torch.sigmoid(dist)
#        print(dist.numpy())
    print(correct/total)


    print('Testing complete')

    torch.save(net.state_dict(), 'siamese_blog.pt')
Code Example #12
File: facerec.py  Project: wmylxmj/Face-Recognition
class FaceRecognition(object):
    def __init__(self, input_shape=(240, 180, 1), name=None):
        self.input_shape = input_shape
        self.image_height, self.image_width, _ = input_shape
        self.data_loader = CASPEALR1DataLoader(image_height=self.image_height, \
                                               image_width=self.image_width)
        self.siamese_network = SiameseNetwork(input_shape)
        self.siamese_network.compile(loss='binary_crossentropy', \
                                     optimizer=Adam(lr=0.001), metrics=['accuracy'])
        self.name = name
        pass

    def prepare(self):
        self.data_loader.write_infos()
        pass

    def train(self, epochs=1000, batch_size=3, load_pretrained=False, seed=0):
        if load_pretrained:
            self.siamese_network.load_weights(
                './weights/siamese_network_weights.h5')
            print('Info: weights loaded.')
            pass
        for epoch in range(epochs):
            seed += 1
            for batch_i, (anchor_images, positive_images, negative_images) \
            in enumerate(self.data_loader.load_batches(batch_size, seed=seed)):
                images_A = np.concatenate((anchor_images, anchor_images),
                                          axis=0)
                images_B = np.concatenate((positive_images, negative_images),
                                          axis=0)
                y_true = np.concatenate((np.ones(
                    (batch_size, 1)), np.zeros((batch_size, 1))),
                                        axis=0)
                loss, accuracy = self.siamese_network.train_on_batch(
                    [images_A, images_B], y_true)
                print('[epoch: {0:}/{1:}][batch: {2:}/{3:}][loss: {4:}][accuracy: {5:}]'.format(epoch+1, \
                      epochs, batch_i+1, self.data_loader.n_batches, loss, accuracy))
                if (batch_i + 1) % 250 == 0:
                    self.siamese_network.save_weights(
                        './weights/siamese_network_weights.h5')
                    print('Info: weights saved.')
                    pass
                pass
            if (epoch + 1) % 10 == 0:
                self.siamese_network.save_weights(
                    './weights/siamese_network_weights.h5')
                print('Info: weights saved.')
                pass
            pass
        pass

    def accuracy(self, batch_size=3, seed=0):
        self.siamese_network.load_weights(
            './weights/siamese_network_weights.h5')
        print('Info: weights loaded.')
        num_true = 0
        for batch_i, (anchor_images, positive_images, negative_images) \
        in enumerate(self.data_loader.load_batches(batch_size, seed=seed)):
            images_A = np.concatenate((anchor_images, anchor_images), axis=0)
            images_B = np.concatenate((positive_images, negative_images),
                                      axis=0)
            y_true = np.concatenate((np.ones(
                (batch_size, 1)), np.zeros((batch_size, 1))),
                                    axis=0)
            loss, accuracy = self.siamese_network.evaluate(
                [images_A, images_B], y_true)
            num_true += int(accuracy * batch_size * 2 + 0.5)
            num_sum = (batch_i + 1) * batch_size * 2
            accuracy = num_true / num_sum
            print('[after batch: {0:}][accuracy: {1:}]'.format(
                batch_i + 1, accuracy))
            pass
        num_sum = self.data_loader.n_batches * batch_size * 2
        total_accuracy = num_true / num_sum
        print('total accuracy: {0:}'.format(total_accuracy))
        pass

    def predict(self, image_A_path, image_B_path, have_loaded_weights=False):
        if have_loaded_weights == False:
            self.siamese_network.load_weights(
                './weights/siamese_network_weights.h5')
            print('Info: weights loaded.')
            pass
        images_A, images_B = [], []
        image_A = self.data_loader.imread(image_A_path)
        images_A.append(image_A)
        images_A = np.array(images_A) / 127.5 - 1.0
        image_B = self.data_loader.imread(image_B_path)
        images_B.append(image_B)
        images_B = np.array(images_B) / 127.5 - 1.0
        predictions = self.siamese_network.predict([images_A, images_B])
        prediction = np.squeeze(predictions)
        if prediction >= 0.5:
            return 1
        else:
            return 0
        pass

    pass
Code Example #13
File: main.py  Project: rub-ksv/AdHominem
    "-------------------------------------------------------------------------------------"
    + "\n")

for hp in sorted(hyper_parameters.keys()):
    if hp in ["V_c", "V_w"]:
        open(file_results, "a").write("num " + hp + ": " +
                                      str(len(hyper_parameters[hp])) + "\n")
    else:
        open(file_results,
             "a").write(hp + ": " + str(hyper_parameters[hp]) + "\n")

###############################
print("build tensorflow graph")
###############################
model = SiameseNetwork(
    hyper_parameters=hyper_parameters,
    E_w=E_w,
)

#######################################
print("start siamese network training")
#######################################
train_set = (docs_L_tr, docs_R_tr, labels_tr)
test_set = (docs_L_te, docs_R_te, labels_te)

model_type = hyper_parameters["model_type"]
func = {
    "HRSN": model.train_model_hrsn,
    "AdHominem": model.train_model_adhominem,
}[model_type]

func(train_set, test_set, file_results)
Code Example #14
hidden_size = 100
num_layers = 2
num_classes = 2
batch_size = 32
num_epochs = 50
learning_rate = 0.003
print_freq = 500
model_time = strftime('%H:%M:%S', gmtime())
#num-perspective = 20?
#dropout=0.1

#  Dataset
print('loading Quora data...')
data = Quora(batch_size, input_size)
word_vocab_size = len(data.TEXT.vocab)
siamese = SiameseNetwork(input_size, word_vocab_size, hidden_size, num_layers,
                         data)

parameters = filter(lambda p: p.requires_grad, siamese.parameters())

# Loss and Optimizer
optimizer = torch.optim.Adam(parameters, lr=learning_rate)
criterion = nn.CrossEntropyLoss()

writer = SummaryWriter(log_dir='runs/' + model_time)

siamese.train()
loss, last_epoch = 0, -1
max_dev_acc, max_test_acc = 0, 0

best_model = copy.deepcopy(siamese)
Code Example #15
import torchvision
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms

import config
from model import SiameseNetwork, ContrastiveLoss, train
from utils import SiameseNetworkDataset

opt = config.args()
folder_dataset = torchvision.datasets.ImageFolder(root=opt.training_dir)

# Define the image dataset
transform = transforms.Compose([transforms.Resize((100, 100)),  # note: passing an int vs. a tuple to Resize behaves differently
                                transforms.ToTensor()])
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
                                        transform=transform,
                                        should_invert=False)

# Define the image dataloader
train_dataloader = DataLoader(siamese_dataset,
                              shuffle=True,
                              batch_size=opt.batch_size)

net = SiameseNetwork().cuda()  # build the model and move it to the GPU
print(net)
criterion = ContrastiveLoss(margin=2.0)  # define the loss function
optimizer = optim.Adam(net.parameters(), lr=opt.lr)  # define the optimizer

train(net, optimizer, criterion, train_dataloader, opt)
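
SiameseNetworkDataset is imported from a project-local utils (or data_reader) module here and in several other examples, but its implementation is not part of this listing. A minimal sketch of a pair-sampling dataset in the same spirit follows; the 50/50 positive-pair sampling, the grayscale conversion, and the 0 = similar / 1 = dissimilar label convention are assumptions inferred from how the snippets consume it.

import random

import PIL.ImageOps
import torch
from PIL import Image
from torch.utils.data import Dataset


class SiameseNetworkDataset(Dataset):
    """Yields (img0, img1, label) pairs; label 0 for same-class pairs, 1 otherwise."""

    def __init__(self, imageFolderDataset, transform=None, should_invert=False):
        self.imageFolderDataset = imageFolderDataset
        self.transform = transform
        self.should_invert = should_invert

    def __getitem__(self, index):
        img0_tuple = random.choice(self.imageFolderDataset.imgs)
        same_class = random.random() < 0.5  # roughly half the pairs are positives
        while True:
            img1_tuple = random.choice(self.imageFolderDataset.imgs)
            if (img1_tuple[1] == img0_tuple[1]) == same_class:
                break
        img0 = Image.open(img0_tuple[0]).convert('L')
        img1 = Image.open(img1_tuple[0]).convert('L')
        if self.should_invert:
            img0, img1 = PIL.ImageOps.invert(img0), PIL.ImageOps.invert(img1)
        if self.transform is not None:
            img0, img1 = self.transform(img0), self.transform(img1)
        label = torch.tensor([0.0 if same_class else 1.0], dtype=torch.float32)
        return img0, img1, label

    def __len__(self):
        return len(self.imageFolderDataset.imgs)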
Code Example #16
tf.flags.DEFINE_float('momentum', '0.99', 'Momentum')
tf.flags.DEFINE_string('model', 'siamese_net', 'siamese model to run')
tf.flags.DEFINE_string('data_src', './data_repository/questions.csv',
                       'source of training dataset')

flags = tf.app.flags
FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Setup Dataset
    dataset = PreProcessing(FLAGS.data_src)
    model = SiameseNetwork(sequence_length=dataset.X.shape[1],
                           vocab_size=len(dataset.vocab_processor.vocabulary_),
                           embedding_size=FLAGS.embedding_dim,
                           filter_sizes=list(
                               map(int, FLAGS.filter_sizes.split(","))),
                           num_filters=FLAGS.num_filters,
                           output_embedding_size=FLAGS.output_embedding_size,
                           dropout_keep_prob=FLAGS.dropout_keep_prob,
                           embeddings_lookup=dataset.embeddings_lookup,
                           l2_reg_lambda=FLAGS.l2_reg_lambda)
    placeholder_shape = [None] + [dataset.X.shape[1]]
    print("placeholder_shape", placeholder_shape)

    # Setup Network
    next_batch = dataset.get_siamese_batch
    left_input = tf.placeholder(tf.int32, placeholder_shape, name='left_input')
    right_input = tf.placeholder(tf.int32,
                                 placeholder_shape,
                                 name='right_input')

    margin = 2.5
Code Example #17
File: main_loss.py  Project: gxdai/FCS_pytorch
def train(args):
    # basic arguments.
    ngpu = args.ngpu
    margin = args.margin
    num_epochs = args.num_epochs
    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    gamma = args.gamma # for learning rate decay

    root_dir = args.root_dir
    image_txt = args.image_txt
    train_test_split_txt = args.train_test_split_txt
    label_txt = args.label_txt
    ckpt_dir = args.ckpt_dir
    eval_step = args.eval_step


    pretrained = args.pretrained
    aux_logits = args.aux_logits
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits':aux_logits}

    # network and loss
    siamese_network = SiameseNetwork(**kargs)
    gpu_number = torch.cuda.device_count()
    if device.type == 'cuda' and gpu_number > 1:
        siamese_network = nn.DataParallel(siamese_network, list(range(torch.cuda.device_count())))
    siamese_network.to(device)
    contrastive_loss = ContrastiveLoss(margin=margin)

    # params = siamese_network.parameters()
    # optimizer = optim.Adam(params, lr=0.0005)
    # optimizer = optim.SGD(params, lr=0.01, momentum=0.9)

    # using different lr
    optimizer = optim.SGD([
                       {'params': siamese_network.module.inception_v3.parameters() if gpu_number > 1 else siamese_network.inception_v3.parameters()},
                       {'params': siamese_network.module.main.parameters() if gpu_number > 1 else siamese_network.main.parameters(), 'lr': 1e-2}
                      ], lr=0.00001, momentum=0.9)

    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma, last_epoch=-1)


    transform = transforms.Compose([transforms.Resize((299, 299)),
                                    transforms.CenterCrop(299),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
                                  )
    cub_dataset = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=True, offset=1)
    dataloader = DataLoader(dataset=cub_dataset, batch_size=train_batch_size, shuffle=True, num_workers=4)

    cub_dataset_eval = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=False, offset=1)
    dataloader_eval = DataLoader(dataset=cub_dataset_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)

    for epoch in range(num_epochs):
        if epoch == 0:
            feature_set, label_set = get_feature_and_label(siamese_network, dataloader_eval, device)
            evaluation(feature_set, label_set)
        siamese_network.train()
        for i, data in enumerate(dataloader, 0):
            img_1, img_2, sim_label = data['img_1'].to(device), data['img_2'].to(device), data['sim_label'].type(torch.FloatTensor).to(device)
            optimizer.zero_grad()
            output_1, output_2 = siamese_network(img_1, img_2)
            loss = contrastive_loss(output_1, output_2, sim_label)
            loss.backward()
            optimizer.step()

            if i % 20 == 0 and i > 0:
                print("{}, Epoch [{:3d}/{:3d}], Iter [{:3d}/{:3d}], Current loss: {}".format(
                      datetime.datetime.now(), epoch, num_epochs, i, len(dataloader), loss.item()))
        if epoch % eval_step == 0:
            print("Start evalution")
            feature_set, label_set = get_feature_and_label(siamese_network, dataloader_eval, device)
            evaluation(feature_set, label_set)
            torch.save(siamese_network.module.state_dict(), os.path.join(ckpt_dir, 'model_' + str(epoch) +'_.pth'))
Code Example #18
import torch
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.functional as F

from config import Config
from utils import imshow, show_plot
from data_reader import SiameseNetworkDataset
from model import SiameseNetwork
from loss import ContrastiveLoss

folder_dataset_test = dset.ImageFolder(root=Config.testing_dir)
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset_test,
                                        transform=transforms.Compose([transforms.Resize((100,100)),
                                                                      transforms.ToTensor()
                                                                      ])
                                       ,should_invert=False)

test_dataloader = DataLoader(siamese_dataset,num_workers=6,batch_size=1,shuffle=True)
dataiter = iter(test_dataloader)
x0,_,_ = next(dataiter)
model = SiameseNetwork().cuda()
model.load_state_dict(torch.load("./model.pth"))
model.eval()
for i in range(10):
    _,x1,label2 = next(dataiter)
    concatenated = torch.cat((x0,x1),0)
    
    output1,output2 = model(Variable(x0).cuda(),Variable(x1).cuda())
    euclidean_distance = F.pairwise_distance(output1, output2)
    imshow(torchvision.utils.make_grid(concatenated),"output/"+str(i),'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))

Code Example #19
File: run.py  Project: kunalmulwani/631project
def main():
    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')
    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = SiameseNetworkDataset(imageFolderDataset=train_dataset_dir,
                                          transform=transforms.Compose([
                                              transforms.Resize((100, 100)),
                                              transforms.ToTensor()
                                          ]))
    vis_dataloader = DataLoader(train_dataset,
                                shuffle=False,
                                num_workers=0,
                                batch_size=1)

    # dataiter = iter(vis_dataloader)
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0],example_batch[1]),0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    #
    # example_batch = next(dataiter)
    # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)
    # imshow(torchvision.utils.make_grid(concatenated))
    # print(example_batch[2].numpy())
    net = SiameseNetwork()
    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0005)
    loss_vals = []
    '''
    Training Starts
    '''
    print('Training started')
    # for epoch in range(100):
    #    loss_epoch = 0
    #    for i, data in enumerate(vis_dataloader,0):
    #        img_0, img_1, label = data
    #        # img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)
    #        optimizer.zero_grad()
    #        out_0, out_1 = net(img_0, img_1)
    #        loss = criterion(out_0, out_1, label)
    #        loss_epoch += loss.item()
    #        loss.backward()
    #        optimizer.step()
    #    loss_vals.append(loss_epoch)
    #    print('Epoch',str(epoch+1), str(loss_epoch))
    # print('Training completed')
    # plt.plot(loss_vals)
    # plt.savefig('loss_siamese.png')
    #
    # torch.save(net.state_dict(), 'siamese.pt')

    # ****************************** Training ends ***************************************
    '''
    Testing starts
    '''
    net.load_state_dict(torch.load('siamese.pt'))
    test_dataset = SiameseTestDataset(train_dataset_dir, \
                                 transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))
    test_vis_dataloader = DataLoader(test_dataset,
                                     shuffle=False,
                                     num_workers=0,
                                     batch_size=1)

    train_dataset_dir = tdatasets.ImageFolder('images/all')
    train_dataset = FacesDataset(train_dataset_dir, \
                                 transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))

    _, test = split_train_val(train_dataset)
    test_dataloader = DataLoader(test,
                                 shuffle=False,
                                 num_workers=0,
                                 batch_size=1)
    correct = 0
    total = 0
    for i, data in enumerate(test_dataloader, 0):
        total += 1
        img_1, labels = data
        min_dist = float("inf")
        pred = -1
        print('Testing begins', i)
        for j, data_test_vis in enumerate(test_vis_dataloader, 0):
            img_0 = data_test_vis
            out_0, out_1 = net(img_0, img_1)
            dist = F.pairwise_distance(out_0, out_1)
            if min_dist > dist:
                min_dist = dist
                pred = j
        if pred == labels.item():
            correct += 1
        print('Testing ends', i, pred)

    print('Accuracy: ', str(correct / total))
Code Example #20
import torch
import torch.nn.functional as F
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms

import config
from model import SiameseNetwork
from utils import SiameseNetworkDataset, imshow

opt = config.args()
folder_dataset_test = torchvision.datasets.ImageFolder(root=opt.testing_dir)
# Define the image dataset
transform_test = transforms.Compose([transforms.Resize((100, 100)),
                                     transforms.ToTensor()])
siamese_dataset_test = SiameseNetworkDataset(imageFolderDataset=folder_dataset_test,
                                             transform=transform_test,
                                             should_invert=False)

# Define the image dataloader
test_dataloader = DataLoader(siamese_dataset_test,
                             shuffle=True,
                             batch_size=1)

net = SiameseNetwork().cuda()
net.load_state_dict(torch.load('checkpoint/siameseNet49.pth'))
# Generate comparison images
dataiter = iter(test_dataloader)
x0, _, _ = next(dataiter)

for i in range(10):
    _, x1, label2 = next(dataiter)
    concatenated = torch.cat((x0, x1), 0)
    output1, output2 = net(x0.cuda(), x1.cuda())
    euclidean_distance = F.pairwise_distance(output1, output2)
    imshow(torchvision.utils.make_grid(concatenated), 'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))
Code Example #21
    config = args.parse_args()

    num_classes = config.num_classes
    base_lr = config.lr
    cuda = config.cuda
    num_epochs = config.num_epochs
    print_iter = config.print_iter
    model_name = config.model_name
    prediction_file = config.prediction_file
    best_prediction_file = config.best_prediction_file  #DBY
    batch = config.batch
    mode = config.mode

    # create model
    model = SiameseNetwork()
    #model = SiameseEfficientNet()
    model = Vgg19()

    if mode == 'test':
        load_model(model_name, model)

    if cuda:
        model = model.cuda()

    # Define 'best loss' - DBY
    best_loss = 0.1
    last_loss = 0
    if mode == 'train':
        # define loss function
        # loss_fn = nn.CrossEntropyLoss()
Code Example #22
from flask import Flask, request, Response, jsonify
from http import HTTPStatus
from model import SiameseNetwork
import json
import torch

from utils import cosine_similarity
import numpy as np
import pandas as pd

device = "cpu"

app = Flask(__name__)

model = SiameseNetwork().to(device)
model.load_state_dict(
    torch.load("./model2.pt", map_location=torch.device(device)))
model.eval()

A = 1


@app.route("/a")
def hello():
    return "<h1 style='color:blue'>Hello There!</h1>"


@app.route('/b', methods=['POST'])
def map_recomend():
    request_data = json.loads(request.data)["user"]
Code Example #23
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(0)
    random.seed(0)
    np.random.seed(0)

    base_lr = config.lr
    cuda = config.cuda
    num_epochs = config.num_epochs
    print_iter = config.print_iter
    model_name = config.model_name
    prediction_file = config.prediction_file
    batch = config.batch

    # create model
    model = SiameseNetwork()

    if cuda:
        model = model.cuda()

    # define loss function
    class ContrastiveLoss(torch.nn.Module):
        """
        Contrastive loss function.
        Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
        """
        def __init__(self, margin=2.0):
            super(ContrastiveLoss, self).__init__()
            self.margin = margin

        def forward(self, output1, output2, label):
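            # (The listing truncates the snippet at the forward signature. A plausible
            #  body, following the Hadsell-Chopra-LeCun formulation cited in the
            #  docstring, is sketched below; the 0 = similar / 1 = dissimilar label
            #  convention is an assumption carried over from the other examples.)
            euclidean_distance = torch.nn.functional.pairwise_distance(
                output1, output2, keepdim=True)
            loss_contrastive = torch.mean(
                (1 - label) * torch.pow(euclidean_distance, 2) +
                label * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
            return loss_contrastive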
Code Example #24
File: main.py  Project: gxdai/FCS_pytorch
def train(args):
    # basic arguments.
    ngpu = args.ngpu
    margin = args.margin
    manual_seed = args.manual_seed
    torch.manual_seed(manual_seed)
    mean_value = args.mean_value
    std_value = args.std_value
    print("margin = {:5.2f}".format(margin))
    print("manual_seed = {:5.2f}".format(manual_seed))
    print("mean_value = {:5.2f}".format(mean_value))
    print("std_value = {:5.2f}".format(std_value))
    num_epochs = args.num_epochs
    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    gamma = args.gamma # for learning rate decay
    learning_rate = args.learning_rate
    learning_rate2 = args.learning_rate2


    loss_type = args.loss_type
    dataset_name = args.dataset_name
    pair_type = args.pair_type
    mode = args.mode
    weight_file = args.weight_file
    print("pair_type = {}".format(pair_type))
    print("loss_type = {}".format(loss_type))
    print("mode = {}".format(mode))
    print("weight_file = {}".format(weight_file))

    root_dir = args.root_dir
    image_txt = args.image_txt
    train_test_split_txt = args.train_test_split_txt
    label_txt = args.label_txt
    ckpt_dir = args.ckpt_dir
    eval_step = args.eval_step
    display_step = args.display_step
    embedding_size = args.embedding_size


    pretrained = args.pretrained
    aux_logits = args.aux_logits
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits':aux_logits, 'embedding_size': embedding_size}

    # create directory
    model_dir = os.path.join(ckpt_dir, dataset_name, loss_type, str(int(embedding_size)))
    print("model_dir = {}".format(model_dir))
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    # network and loss
    siamese_network = SiameseNetwork(**kargs)


    first_group, second_group = siamese_network.separate_parameter_group()

    param_lr_dict = [
               {'params': first_group, 'lr': learning_rate2},
               {'params': second_group, 'lr': learning_rate}
              ]

    gpu_number = torch.cuda.device_count()
    if device.type == 'cuda' and gpu_number > 1:
        siamese_network = nn.DataParallel(siamese_network, list(range(torch.cuda.device_count())))
    siamese_network.to(device)

    # contrastive_loss = ContrastiveLoss(margin=margin)

    # params = siamese_network.parameters()

    print("args.optimizer = {:10s}".format(args.optimizer))
    print("learning_rate = {:5.5f}".format(learning_rate))
    print("learning_rate2 = {:5.5f}".format(learning_rate2))
    optimizer = configure_optimizer(param_lr_dict, optimizer=args.optimizer)

    # using different lr
    # scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma, last_epoch=-1)


    transform = transforms.Compose([transforms.Resize((299, 299)),
                                    transforms.CenterCrop(299),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
                                  )

    if dataset_name == 'cub200':
        """
        print("dataset_name = {:10s}".format(dataset_name))
        print(root_dir)
        print(image_txt)
        print(train_test_split_txt)
        print(label_txt)
        """
        dataset_train = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=True, offset=1)
        dataset_eval = CubDataset(root_dir, image_txt, train_test_split_txt, label_txt, transform=transform, is_train=False, offset=1)
    elif dataset_name == 'online_product':
        """
        print("dataset_name = {:10s}".format(dataset_name))
        """
        dataset_train = OnlineProductDataset(root_dir, train_txt=image_txt, test_txt=train_test_split_txt, transform=transform, is_train=True, offset=1)
        dataset_eval = OnlineProductDataset(root_dir, train_txt=image_txt, test_txt=train_test_split_txt, transform=transform, is_train=False, offset=1)
    elif dataset_name == "car196":
        print("dataset_name = {}".format(dataset_name))
        dataset_train = CarDataset(root_dir, image_info_mat=image_txt, transform=transform, is_train=True, offset=1)
        dataset_eval = CarDataset(root_dir, image_info_mat=image_txt, transform=transform, is_train=False, offset=1)


    dataloader = DataLoader(dataset=dataset_train, batch_size=train_batch_size, shuffle=False, num_workers=4)
    dataloader_eval = DataLoader(dataset=dataset_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)

    log_for_loss = []

    if mode == 'evaluation':
        print("Do one time evluation and exit")
        print("Load pretrained model")
        siamese_network.module.load_state_dict(torch.load(weight_file))
        print("Finish loading")
        print("Calculting features")
        feature_set, label_set, path_set = get_feature_and_label(siamese_network, dataloader_eval, device)
        rec_pre = evaluation(feature_set, label_set)
        # np.save("car196_rec_pre_ftl.npy", rec_pre)
        # for visualization
        sum_dict = {'feature': feature_set, 'label': label_set, 'path': path_set}
        np.save('car196_fea_label_path.npy', sum_dict)
        sys.exit()
    print("Finish eval")

    for epoch in range(num_epochs):
        if epoch == 0:
            feature_set, label_set, _ = get_feature_and_label(siamese_network, dataloader_eval, device)
            # distance_type: Euclidean or cosine
            rec_pre = evaluation(feature_set, label_set, distance_type='cosine')
        siamese_network.train()
        for i, data in enumerate(dataloader, 0):
            # img_1, img_2, sim_label = data['img_1'].to(device), data['img_2'].to(device), data['sim_label'].type(torch.FloatTensor).to(device)
            img_1, img_2, label_1, label_2 = data['img_1'].to(device), data['img_2'].to(device), data['label_1'].to(device), data['label_2'].to(device)
            optimizer.zero_grad()
            output_1, output_2 = siamese_network(img_1, img_2)
            pair_dist, pair_sim_label = calculate_distance_and_similariy_label(output_1, output_2, label_1, label_2, sqrt=True, pair_type=pair_type)
            if loss_type == "contrastive_loss":
                loss, positive_loss, negative_loss = contrastive_loss(pair_dist, pair_sim_label, margin)
            elif loss_type == "focal_contrastive_loss":
                loss, positive_loss, negative_loss = focal_contrastive_loss(pair_dist, pair_sim_label, margin, mean_value, std_value)
            elif loss_type == "triplet_loss":
                loss, positive_loss, negative_loss = triplet_loss(pair_dist, pair_sim_label, margin)
            elif loss_type == "focal_triplet_loss":
                loss, positive_loss, negative_loss = focal_triplet_loss(pair_dist, pair_sim_label, margin, mean_value, std_value)
            elif loss_type == "angular_loss":
                center_output = (output_1 + output_2)/2.
                pair_dist_2, _ = calculate_distance_and_similariy_label(center_output, output_2, label_1, label_2, sqrt=True, pair_type=pair_type)
                # angle margin is 45^o
                loss, positive_loss, negative_loss = angular_loss(pair_dist, pair_dist_2, pair_sim_label, 45)
            else:
                print("Unknown loss function")
                sys.exit()

            # try my own customized loss function
            # loss = contrastive_loss(output_1, output_2, pair_sim_label)
            loss.backward()
            optimizer.step()
            log_for_loss.append(loss.detach().item())
            if i % display_step == 0 and i > 0:
                print("{}, Epoch [{:3d}/{:3d}], Iter [{:3d}/{:3d}], Loss: {:6.5f}, Positive loss: {:6.5f}, Negative loss: {:6.5f}".format(
                      datetime.datetime.now(), epoch, num_epochs, i, len(dataloader), loss.item(), positive_loss.item(), negative_loss.item()))
        if epoch % eval_step == 0:
            print("Start evalution")
            # np.save(loss_type +'.npy', log_for_loss)
            feature_set, label_set, _ = get_feature_and_label(siamese_network, dataloader_eval, device)
            # distance_type: Euclidean or cosine
            rec_pre = evaluation(feature_set, label_set, distance_type='cosine')
            torch.save(siamese_network.module.state_dict(), os.path.join(model_dir, 'model_' + str(epoch) +'_.pth'))
Code Example #25
folder_dataset = dset.ImageFolder(root=Config.training_dir)
siamese_dataset = SiameseNetworkDataset(
  imageFolderDataset=folder_dataset,
  transform=transforms.Compose(
    [
      transforms.Resize((100,100)),
      transforms.ToTensor()
    ]
  ),
  should_invert=False
)
train_dataloader = DataLoader(siamese_dataset,
                        shuffle=True,
                        num_workers=8,
                        batch_size=Config.train_batch_size)
net = SiameseNetwork().cuda()
print(net)
criterion = ContrastiveLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)
counter = []
loss_history = []
iteration_number = 0
for epoch in range(0, Config.train_number_epochs):
    for i, data in enumerate(train_dataloader, 0):
        img0, img1, label = data
        img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
        optimizer.zero_grad()
        output1, output2 = net(img0, img1)
        loss_contrastive = criterion(output1, output2, label)
        loss_contrastive.backward()
        optimizer.step()
Code Example #26
File: train_func.py  Project: gxdai/FCS_pytorch
from torch.utils.data import DataLoader
from evaluation import get_feature_and_label, evaluation

os.environ['CUDA_VISIBLE_DEVICES'] = '1,3'

ngpu = 2
margin = 1.
num_epochs = 1000
train_batch_size = 64
test_batch_size = 32
gamma = 0.98  # for learning rate decay
pretrained = False
aux_logits = False
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
kargs = {'ngpu': ngpu, 'pretrained': pretrained, 'aux_logits': aux_logits}
siamese_network = SiameseNetwork(**kargs)
if device.type == 'cuda' and torch.cuda.device_count() > 1:
    siamese_network = nn.DataParallel(siamese_network,
                                      list(range(torch.cuda.device_count())))
siamese_network.to(device)
contrastive_loss = ContrastiveLoss(margin=margin)

# params = siamese_network.parameters()

# optimizer = optim.Adam(params, lr=0.0005)
# optimizer = optim.SGD(params, lr=0.01, momentum=0.9)

# using different lr
optimizer = optim.SGD(
    [{
        'params': siamese_network.module.inception_v3.parameters()
Code Example #27
    images = [dataset_path + each_sub_directory + '/' + x for x in images]
    fake.append(images[:30])
    genuine.append(images[30:])


genuine_training_data, genuine_validation_data, genuine_test_data = genuine[:80], genuine[80:90], genuine[90:]
fake_training_data, fake_validation_data, fake_test_data = fake[:80], fake[80:90], fake[90:]
del genuine, fake
image_height, image_width = configuration.configure_parameter["image_height"],configuration.configure_parameter["image_width"]

batch_size = configuration.configure_parameter["batch_size"]
num_train_samples = 57600
num_val_samples = num_test_samples = 11520

input_shape = (image_height, image_width, 1)
get_network = SiameseNetwork(input_shape)
input_1 = Input(shape=input_shape)
input_2 = Input(shape=input_shape)
feature_a = get_network(input_1)
feature_b = get_network(input_2)
feature_distance=Lambda(euclidean_distance)([feature_a, feature_b])
model = Model([input_1, input_2], feature_distance)
adam=Adam(0.001, decay=2.5e-4)
model.compile(loss=contrastive_loss, optimizer=adam, metrics=[accuracy])

callbacks = [
    EarlyStopping(patience=12, verbose=1),
    ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.000001, verbose=1),
    ModelCheckpoint('/home/rafiqul/SiameseNetwork/siamese-{epoch:05d}.h5', verbose=1, save_weights_only=True)
]
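
euclidean_distance, contrastive_loss, and accuracy are referenced above but defined elsewhere in that project. A common Keras formulation of the three helpers is sketched below as a reference point only; the imports assume tf.keras (the project may use standalone Keras), and the margin value, the distance threshold, and the 1 = genuine pair / 0 = forged pair label convention are assumptions, not the project's actual definitions.

from tensorflow.keras import backend as K


def euclidean_distance(tensors):
    # `tensors` is the [feature_a, feature_b] list handed to the Lambda layer
    x, y = tensors
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))


def contrastive_loss(y_true, y_pred, margin=1.0):
    # y_pred is the embedding distance; y_true == 1 marks a genuine pair
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(y_true * K.square(y_pred) +
                  (1.0 - y_true) * K.square(K.maximum(margin - y_pred, 0.0)))


def accuracy(y_true, y_pred, threshold=0.5):
    # a pair is called genuine when the embedding distance falls below the threshold
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(K.equal(y_true, K.cast(y_pred < threshold, y_pred.dtype)))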
Code Example #28
    siamese_dataset = SiameseNetworkDataset(
        imageFolderDataset=images_folder_dataset,
        transform=transforms.Compose([transforms.ToTensor()]),
        should_invert=False)
    """ Train Dataset """
    train_dataloader = DataLoader(siamese_dataset,
                                  shuffle=True,
                                  num_workers=0,
                                  batch_size=config.train_batch_size)

    if os.path.exists(args.savefile):
        print("Loading Existing Model")
        net = torch.load(args.savefile)
    else:
        print("Creating New Model")
        net = SiameseNetwork().cuda()

    criterion = ContrastiveLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.learnrate)

    counter = []
    loss_history = []
    iteration_number = 0

    total_step = len(train_dataloader)

    for epoch in range(0, config.train_number_epochs):
        for i, data in enumerate(train_dataloader, 0):
            img0, img1, label = data
            img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
            optimizer.zero_grad()