Example #1
def train():
    dataset = data.get_dataset(train=True)
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    siamese = Siamese()
    optimizer = tf.train.AdamOptimizer(FLAGS.lr)
    train_step = optimizer.minimize(siamese.loss)

    tf.summary.scalar('loss', siamese.loss)
    tf.summary.scalar('acc', siamese.accuracy)
    merged_summaries = tf.summary.merge_all()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(FLAGS.summaries_dir, sess.graph)
        sess.run(tf.global_variables_initializer())

        for i in trange(FLAGS.n_iters):
            x1, x2, y = sess.run(next_element)
            _, loss, summary = sess.run(
                [train_step, siamese.loss, merged_summaries],
                feed_dict={
                    siamese.x1: x1,
                    siamese.x2: x2,
                    siamese.y: y,
                })
            assert not np.isnan(loss), 'Model diverged with loss = NaN'
            train_writer.add_summary(summary, i)

            if i % 1000 == 0:
                saver.save(sess, FLAGS.model_path)
        print('Training completed, model saved:',
              saver.save(sess, FLAGS.model_path))
Example #3
def init_model(class_num):
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)
    with sess.graph.as_default():
        with sess.as_default():
            siamese = Siamese(class_num=class_num)
            sess.run(tf.local_variables_initializer())
            sess.run(tf.global_variables_initializer())
            var_list = [var for var in tf.global_variables() if "moving" in var.name]
            var_list += [var for var in tf.global_variables() if "global_step" in var.name]
            var_list += tf.trainable_variables()
            saver = tf.train.Saver(var_list=var_list, max_to_keep=5)
            last_file = tf.train.latest_checkpoint("../model/")
            if last_file:
                print('Restoring model from {}'.format(last_file))
                saver.restore(sess, last_file)
    return sess, saver, siamese
Example #4
def test():
    def parse_file(f):
        # Read as grayscale, invert so the background maps to 0, and scale to [0, 1].
        image = ~cv2.imread(f, 0)
        image = image / 255
        return np.expand_dims(image, axis=-1)

    files = data.get_files(train=False)
    files = files[:FLAGS.n_test_classes]  # subsample for n-way classification
    images = [[parse_file(f) for f in l] for l in files]
    gt_images = [l[0] for l in images]

    siamese = Siamese()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.model_path)

        gt_vals = sess.run(siamese.out, feed_dict={
            siamese.x1: gt_images,
        })

        preds = []
        for i in range(len(images)):
            test_images = images[i][1:]
            test_vals = sess.run(siamese.out,
                                 feed_dict={
                                     siamese.x1: test_images,
                                 })

            test_preds = []
            for val in test_vals:
                d = np.sum(np.abs(gt_vals - val), axis=1)
                test_preds.append(np.argmin(d))
            preds.append(test_preds)

    y_true = [[i] * (len(l) - 1) for i, l in enumerate(images)]
    y_true = np.array(y_true).flatten()
    y_pred = np.array(preds).flatten()
    cm = confusion_matrix(y_true, y_pred)

    tp = np.eye(len(cm)) * cm
    print('Total accuracy:', np.sum(tp) / np.sum(cm))
    plot_confusion_matrix(cm, np.arange(len(images)))
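
The decision rule inside the loop above is nearest-neighbour matching under the L1 distance between embeddings. A tiny self-contained numpy illustration with made-up embeddings (not produced by the model):

import numpy as np

gt_vals = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # one reference embedding per class
val = np.array([0.9, 0.1])                                 # embedding of one test image

d = np.sum(np.abs(gt_vals - val), axis=1)  # L1 distance to every reference class
print(int(np.argmin(d)))                   # -> 1: index of the predicted class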
Example #5
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='EMOTION CLASSIFICATION')
    parser.add_argument('--batch-size',
                        type=int,
                        metavar='N',
                        help='input batch size for training')
    parser.add_argument('--dataset-dir',
                        default='data',
                        help='directory that contains the train/val/test '
                        'image folders and the label CSV files')
    parser.add_argument('--epochs',
                        type=int,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--log-interval',
                        type=int,
                        default=75,
                        metavar='N',
                        help='number of batches between logging train status')
    parser.add_argument('--lr', type=float, metavar='LR', help='learning rate')
    parser.add_argument('--model-name',
                        type=str,
                        default='run-01',
                        help='name under which the current model is saved')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=0.0,
                        help='Weight decay hyperparameter')
    parser.add_argument('--continue-train',
                        type=str,
                        default='NONE',
                        help='name of a saved model to resume training from')
    parser.add_argument('--examine', default=False, action='store_true')

    args = parser.parse_args()
    # Set random seeds for reproducibility (honours the --seed argument).
    SEED = args.seed

    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

    train_imgs_dir = os.path.join(args.dataset_dir, "train")
    train_labels = pd.read_csv(
        os.path.join(args.dataset_dir, "label/train_label.csv"))

    val_imgs_dir = os.path.join(args.dataset_dir, "val")
    val_labels = pd.read_csv(
        os.path.join(args.dataset_dir, "label/val_label.csv"))

    test_imgs_dir = os.path.join(args.dataset_dir, "test")
    test_labels = pd.read_csv(
        os.path.join(args.dataset_dir, "label/test_label.csv"))

    training_data_transform = T.Compose([
        T.ToPILImage("RGB"),
        T.RandomRotation(5),
        T.RandomHorizontalFlip(0.5),
        # SquarePad(),
        T.Resize(128),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    test_data_transform = T.Compose([
        T.ToPILImage("RGB"),
        # SquarePad(),
        T.Resize(128),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    train_set = ImageDataset(train_labels,
                             train_imgs_dir,
                             transform=training_data_transform)
    val_set = ImageDataset(val_labels,
                           val_imgs_dir,
                           transform=test_data_transform)
    test_set = ImageDataset(test_labels,
                            test_imgs_dir,
                            transform=test_data_transform)

    print("trainset: ", len(train_set))
    print("val: ", len(val_set))
    print("testset: ", len(test_set))

    train_dataloader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True)
    val_dataloader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=True)
    test_dataloader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=True)

    # Either examine a previously trained model or train a new one.
    if args.examine:
        model = Siamese()
        model.load_state_dict(
            torch.load('runs/' + args.model_name + '/' + args.model_name +
                       '.pth'))
        model.to(device)

        images, labels, probs = get_predictions(args, model, test_dataloader,
                                                device)
        pred_labels = torch.argmax(probs, 1)
        cm = confusion_matrix(labels, pred_labels)
        #plot_confusion_matrix(args, labels, pred_labels)
        plot_confusion_matrix(args,
                              cm,
                              l_classes=np.asarray(classes),
                              normalize=True,
                              title='Normalized confusion matrix')
        print("done!")
    else:

        writer = SummaryWriter('runs/' + args.model_name)

        if (args.continue_train == "NONE"):
            model = Siamese()
            model.apply(initialize_parameters)

        else:

            model = Siamese()
            model.load_state_dict(
                torch.load('runs/' + args.continue_train + '/' +
                           args.continue_train + '.pth'))
            print("CONTINUE TRAIN MODE----")

        def count_parameters(model):
            return sum(p.numel() for p in model.parameters()
                       if p.requires_grad)

        print(
            f'The model has {count_parameters(model):,} trainable parameters')

        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
        criterion = nn.CrossEntropyLoss()
        model.to(device)
        criterion = criterion.to(device)
        model.train()
        optimizer.zero_grad()

        # Define optimizer
        # opt = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

        # Record loss and accuracy history
        args.train_loss = []
        args.val_loss = []
        args.val_acc = []

        # Train the model
        best_valid_loss = float('inf')

        for epoch in range(1, args.epochs + 1):
            start_time = time.monotonic()
            best_valid_loss = train(args, epoch, model, train_dataloader,
                                    val_dataloader, optimizer, criterion,
                                    device, writer, best_valid_loss)
            end_time = time.monotonic()

            epoch_mins, epoch_secs = epoch_time(start_time, end_time)
            print(
                f'Epoch: {epoch :02} | Epoch Time: {epoch_mins}m {epoch_secs}s'
            )

        # Evaluate on test set
        writer.flush()

        #test time
        model = Siamese()
        model.load_state_dict(
            torch.load('runs/' + args.model_name + '/' + args.model_name +
                       '.pth'))
        model.to(device)
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.to(device)

        loss, acc = evaluate(args, model, test_dataloader, criterion, device)
        print("TEST RESULTS: ", loss, acc)
Example #6
way = 20
times = 400

dataSet = OmniglotTrain(train_dataset, transform=data_transforms)
testSet = OmniglotTest(test_dataset,
                       transform=transforms.ToTensor(),
                       times=times,
                       way=way)
testLoader = DataLoader(testSet, batch_size=way, shuffle=False, num_workers=16)

dataLoader = DataLoader(dataSet, batch_size=cmd.trainBatch,
                        shuffle=False, num_workers=16)

# Get the network architecture
net = Siamese()
# Loss criterion
criterion = torch.nn.BCEWithLogitsLoss(reduction='mean')

# Optimizer
if cmd.optMethod == 'adam':
    optimizer = torch.optim.Adam(net.parameters(), lr=cmd.lr)

# To store train loss
train_loss = []
# To store the accuracy
accuracy = []
# Get the network in training mode.
net.train()

# Use GPUs.
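# Sketch of the training loop this setup typically feeds into; it is not part of
# the excerpt above, and `cmd.cuda` plus the epoch budget are assumptions.
if cmd.cuda:
    net = net.cuda()
    criterion = criterion.cuda()

for epoch in range(1, 11):                      # assumed number of epochs
    for img1, img2, label in dataLoader:
        if cmd.cuda:
            img1, img2, label = img1.cuda(), img2.cuda(), label.cuda()
        optimizer.zero_grad()
        output = net(img1, img2)                # raw logits, matching BCEWithLogitsLoss
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())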
Example #7
                                               impo_pairs_test,
                                               impo_labels_test)

    l = len(pair_datas)
    idx = l - 3200
    #:exit()

    while True:

        dataset = SiameseData(subject_imgs, pair_datas, labels, None)
        dataset_test = SiameseData(subject_imgs, pair_data_test, labels_test,
                                   None)
        dataloader = DataLoader(dataset, batch_size=32)
        dataloader_test = DataLoader(dataset_test, batch_size=64)

        model = Siamese()
        #loss = SiameseLoss()
        loss = torch.nn.CrossEntropyLoss()
        optim = torch.optim.Adam(model.parameters())
        #optim = torch.optim.SGD(model.parameters(), lr = 0.001, momentum=0.9)
        if has_gpu:
            print("Push model to gpu")
            model.cuda()
            loss.cuda()

        for i, ((i1, i2), l) in enumerate(dataloader):

            model.train()
            if has_gpu:
                i1 = torch.autograd.Variable(i1.type(torch.FloatTensor)).cuda()
                i2 = torch.autograd.Variable(i2.type(torch.FloatTensor)).cuda()
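            # Sketch of the step the excerpt is cut off before; the names reuse the
            # loop variables above, and the Long cast for the labels is an assumption.
            out = model(i1, i2)
            l = l.type(torch.LongTensor)
            if has_gpu:
                l = l.cuda()
            batch_loss = loss(out, l)
            optim.zero_grad()
            batch_loss.backward()
            optim.step()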
Example #8
def main(data_dir):

    siamese_model = Siamese()

    batch_size = 4
    num_train = 30000
    augment = True
    way = 20
    trials = 300
    epochs = 50

    train_loader, val_loader = get_train_valid_loader(data_dir,
                                                      batch_size,
                                                      num_train,
                                                      augment,
                                                      way,
                                                      trials,
                                                      pin_memory=True)

    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.SGD(siamese_model.parameters(),
                                lr=1e-3,
                                momentum=0.9)

    lambda1 = lambda epoch: 0.99**epoch
    #scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda1)

    writer = SummaryWriter()

    siamese_model.cuda()

    best_accuracy = 0.0

    for i in range(epochs):
        siamese_model.train()
        #batch_count = 0
        avg_train_loss = 0.0
        for it, (img_1, img_2, labels) in enumerate(train_loader):
            optimizer.zero_grad()

            img_1 = img_1.cuda()
            img_2 = img_2.cuda()
            labels = labels.cuda()
            preds = siamese_model(img_1, img_2)

            loss = criterion(preds, labels)

            avg_train_loss += loss.item()
            writer.add_scalar('Loss_train', loss.item(),
                              len(train_loader) * i + it)

            loss.backward()
            optimizer.step()

            #batch_count+=1
            #print(batch_count)

        siamese_model.eval()
        count = 0
        with torch.no_grad():
            for ref_images, candidates in val_loader:
                ref_images = ref_images.cuda()
                candidates = candidates.cuda()

                preds = siamese_model(ref_images, candidates)

                if torch.argmax(preds) == 0:
                    count += 1
        if count / len(val_loader) > best_accuracy:
            best_accuracy = count / len(val_loader)
            torch.save(siamese_model.state_dict(), 'best_model.pth')

        writer.add_scalar('Accuracy_validation', count / trials, i)

        print('Epoch {} | Train loss {} | Val accuracy {}'.format(
            i, avg_train_loss / len(train_loader), count / trials))

        #scheduler.step()

    writer.flush()

    best_model = Siamese().cuda()
    best_model.load_state_dict(torch.load('best_model.pth'))
    best_model.eval()

    trials = 400
    test_loader = get_test_loader(data_dir, way, trials)
    test_count = 0
    with torch.no_grad():
        for ref_images, candidates in test_loader:
            ref_images = ref_images.cuda()
            candidates = candidates.cuda()

            preds = best_model(ref_images, candidates)

            if torch.argmax(preds) == 0:
                test_count += 1

    print('Test Accuracy {}'.format(test_count / len(test_loader)))
Example #9
            max_list = heapq.nlargest(4, range(len(prediction)), prediction.take)
            for num in max_list:
                if num not in result:
                    result.append(num)
                    break

        # Write predictions into mappings.txt.
        f.write(str(i).zfill(4)+",")
        f.write("".join([str(num) for num in result])+'\n')
        sys.stdout.write('\r>> Testing image %d/%d'%(i+1, conf.TEST_NUMBER))
        sys.stdout.flush()
    time2 = time.time()
    print("\nUsing time:", "%.2f"%(time2-time1)+"s")


if __name__=='__main__':
    # Network
    siamese = Siamese()

    # Adaptive use of GPU memory.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)
    sess.run(tf.global_variables_initializer())

    # Restore the model.
    saver = tf.train.Saver()
    saver.restore(sess, conf.MODEL_PATH)

    test(siamese, sess)
Example #10
#trainloader, testloader = dataset.cifar10.process()
trainSet = OmniTrain(args.train_path)
testSet = OmniTest(args.test_path, times=args.times, way=args.way)

trainLoader = DataLoader(trainSet,
                         batch_size=args.batch_size,
                         shuffle=False,
                         num_workers=args.workers)
testLoader = DataLoader(testSet,
                        batch_size=args.way,
                        shuffle=False,
                        num_workers=args.workers)
print('==> Index established.\n')

loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean')
net = Siamese()
net = net.to(device)
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=1e-4)
if not os.path.exists(args.save_path):
    os.makedirs(args.save_path)


def train():
    net.train()
    train_loss = []
    loss_val = 0
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
Example #11
        losses_v_cumulative.append(np.mean(losses_v))
        accuracy_v_cumulative.append(np.mean(accuracy_v))
        print(np.mean(losses_v))
        print(np.mean(accuracy_v))
        losses_v = []
        accuracy_v = []
        if epoch % 5 == 0:
            torch.save(model.state_dict(), results_dir + "saved_model.pth")
            np.save(results_dir + "losses.npy", losses_cumulative)
            np.save(results_dir + "losses_v.npy", losses_v_cumulative)
            np.save(results_dir + "accuracy.npy", accuracy_cumulative)
            np.save(results_dir + "accuracy_v.npy", accuracy_v_cumulative)
    torch.save(model.state_dict(), results_dir + "saved_model.pth")
    np.save(results_dir + "losses.npy", losses_cumulative)
    np.save(results_dir + "losses_v.npy", losses_v_cumulative)
    np.save(results_dir + "accuracy.npy", accuracy_cumulative)
    np.save(results_dir + "accuracy_v.npy", accuracy_v_cumulative)
    print(results_dir)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--network_location", help="No", required=False)
    args = parser.parse_args()
    device = torch.device("cuda:0")
    model = Siamese().to(device)
    if args.network_location:
        model.load_state_dict(torch.load(args.network_location))
    model = train(model, device)
Example #12
def train():
    parser = argparse.ArgumentParser(
        description='PyTorch One shot siamese training ')

    parser.add_argument("--train_path",
                        default="./images_background",
                        help="training folder")
    parser.add_argument("--test_path",
                        default="./images_evaluation",
                        help='path of testing folder')
    parser.add_argument("--way",
                        default=20,
                        type=int,
                        help="how much way one-shot learning")
    parser.add_argument("--times",
                        default=400,
                        type=int,
                        help="number of samples to test accuracy")
    parser.add_argument("--workers",
                        default=2,
                        type=int,
                        help="number of dataLoader workers")
    parser.add_argument("--batch_size",
                        default=128,
                        type=int,
                        help="number of batch size")
    parser.add_argument("--lr", default=0.1, type=float, help="learning rate")
    parser.add_argument("--max_iter",
                        default=50000,
                        type=int,
                        help="number of iterations before stopping")
    parser.add_argument("--save_path",
                        default="./model/siamese",
                        help="path to store model")

    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    print('==> Preparing data..\n')

    # trainloader, testloader = dataset.cifar10.process()
    trainSet = OmniTrain(args.train_path)
    testSet = OmniTest(args.test_path, times=args.times, way=args.way)

    trainLoader = DataLoader(trainSet,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers)
    testLoader = DataLoader(testSet,
                            batch_size=args.way,
                            shuffle=False,
                            num_workers=args.workers)
    print('==> Index established.\n')

    loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean')
    net = Siamese()
    net = net.to(device)
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=1e-4)
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    net.train()
    train_loss = []
    loss_val = 0
    accList = []
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=1e-4)
    for batch_id, (img1, img2, label) in enumerate(trainLoader, 1):
        if batch_id > args.max_iter:
            break
        img1, img2, label = img1.to(device), img2.to(device), label.to(device)
        optimizer.zero_grad()
        output = net.forward(img1, img2)

        loss = loss_fn(output, label)
        loss_val += loss.data
        loss.backward()
        optimizer.step()
        if batch_id % 5 == 0:
            print('batch [%d]\tloss:\t%.5f\t' % (
                batch_id,
                loss_val / 5,
            ))
            train_loss.append(loss_val)
            loss_val = 0
        if batch_id % 100 == 0:
            right, error = 0, 0
            for _, (test1, test2, _) in enumerate(testLoader, 1):
                test1, test2 = test1.to(device), test2.to(device)
                output = net.forward(test1, test2).data.cpu().numpy()
                pred = np.argmax(output)
                if pred == 0:
                    right += 1
                else:
                    error += 1
            print('*' * 70)
            print('[%d]\tright:\t%d\terror:\t%d\tprecision:\t%f' %
                  (batch_id, right, error, right * 1.0 / (right + error)))
            acc = right * 1.0 / (right + error)
            # print(acc)
            accList.append(acc)
            print(accList)
            print('*' * 70)

            if best_acc < acc:
                best_acc = acc
            state = {
                'net': net.state_dict(),
                'BestAcc': best_acc,
                'accList': accList,
                'epoch': batch_id,
            }
            savepath = os.path.join(args.save_path, 'bestcheck.plk')
            torch.save(state, savepath)
Example #13
    # train_dataset = dset.ImageFolder(root=Flags.train_path)
    # test_dataset = dset.ImageFolder(root=Flags.test_path)


    os.environ["CUDA_VISIBLE_DEVICES"] = Flags.gpu_ids
    print("use gpu:", Flags.gpu_ids, "to train.")

    trainSet = OmniglotTrain(Flags.train_path, transform=data_transforms)
    testSet = OmniglotTest(Flags.test_path, transform=transforms.ToTensor(), times=Flags.times, way=Flags.way)
    testLoader = DataLoader(testSet, batch_size=Flags.way, shuffle=False, num_workers=Flags.workers)

    trainLoader = DataLoader(trainSet, batch_size=Flags.batch_size, shuffle=False, num_workers=Flags.workers)

    loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean')
    net = Siamese()

    # multi gpu
    if len(Flags.gpu_ids.split(",")) > 1:
        net = torch.nn.DataParallel(net)

    if Flags.cuda:
        net.cuda()

    net.train()

    optimizer = torch.optim.Adam(net.parameters(), lr=Flags.lr)
    optimizer.zero_grad()

    train_loss = []
    loss_val = 0
Example #14
def main():

    torch.backends.cudnn.benchmark = True

    args = parse_args()
    mkdir(args.output)
    print(args.__dict__)
    print(args.__dict__, file=open(os.path.join(args.output, "log.txt"), "a"))

    train_set = create_dataset(args, True)
    val_set = create_dataset(args, False)
    labels = torch.tensor(train_set.pairs[2])
    p_class = 1.0 / len(labels[labels == 1])
    n_class = 1.0 / len(labels[labels != 1])
    sample_probabilities = torch.where(
        labels == 1, torch.full_like(labels, p_class), torch.full_like(labels, n_class)
    )

    epoch_length = labels.shape[0]
    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        sample_probabilities, epoch_length
    )

    train_loader = DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        sampler=sampler,
    )
    val_loader = DataLoader(
        val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers
    )

    model = Siamese()
    model = model.cuda()
    if "best_model.pth" in os.listdir(args.output):
        model.load_state_dict(torch.load(os.path.join(args.output, "best_model.pth")))

    if args.ngpu > 1:
        model = nn.DataParallel(model, range(args.ngpu))

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=args.epochs * len(train_loader)
    )
    criterion = torch.nn.CosineEmbeddingLoss(margin=args.margin)
    if not args.test_only:
        train(
            model, optimizer, scheduler, criterion, train_loader, val_loader, args.epochs, args.output
        )
    else:
        transforms = T.Compose(
            [
                T.Resize(args.size),
                T.CenterCrop(args.size),
                T.ToTensor(),
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
        test_set = MuseumDatasetTest(args.root, transforms, args.val_set)

        test_loader = DataLoader(
            test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers
        )
        embed(model, test_loader, args.output)
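
Example #14 balances positive and negative pairs by weighting every index with the inverse frequency of its class. A self-contained sketch of that idea on a toy label vector (independent of the dataset above):

import torch
from torch.utils.data import WeightedRandomSampler

labels = torch.tensor([1, 1, 0, 0, 0, 0, 0, 0])          # 2 positive pairs, 6 negative
p_w = 1.0 / (labels == 1).sum().item()                    # weight per positive: 1/2
n_w = 1.0 / (labels != 1).sum().item()                    # weight per negative: 1/6
weights = torch.where(labels == 1,
                      torch.full_like(labels, p_w, dtype=torch.float),
                      torch.full_like(labels, n_w, dtype=torch.float))
sampler = WeightedRandomSampler(weights, num_samples=len(labels))
print(list(sampler))  # indices drawn roughly evenly from both classes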
Example #15
from keras.layers import Input, Lambda, Dense
from keras.models import Model
from keras import backend as K
from model import Siamese
import os

model_name = 'weight.h5'
weight_path = os.path.join('model', model_name)

# define model structure
input_shape = (105, 105, 1)
left_input = Input(shape=input_shape)
right_input = Input(shape=input_shape)

siamese_net = Siamese(input_shape=input_shape)

encoded_l = siamese_net(left_input)
encoded_r = siamese_net(right_input)

L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([encoded_l, encoded_r])
prediction = Dense(1, activation='sigmoid')(L1_distance)

net = Model(inputs=[left_input, right_input], outputs=prediction)
net.summary()
net.save(weight_path)
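
Before the Core ML conversion below, a model assembled this way is usually compiled and queried as an image-pair verifier. A minimal sketch; the random arrays stand in for a real Omniglot-style pair sampler and are purely illustrative:

import numpy as np

net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

pairs_l = np.random.rand(32, 105, 105, 1).astype('float32')
pairs_r = np.random.rand(32, 105, 105, 1).astype('float32')
targets = np.random.randint(0, 2, size=(32, 1)).astype('float32')

net.fit([pairs_l, pairs_r], targets, batch_size=8, epochs=1)
scores = net.predict([pairs_l, pairs_r])   # values near 1.0 mean "same class"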

# convert to coreml
import coremltools

coreml_model = coremltools.converters.tensorflow.convert(
Example #16
def train():
    # network
    siamese = Siamese()

    image_batch_train1, image_batch_train2, label_batch_train = load_training_set()
    image_batch_test1, image_batch_test2, label_batch_test = load_testing_set()

    # Adaptive use of GPU memory.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Session(config=tf_config) as sess:
        # general setting
        saver = tf.train.Saver(max_to_keep=20)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, conf.MODEL_PATH)

        # Recording training process.
        writer_train = tf.summary.FileWriter('logs/train/', sess.graph)
        writer_test = tf.summary.FileWriter('logs/test/', sess.graph)
        saver = tf.train.Saver(max_to_keep=20)
        # train
        i = 0
        acc_max = 0
        while 1:
            image_train1, image_train2, label_train = sess.run(
                [image_batch_train1, image_batch_train2, label_batch_train])
            _, loss_ = sess.run(
                [siamese.optimizer, siamese.loss],
                feed_dict={
                    siamese.left: image_train1,
                    siamese.right: image_train2,
                    siamese.label: label_train
                })
            print('step %d: loss %.3f' % (i, loss_))

            if i % 10 == 0:
                image_train1, image_train2, label_train = sess.run([
                    image_batch_train1, image_batch_train2, label_batch_train
                ])
                acc_train, summary = sess.run(
                    [siamese.accuracy, siamese.merged],
                    feed_dict={
                        siamese.left: image_train1,
                        siamese.right: image_train2,
                        siamese.label: label_train
                    })
                writer_train.add_summary(summary, i)
                image_test1, image_test2, label_test = sess.run(
                    [image_batch_test1, image_batch_test2, label_batch_test])
                acc_test, summary = sess.run(
                    [siamese.accuracy, siamese.merged],
                    feed_dict={
                        siamese.left: image_test1,
                        siamese.right: image_test2,
                        siamese.label: label_test
                    })
                writer_test.add_summary(summary, i)
                print("Lter " + str(i) + ",Train Accuracy " + str(acc_train) +
                      ",Test Accuracy " + str(acc_test))

            if i % 100 == 0:
                test(siamese, sess)
                acc = accuracy_calculate()
                if acc > acc_max:
                    acc_max = acc
                    print("Save the model Successfully,max accuracy is",
                          acc_max)
                    saver.save(sess, "model/model_level5.ckpt", global_step=i)
                else:
                    print("pass,max accuracy is", acc_max)
            i += 1

    coord.request_stop()
    coord.join(threads)
Example #18
def main():
    # Training settings
    parser = argparse.ArgumentParser(
        description='PRETRAINED EMOTION CLASSIFICATION')
    parser.add_argument('--batch-size',
                        type=int,
                        metavar='N',
                        help='input batch size for training')
    parser.add_argument('--dataset-dir',
                        default='data',
                        help='directory that contains the train/test '
                        'image folders and the label CSV files')
    parser.add_argument('--epochs',
                        type=int,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--log-interval',
                        type=int,
                        default=75,
                        metavar='N',
                        help='number of batches between logging train status')
    parser.add_argument('--lr', type=float, metavar='LR', help='learning rate')
    parser.add_argument('--model-name',
                        type=str,
                        default='run-01',
                        help='name under which the current model is saved')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=0.0,
                        help='Weight decay hyperparameter')
    parser.add_argument('--continue-train',
                        type=str,
                        default='NONE',
                        help='name of a saved model to resume training from')
    parser.add_argument('--examine', default=False, action='store_true')
    parser.add_argument('--visualize', default=False, action='store_true')
    args = parser.parse_args()
    # Set random seeds for reproducibility (honours the --seed argument).
    SEED = args.seed

    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

    train_imgs_dir = os.path.join(args.dataset_dir, "train")
    train_labels = pd.read_csv(
        os.path.join(args.dataset_dir, "label_eliminate_2/train_labels.csv"))

    val_imgs_dir = os.path.join(args.dataset_dir, "test")
    val_labels = pd.read_csv(
        os.path.join(args.dataset_dir, "label_eliminate_2/test_labels.csv"))

    #test_imgs_dir = os.path.join(args.dataset_dir, "test")
    #test_labels = pd.read_csv(os.path.join(args.dataset_dir, "label/test_label.csv"))

    training_data_transform = T.Compose([
        #T.ToPILImage("RGB"),
        #T.RandomRotation(5),
        T.RandomHorizontalFlip(0.5),
        # SquarePad(),
        T.Resize((128, 128)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    test_data_transform = T.Compose([
        #T.ToPILImage("RGB"),
        # SquarePad(),
        T.Resize((128, 128)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    train_sample = np.random.choice(range(249800), 40000, replace=False)
    train_set = PretrainImageDataset(train_labels,
                                     train_imgs_dir,
                                     transform=training_data_transform)
    val_sample = np.random.choice(range(15770), 4000, replace=False)
    val_set = PretrainImageDataset(val_labels,
                                   val_imgs_dir,
                                   transform=test_data_transform)
    #test_set = ImageDataset(test_labels, test_imgs_dir, transform=test_data_transform)

    #test dataset

    #print("testset: ",len(test_set))

    train_dataloader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  sampler=train_sample)
    val_dataloader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                sampler=val_sample)
    #test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=True)
    print("trainset: ", len(train_dataloader))
    print("val: ", len(val_dataloader))

    test_dataloader = None

    # Visualize sample batches, examine a trained model, or train.
    if args.visualize:
        writer = SummaryWriter('runs_pretrained/' + args.model_name)
        # plot the images in the batch, along with predicted and true labels
        for i in range(30):
            fig = plt.figure(figsize=(12, 48))

            image1 = train_set[i]['image_1']
            image2 = train_set[i]['image_2']
            image3 = train_set[i]['image_3']
            images = [image1, image2, image3]
            label_A = train_set[i]['label_A']
            label_B = train_set[i]['label_B']

            for idx in np.arange(3):
                ax = fig.add_subplot(1, 3, idx + 1, xticks=[], yticks=[])
                matplotlib_imshow(images[idx], one_channel=False)
                ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
                    "percentage", label_B[idx], classes[label_A]))
            writer.add_figure('predictions vs. actuals', fig, global_step=i)

    elif args.examine:
        model = Siamese()
        model.load_state_dict(
            torch.load('runs_pretrained/' + args.model_name + '/' +
                       args.model_name + '.pth'))
        model.to(device)

        images, labels, probs = get_predictions(args, model, test_dataloader,
                                                device)
        pred_labels = torch.argmax(probs, 1)
        cm = confusion_matrix(labels, pred_labels)
        #plot_confusion_matrix(args, labels, pred_labels)
        plot_confusion_matrix(args,
                              cm,
                              l_classes=np.asarray(classes),
                              normalize=True,
                              title='Normalized confusion matrix')
        print("done!")
    else:

        writer = SummaryWriter('runs_pretrained/' + args.model_name)

        if (args.continue_train == "NONE"):
            model = Siamese()
            model.apply(initialize_parameters)

        else:

            model = Siamese()
            model.load_state_dict(
                torch.load('runs_pretrained/' + args.continue_train + '/' +
                           args.continue_train + '.pth'))
            print("CONTINUE TRAIN MODE----")

        def count_parameters(model):
            return sum(p.numel() for p in model.parameters()
                       if p.requires_grad)

        print(
            f'The model has {count_parameters(model):,} trainable parameters')

        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
        Loss_B = Multi_cross_entropy()
        criterion = [nn.CrossEntropyLoss(), Loss_B]
        model.to(device)
        #criterion = criterion.to(device)
        model.train()
        optimizer.zero_grad()

        # Define optimizer
        # opt = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

        # Record loss and accuracy history
        args.train_loss = []
        args.val_loss = []
        args.val_acc = []

        # Train the model
        best_valid_loss = float('inf')

        for epoch in range(1, args.epochs + 1):
            start_time = time.monotonic()
            best_valid_loss = train(args, epoch, model, train_dataloader,
                                    val_dataloader, optimizer, criterion,
                                    device, writer, best_valid_loss)
            end_time = time.monotonic()

            epoch_mins, epoch_secs = epoch_time(start_time, end_time)
            print(
                f'Epoch: {epoch :02} | Epoch Time: {epoch_mins}m {epoch_secs}s'
            )

        # Evaluate on test set
        writer.flush()
        """
Example #19
        for inputs, labels in dev_ldr:
            embeds.append(net.get_embeds(inputs).data)
            ids.append(np.squeeze(labels))
        embeds, ids = np.array(np.concatenate(embeds)), np.array(np.concatenate(ids))
    return average_precision(embeds, ids)

if __name__ == "__main__":
    # Loading data
    print('-' * 89)
    print("Loading data...")
    ntokens, train_x, train_y, train_ldr, dev_ldr, test_ldr = data_loader()
    print('-' * 89)
    print("Data loaded")
    print('-' * 89)

    net = Siamese(GatedCNN, out_dims=1024, activation=F.tanh)
    net = net.cuda() if torch.cuda.is_available() else net.cpu()
    optimizer = optim.Adam(net.parameters())

    best_so_far = 0
    dev_APs = np.empty(MAX_EPOCHS)

    try:
        for epoch in range(1, MAX_EPOCHS+1):
            epoch_start_time = time.time()

            train(ntokens, train_x, train_y, train_ldr)
            dev_ap = evaluate(dev_ldr)
            print('-' * 89)
            print('| end of epoch {:3d} | time: {:5.2f}s | dev ap {:5.4f}'.format(
                epoch, (time.time() - epoch_start_time), dev_ap))
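            # Sketch of the bookkeeping this loop presumably continues with (not
            # in the excerpt): record the epoch's dev AP and keep the best net.
            dev_APs[epoch - 1] = dev_ap
            if dev_ap > best_so_far:
                best_so_far = dev_ap
                torch.save(net.state_dict(), "best_siamese.pt")   # assumed filename
    except KeyboardInterrupt:
        # A common ending for such scripts; an assumption here.
        print('-' * 89)
        print('Exiting from training early')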
Example #20
class Solver(object):
    def __init__(self, flags):
        run_config = tf.ConfigProto()
        run_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=run_config)

        self.flags = flags
        self.dataset = Dataset(self.flags.dataset,
                               is_train=self.flags.is_train)
        self.model = Siamese(self.sess, self.flags, self.dataset.image_size,
                             self.dataset)
        self.accuracy = self.model.accuracy
        self.train_accuracy = self.model.train_accuracy

        self._make_folders()

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())

        tf_utils.show_all_variables()

    def _make_folders(self):
        if self.flags.is_train:
            cur_time = datetime.now().strftime("%Y%m%d-%H%M")
            self.model_out_dir = "{}/model/{}".format(self.flags.dataset,
                                                      cur_time)
            if not os.path.isdir(self.model_out_dir):
                os.makedirs(self.model_out_dir)

            self.sample_out_dir = "{}/sample/{}".format(
                self.flags.dataset, cur_time)
            if not os.path.isdir(self.sample_out_dir):
                os.makedirs(self.sample_out_dir)

            self.train_writer = tf.summary.FileWriter(
                "{}/logs/{}".format(self.flags.dataset, cur_time),
                graph=self.sess.graph)
        else:
            self.model_out_dir = "{}/model/{}".format(self.flags.dataset,
                                                      self.flags.load_model)
            self.test_out_dir = "{}/test/{}".format(self.flags.dataset,
                                                    self.flags.load_model)
            if not os.path.isdir(self.test_out_dir):
                os.makedirs(self.test_out_dir)

    def train(self):
        for iter_time in range(self.flags.iters):
            if self.flags.is_siamese:
                batch_imgs1, batch_label1, batch_imgs2, batch_label2 = self.dataset.train_next_batch_pair(
                    batch_size=self.flags.batch_size)

            else:
                batch_imgs1, batch_label1 = self.dataset.train_next_batch_random(
                    batch_size=self.flags.batch_size)
                batch_imgs2 = None
                batch_label2 = None

            total_loss, siamese_loss, reg_term, cls_loss_1, cls_loss_2, summary = self.model.train_step(
                batch_imgs1,
                batch_label1,
                batch_imgs2,
                batch_label2,
                is_siamese=self.flags.is_siamese)

            self.model.print_info(total_loss, siamese_loss, reg_term,
                                  cls_loss_1, cls_loss_2, self.model.accuracy,
                                  iter_time)

            if iter_time % self.flags.eval_freq == 0:
                print("Evaluaton process...")
                self.model.Calculate_accuracy()

            self.train_writer.add_summary(summary, iter_time)
            self.train_writer.flush()

            # self.train_sample(iter_time, batch_imgs1, batch_label1)
            # self.train_sample(iter_time, self.dataset.train_data, self.dataset.train_label)
            # self.train_sample(iter_time, self.dataset.train_data.images, self.dataset.train_data.labels)

            # save model
            self.save_model(iter_time)

        self.save_model(self.flags.iters)

    def test(self):
        if self.load_model():
            print(' [*] Load SUCCESS!')
        else:
            print(' [!] Load Failed...')

        num_iters = 1
        total_time = 0.
        for iter_time in range(num_iters):
            # measure inference time
            start_time = time.time()

            ################################################
            # self.model.draw_histogram(self.test_out_dir)
            # self.model.save_features(self.test_out_dir)
            # self.model.train_sample_imgs(iter_time, self.test_out_dir, self.dataset.train_data, self.dataset.train_label)
            self.model.Calculate_test_accuracy()
            ################################################
            total_time += time.time() - start_time

        print('Avg PT: {:.2f} msec.'.format(total_time / num_iters * 1000.))

    def save_model(self, iter_time):
        # print('self.train_accuracy:{}, self.model.train_accuracy:{}'.format(self.train_accuracy, self.model.train_accuracy))
        if self.train_accuracy < self.model.train_accuracy:
            self.train_accuracy = self.model.train_accuracy
        print('self.accuracy:{}, self.model.accuracy:{}\n'.format(
            self.accuracy, self.model.accuracy))
        if (np.mod(iter_time + 1, self.flags.save_freq) == 0
                and self.accuracy < self.model.accuracy):
            model_name = 'model'
            self.saver.save(self.sess,
                            os.path.join(self.model_out_dir, model_name),
                            global_step=iter_time)
            self.accuracy = self.model.accuracy

            print('=====================================')
            print('             Model saved!            ')
            print('=====================================\n')

    def sample(self, iter_time):
        if np.mod(iter_time, self.flags.sample_freq) == 0:
            self.model.sample_imgs(iter_time, self.sample_out_dir)
            # self.model.plots(imgs, iter_time, self.sample_out_dir)

    def train_sample(self, iter_time, x1_imgs, x1_label):
        if np.mod(iter_time, self.flags.sample_freq) == 0:
            self.model.train_sample_imgs(iter_time, self.sample_out_dir,
                                         x1_imgs, x1_label)
            # self.model.plots(imgs, iter_time, self.sample_out_dir)

    # def train_all_sample(self, iter_time, train_data, train_label):
    #     if np.mod(iter_time, self.flags.sample_freq) == 0:
    #         self.model.train_sample_imgs(iter_time, self.sample_out_dir, train_data, train_label)
    #         # self.model.plots(imgs, iter_time, self.sample_out_dir)

    def load_model(self):
        print(' [*] Reading checkpoint...')

        ckpt = tf.train.get_checkpoint_state(self.model_out_dir)

        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            # print("ckpt_name:{}".format(ckpt_name))
            # print("os.path.join(self.model_out_dir, ckpt_name):{}".format(os.path.join(self.model_out_dir, ckpt_name)))
            # self.saver = tf.train.import_meta_graph('{}.meta'.format(os.path.join(self.model_out_dir, ckpt_name)))
            self.saver.restore(self.sess,
                               os.path.join(self.model_out_dir, ckpt_name))

            meta_graph_path = ckpt.model_checkpoint_path + '.meta'
            self.iter_time = int(meta_graph_path.split('-')[-1].split('.')[0])

            print(' [*] Load iter_time: {}'.format(self.iter_time))

            return True
        else:
            return False
Example #21
                           transform=transforms.ToTensor(),
                           times=Flags.times,
                           way=Flags.way)

    trainLoader = DataLoader(trainSet,
                             batch_size=Flags.batch_size,
                             shuffle=False,
                             num_workers=Flags.workers)
    testLoader = DataLoader(testSet,
                            batch_size=Flags.way,
                            shuffle=False,
                            num_workers=Flags.workers)

    loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean')

    net = Siamese()

    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # multi gpu
    if len(Flags.gpu_ids.split(",")) > 1:
        net = torch.nn.DataParallel(net)
    net.to(DEVICE)
    optimizer = torch.optim.Adam(net.parameters(), lr=Flags.lr)

    for epoch in range(1, Flags.Epochs + 1):
        time_start = time.time()
        train_loss = train(net, trainLoader, optimizer)
        test_accuracy = evaluate(net, testLoader)
        print('[%d / %d] loss: %.5f | acc: %f | time_lapsed: %.2f sec' %
              (epoch, Flags.Epochs, train_loss, test_accuracy,
               time.time() - time_start))
Example #22
    trainLoader = DataLoader(trainSet,
                             batch_size=Flags.batch_size,
                             shuffle=False,
                             num_workers=Flags.workers)

    loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean')

    # net = Siamese(ResidualBlock)

    resnet18 = torchvision.models.resnet18(pretrained=True)
    # for param in net.parameters():
    #     param.requires_grad = False
    num_ftrs = resnet18.fc.in_features
    resnet18.fc = nn.Linear(num_ftrs, 1)  # change the fully connected layer to output a single value

    net = Siamese(ResidualBlock)
    # read the pretrained parameters
    pretrained_dict = resnet18.state_dict()
    model_dict = net.state_dict()
    # drop keys from pretrained_dict that are not present in model_dict
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    # update the existing model_dict
    model_dict.update(pretrained_dict)
    # load only the state_dict entries that are actually needed
    net.load_state_dict(model_dict)
    # print(resnet18)
    # print(net)
    # import pdb; pdb.set_trace()
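    # Sketch (not from the original excerpt): hand only parameters that still
    # require gradients to the optimiser, in the spirit of the commented-out
    # freezing loop above; `Flags.lr` is assumed to exist as in other excerpts.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    optimizer = torch.optim.Adam(
        (p for p in net.parameters() if p.requires_grad), lr=Flags.lr)
    net.train()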