Example #1
def test(model, dataset, dataset_path, batch_size, max_rank=100):
    model.eval()
    # test dataloader------------------------------------------------------------
    gallery_dataloader = getDataLoader(dataset,
                                       batch_size,
                                       dataset_path,
                                       'gallery',
                                       shuffle=False,
                                       augment=False)
    query_dataloader = getDataLoader(dataset,
                                     batch_size,
                                     dataset_path,
                                     'query',
                                     shuffle=False,
                                     augment=False)

    # image information------------------------------------------------------------
    gallery_cams, gallery_pids = [], []
    query_cams, query_pids = [], []
    gallery_features = []
    query_features = []

    # gallery_dataloader ------------------------------------------------------------
    for inputs, pids, camids in gallery_dataloader:
        gallery_features.append(
            extract_feature(model, inputs, requires_norm=True,
                            vectorize=True).cpu().data)
        gallery_pids.extend(np.array(pids))
        gallery_cams.extend(np.array(camids))
    gallery_features = torch.cat(gallery_features, dim=0)
    gallery_pids = np.asarray(gallery_pids)
    gallery_cams = np.asarray(gallery_cams)

    # query_dataloader ------------------------------------------------------------
    for inputs, pids, camids in query_dataloader:
        query_features.append(
            extract_feature(model, inputs, requires_norm=True,
                            vectorize=True).cpu().data)
        query_pids.extend(np.array(pids))
        query_cams.extend(np.array(camids))
    query_features = torch.cat(query_features, dim=0)
    query_pids = np.asarray(query_pids)
    query_cams = np.asarray(query_cams)

    # compute cmc and map ------------------------------------------------------------
    distmat = compute_distance_matrix(query_features,
                                      gallery_features,
                                      metric='cosine')
    distmat = distmat.numpy()

    print('Computing CMC and mAP ...')
    cmc, mAP = evaluate_rank(
        distmat,
        query_pids,
        gallery_pids,
        query_cams,
        gallery_cams,
    )

    return cmc, mAP
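
None of the snippets on this page defines getDataLoader itself. As a point of reference, a minimal sketch of the signature the ReID examples call (a hypothetical reconstruction, not this repo's actual code; the real dataset classes evidently also yield person and camera ids, which a plain ImageFolder does not):

import os
import torchvision.transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder

def getDataLoader(dataset, batch_size, dataset_path, split,
                  shuffle=False, augment=False):
    # Hypothetical reconstruction: one DataLoader per split directory,
    # e.g. Market1501/{train,query,gallery}, with optional flip augmentation.
    transforms = [T.Resize((384, 128)), T.ToTensor(),
                  T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
    if augment:
        transforms.insert(1, T.RandomHorizontalFlip())
    data = ImageFolder(os.path.join(dataset_path, split),
                       transform=T.Compose(transforms))
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle)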
Example #2
def test(model, dataset, dataset_path, batch_size, max_rank=100):
    model.eval()

    gallery_dataloader = getDataLoader(
        dataset, batch_size, dataset_path, 'gallery', shuffle=False, augment=False)
    query_dataloader = getDataLoader(
        dataset, batch_size, dataset_path, 'query', shuffle=False, augment=False)

    gallery_cams, gallery_labels = get_cam_label(
        gallery_dataloader.dataset.imgs)
    query_cams, query_labels = get_cam_label(query_dataloader.dataset.imgs)

    # Extract feature
    gallery_features = []
    query_features = []

    for inputs, _ in gallery_dataloader:
        gallery_features.append(extract_feature(
            model, inputs, requires_norm=True, vectorize=True).cpu().data)
    gallery_features = torch.cat(gallery_features, dim=0)

    for inputs, _ in query_dataloader:
        query_features.append(extract_feature(
            model, inputs, requires_norm=True, vectorize=True).cpu().data)
    query_features = torch.cat(query_features, dim=0)

    CMC, mAP, (sorted_index_list, sorted_y_true_list, junk_index_list) = evaluate(
        query_features, query_labels, query_cams, gallery_features, gallery_labels, gallery_cams)

    return CMC, mAP
Example #3
def main():
    use_cuda = False  # Set it to False if you are using a CPU

    seed(0)

    train_dataset = dataloader.getDataLoader()
    test_dataset = dataloader.getDataLoader('test')

    model = Net()  # Get the model

    if use_cuda:
        model = model.cuda()  # Put the model weights on GPU

    # criterion = nn.CrossEntropyLoss()
    # (equivalent to the categorical_crossentropy loss used in Keras/TensorFlow models)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=config.lr)  # fine-tuned learning rate
    # optimizer = torch.optim.SGD(model.parameters(), lr=0.4, momentum=0.9)

    for epoch in range(1, config.numEpochs + 1):
        print(f'epoch={epoch}')
        train(model, use_cuda, train_dataset, optimizer,
              epoch)  # Train the network
        test(model, use_cuda, test_dataset)  # Test the network

    saveModel(model)
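
The seed(0) call above is not a built-in; a plausible helper, assuming it pins every RNG the run touches (the name and scope are an assumption):

import random
import numpy as np
import torch

def seed(value):
    # Pin the Python, NumPy, and PyTorch RNGs so runs are repeatable.
    random.seed(value)
    np.random.seed(value)
    torch.manual_seed(value)
    torch.cuda.manual_seed_all(value)  # no-op on CPU-only machines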
Example #4
def test(args):
    device = torch.device("cuda" if args.cuda else "cpu")
    test_ds, test_dl, classes_num = getDataLoader(args)
    model = SimpleConvNet(classes_num, args.image_size).to(device)
    model.load_state_dict(torch.load(args.model))
    model.eval()
    accuracy = 0.0
    num_images = 9

    with torch.no_grad():
        for images, labels in test_dl:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            _, preds = torch.max(outputs, 1)

            accuracy += torch.sum(preds == labels).item()

    # Visualize predictions from the last test batch (assumes it holds at least num_images images)
    for j in range(num_images):
        ax = plt.subplot(num_images // 3, 3, j + 1)
        ax.axis('off')
        ax.set_title(f'predicted: {test_ds.classes[preds[j]]}')
        imshow(images[j].cpu())
    plt.show()
    accuracy /= len(test_ds)
    print(
        f"Accuracy of the network on the test dataset is {100 * accuracy:.4f}%")
Example #5
def main(args):
    if torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    dloader, data_num = dataloader.getDataLoader(args.root_dir,
                                                 args.sub_dir,
                                                 batch=args.batch_size,
                                                 shuffle=False)

    G_MF = model.Generator().type(dtype)
    G_FM = model.Generator().type(dtype)

    weight_loc = 'epoch_' + str(args.epoch)
    G_MF.load_state_dict(torch.load('pretrained/' + weight_loc + '/G_MF.pkl'))
    G_FM.load_state_dict(torch.load('pretrained/' + weight_loc + '/G_FM.pkl'))

    if data_num % args.batch_size == 0:
        total_num = data_num // args.batch_size
    else:
        total_num = data_num // args.batch_size + 1

    for idx, (img, label) in enumerate(dloader):
        print('Processing : [%d / %d]' % (idx + 1, total_num))
        img = img.type(dtype)
        label = label.type(dtype)

        img = Variable(img)  # Variable is a no-op wrapper on modern PyTorch tensors
        save_result(args.result_path, G_MF, G_FM, img, label, args.nrow,
                    str(idx))
Example #6
def train(args):
    device = torch.device("cuda" if args.cuda else "cpu")
    dl, params_ds = getDataLoader(args)
    train_dl, val_dl = dl
    train_size, val_size, classes_num = params_ds
    model = SimpleConvNet(classes_num, args.image_size).to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    best_acc = 0.0
    for epoch in tqdm(range(args.epochs)):
        train_loss = 0.0
        val_loss = 0.0
        accuracy = 0.0
        model.train()
        for inputs, labels in train_dl:
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * inputs.size(0)

        train_loss /= train_size

        model.eval()
        with torch.no_grad():
            for inputs, labels in val_dl:
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                val_loss += criterion(outputs, labels).item() * inputs.size(0)
                accuracy += torch.sum(preds == labels.data).item()

        val_loss /= val_size
        accuracy /= val_size

        print(
            f"[epoch={epoch + 1} train_loss={train_loss:.4f} val_loss={val_loss:.4f} val_acc={accuracy:.4f}]"
        )

        if accuracy > best_acc:
            best_acc = accuracy
            best_model = copy.deepcopy(model.state_dict())

    torch.save(best_model,
               args.save_model_dir + f"/epochs{args.epochs}_best.pth")
Example #7
import sys
sys.path.append('/home/hy/vscode/StyleGANStege')
print(sys.path)
from dataloader import getDataLoader, check_data

if __name__ == "__main__":

    # dataset
    loader = getDataLoader(
        dataset='Celeba',
        batch_size=3,
        dataset_path='/home/hy/vscode/StyleGANStege/data/celeba')

    data = next(loader)
    print(data.shape)
    check_data(data, "./dataloader/checkdata.jpg")

    print('complete check.')
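
Note that next(loader) only works if this repo's getDataLoader returns an iterator (for example a cycled generator). A plain torch.utils.data.DataLoader is iterable but not an iterator, so with a standard loader the fetch would be:

data = next(iter(loader))  # a plain DataLoader must be wrapped in iter() first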
Example #8
                        default='/home/hy/vscode/pcb_custom/datasets/Market1501')
    parser.add_argument('--batch_size', default=512,
                        type=int, help='batchsize')
    parser.add_argument('--share_conv', default=False, action='store_true')
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Make saving directory
    save_dir_path = os.path.join(args.save_path, args.dataset)
    os.makedirs(save_dir_path, exist_ok=True)

    logger = utils.Logger(save_dir_path)
    logger.info(vars(args))

    train_dataloader = getDataLoader(
        args.dataset, args.batch_size, args.dataset_path, 'train', shuffle=True, augment=True)
    # model = build_model(args.experiment, num_classes=len(train_dataloader.dataset.classes),
    #                     share_conv=args.share_conv)
    model = build_model(args.experiment, num_classes=751,
                        share_conv=args.share_conv)

    model = utils.load_network(model,
                               save_dir_path, args.which_epoch)

    model = model.to(device)
    CMC, mAP = test(model, args.dataset, args.dataset_path, args.batch_size)

    logger.info('Testing: top1:%.2f top5:%.2f top10:%.2f mAP:%.2f' %
                (CMC[0], CMC[4], CMC[9], mAP))
Example #9
    def train(self):
        best_acc = 1000  # tracks the lowest validation loss, despite the name
        model = self.net.model
        optimizer = self.net.optimizer
        scheduler = self.net.scheduler

        train_data_loader = getDataLoader(self.train_data,
                                          mode=self.mode[2],
                                          batch_size=self.batch_size *
                                          self.gpu_num,
                                          is_shuffle=False,
                                          is_aug=True,
                                          is_pre_read=self.is_pre_read,
                                          num_worker=self.num_worker)
        val_data_loader = getDataLoader(self.val_data,
                                        mode=self.mode[2],
                                        batch_size=self.batch_size *
                                        self.gpu_num,
                                        is_shuffle=False,
                                        is_aug=False,
                                        is_pre_read=True,
                                        num_worker=0)

        for epoch in range(self.start_epoch, self.epoch):
            print('Epoch: %d' % epoch)
            scheduler.step()
            model.train()
            total_itr_num = len(
                train_data_loader.dataset) // train_data_loader.batch_size

            sum_loss = 0.0
            t_start = time.time()
            num_output = self.mode[3]
            num_input = self.mode[4]
            sum_metric_loss = np.zeros(num_output)

            for i, data in enumerate(train_data_loader):
                # prepare data
                x = data[0]
                x = x.to(self.net.device).float()
                y = [data[j] for j in range(1, 1 + num_input)]
                for j in range(num_input):
                    y[j] = y[j].to(x.device).float()
                optimizer.zero_grad()
                outputs = model(x, *y)

                loss = torch.mean(outputs[0])
                metrics_loss = [
                    torch.mean(outputs[j]) for j in range(1, 1 + num_output)
                ]
                loss.backward()
                optimizer.step()
                sum_loss += loss.item()
                print('\r', end='')
                print('[epoch:%d, iter:%d/%d, time:%d] Loss: %.04f ' %
                      (epoch, i + 1, total_itr_num, int(time.time() - t_start),
                       sum_loss / (i + 1)),
                      end='')
                for j in range(num_output):
                    sum_metric_loss[j] += metrics_loss[j].item()
                    print(' Metrics%d: %.04f ' % (j, sum_metric_loss[j] /
                                                  (i + 1)),
                          end='')

            # validation
            with torch.no_grad():
                val_sum_metric_loss = np.zeros(self.mode[3])
                model.eval()
                val_i = 0
                print("\nWaiting Test!", val_i, end='\r')
                for i, data in enumerate(val_data_loader):
                    val_i += 1
                    print("Waiting Test!", val_i, end='\r')
                    x = data[0]
                    x = x.to(self.net.device).float()
                    y = [data[j] for j in range(1, 1 + num_input)]
                    for j in range(num_input):
                        y[j] = y[j].to(x.device).float()
                    outputs = model(x, *y)
                    metrics_loss = [
                        torch.mean(outputs[j])
                        for j in range(1, 1 + num_output)
                    ]
                    for j in range(num_output):
                        val_sum_metric_loss[j] += metrics_loss[j].item()

                for j in range(num_output):
                    print('val Metrics%d: %.04f ' %
                          (j, val_sum_metric_loss[j] / len(val_data_loader)),
                          end='')
                val_loss = val_sum_metric_loss[0]

                print('\nSaving model......', end='\r')
                if self.gpu_num > 1:
                    torch.save(
                        model.module.state_dict(),
                        '%s/net_%03d.pth' % (self.model_save_path, epoch + 1))
                else:
                    torch.save(
                        model.state_dict(),
                        '%s/net_%03d.pth' % (self.model_save_path, epoch + 1))
                # save best
                if val_loss / len(val_data_loader) < best_acc:
                    print('new best %.4f improved from %.4f' %
                          (val_loss / len(val_data_loader), best_acc))
                    best_acc = val_loss / len(val_data_loader)
                    if self.gpu_num > 1:
                        torch.save(model.module.state_dict(),
                                   '%s/best.pth' % self.model_save_path)
                    else:
                        torch.save(model.state_dict(),
                                   '%s/best.pth' % self.model_save_path)
                else:
                    print('not improved from %.4f' % best_acc)

            # write log

            writer.add_scalar('train/loss', sum_loss / len(train_data_loader),
                              epoch + 1)
            for j in range(self.mode[3]):
                writer.add_scalar('train/metrics%d' % j,
                                  sum_metric_loss[j] / len(train_data_loader),
                                  epoch + 1)
                writer.add_scalar(
                    'val/metrics%d' % j,
                    val_sum_metric_loss[j] / len(val_data_loader), epoch + 1)
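
One caveat on the loop above: since PyTorch 1.1, scheduler.step() is expected after the epoch's optimizer updates, not at the top of the epoch as written here. A self-contained sketch of the recommended ordering (toy model and data, for illustration only):

import torch
from torch import nn, optim

model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
criterion = nn.CrossEntropyLoss()

for epoch in range(3):
    x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
    optimizer.zero_grad()
    criterion(model(x), y).backward()
    optimizer.step()
    scheduler.step()  # after the optimizer updates, once per epoch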
Example #10
if __name__ == "__main__":
    # ============================================================================================================
    # device-------------------------------------------------------------------------------------
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Fix random seed---------------------------------------------------------------------------
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)

    # speed up computation---------------------------------------------------------------------------
    cudnn.benchmark = True

    # ============================================================================================================
    # data------------------------------------------------------------------------------------
    train_loader, query_loader, gallery_loader, num_classes = getDataLoader(
        args.dataset_name, args.dataset_path, args=args)
    test_loader, test_query_loader, test_gallery_loader, test_num_classes = getDataLoader(
        args.test_other_dataset_name, args.test_other_dataset_path, args=args)

    train_data_loader = [train_loader, query_loader, gallery_loader]
    test_data_loader = [test_query_loader, test_gallery_loader]

    dataloader = [train_data_loader, test_data_loader]

    # ============================================================================================================
    # model------------------------------------------------------------------------------------
    model = build_model(num_classes=num_classes, args=args)
    model = model.to(device)

    # criterion-----------------------------------------------------------------------------------
    ce_labelsmooth_loss = CrossEntropyLabelSmoothLoss(num_classes=num_classes)
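
CrossEntropyLabelSmoothLoss is not defined in these snippets. A common ReID formulation (a sketch of the usual technique, not necessarily this repo's exact class) smooths the one-hot targets before taking the cross-entropy:

import torch
from torch import nn

class CrossEntropyLabelSmoothLoss(nn.Module):
    """Cross-entropy with label smoothing: the one-hot target becomes
    (1 - epsilon) * one_hot + epsilon / num_classes."""

    def __init__(self, num_classes, epsilon=0.1):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()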
Example #11
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File        :test_data.py
@Description :Test the Partial-REID dataset loader
@Date        :2020/11/23 11:46:07
@Author      :HuangYin
@Version     :1.0
'''
from dataloader import getDataLoader
import argparse

parser = argparse.ArgumentParser(description='Person ReID Frame')

# Data parameters-------------------------------------------------------------
parser.add_argument('--img_height', type=int, default=384, help='height of the input image')
parser.add_argument('--img_width', type=int, default=128, help='width of the input image')
parser.add_argument('--batch_size', default=6, type=int, help='batch_size')
parser.add_argument('--test_batch_size', default=6, type=int, help='test_batch_size')
parser.add_argument('--data_sampler_type', type=str, default='RandomIdentitySampler')
parser.add_argument('--num_instance', type=int, default=2)

args = parser.parse_args()
dataset_name = 'Paritial_REID'
dataset_path = '/home/hy/vscode/data/Partial-REID_Dataset'

train_loader, query_loader, gallery_loader, num_classes = getDataLoader(dataset_name, dataset_path, args=args)
Example #12
def main(argv):
    # torch.set_num_threads(2)
    # preliminaries
    np.random.seed(11)
    torch.manual_seed(11)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    dataset_name = "CIFAR10"  # choose from MNIST, CIFAR10, CIFAR100, ELLIPSE, SWISS
    # choose model
    choice = "v"
    gpu = True
    conv = True

    batch_size = 256
    # hyperparameters
    N = 64
    learn_rate = 0.001
    step = .01
    epochs = 50
    begin = 0
    end = 10000
    reg_f = True
    reg_c = False
    graph = False

    alpha_f = 0.0001
    alpha_c = 0.01

    error_func = nn.CrossEntropyLoss()
    func_f = torch.nn.ReLU()

    func_c = F.softmax
    #load trainset

    if len(argv) > 0:
        #print(argv)
        N = int(argv[0])
        epochs = int(argv[1])
        learn_rate = float(argv[2])
        step = float(argv[3])
        choice = argv[4]
        graph = argv[5]  # note: command-line values are strings, so graph stays a str here
        print("N", N, "epochs", epochs, "lr", learn_rate, "step", step,
              "choice", choice, "graph", graph)

    model = chooseModel(dataset_name,
                        device,
                        N,
                        func_f,
                        func_c,
                        gpu,
                        choice,
                        conv=conv,
                        first=True)

    dataloader = dl.InMemDataLoader(dataset_name)

    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=True,
                                      num_workers=0,
                                      pin_memory=True,
                                      train=True)
    #train
    if gpu:
        model.to(device)
        torch.cuda.synchronize()
    train_time = time.perf_counter()
    model.train(loader, error_func, learn_rate, epochs, begin, end, step,
                reg_f, alpha_f, reg_c, alpha_c, graph)
    if gpu:
        torch.cuda.synchronize()
    train_time = time.perf_counter() - train_time

    result_train = model.test(loader, begin=0, end=10000, f_step=step)

    #load testset
    loader = dataloader.getDataLoader(batch_size,
                                      shuffle=False,
                                      num_workers=0,
                                      pin_memory=False,
                                      train=False)
    #test
    result_test = model.test(loader, begin=0, end=10000, f_step=step)

    print("\nfine train result", result_train)
    print("fine test result", result_test, "\n")

    print("--- %s seconds ---" % (train_time))
Example #13
"""
Optimizer parameters
"""
parser.add_argument('--lr', type=float, default=2e-4)

args = parser.parse_args()

if __name__ == "__main__":
    # device---------------------------------------------------------------------------
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Fix random seed---------------------------------------------------------------------------
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)

    # dataset------------------------------------------------------------------------------------
    train_dataloader = getDataLoader(args.dataset, args.batch_size,
                                     args.dataset_path)

    # model------------------------------------------------------------------------------------
    model = build_model(args.experiment,
                        image_size=args.image_size,
                        lr=args.lr)
    model = checkpointNet.load_part_network(model, args.checkpoint,
                                            args.which_epoch)
    model = model.to(device)

    # save_dir_path-----------------------------------------------------------------------------------
    save_dir_path = os.path.join(args.save_path, args.dataset)
    os.makedirs(save_dir_path, exist_ok=True)

    # train -----------------------------------------------------------------------------------
    train(train_dataloader, model, device, save_dir_path, args)
Example #14
    def __init__(self,
                 root_dir='data',
                 sub_dir='train',
                 batch_size=16,
                 D_lr=0.0001,
                 G_lr=0.0002,
                 lr_decay_epoch=10,
                 cyc_lambda=8,
                 cls_lambda=0.1,
                 num_epoch=500):
        """
            < Variables >
            
            1. self.dtype : Data type
            2. self.ltype : Label type
            3. self.D_M : Discriminator for male
            4. self.G_MF : Generator which converts male into female
            5. self.dloader : Data loader
            6. self.lr_decay_epoch : Interval (in epochs) at which the learning rate is decayed
            7. self.cyc_lambda : Weight for cycle consistency loss
            8. self.cls_lambda : Weight for gender classification loss
            9. self.criterion_gan : Loss function for GAN loss (mean squared error).
            10. self.criterion_cyc : Loss function for cycle consistency loss (L1 loss).
            11. self.criterion_cls : Loss function for gender classification loss (cross-entropy loss).
        """

        if torch.cuda.is_available():
            self.dtype = torch.cuda.FloatTensor
            self.ltype = torch.cuda.LongTensor
        else:
            self.dtype = torch.FloatTensor
            self.ltype = torch.LongTensor

        self.D_M = model.Discriminator().type(self.dtype)
        self.D_F = model.Discriminator().type(self.dtype)
        self.G_MF = model.Generator().type(self.dtype)
        self.G_FM = model.Generator().type(self.dtype)

        self.dloader, _ = dataloader.getDataLoader(root_dir,
                                                   sub_dir,
                                                   batch=batch_size,
                                                   shuffle=True)
        self.D_lr = D_lr
        self.G_lr = G_lr

        self.lr_decay_epoch = lr_decay_epoch
        self.cyc_lambda = cyc_lambda
        self.cls_lambda = cls_lambda
        self.num_epoch = num_epoch

        self.optim_D_M = optim.Adam(self.D_M.parameters(),
                                    lr=self.D_lr,
                                    betas=(0.5, 0.999))
        self.optim_D_F = optim.Adam(self.D_F.parameters(),
                                    lr=self.D_lr,
                                    betas=(0.5, 0.999))
        self.optim_G_MF = optim.Adam(self.G_MF.parameters(),
                                     lr=self.G_lr,
                                     betas=(0.5, 0.999))
        self.optim_G_FM = optim.Adam(self.G_FM.parameters(),
                                     lr=self.G_lr,
                                     betas=(0.5, 0.999))

        self.criterion_gan = nn.MSELoss()
        self.criterion_cyc = nn.L1Loss()
        self.criterion_cls = nn.CrossEntropyLoss()
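
For context on how these pieces fit together, a hedged sketch of one male-to-female generator update inside a hypothetical training step of this class (the repo's real loop may differ; real_m stands for a batch of male images, and torch is assumed imported at module level):

        # Hypothetical generator update for the male -> female direction.
        fake_f = self.G_MF(real_m)   # translate male -> female
        rec_m = self.G_FM(fake_f)    # translate back for the cycle loss

        d_out = self.D_F(fake_f)
        loss_gan = self.criterion_gan(d_out, torch.ones_like(d_out))  # LSGAN "real" target
        loss_cyc = self.criterion_cyc(rec_m, real_m)                  # cycle consistency

        loss_G = loss_gan + self.cyc_lambda * loss_cyc
        self.optim_G_MF.zero_grad()
        self.optim_G_FM.zero_grad()
        loss_G.backward()
        self.optim_G_MF.step()
        self.optim_G_FM.step()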