Exemplo n.º 1
0
def train(dataset, batch_size, max_epoch):
    """Train the re-id model for ``max_epoch`` epochs with periodic evaluation.

    Args:
        dataset: dataset whose ``train`` attribute is a sequence of
            (image, person_id, camera_id) tuples.
        batch_size: number of samples per training batch.
        max_epoch: total number of training epochs.
    """
    # Only the person ids are needed here (for the identity sampler).
    _, train_id, _ = map(list, zip(*dataset.train))
    train_dataset = ImageDataset(dataset.train,
                                 flag='train',
                                 process_size=(args.image_height,
                                               args.image_width))
    for eps in range(max_epoch):
        losses_t = AverageMeter()
        losses_x = AverageMeter()
        losses = AverageMeter()
        # Identity-balanced sampling order for this epoch.
        indicies = list(
            RandomIdentitySampler(train_id, batch_size, args.num_instances))
        for i in range(len(indicies) // batch_size):
            try:
                # train_batch[0,1,2] are [imgs, pid, cam_id]
                train_batch = train_dataset.__getbatch__(
                    indicies[i * batch_size:(i + 1) * batch_size])
            except Exception:
                # Short/failed final slice: fall back to the last full batch.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                train_batch = train_dataset.__getbatch__(
                    indicies[-batch_size:])
            loss, loss_t, loss_x = reid_train_job(
                train_batch[0], train_batch[1].astype(np.float32)).get()

            losses_t.update(loss_t.numpy_list()[0][0], batch_size)
            losses_x.update(loss_x.numpy_list()[0][0], batch_size)
            losses.update(loss.numpy_list()[0][0], batch_size)
        # Consistency fix: report against the `max_epoch` argument instead of
        # args.max_epoch, so the function honors its own parameter.
        print('epoch: [{0}/{1}]\t'
              'Loss_t {loss_t.val:.4f} ({loss_t.avg:.4f})\t'
              'Loss_x {loss_x.val:.4f} ({loss_x.avg:.4f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(eps + 1,
                                                              max_epoch,
                                                              loss_t=losses_t,
                                                              loss_x=losses_x,
                                                              loss=losses))
        # Evaluate every eval_freq epochs, skipping the final epoch (it gets
        # the dedicated final test below).
        if (eps + 1) % args.eval_freq == 0 and (eps + 1) != max_epoch:
            cmc, mAP = eval(dataset)
            print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
            print('mAP: {:.1%}'.format(mAP))
            print('CMC curve')
            for r in [1, 5, 10]:
                print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
            print("=".ljust(66, "="))
    print('=> End training')

    print('=> Final test')
    cmc, mAP = eval(dataset)
    print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in [1, 5, 10]:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print("=".ljust(66, "="))
Exemplo n.º 2
0
def eval(dataset):
    """Extract query/gallery features and return (cmc, mAP) via ``evaluate``.

    NOTE(review): this shadows the builtin ``eval``; callers rely on the
    name, so it is kept as-is.
    """
    proc_size = (args.image_height, args.image_width)

    # Image paths are unused here; only ids and camera ids feed the metric.
    _, q_ids, q_cams = map(list, zip(*dataset.query))
    _, g_ids, g_cams = map(list, zip(*dataset.gallery))

    q_set = ImageDataset(dataset.query, flag='test', process_size=proc_size)
    g_set = ImageDataset(dataset.gallery, flag='test', process_size=proc_size)

    print("extract query feature")
    started = time.time()
    q_feats = inference(q_set)
    print("extract gallery feature")
    g_feats = inference(g_set)
    print("done in {}".format(time.time() - started))

    return evaluate(q_feats, np.array(q_ids), np.array(q_cams),
                    g_feats, np.array(g_ids), np.array(g_cams))
def load_data(fold: int) -> Any:
    """Build and return the validation DataLoader for the given CV fold."""
    torch.multiprocessing.set_sharing_strategy('file_system')  # type: ignore
    cudnn.benchmark = True  # type: ignore

    full_df = pd.read_csv('../input/train.csv')
    print('full_df', full_df.shape)
    train_df, val_df = train_val_split(full_df, fold)
    print('train_df', train_df.shape)

    num_ttas = 1

    # Pad up to the model input size, then crop back down to it.
    pad = albu.PadIfNeeded(config.model.input_size, config.model.input_size)
    if num_ttas > 1:
        # horizontal flip is done by the data loader
        crop = albu.RandomCrop(height=config.model.input_size,
                               width=config.model.input_size)
    else:
        crop = albu.CenterCrop(height=config.model.input_size,
                               width=config.model.input_size)
    transform_test = albu.Compose([pad, crop])

    val_dataset = ImageDataset(val_df,
                               mode='val',
                               config=config,
                               num_ttas=num_ttas,
                               augmentor=transform_test)

    return torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.test.batch_size,
        shuffle=False,
        num_workers=config.general.num_workers,
        drop_last=True)
Exemplo n.º 4
0
    x, y = next(data_loader_iter)
    try:
        writer.add_graph(model, x)
    except Exception as e:
        print("Failed to save model graph: {}".format(e))

    return writer


transforms_ = [transforms.Resize(int(img_height * 1.12), Image.BICUBIC),
               transforms.RandomCrop((img_height, img_width)),
               transforms.RandomHorizontalFlip(),
               transforms.ToTensor(),
               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]

train_loader = DataLoader(ImageDataset("E:\\Datasets\\NUMTA", transforms_=transforms_, mode='train'),
                          batch_size=batch_size, shuffle=True)

test_loader = DataLoader(ImageDataset("E:\\Datasets\\NUMTA", transforms_=transforms_, mode='test'),
                         batch_size=batch_size, shuffle=True)


class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
Exemplo n.º 5
0
    # n_train = len(gtsrb.x_train)
    # n_valid = len(gtsrb.x_valid)
    # n_test = len(gtsrb.x_test)
    # image_shape = gtsrb.shape
    # n_classes = gtsrb.num_classes
    #
    # epochs = 50
    # learning_rate = 1e-3
    # enable_session = True
    # num_experts = 4
    # dynamic_lr = True
    # shape = gtsrb.shape
    # num_classes = gtsrb.num_classes

    # Trying MNIST now
    mnist = ImageDataset(type='TF_MNIST')
    n_train = len(mnist.x_train)
    n_valid = len(mnist.x_valid)
    n_test = len(mnist.x_test)
    image_shape = mnist.shape
    n_classes = mnist.num_classes

    epochs = 500
    learning_rate = 1e-3
    enable_session = True
    num_experts = 8
    dynamic_lr = False
    shape = mnist.shape
    num_classes = mnist.num_classes

    print("Number of training examples =", n_train)
Exemplo n.º 6
0
from data_loader import ImageDataset,VOC

# Dataset location and training hyper-parameters.
root = "../../dataset/"
width = 320
height = 240
n_class = 3
batch_size = 32
lr = 1e-4
momentum = 0.9
epochs = 5

# Pretrained VGG-backbone model, switched to training mode for fine-tuning.
net = VggBackgroundPretrained(n_class=n_class, width=width, height=height)
net.train()

# Train split: tensors normalized per channel to [-1, 1].
train_data = ImageDataset(root, width=width, height=height, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=4)

# Validation split: same normalization, no shuffling, single-process loading.
val_data = ImageDataset(root, 'val', width=width, height=height, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
val_loader = DataLoader(val_data, batch_size=batch_size, num_workers=0)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)


def train():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Exemplo n.º 7
0
#%%

# Input directory and basic hyper-parameters.
save_path = './L2H_intput'
size = 256
batchSize = 10
# Shared geometric augmentations applied before per-resolution transforms.
transforms_ = [
    transforms.Resize(int(size * 1.12)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(size)
]
# Per-resolution tensor pipelines: full, half and quarter size.
transforms_256 = [transforms.ToTensor()]
transforms_128 = [transforms.Resize(int(size / 2)), transforms.ToTensor()]
transforms_64 = [transforms.Resize(int(size / 4)), transforms.ToTensor()]

dataloader = DataLoader(ImageDataset(save_path,
                                     transforms_=transforms_,
                                     unaligned=True),
                        batch_size=batchSize,
                        shuffle=True,
                        drop_last=True)
#%%
# Peek at the first image file in the directory.
files_d = sorted(glob.glob(os.path.join(save_path) + '/*.*'))
im = Image.open(files_d[0])
#%%
# Pre-allocated input tensors at three resolutions (high/medium/low).
input_H = Tensor(batchSize, 3, 256, 256)
input_M = Tensor(batchSize, 3, 128, 128)
input_L = Tensor(batchSize, 3, 64, 64)
#%%
for epoch in range(0, 1):
    epoch += 1
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Exemplo n.º 8
0
def evaluate(model, dataset):
    """Evaluate a re-id ``model`` on the query/gallery split of ``dataset``.

    Extracts features for both splits, computes the distance matrix
    (optionally re-ranked), and reports CMC/mAP.

    Returns:
        (rank1, mAP): top-1 CMC score and mean average precision.
    """
    proc_size = (args.image_height, args.image_width)
    query_dataset = ImageDataset(dataset.query,
                                 flag="test",
                                 process_size=proc_size)
    gallery_dataset = ImageDataset(dataset.gallery,
                                   flag="test",
                                   process_size=proc_size)
    eval_batch = args.eval_batch_size
    model.eval()
    dist_metric = args.dist_metric  # distance metric, ['euclidean', 'cosine']
    rerank = args.rerank  # use person re-ranking

    def _extract(ds):
        """Run the model over `ds` in batches; return (features, pids, camids).

        NOTE: samples beyond the last full batch are skipped, matching the
        original behavior.
        """
        feats, all_pids, all_camids = [], [], []
        ind = list(range(len(ds)))
        for i in range(len(ds) // eval_batch):
            imgs, pids, camids = ds.__getbatch__(
                ind[i * eval_batch:(i + 1) * eval_batch])
            imgs = flow.Tensor(np.array(imgs)).to("cuda")
            with flow.no_grad():
                features = model(imgs)
            feats.append(features.numpy())
            all_pids.extend(pids)
            all_camids.extend(camids)
        return (np.concatenate(feats, 0), np.asarray(all_pids),
                np.asarray(all_camids))

    print("Extracting features from query set ...")
    # query features, query person IDs and query camera IDs
    qf, q_pids, q_camids = _extract(query_dataset)
    print("Done, obtained {}-by-{} matrix".format(qf.shape[0], qf.shape[1]))

    print("Extracting features from gallery set ...")
    # gallery features, gallery person IDs and gallery camera IDs
    gf, g_pids, g_camids = _extract(gallery_dataset)
    print("Done, obtained {}-by-{} matrix".format(gf.shape[0], gf.shape[1]))

    print("Computing distance matrix with metric={} ...".format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)

    if rerank:
        # k-reciprocal re-ranking needs query-query and gallery-gallery
        # distances in addition to the query-gallery matrix.
        print("Applying person re-ranking ...")
        distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
        distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)

    print("Computing CMC and mAP ...")
    cmc, mAP = _eval(distmat, q_pids, g_pids, q_camids, g_camids)

    print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in [1, 5, 10]:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("=".ljust(66, "="))

    return cmc[0], mAP
Exemplo n.º 9
0
def train(model, dataset, num_classes, optimizer, scheduler):
    """Train a re-id model with combined triplet + label-smoothed CE loss.

    Evaluates every ``args.eval_freq`` epochs and checkpoints whenever the
    mean of (rank-1, mAP) improves; always saves final weights at the end.
    """

    batch_size = args.batch_size

    is_best = False
    best_rank = 0
    print("=> Start training")

    # loss
    criterion_t = TripletLoss(margin=args.margin).to("cuda")
    criterion_x = CrossEntropyLossLS(num_classes=num_classes,
                                     epsilon=args.epsilon).to("cuda")
    # Weighted sum of the two losses; weights are complementary.
    weight_t = args.weight_t
    weight_x = 1.0 - args.weight_t

    _, train_id, _ = map(list, zip(*dataset.train))
    train_dataset = ImageDataset(dataset.train,
                                 flag="train",
                                 process_size=(args.image_height,
                                               args.image_width))
    # *****training*******#
    for epoch in range(0, args.max_epoch):
        # shift to train
        model.train()
        # Identity-balanced sampling order for this epoch.
        indicies = list(
            RandomIdentitySampler(train_id, batch_size, args.num_instances))
        for i in range(len(indicies) // batch_size):
            try:
                # train_batch[0,1,2] are [imgs, pid, cam_id]
                imgs, pids, _ = train_dataset.__getbatch__(
                    indicies[i * batch_size:(i + 1) * batch_size])
            except Exception:
                # Short/failed final slice: fall back to the last full batch.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                imgs, pids, _ = train_dataset.__getbatch__(
                    indicies[-batch_size:])
            imgs = flow.Tensor(np.array(imgs)).to("cuda")
            pids = flow.Tensor(np.array(pids), dtype=flow.int32).to("cuda")
            outputs, features = model(imgs)
            loss_t = compute_loss(criterion_t, features, pids)
            loss_x = compute_loss(criterion_x, outputs, pids)

            loss = weight_t * loss_t + weight_x * loss_x
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        scheduler.step()

        print(
            "epoch:",
            epoch + 1,
            "loss_t:",
            loss_t.numpy(),
            "loss_x:",
            loss_x.numpy(),
            "loss:",
            loss.numpy(),
            "lr:",
            optimizer.param_groups[0]["lr"],
        )

        # *****testing********#
        if (epoch + 1) % args.eval_freq == 0 and (epoch + 1) != args.max_epoch:
            rank1, mAP = evaluate(model, dataset)
            score = (rank1 + mAP) / 2.0
            # Bug fix: best_rank was never updated, so every evaluation after
            # the first counted as "best". Track the running best score.
            if score > best_rank:
                best_rank = score
                is_best = True
            else:
                is_best = False
            if is_best:
                flow.save(model.state_dict(),
                          args.flow_weight + "_" + str(epoch))
    print("=> End training")

    print("=> Final test")
    rank1, _ = evaluate(model, dataset)
    flow.save(model.state_dict(), args.flow_weight)
Exemplo n.º 10
0
        #     labels = torch.LongTensor(labels).to(self.device)
        #
        #     outputs = self(data)
        #     predictions = outputs.argmax(dim=1)
        #
        #     loss = self.loss_func(outputs, labels).item()
        #     acc = (predictions == labels).double().mean().item()
        #
        #     if self.verbose:
        #         print(f'test loss: {loss:.4f}, test acc: {acc:.4f}')
        #
        #     return loss, acc


if __name__ == "__main__":
    # Load MNIST through the project's ImageDataset wrapper (torch backend)
    # and derive the dataset dimensions.
    mnist = ImageDataset(type='TORCH_MNIST')
    n_train = len(mnist.train.dataset)
    n_valid = len(mnist.validation.dataset)
    n_test = len(mnist.test.dataset)
    image_shape = mnist.shape
    n_classes = mnist.num_classes

    # Hack to test images 1 by 1
    # Replaces the test split with a batch-size-1 shuffled DataLoader built
    # from the raw x/y test arrays.
    mnist.test = torch.utils.data.TensorDataset(
        torch.FloatTensor(mnist.x_test), torch.LongTensor(mnist.y_test))
    mnist.test = torch.utils.data.DataLoader(mnist.test,
                                             batch_size=1,
                                             shuffle=True)

    print("Number of training examples =", n_train)
    print("Number of testing examples =", n_test)
Exemplo n.º 11
0
from torch.autograd import Variable
import torch.nn.functional as F

from vae_lp import VAE_LP
from ae_gan import AE_GAN
from data_loader import ImageDataset
from data_loader import DataLoader
import utils

# Hyper-parameters and paths loaded from a JSON config; `args` acts as a dict.
args = utils.load_params(json_file='params.json')

data_loader = DataLoader(args)
vae = VAE_LP(args)
gan = AE_GAN(args)

train_dataset = ImageDataset(args, split='train')
test_dataset = ImageDataset(args, split='test')

train_loader = data_loader.get_loader(train_dataset)
test_loader = data_loader.get_loader(test_dataset)

# Stage 1: train the VAE, periodically testing and checkpointing.
for i in range(args['vae_epoch']):
    vae.train(train_loader)
    if i % args['test_freq'] == 0:
        vae.test(test_loader)
        vae.save_model('%s/ckpt/%03d.pth' % (args['vae_dir'], i))

# Stage 2: hand the trained VAE's inference to the GAN as its
# trace-to-image function, then train the GAN.
gan.set_trace2image(vae.inference)

for i in range(args['gan_epoch']):
    gan.train(train_loader)
Exemplo n.º 12
0
def train(scale, device):
    """Train an ESPCN super-resolution model for the given upscale factor.

    Saves per-epoch weights and the best-PSNR weights under
    ``saved_models/UPSCALE_X{scale}/`` and writes loss/PSNR CSVs.

    Args:
        scale: integer upscale factor.
        device: torch device to train on.
    """

    # initialize model
    model = ESPCN(upscale_factor=scale).to(device)

    # MSE loss function
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)

    # Learning rate scheduler
    scheduler = MultiStepLR(optimizer, milestones=[15, 80], gamma=0.1)

    # load the data
    train_dataset = ImageDataset('data/processed/train',
                                 upscale_factor=scale,
                                 input_transform=transforms.ToTensor(),
                                 target_transform=transforms.ToTensor())

    val_dataset = ImageDataset('data/processed/val',
                               upscale_factor=scale,
                               input_transform=transforms.ToTensor(),
                               target_transform=transforms.ToTensor())

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=64,
                              shuffle=True,
                              num_workers=4)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=64,
                            shuffle=True,
                            num_workers=4)

    # Train Model
    # Renamed from `epoch` so the constant isn't shadowed by the loop variable.
    num_epochs = 100
    train_loss = []
    train_psnr = []

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    # Checkpoint directory (invariant — hoisted out of the epoch loop).
    save_dir = 'saved_models/UPSCALE_X' + str(scale) + '/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    for epoch in range(1, num_epochs + 1):
        # Train Model
        model.train()
        losses = []
        print("Starting epoch number " + str(epoch))

        for images, labels in train_loader:
            # images shape torch.Size([64, 1, 85, 85])
            # labels shape torch.Size([64, 1, 255, 255])
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            out_images = model(images)

            loss = criterion(out_images, labels)
            loss.backward()
            optimizer.step()
            # Detach before storing: keeping the live loss tensor would
            # retain every batch's autograd graph for the whole epoch.
            losses.append(loss.detach())

        loss = torch.stack(losses).mean().item()
        train_loss.append(loss)
        print("Loss for Training on Epoch " + str(epoch) + " is " +
              "{:.6f}".format(loss))

        # save model
        torch.save(model.state_dict(),
                   os.path.join(save_dir, 'epoch_{}.pth'.format(epoch)))

        # Evaluate Model
        model.eval()
        psnrs = []
        for images, labels in val_loader:
            images, labels = images.to(device), labels.to(device)

            with torch.no_grad():
                out_images = model(images)

            psnrs.append(PSNR(out_images, labels))

        psnr = torch.stack(psnrs).mean().item()
        train_psnr.append(psnr)
        print('Eval PSNR: {:.2f}\n'.format(psnr))

        # Track the best validation PSNR seen so far.
        if psnr > best_psnr:
            best_epoch = epoch
            best_psnr = psnr
            best_weights = copy.deepcopy(model.state_dict())

        scheduler.step()

    print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
    torch.save(best_weights, os.path.join(save_dir, 'best.pth'))

    # write PSNR to CSV file
    csv_name = 'results/Eval_PSNR_X' + str(scale) + '.csv'
    write_csv(csv_name, train_psnr, scale)

    # write losses to CSV file
    file_path = 'results/train_loss_X' + str(scale) + '.csv'
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    solution_rows = [('epoch', 'train_loss', 'Upscale')
                     ] + [(i, y, scale) for (i, y) in enumerate(train_loss)]
    with open(file_path, 'w', newline="") as f:
        writer = csv.writer(f)
        writer.writerows(solution_rows)
Exemplo n.º 13
0
def load_data(fold: int) -> Any:
    """Build the data pipeline for one cross-validation fold.

    Reads the per-fold train/val CSVs, filters and label-encodes them,
    assembles config-driven augmentations, and wraps everything in
    DataLoaders.

    Args:
        fold: cross-validation fold index used to pick the CSV files.

    Returns:
        (train_loader, val_loader, test_loader, label_encoder)
    """
    torch.multiprocessing.set_sharing_strategy('file_system')
    cudnn.benchmark = True

    logger.info('config:')
    logger.info(pprint.pformat(config))

    # Per-fold train/val CSVs produced by an earlier split step.
    fname = f'{config.data.train_filename}_fold_{fold}_'
    train_df = pd.read_csv(
        os.path.join(config.data.data_dir, fname + 'train.csv'))
    val_df = pd.read_csv(os.path.join(config.data.data_dir, fname + 'val.csv'))
    print('train_df', train_df.shape, 'val_df', val_df.shape)

    # Cap the validation set at images_per_class samples per landmark.
    val_df = pd.concat([
        c[1].iloc[:config.val.images_per_class]
        for c in val_df.groupby('landmark_id')
    ])
    print('val_df after class filtering', val_df.shape)

    test_df = pd.read_csv('../data/test.csv', dtype=str)
    # test_df.drop(columns='url', inplace=True)
    print('test_df', test_df.shape)

    # Keep only test rows whose image file actually exists on disk.
    test_df = test_df.loc[test_df.id.apply(lambda img: os.path.exists(
        os.path.join(config.data.test_dir, img + '.jpg')))]
    print('test_df after filtering', test_df.shape)

    # Map raw landmark ids to contiguous class indices.
    label_encoder = LabelEncoder()
    label_encoder.fit(train_df.landmark_id.values)
    print('found classes', len(label_encoder.classes_))
    assert len(label_encoder.classes_) == config.model.num_classes

    train_df.landmark_id = label_encoder.transform(train_df.landmark_id)
    val_df.landmark_id = label_encoder.transform(val_df.landmark_id)

    # Assemble train-time augmentations from config switches.
    augs = []

    if config.data.use_rect_crop:
        augs.append(
            RandomRectCrop(rect_min_area=config.data.rect_min_area,
                           rect_min_ratio=config.data.rect_min_ratio,
                           image_size=config.model.image_size,
                           input_size=config.model.input_size))

    if config.augmentations.blur != 0:
        augs.append(
            albu.OneOf([
                albu.MotionBlur(p=.2),
                albu.MedianBlur(blur_limit=3, p=0.1),
                albu.Blur(blur_limit=3, p=0.1),
            ],
                       p=config.augmentations.blur))

    if config.augmentations.color != 0:
        augs.append(
            albu.OneOf([
                albu.CLAHE(clip_limit=2),
                albu.IAASharpen(),
                albu.IAAEmboss(),
                albu.RandomBrightnessContrast(),
            ],
                       p=config.augmentations.color))

    augs.append(albu.HorizontalFlip(0.5))
    transform_train = albu.Compose(augs)

    # Test-time transform: random crop + flip when using TTA, center crop
    # otherwise.
    if config.test.num_ttas > 1:
        transform_test = albu.Compose([
            albu.RandomCrop(height=config.model.input_size,
                            width=config.model.input_size),
            albu.HorizontalFlip(),
        ])
    else:
        transform_test = albu.Compose([
            albu.CenterCrop(height=config.model.input_size,
                            width=config.model.input_size),
        ])

    train_dataset = ImageDataset(
        train_df,
        path=config.data.train_dir,
        mode='train',
        image_size=config.model.image_size,
        num_classes=config.model.num_classes,
        images_per_class=config.train.images_per_class,
        aug_type='albu',
        augmentor=transform_train)

    val_dataset = ImageDataset(val_df,
                               path=config.data.train_dir,
                               mode='val',
                               image_size=config.model.image_size,
                               num_classes=config.model.num_classes)

    # "test" dataset is either the real test split or (for feature
    # extraction over the train set) the full train CSV in test mode.
    if args.dataset == 'test':
        test_dataset = ImageDataset(test_df,
                                    path=config.data.test_dir,
                                    mode='test',
                                    image_size=config.model.image_size,
                                    input_size=config.model.input_size,
                                    num_ttas=config.test.num_ttas,
                                    num_classes=config.model.num_classes)
    else:
        train_df = pd.read_csv('../data/train.csv')
        train_df.drop(columns=['url', 'landmark_id'], inplace=True)

        test_dataset = ImageDataset(train_df,
                                    path=config.data.train_dir,
                                    mode='test',
                                    image_size=config.model.image_size,
                                    num_classes=config.model.num_classes)

    # Class-balancing sampler is mutually exclusive with shuffling.
    if config.train.use_balancing_sampler:
        sampler = BalancingSampler(
            train_df,
            num_classes=config.model.num_classes,
            images_per_class=config.train.images_per_class)
        shuffle = False
    else:
        sampler = None
        shuffle = config.train.shuffle

    train_loader = DataLoader(train_dataset,
                              batch_size=config.train.batch_size,
                              sampler=sampler,
                              shuffle=shuffle,
                              num_workers=config.num_workers,
                              drop_last=True)

    val_loader = DataLoader(val_dataset,
                            batch_size=config.val.batch_size,
                            shuffle=False,
                            num_workers=config.num_workers)

    test_loader = DataLoader(test_dataset,
                             batch_size=config.test.batch_size,
                             shuffle=False,
                             num_workers=config.num_workers)

    return train_loader, val_loader, test_loader, label_encoder
 def data(self):
     """Return an ImageDataset over ``self.imgs_path`` sized for the model.

     Uses the instance's image type and crop settings; the expected input
     size comes from ``self.model``.
     """
     return ImageDataset(root_dir=self.imgs_path,
                         expected_input_size=self.model.expected_input_size,
                         img_type=self.img_type,
                         crop=self.crop)
Exemplo n.º 15
0
        return layer


if __name__ == "__main__":
    """
    Main method for testing the CNN model
    """

    # Silence Python/TensorFlow warnings for a cleaner console.
    warnings.filterwarnings("ignore")
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    print("TensorFlow Version: ", tf.__version__)

    # Load the GTSRB traffic-sign dataset and derive its dimensions.
    path = "GTSRB/"
    gtsrb = ImageDataset(path)
    n_train = len(gtsrb.x_train)
    n_valid = len(gtsrb.x_valid)
    n_test = len(gtsrb.x_test)
    width, height = len(gtsrb.x_test[0]), len(gtsrb.x_test[0][0])
    image_shape = (width, height)
    n_classes = len(set(gtsrb.y_test))

    # Training hyper-parameters.
    epochs = 50
    learning_rate = 1e-3
    enable_session = True
    dynamic_lr = True
    shape = gtsrb.shape
    num_classes = gtsrb.num_classes

    print("Number of training examples =", n_train)
Exemplo n.º 16
0
def load_data(fold: int) -> Any:
    """Build train/val/test DataLoaders for one cross-validation fold.

    Splits the training CSV by fold, assembles config-driven albumentations
    pipelines, and returns the three loaders.

    Args:
        fold: cross-validation fold index passed to ``train_val_split``.

    Returns:
        (train_loader, val_loader, test_loader)
    """
    torch.multiprocessing.set_sharing_strategy('file_system') # type: ignore
    cudnn.benchmark = True # type: ignore

    logger.info('config:')
    logger.info(pprint.pformat(config))

    full_df = pd.read_csv(find_input_file(INPUT_PATH + config.train.csv))
    print('full_df', full_df.shape)
    train_df, _ = train_val_split(full_df, fold)
    print('train_df', train_df.shape)

    # use original train.csv for validation
    full_df2 = pd.read_csv(INPUT_PATH + 'train.csv')
    assert full_df2.shape == full_df.shape
    _, val_df = train_val_split(full_df2, fold)
    print('val_df', val_df.shape)

    test_df = pd.read_csv(find_input_file(INPUT_PATH + 'sample_submission.csv'))

    # Assemble train-time augmentations from config switches.
    augs: List[Union[albu.BasicTransform, albu.OneOf]] = []

    if config.augmentations.hflip:
        augs.append(albu.HorizontalFlip(.5))
    if config.augmentations.vflip:
        augs.append(albu.VerticalFlip(.5))
    if config.augmentations.rotate90:
        augs.append(albu.RandomRotate90())

    # Affine strength presets: soft / medium / hard.
    if config.augmentations.affine == 'soft':
        augs.append(albu.ShiftScaleRotate(shift_limit=0.075, scale_limit=0.15, rotate_limit=10, p=.75))
    elif config.augmentations.affine == 'medium':
        augs.append(albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2))
    elif config.augmentations.affine == 'hard':
        augs.append(albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=.75))

    if config.augmentations.rect_crop.enable:
        augs.append(RandomRectCrop(rect_min_area=config.augmentations.rect_crop.rect_min_area,
                                   rect_min_ratio=config.augmentations.rect_crop.rect_min_ratio,
                                   image_size=config.model.image_size,
                                   input_size=config.model.input_size))

    if config.augmentations.noise != 0:
        augs.append(albu.OneOf([
            albu.IAAAdditiveGaussianNoise(),
            albu.GaussNoise(),
        ], p=config.augmentations.noise))

    if config.augmentations.blur != 0:
        augs.append(albu.OneOf([
            albu.MotionBlur(p=.2),
            albu.MedianBlur(blur_limit=3, p=0.1),
            albu.Blur(blur_limit=3, p=0.1),
        ], p=config.augmentations.blur))

    if config.augmentations.distortion != 0:
        augs.append(albu.OneOf([
            albu.OpticalDistortion(p=0.3),
            albu.GridDistortion(p=.1),
            albu.IAAPiecewiseAffine(p=0.3),
        ], p=config.augmentations.distortion))

    if config.augmentations.color != 0:
        augs.append(albu.OneOf([
            albu.CLAHE(clip_limit=2),
            albu.IAASharpen(),
            albu.IAAEmboss(),
            albu.RandomBrightnessContrast(),
        ], p=config.augmentations.color))

    if config.augmentations.erase.prob != 0:
        augs.append(RandomErase(min_area=config.augmentations.erase.min_area,
                                max_area=config.augmentations.erase.max_area,
                                min_ratio=config.augmentations.erase.min_ratio,
                                max_ratio=config.augmentations.erase.max_ratio,
                                input_size=config.model.input_size,
                                p=config.augmentations.erase.prob))

    # Pad/crop to input size, then apply the augmentation set with a global
    # probability gate.
    transform_train = albu.Compose([
        albu.PadIfNeeded(config.model.input_size, config.model.input_size),
        albu.RandomCrop(height=config.model.input_size, width=config.model.input_size),
        albu.Compose(augs, p=config.augmentations.global_prob),
        ])

    if config.test.num_ttas > 1:
        transform_test = albu.Compose([
            albu.PadIfNeeded(config.model.input_size, config.model.input_size),
            albu.RandomCrop(height=config.model.input_size, width=config.model.input_size),
            # horizontal flip is done by the data loader
        ])
    else:
        transform_test = albu.Compose([
            albu.PadIfNeeded(config.model.input_size, config.model.input_size),
            # albu.CenterCrop(height=config.model.input_size, width=config.model.input_size),
            albu.RandomCrop(height=config.model.input_size, width=config.model.input_size),
            albu.HorizontalFlip(.5)
        ])


    train_dataset = ImageDataset(train_df, mode='train', config=config,
                                 augmentor=transform_train)

    # Validation uses TTA only when predicting out-of-fold.
    num_ttas_for_val = config.test.num_ttas if args.predict_oof else 1
    val_dataset = ImageDataset(val_df, mode='val', config=config,
                               num_ttas=num_ttas_for_val, augmentor=transform_test)

    test_dataset = ImageDataset(test_df, mode='test', config=config,
                                num_ttas=config.test.num_ttas,
                                augmentor=transform_test)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.train.batch_size, shuffle=True,
        num_workers=config.num_workers, drop_last=True)

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config.train.batch_size, shuffle=False,
        num_workers=config.num_workers)

    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=config.test.batch_size, shuffle=False,
        num_workers=config.num_workers)

    return train_loader, val_loader, test_loader
Exemplo n.º 17
0
# Geometric + tensor transforms applied to each training image.
transforms_ = [
    transforms.Resize(int(size * 1.12)),
    transforms.RandomCrop(size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
]

# transforms_ = [ transforms.Resize(int(size*1.12), Image.BICUBIC),
#                 transforms.RandomCrop(size),
#                 transforms.RandomHorizontalFlip(),
#                 transforms.ToTensor(),
#                 transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))]

# Unaligned pairs: the two image domains are sampled independently.
dataloader = DataLoader(ImageDataset('dataset/',
                                     transforms_=transforms_,
                                     unaligned=True),
                        batch_size=batchSize,
                        shuffle=True,
                        drop_last=True)
#%%
# Train
path = './output/h-z/'
for epoch in range(epoch, n_epochs):
    epoch += 1
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    tStart = time.time()
    for times, batch in enumerate(dataloader):
        d = Variable(input_A.copy_(batch['d']))
        c = Variable(input_B.copy_(batch['c']))
        optimizer_G.zero_grad()