예제 #1
0
 def __init__(self,
              filename,
              gray_scale=False,
              transform=None,
              classes=None):
     """Build the dataset: count classes from the raw records, set class
     names, and wrap an ImageRecordDataset (optionally transformed).

     Parameters
     ----------
     filename : str
         Local path to the .rec file.
     gray_scale : bool, default False
         Decode images as grayscale instead of color.
     transform : callable, default None
         Applied to each image via ``transform_first``.
     classes : iterable of str, default None
         User-provided class names; numeric names are generated when absent.
     """
     # flag=0 -> grayscale decode, flag=1 -> color decode.
     flag = 0 if gray_scale else 1
     # Retrieve the number of classes without decoding images: unpack each
     # raw record and collect the distinct first-label values.
     td = RecordFileDataset(filename)
     labels = {recordio.unpack(td[i])[0].label[0] for i in range(len(td))}
     self._num_classes = len(labels)
     if not classes:
         self._classes = [str(i) for i in range(self._num_classes)]
     else:
         # BUG FIX: always record the user-provided names (previously
         # self._classes stayed unset when the counts matched), and compare
         # the int self._num_classes directly (previously
         # len(self._num_classes) raised TypeError on any mismatch).
         self._classes = list(classes)
         if self._num_classes != len(classes):
             warnings.warn(
                 'Provided class names do not match data, expected "num_class" is {} '
                 'vs. provided: {}'.format(self._num_classes, len(classes)))
             # Pad with numeric names for classes beyond those provided.
             self._classes += [
                 str(i) for i in range(len(classes), self._num_classes)
             ]
     self._dataset = ImageRecordDataset(filename, flag=flag)
     if transform:
         self._dataset = self._dataset.transform_first(transform)
예제 #2
0
def main():
    """Fine-tune ResNet101-v2 on cloud .rec datasets and report test accuracy.

    Builds train/valid/test datasets from record files, transfers the
    pretrained feature extractor, trains for 2 epochs, then evaluates the
    best-on-validation checkpoint on the held-out test set.
    """
    # Create Datasets from rec files
    batch_size = 32

    train_bin = os.path.join(data_dir, 'train/clouds-binary.rec')
    valid_bin = os.path.join(data_dir, 'valid/clouds-binary.rec')
    test_bin  = os.path.join(data_dir, 'test/s2test-binary.rec')

    trainIterBin = ImageRecordDataset(
        filename=train_bin,
        transform=lambda X, y: transform(X, y, train_augs)
    )

    # BUG FIX: validation previously used train_augs; evaluation splits must
    # use the deterministic test-time augmentations (matches the other
    # train/validation pipelines in this project).
    validIterBin = ImageRecordDataset(
        filename=valid_bin,
        transform=lambda X, y: transform(X, y, test_augs)
    )

    testIterBin = ImageRecordDataset(
        filename=test_bin,
        transform=lambda X, y: transform(X, y, test_augs)
    )

    # Only the training loader shuffles.
    train_data = gluon.data.DataLoader(trainIterBin, batch_size, shuffle=True)
    val_data   = gluon.data.DataLoader(validIterBin, batch_size)
    test_data  = gluon.data.DataLoader(testIterBin,  batch_size)

    data = {'train': train_data, 'val': val_data}

    # Create dir where we'll save the params of our model
    checkpoints = os.path.join(checkpoints_dir, 'resnet101')
    os.makedirs(checkpoints, exist_ok=True)

    # Load a pretrained network (feature-extractor donor).
    rn_pretrained = vision.resnet101_v2(pretrained=True)

    # Load the network to train, using the same prefix to avoid problems when
    # saving and loading params, as we will assign the features part of the
    # pretrained network to this one. Only the 2-class output head is
    # initialized from scratch.
    rn101 = vision.resnet101_v2(classes=2, prefix=rn_pretrained.prefix)
    rn101.features = rn_pretrained.features
    rn101.output.initialize(mx.init.Xavier())

    rn101_acc = train(rn101, data, ctx, epochs=2, learning_rate=0.003,
                      checkpoint_dir=checkpoints)

    # Save the per-epoch accuracy curves.
    np.save(os.path.join(checkpoints, 'accuracy-results.npy'), rn101_acc)

    # Find best scoring model; checkpoint files are 1-indexed by epoch.
    best = rn101_acc['val'].index(max(rn101_acc['val'])) + 1
    best_params = os.path.join(checkpoints, '{}.params'.format(best))

    # Test it on the test dataset
    rn101.load_params(best_params, ctx)
    test_acc = evaluate_accuracy(test_data, rn101)
    print('Best model on validation set saved in: {}'.format(best_params))
    print('Accuracy on test set = {}'.format(test_acc))
예제 #3
0
    def get_data_rec(rec_train, rec_val):
        """Return (train, val) ImageRecordDatasets for the given .rec paths."""
        # Expand any leading '~' in user-supplied paths before opening.
        train_path = os.path.expanduser(rec_train)
        val_path = os.path.expanduser(rec_val)

        # mean_rgb = [123.68, 116.779, 103.939]
        # std_rgb = [58.393, 57.12, 57.375]

        # flag=1 decodes in color.
        return (ImageRecordDataset(filename=train_path, flag=1),
                ImageRecordDataset(filename=val_path, flag=1))
예제 #4
0
def _make_datasets(data_desc):
    """Download the described record files and return (train, val) datasets.

    Each split entry maps 'rec'/'idx'/'lst' to (url, destination) pairs;
    missing splits yield None.
    """
    def _fetch(urls):
        # Download the rec/idx/lst triplet; only the .rec path is consumed.
        rec_path = _download(urls['rec'][0], urls['rec'][1])
        _download(urls['idx'][0], urls['idx'][1])
        _download(urls['lst'][0], urls['lst'][1])
        return ImageRecordDataset(rec_path, flag=1)

    train = _fetch(data_desc['train']) if 'train' in data_desc else None
    val = _fetch(data_desc['val']) if 'val' in data_desc else None
    return train, val
예제 #5
0
class RecordDataset:
    """A dataset wrapping over a RecordIO file containing images.

    Each sample is an image and its corresponding label.

    Parameters
    ----------
    filename : str
        Local path to the .rec file.
    gray_scale : bool, default False
        If True, always convert images to greyscale.
        If False, always convert images to colored (RGB).
    transform : function, default None
        A user defined callback that transforms each sample.
    classes : iterable of str, default is None
        User provided class names. If `None` is provided, will use
        a list of increasing natural number ['0', '1', ..., 'N'] by default.
    """
    def __init__(self,
                 filename,
                 gray_scale=False,
                 transform=None,
                 classes=None):
        # flag=0 -> grayscale decode, flag=1 -> color decode.
        flag = 0 if gray_scale else 1
        # Retrieve number of classes without decoding images: unpack each
        # raw record and collect the distinct first-label values.
        td = RecordFileDataset(filename)
        labels = {recordio.unpack(td[i])[0].label[0] for i in range(len(td))}
        self._num_classes = len(labels)
        if not classes:
            self._classes = [str(i) for i in range(self._num_classes)]
        else:
            # BUG FIX: always record the user-provided names (previously
            # self._classes stayed unset when the counts matched), and
            # compare the int self._num_classes directly (previously
            # len(self._num_classes) raised TypeError on any mismatch).
            self._classes = list(classes)
            if self._num_classes != len(classes):
                warnings.warn(
                    'Provided class names do not match data, expected "num_class" is {} '
                    'vs. provided: {}'.format(self._num_classes, len(classes)))
                # Pad with numeric names for classes beyond those provided.
                self._classes += [
                    str(i) for i in range(len(classes), self._num_classes)
                ]
        self._dataset = ImageRecordDataset(filename, flag=flag)
        if transform:
            self._dataset = self._dataset.transform_first(transform)

    @property
    def num_classes(self):
        """int: number of distinct labels found in the record file."""
        return self._num_classes

    @property
    def classes(self):
        """list of str: class names, one per class index."""
        return self._classes

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, idx):
        return self._dataset[idx]
예제 #6
0
def display():
    """Iterate args.input as batches of images, showing each and pausing."""
    to_tensor = transforms.ToTensor()
    ds = ImageRecordDataset(args.input).transform_first(to_tensor)
    batches = DataLoader(ds,
                         batch_size=args.batch_size,
                         shuffle=args.shuffle,
                         last_batch='keep',
                         num_workers=args.num_workers,
                         pin_memory=False)
    for img, label in batches:
        # Show at most 8 images per row.
        show_images(img, ncols=min(8, args.batch_size))
        # print(label)
        input('Press Enter...')
예제 #7
0
    def create_loader(self):
        """
        Create the data loader.

        In TRAIN mode this builds an augmented pipeline and assigns
        self.tr_loader; otherwise it only caches the deterministic
        evaluation transforms in self.eval_tforms.
        :return:
        """
        normalize = transforms.Normalize((0.485, 0.456, 0.406),
                                         (0.229, 0.224, 0.225))

        if self.args.mode.upper() != 'TRAIN':
            # Evaluation path: deterministic resize + center crop only.
            self.eval_tforms = transforms.Compose([
                transforms.Resize(self.args.resize),
                transforms.CenterCrop(self.args.im_size),
                transforms.ToTensor(),
                normalize
            ])
            return

        # Training path: optional flip / random crop / color jitter.
        steps = [transforms.Resize(self.args.resize)]

        if self.args.flip:
            steps.append(transforms.RandomFlipLeftRight())

        if self.args.random_crop:
            steps.append(
                transforms.RandomResizedCrop(self.args.im_size,
                                             scale=(0.8, 1)))
        else:
            steps.append(transforms.CenterCrop(self.args.im_size))

        if self.args.random_jitter:
            steps.append(transforms.RandomColorJitter(0.4, 0.4, 0.4, 0.4))

        steps.append(transforms.ToTensor())
        steps.append(normalize)

        pipeline = transforms.Compose(steps)

        # First configured training database entry supplies the .rec path.
        tr_db = list(self.cfg['train'].values())[0]
        dataset = ImageRecordDataset(tr_db['rec'], transform=pipeline)

        self.tr_loader = DataLoader(dataset,
                                    batch_size=self.args.bs,
                                    num_workers=8,
                                    pin_memory=True)
예제 #8
0
def evaluate():
    """Load a saved model and evaluate it on the .rec test set.

    Supports two on-disk formats (.params weight file, or an exported
    -symbol.json + params pair) and two protocols selected by
    args.test_name: 'lfw' accuracy, or 'lfw-failure' to visualize the
    top failure pairs.
    """
    # Datasets
    test_transform = ToTensor()
    test_dataset = ImageRecordDataset(args.test_rec).transform_first(test_transform)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, last_batch='keep',
                             num_workers=args.num_workers, pin_memory=True)

    logger.info('Evaluate model {}'.format(os.path.basename(args.model)))
    if args.model.endswith('.params'):
        # Raw weights: rebuild the network, hybridize, then load params.
        # Create inference
        inference = resnet100(args.num_classes, emb_size=args.emb_size,
                              s=args.margin_s, a=args.margin_a, m=args.margin_m, b=args.margin_b)
        inference.hybridize(static_alloc=True, static_shape=True)
        helper.load_params(inference, args.model, ctx=ctx)
        # Only the embedding sub-network is needed for evaluation.
        inference = inference.features
    elif args.model.endswith('-symbol.json'):
        # Exported model: reassemble symbol + params as a SymbolBlock.
        # Load model symbol and params
        sym = mx.sym.load_json(open(args.model, 'r').read())
        inference = gluon.nn.SymbolBlock(outputs=sym, inputs=mx.sym.var('data'))
        # Strip the 11-char '-symbol.json' suffix to derive the params path.
        inference.load_parameters(args.model[:-11] + '0000.params', ctx=ctx)
    else:
        print('Incorrect model: {}'.format(args.model))
        return

    # Test LFW
    if args.test_name.lower() == 'lfw':
        print('Evaluating LFW...')
        start_time = timeit.default_timer()
        # mu/std: mean and std accuracy over folds; t: decision threshold
        # (as returned by eval_lfw).
        mu, std, t, accuracies = eval_lfw(inference, args.test_rec, test_loader, ctx)
        elapsed_time = timeit.default_timer() - start_time
        scout = helper.print_scalars(OrderedDict([('mu', mu), ('std', std), ('t', t)]), 0, 0, elapsed_time)
        logger.info(scout)
        accuracies = accuracies.tolist() + [mu, std]
        logger.info(' '.join('{:.2f}'.format(x) for x in accuracies))
    elif args.test_name.lower() == 'lfw-failure':
        print('Show LFW pairs...')
        # Fetch the indices and similarity scores of the worst pairs,
        # then render them 10 images (5 pairs) per figure.
        indices, sim = top_failure_pairs_lfw(inference, args.test_rec, test_loader, ctx)
        images = mx.nd.stack(*[test_dataset[idx][0] for idx in indices])
        sim_str = ['{:.2f}'.format(x) for x in sim]
        for idx in range(4):
            show_images(images[idx * 10:(idx + 1) * 10], titles=[''] * 5 + sim_str[idx * 5:(idx + 1) * 5], ncols=5)
        logger.info(' '.join(sim_str))
예제 #9
0
def display_multi():
    """Show images from several record files side by side, batch by batch."""
    to_tensor = transforms.ToTensor()
    per_rec = [ImageRecordDataset(rec).transform_first(to_tensor)
               for rec in args.inputs]
    # ArrayDataset yields one (img, label) pair per input record file.
    combined = ArrayDataset(*per_rec)
    loader = DataLoader(combined,
                        batch_size=args.batch_size,
                        shuffle=args.shuffle,
                        last_batch='keep',
                        num_workers=args.num_workers,
                        pin_memory=False)
    for batch_data in loader:
        # Concatenate the per-file image batches along the batch axis.
        batch_img = mx.nd.concat(*[img for img, _ in batch_data], dim=0)
        show_images(batch_img, ncols=min(8, args.batch_size))
        input('Press Enter...')
예제 #10
0
def transform(data, label, augs):
    """Apply augmentations to an image and return (CHW float32 image, scalar label)."""
    out = data.astype('float32')
    # Run each augmentation in order on the HWC image.
    for aug_fn in augs:
        out = aug_fn(out)
    # HWC -> CHW for the network input layout.
    out = nd.transpose(out, (2, 0, 1))
    return out, nd.array([label]).asscalar().astype('float32')


from mxnet.gluon.data.vision import ImageRecordDataset

# Record-file paths for each dataset split under the project root.
train_rec = root + '/data/household/train/household.rec'
validation_rec = root + '/data/household/validation/household.rec'
test_rec = root + '/data/household/test/household.rec'
sample_rec = root + '/data/household/sample/household.rec'

# Training data uses the augmenting pipeline; all evaluation splits use the
# deterministic test-time pipeline.
trainIterator = ImageRecordDataset(
    filename=train_rec, transform=lambda X, y: transform(X, y, train_augs))
validationIterator = ImageRecordDataset(
    filename=validation_rec, transform=lambda X, y: transform(X, y, test_augs))
testIterator = ImageRecordDataset(
    filename=test_rec, transform=lambda X, y: transform(X, y, test_augs))
sampleIterator = ImageRecordDataset(
    filename=sample_rec, transform=lambda X, y: transform(X, y, test_augs))

import time
from mxnet.image import color_normalize
from mxnet import autograd
import mxnet as mx
from mxnet import nd

def evaluate_accuracy(data_iterator, net):
예제 #11
0
def train():
    """Train the resnet100 face model with periodic LFW evaluation.

    Resumes from the latest checkpoint (iteration count + trainer state)
    when one exists, logs CE loss every args.log_interval batches, and
    saves a '-best' checkpoint whenever the LFW mean accuracy improves.
    """
    # Create inference
    inference = resnet100(args.num_classes,
                          emb_size=args.emb_size,
                          s=args.margin_s,
                          a=args.margin_a,
                          m=args.margin_m,
                          b=args.margin_b)
    # Load inference params
    if args.init.lower() == 'xavier':
        init = mx.init.Xavier(rnd_type='gaussian',
                              factor_type='out',
                              magnitude=2)
    else:
        init = mx.initializer.Uniform()
    if args.model:
        # Explicit model file: start counting iterations from zero.
        helper.load_params(inference, args.model, ctx=ctx)
        cur_iter = 0
    else:
        # Otherwise resume from checkpoint dir; helper returns the
        # iteration the checkpoint was saved at (used to resume epochs).
        cur_iter = helper.load_params(inference,
                                      args.ckpt_dir,
                                      prefix=args.prefix,
                                      init=init,
                                      ctx=ctx)
    # Hybrid mode --> Symbol mode
    inference.hybridize(static_alloc=True, static_shape=True)

    # Datasets: optional color jitter on top of horizontal flips.
    if args.color:
        train_transform = transforms.Compose([
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(0.1, 0.1, 0.1),
            ToTensor()
        ])
    else:
        train_transform = transforms.Compose(
            [transforms.RandomFlipLeftRight(),
             ToTensor()])

    train_dataset = ImageRecordDataset(
        args.train_rec).transform_first(train_transform)
    # 'discard' keeps every training batch full-sized.
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              last_batch='discard',
                              num_workers=args.num_workers,
                              pin_memory=True)
    test_transform = ToTensor()
    test_dataset = ImageRecordDataset(
        args.test_rec).transform_first(test_transform)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             last_batch='keep',
                             num_workers=args.num_workers,
                             pin_memory=False)

    # Create learning rate scheduler: args.lr_steps is in epochs, the
    # scheduler wants iteration counts.
    iterations_per_epoch = int(len(train_dataset) / args.batch_size)
    lr_steps = [s * iterations_per_epoch for s in args.lr_steps]
    print('Learning rate drops after iterations: {}'.format(lr_steps))
    lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=lr_steps,
                                                        factor=0.1)

    # Create trainer; gradients are averaged across the devices in ctx.
    trainer = gluon.Trainer(inference.collect_params(),
                            optimizer='sgd',
                            optimizer_params={
                                'learning_rate': args.lr,
                                'wd': args.wd,
                                'lr_scheduler': lr_scheduler,
                                'rescale_grad': 1. / len(ctx)
                            })
    # Load trainer from saved states
    helper.load_trainer(trainer, args.ckpt_dir, cur_iter, prefix=args.prefix)

    # Define loss functions
    softmax_cross_entropy = mx.gluon.loss.SoftmaxCrossEntropyLoss()

    # Define metric losses
    metric_ce_loss = mx.metric.Loss('CE-Loss')
    best_acc = 80  # only save the model if the accuracy is better than 80%
    # Start training
    print('Start to train {}...'.format(args.prefix))
    start_epoch = cur_iter // iterations_per_epoch
    for cur_epoch in range(start_epoch + 1, args.max_epoch + 1):
        start_time = timeit.default_timer()
        for batch_idx, (image, label) in enumerate(train_loader):
            if label.ndim > 1:
                label = label[:, 0]  # skip the landmarks
            # if batch_idx > 0: break
            cur_iter += 1
            # Split the batch across the devices in ctx for data parallelism.
            images = gluon.utils.split_and_load(image, ctx)
            labels = gluon.utils.split_and_load(label, ctx)
            with autograd.record(train_mode=True):
                losses = []
                for x, y in zip(images, labels):
                    # The head also consumes the label (margin-based loss).
                    fc = inference(x, y)
                    loss_ce = softmax_cross_entropy(fc, y)
                    losses.append(loss_ce)
                    # update metrics
                    metric_ce_loss.update(None, preds=loss_ce)
                for l in losses:
                    l.backward()
            trainer.step(image.shape[0])

            # Periodic loss logging; metric is reset after every report.
            if (batch_idx % args.log_interval
                    == 0) or (batch_idx == iterations_per_epoch - 1):
                elapsed_time = timeit.default_timer() - start_time
                scout = helper.print_scalars(
                    OrderedDict([metric_ce_loss.get()]), cur_epoch, batch_idx,
                    elapsed_time)
                logger.info(scout)
                start_time = timeit.default_timer()
                metric_ce_loss.reset()

            # Periodic LFW evaluation; checkpoint when accuracy improves.
            if (batch_idx % args.test_interval
                    == 0) or (batch_idx == iterations_per_epoch - 1):
                # if batch_idx > 0: break
                start_time = timeit.default_timer()
                mu, std, t, _ = eval_lfw(inference.features, args.test_rec,
                                         test_loader, ctx)
                elapsed_time = timeit.default_timer() - start_time
                if mu > best_acc:
                    best_acc = mu
                    # Save trained model
                    logger.info(
                        'Find better model at E: {}, B: {}, I: {}'.format(
                            cur_epoch, batch_idx, cur_iter))
                    helper.save_params(inference,
                                       args.ckpt_dir,
                                       cur_iter,
                                       prefix=args.prefix + '-best')
                scout = helper.print_scalars(
                    OrderedDict([('mu', mu), ('std', std), ('t', t)]),
                    cur_epoch, batch_idx, elapsed_time)
                logger.info(scout)

        # Save trained model and trainer state at the end of each epoch so
        # training can be resumed from cur_iter.
        helper.save_params(inference,
                           args.ckpt_dir,
                           cur_iter,
                           prefix=args.prefix)
        helper.save_trainer(trainer,
                            args.ckpt_dir,
                            cur_iter,
                            prefix=args.prefix)
예제 #12
0
    # arguments
    data_dir = sys.argv[1]
    model_name = sys.argv[2]
    batch_size = int(sys.argv[3])
    epochs = int(sys.argv[4])
    lr = float(sys.argv[5])


    # pathing
    train_rec = os.path.join(data_dir, 'train/img.rec')
    validation_rec = os.path.join(data_dir, 'validation/img.rec')
    model_out_path = os.path.join(data_dir, '../models/{}.params'.format(model_name))

    # load data
    train_iterator = ImageRecordDataset(
        filename=train_rec,
        transform=lambda X, y: transform(X, y, train_augs)
    )
    validation_iterator = ImageRecordDataset(
        filename=validation_rec,
        transform=lambda X, y: transform(X, y, test_augs)
    )

    # instantiate source model
    pretrained_net = mobilenet1_0(pretrained=True, prefix='model_')
    # instantiate target model
    net = mobilenet1_0(classes=2, prefix='model_')
    # transfer non output layers from source model to target model
    net.features = pretrained_net.features
    # initializing parameters for output layers of target model
    net.output.initialize(init.Xavier())
        return data[:, 1:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1), \
               data[:, 0].astype(np.int32)

    def _get_data(self):
        """Read every batch file of the active split into self._data/_label."""
        # Select the file list for the split this instance was built for.
        files = self._train_data if self._train else self._test_data

        # Read each batch file; each read yields an (images, labels) pair.
        batches = [self._read_batch(os.path.join(self._root, name))
                   for name, _ in files]
        data_parts, label_parts = zip(*batches)

        self._data = np.concatenate(data_parts)
        self._label = np.concatenate(label_parts)


if __name__ == '__main__':

    # Sanity check: the .rec file must reproduce the original CIFAR-10
    # test set sample-for-sample.
    cifar_ori = CIFAR10(train=False)
    cifar_rec = ImageRecordDataset("./cifar10_test.rec")

    for a, b in zip(cifar_ori, cifar_rec):
        im1 = a[0]
        # Reverse the channel axis of the decoded record image
        # (presumably BGR -> RGB) before comparing — matches original code.
        im2 = b[0].asnumpy()[..., ::-1]
        # BUG FIX: np.sum(im1 - im2) == 0 passes when positive and negative
        # pixel differences cancel (and wraps for unsigned dtypes); require
        # element-wise equality instead.
        assert np.array_equal(im1, im2)
        assert a[1] == b[1]