Example #1
import torch.distributed as dist
from torch.utils.data import DataLoader, distributed, sampler

# create_dataset and get_collate_fn are assumed to be provided by the
# surrounding module.


def create_dataloader(config, data, mode):
    dataset = create_dataset(config, data, mode)
    if mode == 'train':
        # create Sampler
        if dist.is_available() and dist.is_initialized():
            train_RandomSampler = distributed.DistributedSampler(dataset)
        else:
            train_RandomSampler = sampler.RandomSampler(dataset, replacement=False)

        train_BatchSampler = sampler.BatchSampler(train_RandomSampler,
                                                  batch_size=config.train.batch_size,
                                                  drop_last=config.train.dataloader.drop_last)

        # Collate function (may apply batch-level augmentation)
        collator = get_collate_fn(config)

        # DataLoader
        data_loader = DataLoader(dataset=dataset,
                                 batch_sampler=train_BatchSampler,
                                 collate_fn=collator,
                                 pin_memory=config.train.dataloader.pin_memory,
                                 num_workers=config.train.dataloader.work_nums)

    elif mode == 'val':
        if dist.is_available() and dist.is_initialized():
            val_SequentialSampler = distributed.DistributedSampler(dataset)
        else:
            val_SequentialSampler = sampler.SequentialSampler(dataset)

        val_BatchSampler = sampler.BatchSampler(val_SequentialSampler,
                                                batch_size=config.val.batch_size,
                                                drop_last=config.val.dataloader.drop_last)
        data_loader = DataLoader(dataset,
                                 batch_sampler=val_BatchSampler,
                                 pin_memory=config.val.dataloader.pin_memory,
                                 num_workers=config.val.dataloader.work_nums)
    else:
        if dist.is_available() and dist.is_initialized():
            test_SequentialSampler = distributed.DistributedSampler(dataset)
        else:
            test_SequentialSampler = None

        data_loader = DataLoader(dataset,
                                 sampler=test_SequentialSampler,
                                 batch_size=config.test.batch_size,
                                 pin_memory=config.test.dataloader.pin_memory,
                                 num_workers=config.test.dataloader.work_nums)
    return data_loader
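
A minimal usage sketch; config, data, and num_epochs are placeholders mirroring the attribute paths referenced above, not part of the original snippet. When the DistributedSampler branch is taken, the sampler must be told the current epoch so shuffling varies across epochs:

train_loader = create_dataloader(config, data, mode='train')
for epoch in range(num_epochs):
    # DistributedSampler only reshuffles per epoch when set_epoch() is called.
    if hasattr(train_loader.batch_sampler.sampler, 'set_epoch'):
        train_loader.batch_sampler.sampler.set_epoch(epoch)
    for batch in train_loader:
        pass  # one training step per batch goes here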
Example #2
import tensorflow as tf  # TF 1.x API (tf.logging)

# MODE_MAPPING and create_dataset are assumed to come from the surrounding
# module; MODE_MAPPING presumably maps the short mode names to config keys
# (e.g. 'train' -> 'training', as suggested by Example #4).


def input_fn(config, mode):
    if mode not in ('train', 'eval', 'pred'):
        raise ValueError('mode must be one of "train", "eval", or "pred".')

    mode_map = MODE_MAPPING[mode]

    numclasses = config['data']['numclasses']
    if not isinstance(numclasses, int) or numclasses <= 0:
        raise ValueError('numclasses must be a positive integer.')

    tf.logging.info(
        'Creating the input function for the estimator for {}.'.format(
            mode_map))

    data_config = config['data'][mode_map]
    preprocessing_config = config['data']['preprocessing']

    dataset = create_dataset(data_config, numclasses, preprocessing_config,
                             mode)

    return dataset
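
A hedged usage sketch with the TF 1.x Estimator API; estimator and config here are placeholders, not part of the original snippet:

# input_fn returns a tf.data.Dataset, so it can be handed straight to an Estimator.
estimator.train(input_fn=lambda: input_fn(config, 'train'))
estimator.evaluate(input_fn=lambda: input_fn(config, 'eval'))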
Example #3
import argparse
import os

import options  # project-local options module (assumed import path)
import util     # project-local utilities module (assumed import path)

# The argument parser is built earlier in the original script; a minimal
# assumed reconstruction:
parser = argparse.ArgumentParser()
parser.add_argument('options', type=str, help='Path to the options file.')
args = parser.parse_args()
options_path = args.options
opt = options.parse(options_path, is_train=False)
util.mkdirs(path for key, path in opt['path'].items()
            if key != 'pretrain_model_G')  # make all needed directories
opt = options.dict2box(opt)

from data.datasets import create_dataset
from data.data_loader import create_dataloader
from models.models import create_model

# Create test dataset and dataloader
test_loaders = []
test_set_names = []
for dataset_opt in opt.datasets:
    test_set = create_dataset(dataset_opt)
    test_loader = create_dataloader(test_set, dataset_opt)
    test_size = len(test_set)
    test_set_name = dataset_opt.name
    print('Number of test images in [%s]: %d' % (test_set_name, test_size))
    test_loaders.append(test_loader)
    test_set_names.append(test_set_name)

# Create model
model = create_model(opt)

# Path for log file
test_log_path = os.path.join(opt.path.log, 'test_log.txt')
if os.path.exists(test_log_path):
    os.remove(test_log_path)
    print('Old test log is removed.')
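
The snippet ends before the actual evaluation loop; a purely illustrative sketch of how the collected loaders might be consumed, reusing only the feed_data call that appears in the training code later on this page:

for test_loader, test_set_name in zip(test_loaders, test_set_names):
    print('Testing [%s]...' % test_set_name)
    for data in test_loader:
        model.feed_data(data)  # feed_data as used in Example #6's training loop
        # run inference and write results here (model-specific, not shown)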
Example #4
import tensorflow as tf
from data.datasets import create_dataset
from utils.writeconfig import DEFAULT_CONFIG

tf.logging.set_verbosity(tf.logging.INFO)
db = create_dataset(DEFAULT_CONFIG['data']['training'], 80,
                    DEFAULT_CONFIG['data']['preprocessing'], 'train')

dbiter = db.make_initializable_iterator()

data = dbiter.get_next()

init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    sess.run(dbiter.initializer)
    try:
        while True:
            data_out = sess.run(data)
            print(data_out['features'].shape)
            for key in data_out['labels']:
                print('{} - {}'.format(key, data_out['labels'][key].shape))
                if key == 'object_bboxes':
                    print(data_out['labels'][key][
                        0, 0:data_out['labels']['numbboxes'][0], :])
    except tf.errors.OutOfRangeError:
        print('Finished.')
Example #5
    def r_squared(self):
        # Coefficient of determination: R^2 = 1 - SS_res / SS_tot.
        # self.ys and self.regression_line are assumed to be NumPy arrays,
        # and mean() to come from statistics (or numpy).
        mean_y = mean(self.ys)
        mean_ys = [mean_y for _ in self.ys]
        total_sum_of_squares = sum((self.ys - mean_ys) ** 2)
        residual_sum_of_squares = sum((self.ys - self.regression_line) ** 2)

        return 1. - (residual_sum_of_squares / total_sum_of_squares)

    def draw(self, predict_x=None):
        # plt and style are assumed to come from matplotlib.
        style.use('ggplot')

        plt.scatter(self.xs, self.ys, color="#003D72", label="data")
        plt.plot(self.xs, self.regression_line, label="regression line")
        # Explicit None check so that predict_x=0 is still plotted.
        if predict_x is not None:
            plt.scatter(predict_x,
                        self.predict(predict_x),
                        color="g",
                        label="predict")
        plt.legend(loc=4)
        plt.show()


if __name__ == '__main__':
    # xs, ys = create_dataset(n=40, variance=40, step=2, correlation='pos')
    xs, ys = create_dataset(n=40, variance=5, step=2)
    regression = Regression(xs, ys)
    regression.draw(7)
    print(regression.r_squared())
    # print(regression.best_fit_slope())
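
The create_dataset used in this example is not shown; a plausible sketch of such a synthetic-data helper, assuming the (n, variance, step, correlation) signature seen above (names and internals are illustrative, not the original implementation):

import random
import numpy as np

def create_dataset(n, variance, step=2, correlation=None):
    # Walk a value by `step` per point (up for 'pos', down for 'neg')
    # and add uniform noise in [-variance, variance).
    val = 1
    ys = []
    for _ in range(n):
        ys.append(val + random.randrange(-variance, variance))
        if correlation == 'pos':
            val += step
        elif correlation == 'neg':
            val -= step
    xs = list(range(len(ys)))
    return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)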
Example #6
# Assumes module-level setup as in Example #3 (imports of math and time,
# plus opt, create_dataset, create_dataloader, create_model, Logger, and
# validate defined or imported elsewhere).
def main():
    # Create train dataset
    train_set_opt = opt.datasets[0]
    train_set = create_dataset(train_set_opt)
    train_size = int(math.ceil(len(train_set) / train_set_opt.batch_size))
    print('Number of train batches: %d (batch size %d)' % (train_size, train_set_opt.batch_size))
    total_iters = int(opt.train.niter)
    total_epochs = int(math.ceil(total_iters / train_size))
    print('Total epochs needed: %d' % total_epochs)

    # Create val dataset
    val_set_opt = opt.datasets[1]
    val_set = create_dataset(val_set_opt)
    val_size = len(val_set)
    print('Number of val images: %d' % val_size)

    # Create dataloader
    train_loader = create_dataloader(train_set, train_set_opt)
    val_loader = create_dataloader(val_set, val_set_opt)

    # Create model
    model = create_model(opt)

    # Create binarization module
    import bin
    global bin_op
    bin_op = bin.BinOp(model.netG)
    
    model.train()

    # Create logger
    logger = Logger(opt)

    current_step = 0
    need_make_val_dir = True
    start_time = time.time()
    for epoch in range(total_epochs):
        for i, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                break

            train_start_time = time.time()
            # Training
            model.feed_data(train_data)

            # optimize_parameters is replaced by its individual steps so the
            # binarization hooks can run around the backward pass:
            # model.optimize_parameters(current_step)
            bin_op.binarization()
            model.forward_G()
            model.optimizer_G.zero_grad()
            model.backward_G()
            bin_op.restore()
            bin_op.updateBinaryGradWeight()
            model.optimizer_G.step()

            train_duration = time.time() - train_start_time

            if current_step % opt.logger.print_freq == 0:
                losses = model.get_current_losses()
                logger.print_results(losses, epoch, current_step, train_duration, 'loss')

            if current_step % opt.logger.save_checkpoint_freq == 0:
                print('Saving the model at step %d' % current_step)
                model.save(current_step)

            # Validation
            if current_step % opt.train.val_freq == 0:
                validate(val_loader, val_size, model, logger, epoch, current_step)

            model.update_learning_rate(step=current_step, scheme=opt.train.lr_scheme)

        print('End of Epoch %d' % epoch)

    print('Saving the final model')
    model.save('latest')

    print('End of Training \t Time Taken: %d sec' % (time.time() - start_time))
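
validate() is called in the loop above but not defined in this snippet; a minimal sketch that assumes only the model/logger APIs already used in the training loop (the 'val' tag passed to print_results is hypothetical):

def validate(val_loader, val_size, model, logger, epoch, current_step):
    val_start_time = time.time()
    for val_data in val_loader:
        model.feed_data(val_data)
        model.forward_G()  # forward pass only; no optimizer step
    val_duration = time.time() - val_start_time
    losses = model.get_current_losses()
    logger.print_results(losses, epoch, current_step, val_duration, 'val')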