Example #1
    def __init__(self, cfg) -> None:
        super(TrainNet, self).__init__()
        self.cfg = cfg
        self.dataset = DatasetFactory(cfg)
        self.net = ResNet(cfg, **cfg.net)

        self.loss_fn = nn.CrossEntropyLoss()
Example #2
class TrainNet(pl.LightningModule):
    def __init__(self, cfg) -> None:
        super(TrainNet, self).__init__()
        self.cfg = cfg
        self.dataset = DatasetFactory(cfg)
        self.net = ResNet(cfg, **cfg.net)

        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.net(x)

    def prepare_data(self):
        self.dataset.prepare_dataset()

    def train_dataloader(self):
        return self.dataset.train_loader()

    def val_dataloader(self):
        return self.dataset.val_loader()

    def configure_optimizers(self):
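        # SGD plus a multi-step LR schedule, both built from the training config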
        optimizer = SGD(self.net.parameters(), **self.cfg.training.optimizer)
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             **self.cfg.training.scheduler)

        return [optimizer], [scheduler]

    def training_step(self, batch, batch_idx):
        self.train()
        x, y = batch
        logits = self(x)
        loss = self.loss_fn(logits, y)

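        # top-1 accuracy on the current training batch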
        predicted_labels = logits.argmax(dim=1)
        accuracy = (predicted_labels == y).sum().item() / len(y)

        tensorboard_logs = {'train_loss': loss, 'train_acc': accuracy}
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_nb):
        self.eval()
        x, y = batch
        y_hat = self(x)
        predicted_labels = y_hat.argmax(dim=1)
        accuracy = (predicted_labels == y).sum().float() / len(y)

        return {'val_loss': self.loss_fn(y_hat, y), 'val_acc': accuracy}

    def validation_epoch_end(self, outputs):
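        # average loss and accuracy over all validation batches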
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
        tensorboard_logs = {'val_loss': avg_loss, 'val_acc': avg_acc}
        return {
            'val_loss': avg_loss,
            'val_acc': avg_acc,
            'log': tensorboard_logs
        }
Example #3
def main(dataset_name, model_name, epochs=50, batch_size=128):
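    # build the dataset, solver and model by name, then train and evaluate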
    dataset = DatasetFactory.get_by_name(dataset_name, train_percentage=0.95)
    checkpoint = 'checkpoint/{}_{}'.format(dataset_name, model_name)
    solver = Solver(dataset, checkpoint, train_batch_size=batch_size)
    model = ModelFactory.get_by_name(model_name, dataset)
    solver.train_model(model,
                       epochs=epochs,
                       warmup_epochs=10,
                       num_epoch_to_log=5,
                       learning_rate=1e-3,
                       weight_decay=1e-4)
    solver.test(model)
Example #4
    def decode_shape(self):
        # build graph
        octree, label = DatasetFactory(FLAGS.DATA.test)()
        code = autoencoder.octree_encoder(octree, training=False, reuse=False)
        octree_pred = autoencoder.octree_decode_shape(code,
                                                      training=False,
                                                      reuse=False)

        # checkpoint
        assert self.flags.ckpt  # a checkpoint path must be provided
        tf_saver = tf.train.Saver(max_to_keep=20)

        # start
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # restore and initialize
            self.initialize(sess)
            tf_saver.restore(sess, self.flags.ckpt)
            logdir = self.flags.logdir
            tf.summary.FileWriter(logdir, sess.graph)

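            # run the encoder/decoder on each test sample and dump input/output octrees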
            print('Start testing ...')
            for i in tqdm(range(0, self.flags.test_iter)):
                origin, reconstructed = sess.run([octree, octree_pred])
                with open(logdir + ('/%04d_input.octree' % i), "wb") as f:
                    f.write(origin.tobytes())
                with open(logdir + ('/%04d_output.octree' % i), "wb") as f:
                    f.write(reconstructed.tobytes())
Example #5
    def decode_shape(self):
        # build graph
        octree_in, _ = DatasetFactory(FLAGS.DATA.test, bounding_sphere)()
        convd = network.octree_encoder(octree_in, training=False, reuse=False)
        octree_out = network.decode_shape(convd,
                                          octree_in,
                                          training=False,
                                          reuse=False)

        # checkpoint
        assert (self.flags.ckpt)
        tf_saver = tf.train.Saver(max_to_keep=20)

        # start
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # restore and initialize
            self.initialize(sess)
            print('Load check point: ' + self.flags.ckpt)
            tf_saver.restore(sess, self.flags.ckpt)
            logdir = self.flags.logdir
            tf.summary.FileWriter(logdir, sess.graph)

            print('Start testing ...')
            for i in tqdm(range(0, self.flags.test_iter), ncols=80):
                o0, o1 = sess.run([octree_in, octree_out])
                with open(logdir + ('/%04d_input.octree' % i), "wb") as f:
                    f.write(o0.tobytes())
                with open(logdir + ('/%04d_output.octree' % i), "wb") as f:
                    f.write(o1.tobytes())
Example #6
def compute_graph(dataset='train', training=True, reuse=False):
    FLAGSD = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
    with tf.name_scope('dataset'):
        dataset = DatasetFactory(FLAGSD)
        octree, label = dataset()
    logit = network(octree, FLAGS.MODEL, training, reuse)
    losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                            FLAGS.LOSS.weight_decay, 'ocnn')
    return losses  # loss, accu, regularizer
Example #7
def compute_graph(dataset='train', training=True, reuse=False):
    flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
    octree, label = DatasetFactory(flags_data)()
    logit = cls_network(octree, FLAGS.MODEL, training, reuse)
    losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                            FLAGS.LOSS.weight_decay, 'ocnn',
                            FLAGS.LOSS.label_smoothing)
    losses.append(losses[0] + losses[2])  # total loss
    names = ['loss', 'accu', 'regularizer', 'total_loss']
    return losses, names
Example #8
def compute_graph(dataset='train', training=True, reuse=False):
    flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
    octree, label = DatasetFactory(flags_data)()
    code = autoencoder.octree_encoder(octree, training, reuse)
    loss, accu = autoencoder.octree_decoder(code, octree, training, reuse)

    with tf.name_scope('total_loss'):
        reg = l2_regularizer('ocnn', FLAGS.LOSS.weight_decay)
        total_loss = tf.add_n(loss + [reg])
    tensors = loss + [reg] + accu + [total_loss]
    depth = FLAGS.MODEL.depth
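    # tensor names: per-depth losses, the normal loss, regularizer, per-depth accuracies, total loss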
    names = ['loss%d' % d for d in range(2, depth + 1)] + ['normal', 'reg'] + \
            ['accu%d' % d for d in range(2, depth + 1)] + ['total_loss']
    return tensors, names
Example #9
    def __call__(self, dataset='train', training=True, reuse=False, gpu_num=1):
        FLAGS = self.flags
        with tf.device('/cpu:0'):
            flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
            data_iter = DatasetFactory(flags_data)(return_iter=True)

        tower_tensors = []
        for i in range(gpu_num):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('device_%d' % i):
                    octree, label = data_iter.get_next()
                    logit = cls_network(octree, FLAGS.MODEL, training, reuse)
                    losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                                            FLAGS.LOSS.weight_decay, 'ocnn',
                                            FLAGS.LOSS.label_smoothing)
                    losses.append(losses[0] + losses[2])  # total loss
                    tower_tensors.append(losses)
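                    # share variables across the remaining GPU towers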
                    reuse = True

        names = ['loss', 'accu', 'regularizer', 'total_loss']
        tensors = tower_tensors[0] if gpu_num == 1 else list(
            zip(*tower_tensors))
        return tensors, names
Example #10
def compute_graph(training=True, reuse=False):
    FLAGSD = FLAGS.DATA.train if training else FLAGS.DATA.test
    with tf.name_scope('dataset'):
        dataset = DatasetFactory(FLAGSD)
        octree, label = dataset()
    code = octree_encoder(octree, FLAGS.MODEL, training, reuse)
    loss, accu = octree_decoder(code, octree, FLAGS.MODEL, training, reuse)
    with tf.name_scope('compute_loss'):
        var_all = tf.trainable_variables()
        reg = tf.add_n([tf.nn.l2_loss(v)
                        for v in var_all]) * FLAGS.LOSS.weight_decay
        total_loss = tf.add_n(loss + [reg])
    tensors = loss + [reg] + accu + [total_loss]
    depth = FLAGS.MODEL.depth
    names = ['loss%d' % d for d in range(2, depth + 1)] + ['normal', 'reg'] + \
            ['accu%d' % d for d in range(2, depth + 1)] + ['total_loss']
    return tensors, names
Example #11
    def __call__(self, dataset='train', training=True, reuse=False):
        FLAGS = self.flags
        flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
        octree, _, points = DatasetFactory(flags_data)()
        pts, label = get_point_info(points, flags_data.mask_ratio)
        logit = seg_network(octree, FLAGS.MODEL, training, reuse, pts=pts)
        losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                                FLAGS.LOSS.weight_decay, 'ocnn',
                                FLAGS.LOSS.label_smoothing)
        tensors = losses + [losses[0] + losses[2]]  # total loss
        names = ['loss', 'accu', 'regularizer', 'total_loss']

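        # per-shape IoU is only reported when each batch holds a single shape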
        if flags_data.batch_size == 1:
            iou, valid_part_num = tf_IoU_per_shape(logit, label,
                                                   FLAGS.LOSS.num_class)
            tensors += [iou, valid_part_num]
            names += ['iou', 'valid_part_num']
        return tensors, names
Example #12
def compute_graph(reuse=False):
    with tf.name_scope('dataset'):
        flags_data = FLAGS.DATA.train
        batch_size = flags_data.batch_size
        octree, shape_id = DatasetFactory(flags_data)()
        point_id, point_segment, point_mask = get_point_info(
            octree, FLAGS.MODEL.depth_out, flags_data.mask_ratio)

    # build model
    hrnet = HRNet(FLAGS.MODEL)
    tensors = hrnet.network(octree,
                            training=True,
                            reuse=reuse,
                            mask=point_mask)
    shape_feature, point_feature = tensors['logit_cls'], tensors['logit_seg']

    # shape-level discrimination
    shape_critic = ShapeLoss(FLAGS.LOSS, reuse=reuse)
    shape_logit = shape_critic.forward(shape_feature)
    shape_loss, shape_accu = shape_critic.loss(shape_logit, shape_id)

    # point-level discrimination
    point_critic = PointLoss(FLAGS.LOSS, reuse=reuse)
    point_logit = point_critic.forward(point_feature, shape_id, point_segment,
                                       batch_size)
    point_loss, point_accu = point_critic.loss(point_logit, point_id)

    # run SGD
    reg = l2_regularizer('ocnn', FLAGS.LOSS.weight_decay)
    weights = FLAGS.LOSS.weights
    total_loss = shape_loss * weights[0] + point_loss * weights[1] + reg
    solver, lr = build_solver(total_loss, LRFactory(FLAGS.SOLVER))

    # update memory
    shape_update = shape_critic.update_memory(solver)
    point_update = point_critic.update_memory(solver)
    train_op = tf.group([shape_update, point_update, solver])

    return shape_loss, shape_accu, point_loss, point_accu, reg, lr, train_op
Example #13
    df = pd.DataFrame(index=data_set.frame().index,
                      data={
                          'Real': data_set.labels(),
                          'Predicted': np.nan
                      })

    last_date = df.iloc[-1].name
    last_unix = last_date.timestamp()
    one_day = 86400
    next_unix = last_unix + one_day

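    # append one row per predicted price, advancing the date index one day at a time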
    for prediction in predictions:
        next_date = datetime.fromtimestamp(next_unix)
        next_unix += 86400
        df.loc[next_date] = [np.nan, prediction]  # no real price for future dates, only the prediction

    df['Real'].plot()
    df['Predicted'].plot()
    plt.legend(loc=4)
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.show()


classifier = ObjectStorage().load(LinearRegression)
data_frame = DataSources().google_actions()
data_set = DatasetFactory().create_from(data_frame)

# Show real future prices + predicted prices...
show_graph(data_set, predictions=classifier.predict(data_set.test_features()))
Example #14
from tools import table_decorator, ObjectStorage, first
from dataset import Dataset, DatasetFactory
from sklearn.linear_model import LinearRegression
import pandas as pd

algorithms = [LinearRegression(n_jobs=-1)]


def classifiers():
    return list(map(lambda alg: Classifier(alg), algorithms))


def confidence_table(rows):
    content = pd.DataFrame(list(map(lambda row: (row[0], row[1] * 100), rows)),
                           columns=['Algorithm', 'Confidence (%)'])
    return table_decorator("Results", content)


data_frame = DataSources().google_actions()
data_set = DatasetFactory().create_from(data_frame)
classifiers = classifiers()

train_info = map(lambda clr: [
    clr.name(),
    clr.train(data_set),
], classifiers)
print(confidence_table(train_info))

ObjectStorage().save(first(classifiers))
print("...trained data saved!")
Example #15
    def __init__(self, experiment):
        self.experiment = experiment

        processing = experiment['processing']

        #config dataset
        self.dataset = DatasetFactory(
            name=experiment['dataset'],
            flat=processing['flat'],
            concat=processing['concat'],
            expand=processing['expand'],
            normalize=processing['normalize'],
        )
        
        #config state of the experiment
        self.state = LoaderState(
            id_exp=self.experiment_name(),
            epochs=experiment['epochs'],
            dataset=self.dataset,
            valid_exp=experiment['exp'],
            url=experiment['dir']
        ).state


        #compiler parameters
        optimizer = experiment['optimizer']
        opt_params = experiment['opt_params']
        loss = experiment['loss']
        metrics = [m for m in experiment['metrics'] if m != 'f1_score']
        history_metrics = [m.lower() for m in experiment['metrics'] if m != 'f1_score']
        metrics.append(f1_score)

        self.compiler_params = dict([
            ('optimizer', Optimizers(optimizer, opt_params).optimizer()),
            ('loss', loss),
            ('metrics', metrics)
        ])

        #Config training
        callbacks = []

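        # track loss, f1_score and the validation counterpart of every metric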
        history_metrics.insert(0, 'loss')
        history_metrics.append('f1_score')

        cp = [m for m in history_metrics]

        for m in cp:
            history_metrics.append('val_' + m)

        callbacks.append(HistoryCheckpoint(
            self.experiment['dir'],
            self.state,
            history_metrics))

        callbacks.append(WeightsCheckpoint(self.experiment['dir'], self.state))

        if experiment['decay']:
            callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=50, min_lr=0.1e-3))

        datagen = None
        if experiment['data_augmentation']:
            datagen = ImageDataGenerator(width_shift_range=0.1,
                                         height_shift_range=0.1,
                                         horizontal_flip=True)
        
        self.trainner = Trainner(
            epochs=experiment['epochs'],
            batch_size=experiment['batch'], 
            data_augmentation=datagen, 
            callbacks=callbacks, 
            dir_path=experiment['dir'],
            state=self.state
        )
Example #16
    def create_dataset(self, flags_data):
        return DatasetFactory(flags_data)(return_iter=True)
Example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from dataset import DatasetFactory, Config
from dataset.trees import TensorTree

# Config().dataset_type = 'wrapper_dataset'
t_ops = ('cropping', 'label_normalization')
v_ops = ('cropping', 'label_normalization')

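# register the image types and two datasets, then build the training/validation datasets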
factory = DatasetFactory()
factory.add_image_type('image', 'hierachical_label', 'mask')
factory.add_dataset(dataset_id='tmc', dirname='data')
factory.add_dataset(dataset_id='kki', dirname='ped_data')
factory.add_training_operation(*t_ops)
t_dataset, v_dataset = factory.create()

indices = [0, 1, len(t_dataset) - 1]
tensor_trees = [t_dataset[ind][1] for ind in indices]
print(tensor_trees[-1])
tensor_tree = TensorTree.stack(tensor_trees)
print(tensor_tree)
Example #18
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from dataset import DatasetFactory, Config
from dataset.images import Image

Config().verbose = True
Config().dataset_type = 'wrapper_dataset'
ops = ('cropping', )

# val_ind
factory = DatasetFactory()
factory.add_image_type('image', 'label', 'mask', 'bounding_box')
factory.add_dataset(dataset_id='1', dirname='t_data_1', val_ind=[0, 2])
factory.add_dataset(dataset_id='2', dirname='t_data_2')
factory.add_dataset(dataset_id='3', dirname='t_data_3', val_ind=[0])
factory.add_training_operation(*ops)
t_dataset, v_dataset = factory.create()
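# expected train/validation keys implied by the val_ind arguments above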
t_keys = ['1/at1006', '2/at1025', '2/at1029', '3/at1034', '3/at1040']
v_keys = ['1/at1000', '1/at1007', '3/at1033']
assert list(t_dataset.images.keys()) == t_keys
assert list(v_dataset.images.keys()) == v_keys
for im in t_dataset[0]:
    if hasattr(im, 'labels'):
        print(im.labels)
    assert isinstance(im, Image)

Config().dataset_type = 'dataset'
t_dataset, v_dataset = factory.create()
for im in t_dataset[0]:
Example #19
            'k': self.k,
            'current_k': self.current_k,
            'weights': self.weights,
            'historys': self.historys,
        }

    def __str__(self):
        return 'kfold'


if __name__ == '__main__':
    from dataset import DatasetFactory
    from save import SaveExperiment
    with open('experiment.json', mode='r') as f:
        experiment = json.loads(f.read())
    dt = DatasetFactory(name=experiment['dataset'], concat=True)

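    # load the experiment state and inspect the 10-fold validation entries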
    ldr = LoaderState(
        id_exp='cifar10_resnet_k10',
        epochs=experiment['epochs'],
        dataset=dt,
        valid_exp=experiment['exp'],
        url=experiment['dir'],
    )
    state = ldr.state
    state.get_state_validation('kfold', k=10).status = True
    state.get_state_validation('kfold', k=10).weights[0] = 'teste'
    print(state.get_state_validation('kfold', k=10).weights)
    v = state.get_validation('kfold')
    print(v)
    SaveExperiment(root_dir=experiment['dir'] + 'states/').save_state(