Example No. 1
    def decode_shape(self):
        # build graph
        octree_in, _ = DatasetFactory(FLAGS.DATA.test, bounding_sphere)()
        convd = network.octree_encoder(octree_in, training=False, reuse=False)
        octree_out = network.decode_shape(convd,
                                          octree_in,
                                          training=False,
                                          reuse=False)

        # checkpoint
        assert (self.flags.ckpt)
        tf_saver = tf.train.Saver(max_to_keep=20)

        # start
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # restore and initialize
            self.initialize(sess)
            print('Load check point: ' + self.flags.ckpt)
            tf_saver.restore(sess, self.flags.ckpt)
            logdir = self.flags.logdir
            tf.summary.FileWriter(logdir, sess.graph)

            print('Start testing ...')
            for i in tqdm(range(0, self.flags.test_iter), ncols=80):
                o0, o1 = sess.run([octree_in, octree_out])
                with open(logdir + ('/%04d_input.octree' % i), "wb") as f:
                    f.write(o0.tobytes())
                with open(logdir + ('/%04d_output.octree' % i), "wb") as f:
                    f.write(o1.tobytes())
Example No. 2
    def decode_shape(self):
        # build graph
        octree, label = DatasetFactory(FLAGS.DATA.test)()
        code = autoencoder.octree_encoder(octree, training=False, reuse=False)
        octree_pred = autoencoder.octree_decode_shape(code,
                                                      training=False,
                                                      reuse=False)

        # checkpoint
        assert (self.flags.ckpt)  # self.flags.ckpt must be provided
        tf_saver = tf.train.Saver(max_to_keep=20)

        # start
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # restore and initialize
            self.initialize(sess)
            tf_saver.restore(sess, self.flags.ckpt)
            logdir = self.flags.logdir
            tf.summary.FileWriter(logdir, sess.graph)

            print('Start testing ...')
            for i in tqdm(range(0, self.flags.test_iter)):
                origin, reconstructed = sess.run([octree, octree_pred])
                with open(logdir + ('/%04d_input.octree' % i), "wb") as f:
                    f.write(origin.tobytes())
                with open(logdir + ('/%04d_output.octree' % i), "wb") as f:
                    f.write(reconstructed.tobytes())
Example No. 3
    def __init__(self, cfg) -> None:
        super(TrainNet, self).__init__()
        self.cfg = cfg
        self.dataset = DatasetFactory(cfg)
        self.net = ResNet(cfg, **cfg.net)

        self.loss_fn = nn.CrossEntropyLoss()
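
For context, a minimal sketch (not from the original repository; the method name training_step and its arguments are hypothetical) of how the members built in this constructor are typically combined in a PyTorch training step:

    def training_step(self, images, labels):
        # forward pass through the network configured in __init__
        logits = self.net(images)
        # nn.CrossEntropyLoss expects raw logits of shape [N, C] and
        # integer class labels of shape [N]
        return self.loss_fn(logits, labels)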
Example No. 4
def compute_graph(dataset='train', training=True, reuse=False):
    FLAGSD = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
    with tf.name_scope('dataset'):
        dataset = DatasetFactory(FLAGSD)
        octree, label = dataset()
    logit = network(octree, FLAGS.MODEL, training, reuse)
    losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                            FLAGS.LOSS.weight_decay, 'ocnn')
    return losses  # loss, accu, regularizer
Example No. 5
def compute_graph(dataset='train', training=True, reuse=False):
    flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
    octree, label = DatasetFactory(flags_data)()
    logit = cls_network(octree, FLAGS.MODEL, training, reuse)
    losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                            FLAGS.LOSS.weight_decay, 'ocnn',
                            FLAGS.LOSS.label_smoothing)
    losses.append(losses[0] + losses[2])  # total loss
    names = ['loss', 'accu', 'regularizer', 'total_loss']
    return losses, names
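
A hedged usage sketch (the helper summarize_tensors is hypothetical, not part of the original code): the (losses, names) pair returned above maps directly onto TensorBoard scalar summaries in TensorFlow 1.x.

def summarize_tensors(tensors, names):
    # attach one scalar summary per returned tensor, keyed by its name
    for tensor, name in zip(tensors, names):
        tf.summary.scalar(name, tensor)
    return tf.summary.merge_all()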
Example No. 6
def compute_graph(dataset='train', training=True, reuse=False):
    flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
    octree, label = DatasetFactory(flags_data)()
    code = autoencoder.octree_encoder(octree, training, reuse)
    loss, accu = autoencoder.octree_decoder(code, octree, training, reuse)

    with tf.name_scope('total_loss'):
        reg = l2_regularizer('ocnn', FLAGS.LOSS.weight_decay)
        total_loss = tf.add_n(loss + [reg])
    tensors = loss + [reg] + accu + [total_loss]
    depth = FLAGS.MODEL.depth
    names = ['loss%d' % d for d in range(2, depth + 1)] + ['normal', 'reg'] + \
            ['accu%d' % d for d in range(2, depth + 1)] + ['total_loss']
    return tensors, names
Example No. 7
def compute_graph(training=True, reuse=False):
    FLAGSD = FLAGS.DATA.train if training else FLAGS.DATA.test
    with tf.name_scope('dataset'):
        dataset = DatasetFactory(FLAGSD)
        octree, label = dataset()
    code = octree_encoder(octree, FLAGS.MODEL, training, reuse)
    loss, accu = octree_decoder(code, octree, FLAGS.MODEL, training, reuse)
    with tf.name_scope('compute_loss'):
        var_all = tf.trainable_variables()
        reg = tf.add_n([tf.nn.l2_loss(v)
                        for v in var_all]) * FLAGS.LOSS.weight_decay
        total_loss = tf.add_n(loss + [reg])
    tensors = loss + [reg] + accu + [total_loss]
    depth = FLAGS.MODEL.depth
    names = ['loss%d' % d for d in range(2, depth + 1)] + ['normal', 'reg'] + \
            ['accu%d' % d for d in range(2, depth + 1)] + ['total_loss']
    return tensors, names
Example No. 8
    def __call__(self, dataset='train', training=True, reuse=False):
        FLAGS = self.flags
        flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
        octree, _, points = DatasetFactory(flags_data)()
        pts, label = get_point_info(points, flags_data.mask_ratio)
        logit = seg_network(octree, FLAGS.MODEL, training, reuse, pts=pts)
        losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                                FLAGS.LOSS.weight_decay, 'ocnn',
                                FLAGS.LOSS.label_smoothing)
        tensors = losses + [losses[0] + losses[2]]  # total loss
        names = ['loss', 'accu', 'regularizer', 'total_loss']

        if flags_data.batch_size == 1:
            iou, valid_part_num = tf_IoU_per_shape(logit, label,
                                                   FLAGS.LOSS.num_class)
            tensors += [iou, valid_part_num]
            names += ['iou', 'valid_part_num']
        return tensors, names
Example No. 9
def compute_graph(reuse=False):
    with tf.name_scope('dataset'):
        flags_data = FLAGS.DATA.train
        batch_size = flags_data.batch_size
        octree, shape_id = DatasetFactory(flags_data)()
        point_id, point_segment, point_mask = get_point_info(
            octree, FLAGS.MODEL.depth_out, flags_data.mask_ratio)

    # build model
    hrnet = HRNet(FLAGS.MODEL)
    tensors = hrnet.network(octree,
                            training=True,
                            reuse=reuse,
                            mask=point_mask)
    shape_feature, point_feature = tensors['logit_cls'], tensors['logit_seg']

    # shape-level discrimination
    shape_critic = ShapeLoss(FLAGS.LOSS, reuse=reuse)
    shape_logit = shape_critic.forward(shape_feature)
    shape_loss, shape_accu = shape_critic.loss(shape_logit, shape_id)

    # point-level discrimination
    point_critic = PointLoss(FLAGS.LOSS, reuse=reuse)
    point_logit = point_critic.forward(point_feature, shape_id, point_segment,
                                       batch_size)
    point_loss, point_accu = point_critic.loss(point_logit, point_id)

    # run SGD
    reg = l2_regularizer('ocnn', FLAGS.LOSS.weight_decay)
    weights = FLAGS.LOSS.weights
    total_loss = shape_loss * weights[0] + point_loss * weights[1] + reg
    solver, lr = build_solver(total_loss, LRFactory(FLAGS.SOLVER))

    # update memory
    shape_update = shape_critic.update_memory(solver)
    point_update = point_critic.update_memory(solver)
    train_op = tf.group([shape_update, point_update, solver])

    return shape_loss, shape_accu, point_loss, point_accu, reg, lr, train_op
Example No. 10
    def __call__(self, dataset='train', training=True, reuse=False, gpu_num=1):
        FLAGS = self.flags
        with tf.device('/cpu:0'):
            flags_data = FLAGS.DATA.train if dataset == 'train' else FLAGS.DATA.test
            data_iter = DatasetFactory(flags_data)(return_iter=True)

        tower_tensors = []
        for i in range(gpu_num):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('device_%d' % i):
                    octree, label = data_iter.get_next()
                    logit = cls_network(octree, FLAGS.MODEL, training, reuse)
                    losses = loss_functions(logit, label, FLAGS.LOSS.num_class,
                                            FLAGS.LOSS.weight_decay, 'ocnn',
                                            FLAGS.LOSS.label_smoothing)
                    losses.append(losses[0] + losses[2])  # total loss
                    tower_tensors.append(losses)
                    reuse = True

        names = ['loss', 'accu', 'regularizer', 'total_loss']
        tensors = tower_tensors[0] if gpu_num == 1 else list(
            zip(*tower_tensors))
        return tensors, names
Example No. 11
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from dataset import DatasetFactory, Config
from dataset.images import Image

Config().verbose = True
Config().dataset_type = 'wrapper_dataset'
ops = ('cropping', )

# val_ind
factory = DatasetFactory()
factory.add_image_type('image', 'label', 'mask', 'bounding_box')
factory.add_dataset(dataset_id='1', dirname='t_data_1', val_ind=[0, 2])
factory.add_dataset(dataset_id='2', dirname='t_data_2')
factory.add_dataset(dataset_id='3', dirname='t_data_3', val_ind=[0])
factory.add_training_operation(*ops)
t_dataset, v_dataset = factory.create()
t_keys = ['1/at1006', '2/at1025', '2/at1029', '3/at1034', '3/at1040']
v_keys = ['1/at1000', '1/at1007', '3/at1033']
assert list(t_dataset.images.keys()) == t_keys
assert list(v_dataset.images.keys()) == v_keys
for im in t_dataset[0]:
    if hasattr(im, 'labels'):
        print(im.labels)
    assert isinstance(im, Image)

Config().dataset_type = 'dataset'
t_dataset, v_dataset = factory.create()
for im in t_dataset[0]:
    assert isinstance(im, Image)
Example No. 12
def show_graph(data_set, predictions):
    df = pd.DataFrame(index=data_set.frame().index,
                      data={
                          'Real': data_set.labels(),
                          'Predicted': np.nan
                      })

    last_date = df.iloc[-1].name
    last_unix = last_date.timestamp()
    one_day = 86400
    next_unix = last_unix + one_day

    for prediction in predictions:
        next_date = datetime.fromtimestamp(next_unix)
        next_unix += one_day
        df.loc[next_date] = [prediction, np.nan]

    df['Real'].plot()
    df['Predicted'].plot()
    plt.legend(loc=4)
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.show()


classifier = ObjectStorage().load(LinearRegression)
data_frame = DataSources().google_actions()
data_set = DatasetFactory().create_from(data_frame)

# Show real future prices + predicted prices...
show_graph(data_set, predictions=classifier.predict(data_set.test_features()))
Example No. 13
    def __init__(self, experiment):
        self.experiment = experiment

        processing = experiment['processing']

        # configure the dataset
        self.dataset = DatasetFactory(
            name=experiment['dataset'],
            flat=processing['flat'],
            concat=processing['concat'],
            expand=processing['expand'],
            normalize=processing['normalize'],
        )

        # configure the state of the experiment
        self.state = LoaderState(
            id_exp=self.experiment_name(),
            epochs=experiment['epochs'],
            dataset=self.dataset,
            valid_exp=experiment['exp'],
            url=experiment['dir']
        ).state

        # compiler parameters
        optimizer = experiment['optimizer']
        opt_params = experiment['opt_params']
        loss = experiment['loss']
        metrics = [m for m in experiment['metrics'] if m != 'f1_score']
        history_metrics = [m.lower() for m in experiment['metrics'] if m != 'f1_score']
        metrics.append(f1_score)

        self.compiler_params = dict([
            ('optimizer', Optimizers(optimizer, opt_params).optimizer()),
            ('loss', loss),
            ('metrics', metrics)
        ])

        # configure training
        callbacks = []

        history_metrics.insert(0, 'loss')
        history_metrics.append('f1_score')

        # also track the validation counterpart of every metric
        for m in list(history_metrics):
            history_metrics.append('val_' + m)

        callbacks.append(HistoryCheckpoint(
            self.experiment['dir'],
            self.state,
            history_metrics))
        callbacks.append(WeightsCheckpoint(self.experiment['dir'], self.state))

        if experiment['decay']:
            callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=50, min_lr=0.1e-3))

        datagen = None
        if experiment['data_augmentation']:
            datagen = ImageDataGenerator(width_shift_range=0.1,
                                         height_shift_range=0.1,
                                         horizontal_flip=True)

        self.trainner = Trainner(
            epochs=experiment['epochs'],
            batch_size=experiment['batch'], 
            data_augmentation=datagen, 
            callbacks=callbacks, 
            dir_path=experiment['dir'],
            state=self.state
        )
Example No. 14
    def create_dataset(self, flags_data):
        return DatasetFactory(flags_data)(return_iter=True)
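
A minimal consumption sketch, assumed to mirror Example No. 10 above (the method name fetch_one_batch is hypothetical): the iterator returned by create_dataset yields (octree, label) tensors via get_next(), which are then evaluated inside a tf.Session.

    def fetch_one_batch(self, flags_data):
        # build the dataset iterator and pull one batch of tensors
        data_iter = self.create_dataset(flags_data)
        octree, label = data_iter.get_next()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            return sess.run([octree, label])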
Example No. 15
            'k': self.k,
            'current_k': self.current_k,
            'weights': self.weights,
            'historys': self.historys,
        }

    def __str__(self):
        return 'kfold'


if __name__ == '__main__':
    from dataset import DatasetFactory
    from save import SaveExperiment
    with open('experiment.json', mode='r') as f:
        experiment = json.loads(f.read())
    dt = DatasetFactory(name=experiment['dataset'], concat=True)

    ldr = LoaderState(
        id_exp='cifar10_resnet_k10',
        epochs=experiment['epochs'],
        dataset=dt,
        valid_exp=experiment['exp'],
        url=experiment['dir'],
    )
    state = ldr.state
    state.get_state_validation('kfold', k=10).status = True
    state.get_state_validation('kfold', k=10).weights[0] = 'teste'
    print(state.get_state_validation('kfold', k=10).weights)
    v = state.get_validation('kfold')
    print(v)
    SaveExperiment(root_dir=experiment['dir'] + 'states/').save_state(