Example 1
def __init__(self):
    self.conf = ModelConfig()
    # derive the model dimensions from the training data
    self.seq_max_len, self.input_size, self.num_class = cal_model_para(
        filename=self.conf.training_data)
    self._init_varible()
    # build the graph operations used for training and inference
    self.loss_op, self.optimizer_op, self.accuracy_op, self.predict_op = \
        self.define_operator()
Example 2
    def create(configuration, input, reuse=False, is_training=True):
        input_size = configuration.data().input_size()
        num_classes = configuration.settings().num_classes()
        dropout = configuration.settings().dropout()

        conf = ModelConfig()

        with tf.variable_scope('ConvNet', reuse=reuse):
            # reshape the input to the required size
            x = tf.reshape(input, shape=[-1, input_size[0], input_size[1], 1])

            activation = Model.__get_activation(conf.conv_1_activation)
            # create the layers
            conv1 = tf.layers.conv2d(x,
                                     conf.conv_1_filters,
                                     conf.conv_1_size,
                                     activation=activation,
                                     padding=conf.conv_1_padding)
            pool1 = tf.layers.max_pooling2d(conv1, conf.pool_1_size,
                                            conf.pool_1_size)
            conv2 = tf.layers.conv2d(pool1,
                                     conf.conv_2_filters,
                                     conf.conv_2_size,
                                     activation=activation,
                                     padding=conf.conv_2_padding)
            # note: the pooling size reuses conf.conv_2_size here;
            # a dedicated conf.pool_2_size may have been intended
            pool2 = tf.layers.max_pooling2d(conv2, conf.conv_2_size,
                                            conf.conv_2_size)
            fc1 = tf.contrib.layers.flatten(pool2)
            fc1 = tf.layers.dense(fc1, conf.full_size)
            fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
            out = tf.layers.dense(fc1, num_classes)
            # out = tf.nn.dropout(out, dropout)
            # out = tf.nn.softmax(out)

        return out
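For orientation, here is a minimal sketch of the ModelConfig fields that this create method reads. The attribute names are taken from the snippet above; the values and the class body itself are illustrative assumptions only.

class ModelConfig:
    # Hypothetical values; only the attribute names come from the example above.
    def __init__(self):
        self.conv_1_filters = 32         # filters in the first conv layer
        self.conv_1_size = 5             # kernel size of the first conv layer
        self.conv_1_padding = 'same'
        self.conv_1_activation = 'relu'  # resolved via Model.__get_activation
        self.pool_1_size = 2             # pooling window after conv1
        self.conv_2_filters = 64
        self.conv_2_size = 3
        self.conv_2_padding = 'same'
        self.full_size = 1024            # width of the fully connected layer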
Example 3
import pandas as pd
from sklearn.dummy import DummyClassifier, DummyRegressor

# project-local imports; the module paths are assumed, not shown in the excerpt
import data_loader
import evaluation
from general_pipeline import GeneralPipeline
from model_config import ModelConfig, ProblemType


def execute(model_config_dict):
    model_config: ModelConfig = ModelConfig(model_config_dict)
    print(model_config)
    print()

    train_df = data_loader.load_train_csv(model_config.train_data_path)

    model_config.set_defaults(train_df)

    if model_config.problem_type == ProblemType.REGRESSION:
        for strategy in ['mean', 'median']:
            pipeline: GeneralPipeline = GeneralPipeline(
                model_config, DummyRegressor(strategy), train_df)
            evaluation.evaluate(pipeline, train_df, model_config.goal_metric)
    if model_config.problem_type == ProblemType.CLASSIFICATION:
        for strategy in ['stratified', 'most_frequent', 'uniform']:
            pipeline: GeneralPipeline = GeneralPipeline(
                model_config, DummyClassifier(strategy), train_df)
            evaluation.evaluate(pipeline, train_df, model_config.goal_metric)

    pipeline: GeneralPipeline = GeneralPipeline(model_config,
                                                model_config.model_class(),
                                                train_df)
    # evaluation.evaluate(pipeline, main_df, model_config.goal_metric)

    df = pd.read_csv('./example_data/test.csv')
    pipeline.train_predict(train_df, df)
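A minimal invocation sketch. The dictionary keys mirror attributes read in execute, but how ModelConfig consumes the dict is an assumption; the remaining attributes are presumably filled in by set_defaults.

# Hypothetical usage: the keys are guesses based on the attributes used above.
config_dict = {
    'train_data_path': './example_data/train.csv',
    'problem_type': 'classification',
    'goal_metric': 'accuracy',
}
execute(config_dict)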
Example 4
def __save_results(self, accuracies, output_model_folder, saver,
                   tf_session):
    # create the model folder with the date in its name
    # (the date is inserted outside this method)
    os.makedirs(output_model_folder)
    # save the model
    saver.save(tf_session, output_model_folder + '/model.ckpt')
    # save the list of accuracies from the individual iterations
    with open(output_model_folder + '/accuracy.txt', 'w+') as file:
        for acc in accuracies:
            file.write(str(acc) + '\n')
    # save the parameters used to train the model
    with open(output_model_folder + "/settings.txt", 'w+') as file:
        for field, value in ModelConfig().get_config().items():
            file.write(field + ": " + str(value) + '\n')
Example 5
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np

import tfplot
import tfplot.summary

from train_config import TrainConfig
from model_config import ModelConfig

# config instance generation
train_config = TrainConfig()
model_config = ModelConfig()


def argmax_2d(tensor):

    # input format: BxHxWxD
    assert len(tensor.get_shape()) == 4

    with tf.name_scope(name='argmax_2d', values=[tensor]):
        tensor_shape = tensor.get_shape().as_list()

        # flatten the Tensor along the height and width axes
        flat_tensor = tf.reshape(tensor,
                                 (tensor_shape[0], -1, tensor_shape[3]))

        # argmax over the flattened height*width axis
        argmax = tf.argmax(flat_tensor, axis=1)

        # convert the flat indices back to 2D (row, col) coordinates
        # (assumed completion of the truncated snippet)
        argmax_y = argmax // tensor_shape[2]
        argmax_x = argmax % tensor_shape[2]

        return tf.stack((argmax_y, argmax_x), axis=1)
Example 6
# @Author  : [email protected]
# @FileName: dataset_augument.py
# @Software: PyCharm
# @updated by Jaewook Kang 20181010 for tf-tiny-pose-estimation

import math
import random

import cv2
import numpy as np
from tensorpack.dataflow.imgaug.geometry import RotationAndCropValid
from enum import Enum

# custom addition for tf-tiny-pose-estimation
from model_config import ModelConfig
model_config = ModelConfig(setuplog_dir=None)

_network_w = int(model_config._input_size)
_network_h = _network_w
_scale = int(model_config._input_size / model_config._output_size)


class CocoPart(Enum):
    Top = 0
    Neck = 1
    RShoulder = 2
    RElbow = 3
    RWrist = 4
    LShoulder = 5
    LElbow = 6
    LWrist = 7
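Here _scale is the integer downsampling ratio between the network input and its heatmap output: with, say, _input_size = 192 and _output_size = 48 (illustrative values, not taken from this page), _scale = 192 / 48 = 4.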
Example 7
import time

import numpy as np
import tensorflow as tf

from train_config import TrainConfig
from model_config import ModelConfig
# the module path for ModelBuilder is assumed; it is not shown in the excerpt
from model_builder import ModelBuilder


def train(dataset_train, dataset_test):
    model_config = ModelConfig()
    train_config = TrainConfig()

    dataset_handle = tf.placeholder(tf.string, shape=[])
    dataset_train_iterator = dataset_train.make_one_shot_iterator()
    # dataset_test_iterator  = dataset_test.make_one_shot_iterator()

    inputs = tf.placeholder(dtype=model_config.dtype,
                            shape=[
                                train_config.batch_size,
                                model_config._input_size,
                                model_config._input_size,
                                model_config.input_chnum
                            ])

    true_heatmap = tf.placeholder(dtype=model_config.dtype,
                                  shape=[
                                      train_config.batch_size,
                                      model_config._output_size,
                                      model_config._output_size,
                                      model_config.output_chnum
                                  ])

    # model building =========================
    # < complete codes here >
    modelbuilder = ModelBuilder(model_config=model_config)
    pred_heatmap = modelbuilder.get_model(model_in=inputs, scope='model')

    # training ops =============================================
    # < complete codes here >
    loss_heatmap = train_config.loss_fn(true_heatmap -
                                        pred_heatmap) / train_config.batch_size
    loss_regularizer = tf.losses.get_regularization_loss()
    loss_op = loss_heatmap + loss_regularizer

    global_step = tf.Variable(0, trainable=False)
    batchnum_per_epoch = np.floor(train_config.train_data_size /
                                  train_config.batch_size)

    lr_op = tf.train.exponential_decay(
        learning_rate=train_config.learning_rate,
        global_step=global_step,
        decay_steps=train_config.learning_rate_decay_step,
        decay_rate=train_config.learning_rate_decay_rate,
        staircase=True)

    opt_op = train_config.opt_fn(learning_rate=lr_op, name='opt_op')
    train_op = opt_op.minimize(loss_op, global_step)

    # For Tensorboard ===========================================
    file_writer = tf.summary.FileWriter(logdir=train_config.tflogdir)
    file_writer.add_graph(tf.get_default_graph())

    tb_summary_loss_train = tf.summary.scalar('loss_train', loss_op)
    tb_summary_loss_test = tf.summary.scalar('loss_test', loss_op)

    tb_summary_lr = tf.summary.scalar('learning_rate', lr_op)

    # training ==============================

    init_var = tf.global_variables_initializer()
    print('[train] training_epochs = %s' % train_config.training_epochs)
    print('------------------------------------')

    # build dataset ========================

    # inputs_test_op, true_heatmap_test_op =  dataset_test_iterator.get_next()
    inputs_train_op, true_heatmap_train_op = dataset_train_iterator.get_next()

    with tf.Session() as sess:
        # Run the variable initializer
        sess.run(init_var)

        # train_handle    = sess.run(dataset_train_iterator.string_handle())
        # test_handle     = sess.run(dataset_test_iterator.string_handle())

        for epoch in range(train_config.training_epochs):

            inputs_train, true_heatmap_train = sess.run(
                [inputs_train_op, true_heatmap_train_op])
            # inputs_valid,true_heatmap_valid  = sess.run([inputs_test_op,true_heatmap_test_op])

            train_start_time = time.time()

            # train model
            # _,loss_train = sess.run([train_op,loss_op],
            #                          feed_dict={dataset_handle: train_handle,
            #                          modelbuilder.dropout_keeprate:model_config.output.dropout_keeprate})

            _, loss_train = sess.run(
                [train_op, loss_op],
                feed_dict={
                    inputs: inputs_train,
                    true_heatmap: true_heatmap_train,
                    modelbuilder.dropout_keeprate:
                        model_config.output.dropout_keeprate
                })

            train_elapsed_time = time.time() - train_start_time

            global_step_eval = global_step.eval()

            if train_config.display_step == 0:
                continue
            elif global_step_eval % train_config.display_step == 0:
                print('[train] curr epochs = %s' % epoch)

                # # test model
                # loss_test = loss_op.eval(feed_dict={dataset_handle: test_handle,
                #                                     modelbuilder.dropout_keeprate: 1.0})
                #
                # loss_test = loss_op.eval( feed_dict={inputs: inputs_valid,
                #                                     true_heatmap: true_heatmap_valid,
                #                                     modelbuilder.dropout_keeprate: 1.0})

                # tf summary
                summary_loss_train = tb_summary_loss_train.eval(
                    feed_dict={
                        inputs: inputs_train,
                        true_heatmap: true_heatmap_train,
                        modelbuilder.dropout_keeprate: 1.0
                    })
                # summary_loss_test  = tb_summary_loss_test.eval( feed_dict={inputs: inputs_valid,
                #                                                             true_heatmap: true_heatmap_valid,
                #                                                             modelbuilder.dropout_keeprate: 1.0})
                #

                # summary_loss_train = tb_summary_loss_train.eval(feed_dict={dataset_handle: train_handle,
                #                                                            modelbuilder.dropout_keeprate:1.0})
                #
                # summary_loss_test  = tb_summary_loss_test.eval(feed_dict={dataset_handle: test_handle,
                #                                                           modelbuilder.dropout_keeprate: 1.0})

                summary_lr = tb_summary_lr.eval()

                file_writer.add_summary(summary_loss_train, global_step_eval)
                # file_writer.add_summary(summary_loss_test,global_step_eval)
                file_writer.add_summary(summary_lr, global_step_eval)

                print('At step = %d, train elapsed_time = %.1f ms' %
                      (global_step_eval, train_elapsed_time))
                print("Training set loss (avg over batch)= %.2f   " %
                      (loss_train))
                # print("Test set Err loss (total batch)= %.2f %%" % (loss_test))
                print("--------------------------------------------")

        print("Training finished!")

    file_writer.close()
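For context, here is a minimal sketch of the TrainConfig attributes this loop relies on. The attribute names all appear in the snippet above; the values and the concrete loss/optimizer choices are assumptions.

import tensorflow as tf

class TrainConfig:
    # Values are illustrative; only the attribute names come from the example.
    def __init__(self):
        self.batch_size = 32
        self.training_epochs = 50
        self.display_step = 10
        self.train_data_size = 10000
        self.learning_rate = 1e-3
        self.learning_rate_decay_step = 1000
        self.learning_rate_decay_rate = 0.9
        self.tflogdir = './tf_logs/'
        # loss_fn is applied to (true - pred), so an l2-style loss fits (assumption)
        self.loss_fn = tf.nn.l2_loss
        # opt_fn is called as opt_fn(learning_rate=..., name=...) (assumption)
        self.opt_fn = tf.train.AdamOptimizer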
Example 8
                # save a checkpoint (saver and sess are assumed names from
                # the surrounding loop; the opening line was truncated)
                ckpt_save_path = saver.save(sess,
                                            train_config.ckpt_dir +
                                            'model.ckpt',
                                            global_step=global_step_eval)
                tf.logging.info("Global step - %s: Model saved in file: %s" %
                                (global_step_eval, ckpt_save_path))

        print("Training finished!")

    file_writer_train.close()
    file_writer_valid.close()


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    train_config = TrainConfig()
    model_config = ModelConfig(setuplog_dir=train_config.setuplog_dir)
    preproc_config = PreprocessingConfig(
        setuplog_dir=train_config.setuplog_dir)

    train_config.send_setuplog_to_gcp_bucket()
    preproc_config.show_info()

    # dataloader instance gen
    dataloader_train, dataloader_valid = \
        [DataLoader(
            is_training     =is_training,
            data_dir        =DATASET_DIR,
            transpose_input =False,
            train_config    =train_config,
            model_config    =model_config,
            preproc_config  =preproc_config,
            use_bfloat16    =False) for is_training in [True, False]]
Example 9
def main():

    sys.path.insert(0, TF_MODULE_DIR)
    sys.path.insert(0, EXPORT_DIR)
    sys.path.insert(0, COCO_DATALOAD_DIR)

    # # configuration file
    # config = configparser.ConfigParser()
    #
    # config_file = "mv2_cpm.cfg"
    # if os.path.exists(config_file):
    #     config.read(config_file)

    # params = {}
    # for _ in config.options("Train"):
    #     params[_] = eval(config.get("Train", _))
    #
    # os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    train_config = TrainConfig()
    model_config = ModelConfig(setuplog_dir=train_config.setuplog_dir)
    preproc_config = PreprocessingConfig(
        setuplog_dir=train_config.setuplog_dir)

    # ================================================
    # =============== dataset pipeline ===============
    # ================================================

    # dataloader instance gen
    dataloader_train, dataloader_valid = \
        [DataLoader(
            is_training     =is_training,
            data_dir        =DATASET_DIR,
            transpose_input =False,
            train_config    =train_config,
            model_config    =model_config,
            preproc_config  =preproc_config,
            use_bfloat16    =False) for is_training in [True, False]]

    dataset_train = dataloader_train.input_fn()
    # dataset_valid   = dataloader_valid.input_fn()

    data = dataset_train.repeat()
    # data = dataset_train

    # ================================================
    # ============== configure model =================
    # ================================================

    model_builder = HourglassModelBuilder()
    model_builder.build_model()

    model = model_builder.model
    model.summary()

    model.compile(
        optimizer=tf.optimizers.Adam(0.001, epsilon=1e-8),  #'adam',
        loss=tf.losses.MeanSquaredError(),
        metrics=['accuracy'])  #tf.metrics.Accuracy

    # ================================================
    # =============== setup output ===================
    # ================================================
    current_time = datetime.now().strftime("%Y%m%d%H%M%S")
    output_path = os.path.join(PROJ_HOME, "outputs")

    # output model file(.hdf5)
    model_path = os.path.join(output_path, "models")
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    checkpoint_path = os.path.join(model_path,
                                   "hg_" + current_time + ".hdf5")  #".ckpt"
    check_pointer = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                       save_weights_only=False,
                                                       verbose=1)
    # output tensorboard log
    log_path = os.path.join(output_path, "logs")
    log_path = os.path.join(log_path, "hg_" + current_time)
    tensorboard = tf.keras.callbacks.TensorBoard(log_path)

    # ================================================
    # ==================== train! ====================
    # ================================================

    model.fit(data,
              epochs=300,
              steps_per_epoch=100,
              callbacks=[check_pointer, tensorboard])  # steps_per_epoch=100,
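One note on the input pipeline above: data = dataset_train.repeat() is an infinite dataset, so the steps_per_epoch=100 argument is what delimits an epoch here; without it, model.fit would never complete the first epoch.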