# Assumed imports for this snippet; get_dataloader, TextCNN, ModelConfig,
# TrainConfig, and evaluate come from the surrounding project.
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

writer = SummaryWriter()  # assumed module-level TensorBoard writer used below


def train(data_path):
    train_loader, test_loader, vocab = get_dataloader(data_path=data_path, bs=32, seq_len=50)
    model = TextCNN(ModelConfig())

    print(model)

    config = TrainConfig()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    criterion = nn.CrossEntropyLoss(ignore_index=1) # Ignoring <PAD> Token

    model.train()

    gs = 0
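    # gs is the global step: incremented on every batch and used to key the TensorBoard scalars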

    for epoch in tqdm(range(config.num_epochs)):
        for idx, batch in tqdm(enumerate(train_loader)):
            gs += 1
            inputs, targets = batch.text, batch.label

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            loss.backward()
            optimizer.step()

            if gs % 500 == 0:
                writer.add_scalar('train/loss', loss.item(), gs)
                print(f'{gs} loss : {loss.item()}')

        train_acc, train_f1, test_acc, test_f1 = evaluate(model, './rsc/data/')
        writer.add_scalar('train/acc', train_acc, epoch)
        writer.add_scalar('train/f1', train_f1, epoch)
        writer.add_scalar('test/acc', test_acc, epoch)
        writer.add_scalar('test/f1', test_f1, epoch)
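
# evaluate() is referenced above but not shown in this snippet. Below is a
# minimal sketch of what it could look like, assuming torchtext-style batches
# with .text/.label fields and macro-averaged F1; the names and metric choices
# here are assumptions, not the project's actual code.
from sklearn.metrics import f1_score
import torch


def evaluate(model, data_path):
    train_loader, test_loader, _ = get_dataloader(data_path=data_path, bs=32, seq_len=50)
    model.eval()
    results = []
    with torch.no_grad():
        for loader in (train_loader, test_loader):
            preds, labels = [], []
            for batch in loader:
                preds.extend(model(batch.text).argmax(dim=-1).tolist())
                labels.extend(batch.label.tolist())
            acc = sum(p == y for p, y in zip(preds, labels)) / len(labels)
            results += [acc, f1_score(labels, preds, average='macro')]
    model.train()
    return tuple(results)  # (train_acc, train_f1, test_acc, test_f1)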
Example #2
def main(_):
    # build the run configuration
    config = TrainConfig(local=True)
    
    # train
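    # resume from a saved session, draw samples, or start training from scratch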
    if config.should_continue:
        continue_training(config)
    elif config.sample > 0:
        sample(config)
    else:
        begin_training(config)
Example #3
import tensorflow as tf

# model_chout_num is assumed to be a dict of layer widths defined elsewhere;
# the parameter is renamed from `input` to avoid shadowing the builtin.
def get_model(timesteps):

    trainconfig_worker = TrainConfig()
    model = tf.keras.models.Sequential()
    model.add(
        tf.keras.layers.SimpleRNN(
            model_chout_num['u1'],
            input_shape=(timesteps, trainconfig_worker.train_input_size)))
    model.add(tf.keras.layers.Dense(1))
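    # Dense(1) maps the final RNN state to a single scalar prediction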

    return model
Example #4
    def test_data_loader_coco(self):
        '''
            This test checks that MnistDataLoader batches can be pulled through
            an initializable iterator and rendered with matplotlib.
        '''

        fm = FileManager()
        train_config = TrainConfig()

        dataloader = MnistDataLoader(
            is_training=train_config.is_trainable,
            datasize=train_config.data_size,
            batch_size=train_config.batch_size,
            multiprocessing_num=train_config.multiprocessing_num,
            is_image_scaling=False)

        images_placeholder = tf.placeholder(dtype=model_config['image_dtype'],
                                            shape=dataloader.image_shape)

        label_placeholder = tf.placeholder(dtype=model_config['label_dtype'],
                                           shape=[None])

        dataset = dataloader.input_fn(images_placeholder, label_placeholder)

        iterator = dataset.make_initializable_iterator()
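        # an initializable iterator defers binding, so the numpy arrays can be fed in at initialization time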

        image_numpy, label_numpy = dataloader.import_data(
            imagefilename=fm.train_images_filename,
            labelfilename=fm.train_labels_filename)

        with self.test_session() as sess:

            sess.run(iterator.initializer,
                     feed_dict={
                         images_placeholder: image_numpy,
                         label_placeholder: label_numpy
                     })

            images_op, labels_op = iterator.get_next()

            for n in range(0, 50):
                image_numpy_batch, label_numpy_batch = sess.run(
                    [images_op, labels_op])

                image_index = 0
                image_pick = image_numpy_batch[image_index, :, :, 0]
                label_pick = label_numpy_batch[image_index]

                plt.figure(n)
                plt.imshow(image_pick.astype(np.uint8))
                plt.title('True = %d' % label_pick)
                plt.show()
Example #5

from train_config import TrainConfig
from train_config import PreprocessingConfig
from train_config import FLAGS

from model_config import DEFAULT_INPUT_RESOL
from model_config import DEFAULT_HG_INOUT_RESOL
from model_config import DEFAULT_INPUT_CHNUM
from model_config import NUM_OF_KEYPOINTS

# for coco dataset
import dataset_augment
from dataset_prepare import CocoMetadata

DEFAULT_HEIGHT = DEFAULT_INPUT_RESOL
DEFAULT_WIDTH = DEFAULT_INPUT_RESOL
preproc_config = PreprocessingConfig()
train_config = TrainConfig()


class DataSetInput(object):
    """Generates a DataSet input_fn for training or evaluation.

        Args:
            is_training: `bool`; whether the input is for training.
            data_dir: `str`; directory of the training and validation data.
                If 'null' (the literal string 'null', not None), construct a
                null pipeline consisting of empty images.
            use_bfloat16: `bool`; if True, use bfloat16 precision, else float32.
            transpose_input: `bool`; whether to use the double-transpose trick.
    """
    def __init__(self,
                 is_training,
                 data_dir,
Example #6
                print("--------------------------------------------")

                rate_record_index += 1

        print("Training finished!")

    file_writer.close()





if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)

    trainconfig_worker = TrainConfig()
    fm = FileManager()

    # dataloader instance gen
    dataloader_train = MnistDataLoader(is_training=trainconfig_worker.is_trainable,
                                       datasize=trainconfig_worker.train_data_size,
                                       batch_size=trainconfig_worker.batch_size,
                                       multiprocessing_num=trainconfig_worker.multiprocessing_num,
                                       is_image_scaling=True)

    dataloader_test = MnistDataLoader(is_training=False,
                                      datasize=trainconfig_worker.test_data_size)

    # model training
    with tf.name_scope(name='trainer', values=[dataloader_train, dataloader_test]):
Example #7
# Assumed imports; ModelConfig, TrainConfig, and ModelBuilder come from the
# surrounding project.
import time

import numpy as np
import tensorflow as tf


def train(dataset_train, dataset_test):
    model_config = ModelConfig()
    train_config = TrainConfig()

    dataset_handle = tf.placeholder(tf.string, shape=[])
    dataset_train_iterator = dataset_train.make_one_shot_iterator()
    # dataset_test_iterator  = dataset_test.make_one_shot_iterator()

    inputs = tf.placeholder(dtype=model_config.dtype,
                            shape=[
                                train_config.batch_size,
                                model_config._input_size,
                                model_config._input_size,
                                model_config.input_chnum
                            ])

    true_heatmap = tf.placeholder(dtype=model_config.dtype,
                                  shape=[
                                      train_config.batch_size,
                                      model_config._output_size,
                                      model_config._output_size,
                                      model_config.output_chnum
                                  ])
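    # inputs / true_heatmap each hold one NHWC batch; all shapes come from the configs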

    # model building =========================
    modelbuilder = ModelBuilder(model_config=model_config)
    pred_heatmap = modelbuilder.get_model(model_in=inputs, scope='model')

    # training ops ===========================================
    loss_heatmap = train_config.loss_fn(true_heatmap -
                                        pred_heatmap) / train_config.batch_size
    loss_regularizer = tf.losses.get_regularization_loss()
    loss_op = loss_heatmap + loss_regularizer

    global_step = tf.Variable(0, trainable=False)
    batchnum_per_epoch = np.floor(train_config.train_data_size /
                                  train_config.batch_size)

    lr_op = tf.train.exponential_decay(
        learning_rate=train_config.learning_rate,
        global_step=global_step,
        decay_steps=train_config.learning_rate_decay_step,
        decay_rate=train_config.learning_rate_decay_rate,
        staircase=True)
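    # staircase=True decays the rate in discrete jumps: lr * decay_rate ** floor(step / decay_steps)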

    opt_op = train_config.opt_fn(learning_rate=lr_op, name='opt_op')
    train_op = opt_op.minimize(loss_op, global_step)

    # For Tensorboard ===========================================
    file_writer = tf.summary.FileWriter(logdir=train_config.tflogdir)
    file_writer.add_graph(tf.get_default_graph())

    tb_summary_loss_train = tf.summary.scalar('loss_train', loss_op)
    tb_summary_loss_test = tf.summary.scalar('loss_test', loss_op)

    tb_summary_lr = tf.summary.scalar('learning_rate', lr_op)

    # training ==============================

    init_var = tf.global_variables_initializer()
    print('[train] training_epochs = %s' % train_config.training_epochs)
    print('------------------------------------')

    # build dataset ========================

    # inputs_test_op, true_heatmap_test_op =  dataset_test_iterator.get_next()
    inputs_train_op, true_heatmap_train_op = dataset_train_iterator.get_next()
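    # one-shot iterator: each sess.run on these ops pulls the next training batch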

    with tf.Session() as sess:
        # Run the variable initializer
        sess.run(init_var)

        # train_handle    = sess.run(dataset_train_iterator.string_handle())
        # test_handle     = sess.run(dataset_test_iterator.string_handle())

        for epoch in range(train_config.training_epochs):

            inputs_train, true_heatmap_train = sess.run(
                [inputs_train_op, true_heatmap_train_op])
            # inputs_valid,true_heatmap_valid  = sess.run([inputs_test_op,true_heatmap_test_op])

            train_start_time = time.time()

            # train model
            # _,loss_train = sess.run([train_op,loss_op],
            #                          feed_dict={dataset_handle: train_handle,
            #                          modelbuilder.dropout_keeprate:model_config.output.dropout_keeprate})

            _, loss_train = sess.run(
                [train_op, loss_op],
                feed_dict={
                    inputs: inputs_train,
                    true_heatmap: true_heatmap_train,
                    modelbuilder.dropout_keeprate: model_config.output.dropout_keeprate
                })

            train_elapsed_time = time.time() - train_start_time

            global_step_eval = global_step.eval()
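            # global_step is incremented by opt_op.minimize() on every train_op run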

            if train_config.display_step == 0:
                continue
            elif global_step_eval % train_config.display_step == 0:
                print('[train] curr epochs = %s' % epoch)

                # # test model
                # loss_test = loss_op.eval(feed_dict={dataset_handle: test_handle,
                #                                     modelbuilder.dropout_keeprate: 1.0})
                #
                # loss_test = loss_op.eval( feed_dict={inputs: inputs_valid,
                #                                     true_heatmap: true_heatmap_valid,
                #                                     modelbuilder.dropout_keeprate: 1.0})

                # tf summary
                summary_loss_train = tb_summary_loss_train.eval(
                    feed_dict={
                        inputs: inputs_train,
                        true_heatmap: true_heatmap_train,
                        modelbuilder.dropout_keeprate: 1.0
                    })
                # summary_loss_test  = tb_summary_loss_test.eval( feed_dict={inputs: inputs_valid,
                #                                                             true_heatmap: true_heatmap_valid,
                #                                                             modelbuilder.dropout_keeprate: 1.0})
                #

                # summary_loss_train = tb_summary_loss_train.eval(feed_dict={dataset_handle: train_handle,
                #                                                            modelbuilder.dropout_keeprate:1.0})
                #
                # summary_loss_test  = tb_summary_loss_test.eval(feed_dict={dataset_handle: test_handle,
                #                                                           modelbuilder.dropout_keeprate: 1.0})

                summary_lr = tb_summary_lr.eval()

                file_writer.add_summary(summary_loss_train, global_step_eval)
                # file_writer.add_summary(summary_loss_test,global_step_eval)
                file_writer.add_summary(summary_lr, global_step_eval)

                print('At step = %d, train elapsed_time = %.1f ms' %
                      (global_step_eval, train_elapsed_time * 1000.0))
                print("Training set loss (avg over batch) = %.2f" %
                      loss_train)
                # print("Test set Err loss (total batch)= %.2f %%" % (loss_test))
                print("--------------------------------------------")

        print("Training finished!")

    file_writer.close()
Example #8
                ckpt_save_path = saver.save(sess,
                                            train_config.ckpt_dir +
                                            'model.ckpt',
                                            global_step=global_step_eval)
                tf.logging.info("Global step - %s: Model saved in file: %s" %
                                (global_step_eval, ckpt_save_path))
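                # saver.save() returns the path prefix of the checkpoint it just wrote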

        print("Training finished!")

    file_writer_train.close()
    file_writer_valid.close()


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    train_config = TrainConfig()
    model_config = ModelConfig(setuplog_dir=train_config.setuplog_dir)
    preproc_config = PreprocessingConfig(
        setuplog_dir=train_config.setuplog_dir)

    train_config.send_setuplog_to_gcp_bucket()
    preproc_config.show_info()

    # dataloader instance gen
    dataloader_train, dataloader_valid = \
        [DataLoader(
            is_training=is_training,
            data_dir=DATASET_DIR,
            transpose_input=False,
            train_config=train_config,
            model_config=model_config,
Example #9
        epoch = increment(ops.epoch_var, sess)
        sess.run(tf.assign(ops.batch_var, 0))
        batch = sess.run(ops.batch_var)

    sess.close()


def begin_training(config):
    create_training_ops()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    ops = TrainOps()
    ops.populate(sess)
    train(sess, ops, config)


def continue_training(config):
    sess, ops = load_session(config)
    train(sess, ops, config)


# Run
if __name__ == '__main__':
    config = TrainConfig()
    if config.sample > 0:
        sample(config)
    elif config.should_continue:
        continue_training(config)
    else:
        begin_training(config)
Example #10

# Assumed imports; TF_MODULE_DIR, EXPORT_DIR, COCO_DATALOAD_DIR, DATASET_DIR,
# PROJ_HOME, and HourglassModelBuilder come from the surrounding project.
import os
import sys
from datetime import datetime

import tensorflow as tf


def main():

    sys.path.insert(0, TF_MODULE_DIR)
    sys.path.insert(0, EXPORT_DIR)
    sys.path.insert(0, COCO_DATALOAD_DIR)
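    # make the project's TF modules, export helpers, and COCO loader importable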

    # # configuration file
    # config = configparser.ConfigParser()
    #
    # config_file = "mv2_cpm.cfg"
    # if os.path.exists(config_file):
    #     config.read(config_file)

    # params = {}
    # for _ in config.options("Train"):
    #     params[_] = eval(config.get("Train", _))
    #
    # os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    train_config = TrainConfig()
    model_config = ModelConfig(setuplog_dir=train_config.setuplog_dir)
    preproc_config = PreprocessingConfig(
        setuplog_dir=train_config.setuplog_dir)

    # ================================================
    # =============== dataset pipeline ===============
    # ================================================

    # dataloader instance gen
    dataloader_train, dataloader_valid = \
        [DataLoader(
            is_training=is_training,
            data_dir=DATASET_DIR,
            transpose_input=False,
            train_config=train_config,
            model_config=model_config,
            preproc_config=preproc_config,
            use_bfloat16=False) for is_training in [True, False]]

    dataset_train = dataloader_train.input_fn()
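    # input_fn() builds the tf.data pipeline for the training split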
    # dataset_valid   = dataloader_valid.input_fn()

    data = dataset_train.repeat()
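    # repeat() loops the dataset indefinitely; model.fit below bounds each epoch with steps_per_epoch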
    # data = dataset_train

    # ================================================
    # ============== configure model =================
    # ================================================

    model_builder = HourglassModelBuilder()
    model_builder.build_model()

    model = model_builder.model
    model.summary()

    model.compile(
        optimizer=tf.optimizers.Adam(0.001, epsilon=1e-8),  # or simply 'adam'
        loss=tf.losses.MeanSquaredError(),
        metrics=['accuracy'])  # or tf.metrics.Accuracy

    # ================================================
    # =============== setup output ===================
    # ================================================
    current_time = datetime.now().strftime("%Y%m%d%H%M%S")
    output_path = os.path.join(PROJ_HOME, "outputs")

    # output model file(.hdf5)
    model_path = os.path.join(output_path, "models")
    if not os.path.exists(model_path):
        os.makedirs(model_path)  # makedirs also creates the outputs/ parent if missing
    checkpoint_path = os.path.join(model_path,
                                   "hg_" + current_time + ".hdf5")  #".ckpt"
    check_pointer = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                       save_weights_only=False,
                                                       verbose=1)
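    # save_weights_only=False writes the full model (architecture + weights) to the .hdf5 each epoch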
    # output tensorboard log
    log_path = os.path.join(output_path, "logs")
    log_path = os.path.join(log_path, "hg_" + current_time)
    tensorboard = tf.keras.callbacks.TensorBoard(log_path)

    # ================================================
    # ==================== train! ====================
    # ================================================

    model.fit(data,
              epochs=300,
              steps_per_epoch=100,
              callbacks=[check_pointer, tensorboard])