Example #1
import os
import sys

import util.flags as flags
import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.tfr_helper as tfr_helper
from util.misc import get_commit_id

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # one of {'0', '1', '2'}; sets tensorflow log level, 2=warning
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs until needed


# ========
flags.define_string("data_id", "magic_synthetic_dataset", "select a name unique name for the dataset")
flags.define_boolean("to_log_file", False,
                     "if set, redirect stdout & stderr to a log file in data/synthetic_data/<data_id>/<log-file.log>")
flags.define_string("mode", "val", "select 'val' or 'train'")
flags.define_list('files_train_val', int, "[int(train_files), int(val_files)]",
                  'files to generate for train data/val data', default_value=[1000, 10])
flags.define_integer("samples_per_file", 1000, "set number of samples saved in each file")
flags.define_integer("jobs", -1, "set number of samples saved in each file")

if __name__ == "__main__":
    main_data_out = "data/synthetic_data/{}".format(flags.FLAGS.data_id)
    original_out = sys.stdout
    original_err = sys.stderr
    if flags.FLAGS.to_log_file:
        logfile_path = os.path.join(main_data_out, "log_{}_{}.txt".format(flags.FLAGS.data_id, flags.FLAGS.mode))
        if not os.path.isdir(os.path.dirname(logfile_path)):
            os.makedirs(os.path.dirname(logfile_path))
        print("redirect messages to: {}".format(logfile_path))
        log_file_object = open(logfile_path, 'w')
        sys.stdout = log_file_object
        sys.stderr = log_file_object
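        # Hedged sketch (not part of the original snippet): once the generation run
        # below has finished, the redirection would typically be undone and the log
        # file closed, roughly like:
        #     sys.stdout = original_out
        #     sys.stderr = original_err
        #     log_file_object.close()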
# ===============
flags.define_string('model_type', 'ModelPolygonClassifier',
                    'Model type to use, choose from: ModelTriangle')
flags.define_string('loss_mode', "abs_diff",
                    "'abs_diff' or 'softmax_crossentropy'")
flags.define_string('graph', 'GraphConv2MultiFF',
                    'class name of graph architecture')

flags.define_dict(
    'graph_params', {"edge_classifier": True},
    "key=value pairs defining the configuration of the inference class. see used "
    "'inference'/'encoder'/'decoder'-class for available options. e.g.["
    "mvn (bool), nhidden_lstm_1 (int), nhidden_lstm_2 (int),"
    "nhidden_lstm_3 (int), dropout_lstm_1 (float), dropout_lstm_2 (float), "
    "dropout_lstm_3 (float), relu_clip (float)]")
flags.define_integer('data_len', 3142,
                     'number of F(phi) values saved in one line')
flags.define_integer(
    'max_edges', 6, "Max number of edges; must be known (depends on dataset). "
    "If unknown, pick a value definitely higher than the number of edges in the dataset")


class TrainerPolygon2DClassifier(TrainerBase):
    def __init__(self):
        super(TrainerPolygon2DClassifier, self).__init__()
        self._input_fn_generator = InputFnPolygon2D(self._flags)
        self._model_fn = getattr(models, self._flags.model_type)(self._params)
        self._model_fn.info()
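
# Note: TrainerBase, InputFnPolygon2D and models are project classes/modules whose
# imports are not shown in this excerpt.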


if __name__ == '__main__':
    # logging.basicConfig(level=logging.INFO)
Example #3
import os
import time

import tensorflow as tf

import util.flags as flags
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

flags.define_string('model_dir', '', 'dir containing the "export" folder (i.e. the former checkpoint_dir), or the path to "export" itself')
flags.define_string('val_list', 'lists/dummy_val.lst', '.lst-file specifying the dataset used for validation')
flags.define_integer('val_batch_size', 100, 'number of elements in a val_batch')
flags.define_list('gpu_devices', int, 'space separated list of GPU indices to use', " ", [])
flags.FLAGS.parse_flags()
flags.define_float('gpu_memory', 0.0, 'amount of gpu memory in MB if set above 0')
flags.define_string("debug_dir", "", "specify dir to save debug outputs, saves are model specific ")
flags.define_integer("batch_limiter", -1, "set to positiv value to stop validation after this number of batches")
flags.FLAGS.parse_flags()


class LavBase(object):
    def __init__(self):
        self._flags = flags.FLAGS
        flags.print_flags()
        self.set_run_config()
        self._input_fn_generator = None
        self._val_dataset = None
        self._graph_eval = None
        self._model = None
        self._model_fn_classes = None
        self._params = None
Example #4
import glob
import logging
import os
import time

import tensorflow as tf

import util.flags as flags
from util.misc import get_commit_id, Tee
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Training
# ========
flags.define_integer(
    'epochs', 200, 'Epochs to train. If the checkpoint already has these epochs, '
    'an evaluation and export is done')
flags.define_integer('samples_per_epoch', 100000,
                     'Samples shown to the net per epoch.')
# flags.define_boolean('calc_ema', False, 'Choose whether you want to use EMA (Exponential Moving Average) '
#                                         'weights or not,')
# flags.define_float('clip_grad', 0.0, 'gradient clipping value: for positive values GLOBAL norm clipping is performed,'
#                                      ' for negative values LOCAL norm clipping is performed (default: %(default)s)')
flags.define_string('optimizer', 'DecayOptimizer',
                    'the optimizer used to compute and apply gradients.')
flags.define_dict(
    'optimizer_params', {},
    "key=value pairs defining the configuration of the optimizer.")
flags.define_string('learn_rate_schedule', "decay",
                    'decay, finaldecay, warmupfinaldecay')
flags.define_dict(
    "learn_rate_params", {},
    "key=value pairs defining the configuration of the learning rate schedule.")
Example #5
# Model parameter
# ===============
flags.define_string('model_type', 'ModelTriangle',
                    'Model type to use, choose from: ModelTriangle')
flags.define_string('graph', 'KerasGraphFF3',
                    'class name of graph architecture')
flags.define_dict(
    'graph_params', {},
    "key=value pairs defining the configuration of the inference class. see used "
    "'inference'/'encoder'/'decoder'-class for available options. e.g.["
    "mvn (bool), nhidden_lstm_1 (int), nhidden_lstm_2 (int),"
    "nhidden_lstm_3 (int), dropout_lstm_1 (float), dropout_lstm_2 (float), "
    "dropout_lstm_3 (float), relu_clip (float)]")
flags.define_string('loss_mode', 'point3',
                    'switch loss calculation, see model_fn_2dtriangle.py')
flags.define_integer('data_len', 3142,
                     'number of F(phi) values saved in one line')
flags.define_boolean(
    'complex_phi', False,
    "if set: a=phi.real, b=phi.imag, instead of a=cos(phi) b=sin(phi)-1")
flags.FLAGS.parse_flags()


class Trainer2DTriangle(TrainerBase):
    def __init__(self):
        super(Trainer2DTriangle, self).__init__()
        self._input_fn_generator = InputFn2DT(self._flags)
        self._model_class = getattr(models, self._flags.model_type)
        # self._graph.info()
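
# Note: TrainerBase, InputFn2DT and models are project classes/modules whose imports
# are not shown in this excerpt.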


if __name__ == '__main__':
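    # Hedged sketch (not part of the original, truncated snippet): the entry point
    # presumably mirrors the other trainer scripts, roughly:
    #     logging.basicConfig(level=logging.INFO)
    #     trainer = Trainer2DTriangle()
    #     trainer.train()  # 'train' is an assumed TrainerBase method, not shown here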