Exemplo n.º 1
0
def save_image(dest, img):
    """Write image array *img* to *dest*.

    TIFF paths (``.tif``) are written with tifffile; every other extension
    goes through Pillow.

    :param dest: destination path (str or path-like)
    :param img: image data as a numpy array
    """
    _logger.info("[SAVE] '{}'".format(dest))

    if str(dest).endswith(".tif"):
        tifffile.imwrite(dest, img)
    else:
        # Fix: PIL.Image.Image has no `write()` method — the original call
        # raised AttributeError. `save()` is the Pillow API for writing.
        PIL.Image.fromarray(img).save(dest)
Exemplo n.º 2
0
def read_image(path):
    """Load the image at *path* and return it as a numpy array.

    TIFF files (``.tif``) are read with tifffile; all other formats are
    opened with Pillow and converted to an array.

    :param path: source path (str or path-like)
    :return: image data as a numpy array
    """
    _logger.info("[READ] '{}'".format(path))

    if str(path).endswith(".tif"):
        return tifffile.imread(path)

    return np.asarray(PIL.Image.open(path))
Exemplo n.º 3
0
def task_test_tfrecords_utils():
    """Test the shortcut functions to convert a standard TensorFlow type to a tf.Example-compatible tf.train.Feature
    """

    import numpy as np

    # Apply the user's CUDA settings before touching any TF machinery.
    dl_multi.config.dl_multi.set_cuda_properties(
        glu.get_value(dl_multi.config.settings._SETTINGS, "param_cuda",
                      dict()))

    _logger.info(
        "Test the shortcut functions to convert a standard TensorFlow type to a tf.Example-compatible tf.train.Feature"
    )

    # Bind the shortcut helpers once, then exercise each conversion and
    # print the resulting tf.train.Feature.
    bytes_feature = dl_multi.tfrecords_utils._bytes_feature
    float_feature = dl_multi.tftools.tfutils._float_feature
    int64_feature = dl_multi.tftools.tfutils._int64_feature

    print(bytes_feature(b'test_string'))
    print(bytes_feature(u'test_bytes'.encode('utf-8')))
    print(float_feature(np.exp(1)))
    print(int64_feature(True))
    print(int64_feature(1))
    print(int64_feature(1, True))
Exemplo n.º 4
0
def copy_image(path, dest):
    """Copy the image file at *path* to *dest* (metadata preserved via copy2).

    :param path: source file path
    :param dest: destination file path
    """
    message = "[COPY] '{}'".format(dest)
    _logger.info(message)
    shutil.copy2(path, dest)
def train(param_info, param_log, param_batch, param_save, param_train):
    """Train a multi-task classification/regression model (TF1 graph mode).

    Builds the input pipeline from tfrecords, assembles the model graph,
    then runs a queue-runner-driven training session, checkpointing via
    the project's Saver wrapper.

    :param param_info: dataset/record specification forwarded to the
        tfrecord reader.
    :param param_log: keyword arguments describing the log folder
        (passed to Folder.set_folder).
    :param param_batch: keyword arguments for tf.train.shuffle_batch
        (batch size, capacity, ...).
    :param param_save: keyword arguments for the checkpoint Saver wrapper.
    :param param_train: training settings dict; reads "checkpoint",
        "objective", "tfrecord", "input", "output", "image-size",
        "augmentation", "model", and optionally "multi-task".
    """

    _logger.info(
        "Start training multi task classification and regression model with settings:\nparam_info:\t{}\nparam_log:\t{}\nparam_batch:\t{},\nparam_save:\t{},\nparam_train:\t{}"
        .format(param_info, param_log, param_batch, param_save, param_train))

    #   settings ------------------------------------------------------------
    # -----------------------------------------------------------------------

    # Create the log and checkpoint folders if they do not exist
    checkpoint = dl_multi.utils.general.Folder().set_folder(
        **param_train["checkpoint"])
    # NOTE(review): log_dir is created for its side effect (folder creation)
    # but never read afterwards in this function.
    log_dir = dl_multi.utils.general.Folder().set_folder(**param_log)

    # Number of objectives; a scalar objective counts as a single task.
    # NOTE(review): tasks is computed but unused below — confirm intended.
    tasks = len(param_train["objective"]) if isinstance(
        param_train["objective"], list) else 1

    # Input pipeline: read tfrecords, normalize, then random-crop augment.
    data_io = dl_multi.tftools.tfrecord.tfrecord(param_train["tfrecord"],
                                                 param_info,
                                                 param_train["input"],
                                                 param_train["output"])
    data = data_io.get_data()
    data = dl_multi.tftools.tfutils.preprocessing(data, param_train["input"],
                                                  param_train["output"])
    data = dl_multi.tftools.tfaugmentation.rnd_crop(
        data, param_train["image-size"],
        data_io.get_spec_item_list("channels"),
        data_io.get_spec_item_list("scale"), **param_train["augmentation"])

    # Loss container combining all per-task objectives (optionally weighted
    # by the "multi-task" settings).
    objectives = dl_multi.tftools.tflosses.Losses(param_train["objective"],
                                                  logger=_logger,
                                                  **glu.get_value(
                                                      param_train,
                                                      "multi-task", dict()))

    #   execution -----------------------------------------------------------
    # -----------------------------------------------------------------------

    # Create batches by randomly shuffling tensors. The capacity specifies the maximum of elements in the queue
    data_batch = tf.train.shuffle_batch(data, **param_batch)

    # First element is the input image; the remainder are the task targets.
    input_batch = data_batch[0]
    output_batch = data_batch[1:] if isinstance(data_batch[1:],
                                                list) else [data_batch[1:]]

    with tf.variable_scope("net"):
        # Resolve and apply the model factory named in param_train["model"].
        pred = dl_multi.plugin.get_module_task(
            "models", *param_train["model"])(input_batch)
        # Normalize single-output models to a list of predictions.
        pred = list(pred) if isinstance(pred, tuple) else [pred]

    objectives.update(output_batch, pred)
    # Run UPDATE_OPS (e.g. batch-norm moving averages) before each step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # NOTE(review): AdamWOptimizer(0) sets weight decay to 0 and relies
        # on the optimizer's default learning rate — confirm intended.
        train_step = tf.contrib.opt.AdamWOptimizer(0).minimize(
            objectives.get_loss())

    #   tfsession -----------------------------------------------------------
    # -----------------------------------------------------------------------

    # Operation for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    saver = dl_multi.tftools.tfsaver.Saver(tf.train.Saver(),
                                           **param_save,
                                           logger=_logger)
    with tf.Session() as sess:
        sess.run(init_op)

        # Queue runners feed the shuffle_batch pipeline in background threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # Iteration over epochs
        for epoch in saver:
            stats_epoch, _ = sess.run([objectives.get_stats(), train_step])
            print(objectives.get_stats_str(epoch._index, stats_epoch))
            # Periodic checkpoint (step=True lets the Saver decide frequency).
            saver.save(sess, checkpoint, step=True)

        coord.request_stop()
        coord.join(threads)
        # Final checkpoint after training completes.
        saver.save(sess, checkpoint)
# NOTE(review): this function shadows the builtin `eval`; renaming would
# break existing callers, so it is kept as-is.
def eval(files, param_specs, param_io, param_log, param_eval, param_label,
         param_class):
    """Evaluate a trained multi-task model on a set of images.

    For each input image, runs the model patch-wise (restoring the
    checkpoint per patch), stitches predictions, writes per-task output
    images and logs, and accumulates metrics across the whole set.

    :param files: input file list forwarded to imgio.get_data.
    :param param_specs: image specification forwarded to imgio.get_data.
    :param param_io: I/O settings for reading/writing images.
    :param param_log: keyword arguments describing the log folder.
    :param param_eval: evaluation settings; reads "checkpoint",
        "objective", "input", "output", "truth", "model", "limit",
        "margin", "pad", "stitch", and optionally "sklearn"/"truth_label".
    :param param_label: mapping of category ids to label names.
    :param param_class: label specification forwarded to Metrics.
    """

    # NOTE(review): message says "Start training" — copy-paste from train();
    # left unchanged because log text is runtime behavior.
    _logger.info(
        "Start training multi task classification and regression model with settings:\nparam_io:\t{}\nparam_log:\t{}\nparam_eval:\t{}\nparam_label:\t{}\nparam_class:\t{}"
        .format(param_io, param_log, param_eval, param_label, param_class))

    #   settings ------------------------------------------------------------
    # -----------------------------------------------------------------------
    # img_in: input image items; img_out/log_out: writer callables.
    img_in, img_out, log_out, _ = dl_multi.utils.imgio.get_data(
        files,
        param_specs,
        param_io,
        param_log=param_log,
        param_label=param_label)

    # Create the log and checkpoint folders if they do not exist
    checkpoint = glu.Folder().set_folder(**param_eval["checkpoint"])
    log_file = glu.Folder().set_folder(**param_log)

    # Number of objectives; a scalar objective counts as a single task.
    tasks = len(param_eval["objective"]) if isinstance(param_eval["objective"],
                                                       list) else 1

    # Metrics accumulator over all images and tasks.
    eval_obj = dl_multi.metrics.metrics.Metrics(
        param_eval["objective"],
        len(img_in),
        categories=len(param_label),
        labels=list(param_label.values()),
        label_spec=param_class,
        sklearn=glu.get_value(param_eval, "sklearn", True),
        logger=_logger)

    # Per-image timing statistics.
    time_obj_img = dl_multi.utils.time.MTime(number=len(img_in), label="IMAGE")

    #   execution -------------------------------------------------------
    # -------------------------------------------------------------------
    for item, time_img, eval_img in zip(img_in, time_obj_img, eval_obj):
        # Normalize the input image with the configured method.
        img = dl_multi.plugin.get_module_task(
            "tftools", param_eval["input"]["method"],
            "normalization")(item.spec("image").data,
                             **param_eval["input"]["param"])
        # Normalize the ground-truth image for each task.
        truth = [
            dl_multi.plugin.get_module_task(
                "tftools", param_eval["output"][task]["method"],
                "normalization")(imgtools.expand_image_dim(
                    item.spec(param_eval["truth"][task]).data,
                    **param_eval["output"][task]["param"]))
            for task in range(tasks)
        ]

        # Split the image into patches for memory-bounded inference.
        patches = dl_multi.utils.patches.Patches(img,
                                                 obj=param_eval["objective"],
                                                 categories=len(param_label),
                                                 limit=param_eval["limit"],
                                                 margin=param_eval["margin"],
                                                 pad=param_eval["pad"],
                                                 stitch=param_eval["stitch"],
                                                 logger=_logger)

        for patch in patches:
            patch.status()

            # NOTE(review): the graph is rebuilt and the checkpoint restored
            # for every patch — correct but potentially slow; confirm this
            # per-patch reset is required by the Patches workflow.
            tf.reset_default_graph()
            tf.Graph().as_default()

            # Add a batch dimension for the model.
            data = tf.expand_dims(patch.get_image_patch(), 0)

            with tf.variable_scope("net", reuse=tf.AUTO_REUSE):
                pred = dl_multi.plugin.get_module_task(
                    "models", *param_eval["model"])(data)

            #   tfsession ---------------------------------------------------
            # ---------------------------------------------------------------
            # Operation for initializing the variables.
            init_op = tf.global_variables_initializer()
            saver = tf.train.Saver()

            with tf.Session() as sess:
                sess.run(init_op)
                saver.restore(sess, checkpoint)
                # Freeze the graph to catch accidental op creation.
                sess.graph.finalize()

                model_out = sess.run([pred])
                patch.set_patch([model_out[0]])
                patch.time()
            #   tfsession ---------------------------------------------------
            # ---------------------------------------------------------------

    #   output --------------------------------------------------------------
    # -----------------------------------------------------------------------
        # Optional reference label image used to mask/weight the metrics.
        label = item.spec(glu.get_value(
            param_eval, "truth_label", None)).data if glu.get_value(
                param_eval, "truth_label", None) else None

        # Write the stitched per-task prediction images.
        for task in range(tasks):
            img_out(item.spec(param_eval["truth"][task]).path,
                    patches.get_img(task=task),
                    prefix=param_eval["truth"][task])

        # Update metrics for this image and append the per-task logs.
        eval_img.update(truth,
                        [patches.get_img(task=task) for task in range(tasks)],
                        label=label)
        eval_obj.write_log([
            log_out(item.spec(param_eval["truth"][task]).log,
                    prefix=param_eval["truth"][task]) for task in range(tasks)
        ],
                           write="w+",
                           current=True,
                           verbose=True)
        print(eval_img.print_current_stats())

        time_img.stop()
        _logger.info(time_img.overall())
        _logger.info(time_img.stats())

    # Final aggregated metrics over the whole image set.
    eval_obj.write_log(log_file, verbose=True)
    print(eval_obj)
Exemplo n.º 7
0
def task_print_user_data():
    """Print the user data"""

    # Log, then dump the user-defined data dictionary to stdout.
    _logger.info("Print user's defined data")
    user_data = dl_multi.config.settings.get_data_dict()
    dl_multi.utils.format.print_data(user_data)
Exemplo n.º 8
0
def task_print_user_settings():
    """Print the user settings"""

    # Log, then dump the user-defined settings dictionary to stdout.
    _logger.info("Print user's defined settings")
    user_settings = dl_multi.config.settings._SETTINGS
    dl_multi.utils.format.print_data(user_settings)