Example #1
def generate_tf_record(data_dir,
                       raw_data=False,
                       tfrecord_path="serialized_dataset",
                       num_shards=8):

    teacher_sett = settings.Settings(use_student_settings=False)
    student_sett = settings.Settings(use_student_settings=True)
    dataset_args = teacher_sett["dataset"]
    if dataset_args["name"].lower().strip() == "div2k":
        assert len(data_dir) == 2
        ds = dataset.load_div2k_dataset(data_dir[0],
                                        data_dir[1],
                                        student_sett["hr_size"],
                                        shuffle=True)
    elif raw_data:
        ds = dataset.load_dataset_directory(
            dataset_args["name"], data_dir,
            dataset.scale_down(method=dataset_args["scale_method"],
                               size=student_sett["hr_size"]))
    else:
        ds = dataset.load_dataset(dataset_args["name"],
                                  dataset.scale_down(
                                      method=dataset_args["scale_method"],
                                      size=student_sett["hr_size"]),
                                  data_dir=data_dir)
    to_tfrecord(ds, tfrecord_path, num_shards)
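A minimal usage sketch (the DIV2K directory paths below are hypothetical; the call assumes the teacher config names the "div2k" dataset, so data_dir must be a two-element list in whatever order dataset.load_div2k_dataset expects):

generate_tf_record(
    ["datasets/DIV2K_train_LR_bicubic", "datasets/DIV2K_train_HR"],
    tfrecord_path="serialized_dataset",
    num_shards=8)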
Example #2
 def __init__(
         self,
         teacher,
         discriminator,
         summary_writer,
         summary_writer_2=None,
         model_dir="",
         data_dir="",
         strategy=None):
   """
     Args:
       teacher: Keras Model of pre-trained teacher generator.
                (Generator of ESRGAN)
       discriminator: Keras Model of pre-trained teacher discriminator.
                      (Discriminator of ESRGAN)
       summary_writer: tf.summary.SummaryWriter object for writing
                        summary for Tensorboard.
       data_dir: Location of the stored dataset.
       raw_data: Indicate if data_dir contains Raw Data or TFRecords.
       model_dir: Location to store checkpoints and SavedModel directory.
   """
   self.teacher_generator = teacher
   self.teacher_discriminator = discriminator
   self.teacher_settings = settings.Settings(use_student_settings=False)
   self.student_settings = settings.Settings(use_student_settings=True)
   self.model_dir = model_dir
   self.strategy = strategy
   self.train_args = self.student_settings["train"]
   self.batch_size = self.teacher_settings["batch_size"]
   self.hr_size = self.student_settings["hr_size"]
   # Derive the LR size from the HR size: the spatial dimensions are scaled
   # down by 4x while the channel count stays unchanged (it is multiplied by 4
   # first so the trailing // 4 leaves it intact).
   self.lr_size = tf.unstack(self.hr_size)[:-1]
   self.lr_size.append(tf.gather(self.hr_size, len(self.hr_size) - 1) * 4)
   self.lr_size = tf.stack(self.lr_size) // 4
   self.summary_writer = summary_writer
   self.summary_writer_2 = summary_writer_2
   # Loading TFRecord Dataset
   self.dataset = dataset.load_dataset(
       data_dir,
       lr_size=self.lr_size,
       hr_size=self.hr_size)
   self.dataset = (
       self.dataset.repeat()
       .batch(self.batch_size, drop_remainder=True)
       .prefetch(1024))
   self.dataset = iter(self.strategy.experimental_distribute_dataset(
       self.dataset))
   # Reloading Checkpoint from Phase 2 Training of ESRGAN
   checkpoint = tf.train.Checkpoint(
     G=self.teacher_generator,
     D=self.teacher_discriminator)
   utils.load_checkpoint(
       checkpoint,
       "phase_2",
       basepath=model_dir,
       use_student_settings=False)
Example #3
 def init(self, first_call=True):
     self.settings = settings.Settings(use_student_settings=True)
     self._scale_factor = self.settings["scale_factor"]
     self._scale_value = self.settings["scale_value"]
     rrdb_student_config = self.settings["student_config"]["rrdb_student"]
     rrdb_block = partial(ResidualInResidualBlock, first_call=first_call)
     growth_channels = rrdb_student_config["growth_channels"]
     depthwise_conv = partial(tf.keras.layers.DepthwiseConv2D,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              use_bias=True,
                              padding="same")
     convolution = partial(tf.keras.layers.Conv2D,
                           kernel_size=[3, 3],
                           use_bias=True,
                           strides=[1, 1],
                           padding="same")
     conv_transpose = partial(tf.keras.layers.Conv2DTranspose,
                              kernel_size=[3, 3],
                              use_bias=True,
                              strides=self._scale_value,
                              padding="same")
     self._rrdb_trunk = tf.keras.Sequential(
         [rrdb_block() for _ in range(rrdb_student_config["trunk_size"])])
     self._first_conv = convolution(filters=64)
     self._upsample_layers = {
         "upsample_%d" % index: conv_transpose(filters=growth_channels)
         for index in range(1, self._scale_factor)
     }
     key = "upsample_%d" % self._scale_factor
     self._upsample_layers[key] = conv_transpose(filters=3)
     self._conv_last = depthwise_conv()
     self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
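A minimal sketch of building this student through the model registry, mirroring the warm-up calls used by the export and training scripts in the later examples (the registry key "rrdb_student" is an assumption; the actual name comes from the student config):

student_generator = model.Registry.models["rrdb_student"](first_call=False)
# Run one forward pass on a random low-resolution batch so the layers
# created above build their variables before any checkpoint is restored.
student_generator.predict(tf.random.normal([1, 180, 320, 3]))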
Example #4
    def init(self):
        sett = settings.Settings(use_student_settings=True)
        model_args = sett["student_config"]["vgg_student"]
        self._scale_factor = sett["scale_factor"]
        self._scale_value = sett["scale_value"]
        depth = model_args["trunk_depth"]  # Minimum 2 for scale factor of 4
        depthwise_convolution = partial(tf.keras.layers.DepthwiseConv2D,
                                        kernel_size=[3, 3],
                                        padding="same",
                                        use_bias=model_args["use_bias"])
        conv_transpose = partial(tf.keras.layers.Conv2DTranspose,
                                 kernel_size=[3, 3],
                                 strides=self._scale_value,
                                 padding="same")
        convolution = partial(tf.keras.layers.Conv2D,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              use_bias=model_args["use_bias"],
                              padding="same")
        trunk_depth = depth - self._scale_factor
        self._conv_layers = {
            "conv_%d" % index: depthwise_convolution()
            for index in range(1, trunk_depth + 1)
        }

        self._upsample_layers = {
            "upsample_%d" % index: conv_transpose(filters=32)
            for index in range(1, self._scale_factor)
        }
        self._last_layer = conv_transpose(filters=3)
Example #5
 def init(self):
     self.student_settings = settings.Settings(use_student_settings=True)
     self._scale_factor = self.student_settings["scale_factor"]
     self._scale_value = self.student_settings["scale_value"]
     model_args = self.student_settings["student_config"][
         "residual_student"]
     depth = model_args["trunk_depth"]
     depthwise_convolution = partial(tf.keras.layers.DepthwiseConv2D,
                                     kernel_size=[3, 3],
                                     strides=[1, 1],
                                     padding="same",
                                     use_bias=model_args["use_bias"])
     convolution = partial(tf.keras.layers.Conv2D,
                           kernel_size=[3, 3],
                           strides=[1, 1],
                           padding="same",
                           use_bias=model_args["use_bias"])
     conv_transpose = partial(tf.keras.layers.Conv2DTranspose,
                              kernel_size=[3, 3],
                              strides=self._scale_value,
                              padding="same")
     self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
     self._residual_scale = model_args["residual_scale_beta"]
     self._conv_layers = {
         "conv_%d" % index: depthwise_convolution()
         for index in range(1, depth + 1)
     }
     self._upscale_layers = {
         "upscale_%d" % index: conv_transpose(filters=32)
         for index in range(1, self._scale_factor)
     }
     self._last_layer = conv_transpose(filters=3)
Example #6
 def __init__(self, first_call=True):
     super(ResidualInResidualBlock, self).__init__()
     self.settings = settings.Settings(use_student_settings=True)
     rrdb_config = self.settings["student_config"]["rrdb_student"][
         "rrdb_config"]
     self._rdb_layers = {
         "rdb_%d" % index: ResidualDenseBlock(first_call=first_call)
         for index in range(1, rrdb_config["rdb_units"])
     }
     self._beta = rrdb_config["residual_scale_beta"]
Example #7
def export_tflite(config="", modeldir="", mode="", **kwargs):
    """
    Exports SavedModel(if not present) TFLite of the student generator.
    Args:
      config: Path to config file of the student.
      modeldir: Path to export the SavedModel and the TFLite to.
      mode: Mode of training to export. (Advsersarial /  comparative)
  """
    # TODO (@captain-pool): Fix Quantization and mention them in the args list.

    lazy = lazy_loader.LazyLoader()
    lazy.import_("teacher_imports", parent="libs", return_=False)
    lazy.import_("utils", parent="libs", return_=False)
    lazy.import_("dataset", parent="libs", return_=False)
    globals().update(lazy.import_dict)
    status = None
    sett = settings.Settings(config, use_student_settings=True)
    stats = settings.Stats(os.path.join(sett.path, "stats.yaml"))
    student_name = sett["student_network"]
    student_generator = model.Registry.models[student_name](first_call=False)
    ckpt = tf.train.Checkpoint(student_generator=student_generator)
    logging.info("Initiating Variables. Tracing Function.")
    student_generator.predict(tf.random.normal([1, 180, 320, 3]))
    if stats.get(mode):
        status = utils.load_checkpoint(ckpt,
                                       "%s_checkpoint" % mode,
                                       basepath=modeldir,
                                       use_student_settings=True)
    if not status:
        raise IOError("No checkpoint found to restore")
    saved_model_dir = os.path.join(modeldir, "compressed_esrgan")

    if not tf.io.gfile.exists(os.path.join(saved_model_dir, "saved_model.pb")):
        tf.saved_model.save(student_generator, saved_model_dir)

    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]

    # TODO (@captain-pool): Try to fix quantization
    # Current Error: Cannot Quantize LEAKY_RELU and CONV2D_TRANSPOSE
    # Quantization Code Fragment
    # converter.target_spec.supported_types = [tf.float16]
    # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    # converter.representative_dataset = representative_dataset_gen(
    #      kwargs["calibration_steps"],
    #      kwargs["datadir"],
    #      sett["hr_size"],
    #      [180, 320, 3])

    tflite_model = converter.convert()
    tflite_path = os.path.join(modeldir, "tflite", "compressed_esrgan.tflite")

    with tf.io.gfile.GFile(tflite_path, "wb") as f:
        f.write(tflite_model)
    logging.info("Successfully writen the TFLite to: %s" % tflite_path)
Example #8
def save_checkpoint(checkpoint, name, basepath="", use_student_settings=False):
    """ Saves Checkpoint
      Args:
        checkpoint: tf.train.Checkpoint object to save.
        name: name of the checkpoint to save.
        basepath: base directory where checkpoint should be saved
        student: boolean to indicate if settings of the student should be used.
  """
    sett = settings.Settings(use_student_settings=use_student_settings)
    dir_ = os.path.join(basepath, sett["checkpoint_path"][name])
    logging.info("Saving checkpoint: %s Path: %s" % (name, dir_))
    prefix = os.path.join(dir_, os.path.basename(dir_))
    checkpoint.save(file_prefix=prefix)
Example #9
def checkpoint_exists(names, basepath="", use_student_settings=False):
    sett = settings.Settings(use_student_settings=use_student_settings)
    if tf.nest.is_nested(names):
        if isinstance(names, dict):
            return False
    else:
        names = [names]
    values = []
    for name in names:
        dir_ = os.path.join(basepath, sett["checkpoint_path"][name],
                            "checkpoint")
        values.append(tf.io.gfile.exists(dir_))
    return any(values)
Example #10
def load_checkpoint(checkpoint, name, basepath="", use_student_settings=False):
    """ Restores Checkpoint
      Args:
        checkpoint: tf.train.Checkpoint object to restore.
        name: name of the checkpoint to restore.
        basepath: base directory where checkpoint is located.
        student: boolean to indicate if settings of the student should be used.
  """
    sett = settings.Settings(use_student_settings=use_student_settings)
    dir_ = os.path.join(basepath, sett["checkpoint_path"][name])
    if tf.io.gfile.exists(os.path.join(dir_, "checkpoint")):
        logging.info("Found checkpoint: %s Path: %s" % (name, dir_))
        status = checkpoint.restore(tf.train.latest_checkpoint(dir_))
        return status
    logging.info("No Checkpoint found for %s" % name)
Example #11
 def __init__(self, first_call=True):
     super(ResidualDenseBlock, self).__init__()
     self.settings = settings.Settings(use_student_settings=True)
     rdb_config = self.settings["student_config"]["rrdb_student"][
         "rdb_config"]
     convolution = partial(tf.keras.layers.DepthwiseConv2D,
                           depthwise_initializer="he_normal",
                           bias_initializer="he_normal",
                           kernel_size=[3, 3],
                           strides=[1, 1],
                           padding="same")
     self._first_call = first_call
     self._conv_layers = {
         "conv_%d" % index: convolution()
         for index in range(1, rdb_config["depth"])
     }
     self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
     self._beta = rdb_config["residual_scale_beta"]
Example #12
    avg_min = POST.Evaluation.calc_avg_mse(root_dir)
    # best average values from the Middlebury evaluation page
    # http://vision.middlebury.edu/stereo/eval3/
    best_avg_sparse = 4.61
    best_avg_dense = 12.9
    data = [avg_min, best_avg_sparse, best_avg_dense]
    name = 'avg_mse_bar_plot'
    VISUALIZE.bar_chart(data, root_dir, name)


cm = CAMERA_MODEL.CameraModel(329.115520046, 329.115520046, 320.0, 240.0)
image_loader = IMAGE_LOADER.ImageLoader(images_dir)
vo = VO_PARSER.VoParserSynth(odom_path)
runtime_settings = SETTINGS.Settings(10, 0.0001, 0.8, 10.0, 0.05, 1.0)  # TODO ADD RESCALE FACTOR HERE

ref_count = 10
ref_list = []
for count in range(1, ref_count + 1):
    ref_list.append(key + count)

stereo_pair_builder = STEREO_PAIR_BUILDER.StereoPairBuilder(cm, image_loader, vo, 0, runtime_settings)
runner = RUNNER.Runner(stereo_pair_builder, runtime_settings, key, ref_list, root_dir)

ground_truth = POST.Evaluation.load_ground_truth(depth_path, 480, 640)
inverted_ground_truth = POST.Evaluation.calc_inverse_ground_truth(ground_truth, runtime_settings, 329.115520046)
# VISUALIZE.show_frame(inverted_ground_truth, runtime_settings, path=root_dir, name='ground_truth', cmap='nipy_spectral')
runner.run(VISUALIZE.visualize_enum.SHOW_DEPTH, inverted_ground_truth,
           normalize=True, calc_error_metrics=False,
           post_process=True, regularize=True,
           show_frame=True, skip_guards=False)
Example #13
def train_and_export(**kwargs):
    """ Train and Export Compressed ESRGAN
      Args:
        config: path to config file.
        logdir: path to logging directory
        modeldir: Path to store the checkpoints and exported model.
        datadir: Path to custom data directory.
        manual: Boolean to indicate if `datadir` contains Raw Files(True) / TFRecords (False)
  """
    lazy = lazy_loader.LazyLoader()

    student_settings = settings.Settings(kwargs["config"],
                                         use_student_settings=True)

    # Lazy importing dependencies from teacher
    lazy.import_("teacher_imports", parent="libs", return_=False)
    lazy.import_("teacher", parent="libs.models", return_=False)
    lazy.import_("train", parent="libs", return_=False)
    lazy.import_("utils", parent="libs", return_=False)
    globals().update(lazy.import_dict)
    tf.random.set_seed(10)
    teacher_settings = settings.Settings(student_settings["teacher_config"],
                                         use_student_settings=False)
    stats = settings.Stats(os.path.join(student_settings.path, "stats.yaml"))
    strategy = utils.SingleDeviceStrategy()

    if kwargs["tpu"]:
        cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            kwargs["tpu"])
        tf.config.experimental_connect_to_host(cluster_resolver.get_master())
        tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
        strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)

    device_name = utils.assign_to_worker(kwargs["tpu"])

    with tf.device(device_name), strategy.scope():
        summary_writer = tf.summary.create_file_writer(
            os.path.join(kwargs["logdir"], "student"))
        teacher_summary_writer = tf.summary.create_file_writer(
            os.path.join(kwargs["logdir"], "teacher"))

        teacher_generator = teacher.generator(out_channel=3, first_call=False)
        teacher_discriminator = teacher.discriminator(
            batch_size=teacher_settings["batch_size"])

        student_generator = (
            model.Registry.models[student_settings["student_network"]]())

        hr_size = tf.cast(
            tf.convert_to_tensor([1] + student_settings['hr_size']),
            tf.float32)
        lr_size = tf.cast(hr_size * tf.convert_to_tensor([1, 1 / 4, 1 / 4, 1]),
                          tf.int32)

        logging.debug("Initializing Convolutions")
        student_generator.unsigned_call(tf.random.normal(lr_size))

        trainer = train.Trainer(teacher_generator,
                                teacher_discriminator,
                                summary_writer,
                                summary_writer_2=teacher_summary_writer,
                                model_dir=kwargs["modeldir"],
                                data_dir=kwargs["datadir"],
                                strategy=strategy)
        phase_name = None
        if kwargs["type"].lower().startswith("comparative"):
            trainer.train_comparative(student_generator,
                                      export_only=stats.get("comparative")
                                      or kwargs["export_only"])
            if not kwargs["export_only"]:
                stats["comparative"] = True
        elif kwargs["type"].lower().startswith("adversarial"):
            trainer.train_adversarial(student_generator,
                                      export_only=stats.get("adversarial")
                                      or kwargs["export_only"])
            if not kwargs["export_only"]:
                stats["adversarial"] = True
    # Tracing the graph once so the exported SavedModel has an input signature
    _ = student_generator.predict(tf.random.normal([1, 180, 320, 3]))
    tf.saved_model.save(student_generator,
                        os.path.join(kwargs["modeldir"], "compressed_esrgan"))
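A minimal sketch of invoking train_and_export (all paths and values below are hypothetical):

train_and_export(
    config="config/config.yaml",
    logdir="logdir",
    modeldir="modeldir",
    datadir="serialized_dataset",
    tpu="",                # empty string: train on the default single device
    type="comparative",    # or "adversarial"
    export_only=False)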
Example #14
# key = 290
# key = 320
# key = 340
# key = 400
# key = 480
# key = 482
# key = 910

cm = CAMERA_MODEL.CameraModel.load_from_file(
    root_dir + dataset + type + sequence + intrinsics_dir + 'intrinsics.txt',
    key)
image_loader = IMAGE_LOADER.ImageLoader(root_dir + dataset + type + sequence +
                                        images_dir)
vo = VO_PARSER.VoParserSynth(root_dir, dataset + type + sequence + odom_path)
runtime_settings = SETTINGS.Settings(
    10, 0.0001, 0.9, 5, 0.05,
    -1.0)  # -1.0 for z (or general motion eq.) TODO investigate

ref_count = 8
step = 1
ref_list = []
for count in range(step, ref_count + 1, step):
    ref_list.append(key + count)
stereo_pair_builder = STEREO_PAIR_BUILDER.StereoPairBuilder(
    cm, image_loader, vo, 0, runtime_settings)

runner = RUNNER.Runner(stereo_pair_builder, runtime_settings, key, ref_list,
                       root_dir)

# ground_truth = POST.Evaluation.load_ground_truth(depth_file, 424, 512,flip_across_y=True)
# inverted_ground_truth = POST.Evaluation.calc_inverse_ground_truth(ground_truth, runtime_settings,max_thresh=2500,isIpad=True)
Example #15
File: main.py Project: shagilhmx/GSOC
def main(**kwargs):
    student_settings = settings.Settings("../E2_ESRGAN/config.yaml",
                                         student=True)
    teacher_settings = settings.Settings("config/config.yaml", student=False)
Example #16
root_dir = 'datasets/synth/'
images_dir = root_dir + 'data/img'
odom_path = root_dir + 'info/groundtruth.txt'
mse_dir = root_dir + 'mse/'

key = 1

depth_folder = root_dir + 'data/depth/'
depth_file = IMAGE_LOADER.ImageLoader.parse_id(key) + '.depth'
depth_path = depth_folder + depth_file

cm = CAMERA_MODEL.CameraModel(329.115520046, 329.115520046, 320.0, 240.0)
image_loader = IMAGE_LOADER.ImageLoader(images_dir)
vo = VO_PARSER.VoParserSynth(odom_path)
runtime_settings = SETTINGS.Settings(20, 0.0001, 0.8, 1.0, 0.05, 1.0)

ref_count = 1
ref_list = []
for count in range(1, ref_count + 1):
    ref_list.append(key + count)


def load_and_plot_mse():
    mse_list = POST.Evaluation.load_data_list(mse_dir + str(key) + '/' +
                                              'mse_list.txt')
    name = str(key) + '_mse_line_plot'
    VISUALIZE.line_graph(mse_list, root_dir, name)


def load_and_bar_chart_mse():
Example #17
import sys
import os
from libs import settings
_teacher_directory = settings.Settings(use_student_settings=True)[
    "teacher_directory"]
TEACHER_DIR = os.path.abspath(_teacher_directory)
# Compare against the same absolute path that gets inserted, so repeated
# imports do not prepend duplicate entries.
if TEACHER_DIR not in sys.path:
  sys.path.insert(0, TEACHER_DIR)
Example #18
# key = 1400076116786324 # keyframe

# ref_6 = 1400076139533227
# ref_5 = 1400076139470738
# ref_4 = 1400076139408250
# ref_3 = 1400076139345758
# ref_2 = 1400076139283268
# ref_1 = 1400076139220778
# key = 1400076139158288

camera_loader = CAMERA_MODEL_LOADER.CameraModelLoader(models_dir, cam_type)
cm = camera_loader.load()
image_loader = IMAGE_LOADER.ImageLoader(images_dir)
vo = VO_PARSER.VoParser(odom_path)
runtime_settings = SETTINGS.Settings(cm.focal_length[0], cm.focal_length[1],
                                     cm.principal_point[0],
                                     cm.principal_point[1], 12, 0.01, 0.3, 1.0)

ref_list = [ref_1, ref_2, ref_3]

stereo_pair_builder = STEREO_PAIR_BUILDER.StereoPairBuilder(
    cm, image_loader, vo, 25, runtime_settings)
stereo_pairs = stereo_pair_builder.generate_stereo_pairs(key,
                                                         ref_list,
                                                         invert=True)

iteration = 1
for stereo_pair in stereo_pairs:
    #libs.visualize.show_frame(stereo_pair.key_frame.get_image(),root_dir,'keyframe')
    it_string = str(iteration)
    depth_estimation_re = DEPTH_ESTIMATION.DepthEstimation(
Example #19
# key = 250
# key = 360
# key = 456
# key = 108
# key = 145
# key = 1152

# cm = CAMERA_MODEL.CameraModel(718.8560000000, 718.8560000000, 607.1928000000, 185.2157000000)
# cm = CAMERA_MODEL.CameraModel(718.8560000000*0.5157131346, 718.8560000000, 320, 185.2157000000)
cm = CAMERA_MODEL.CameraModel(718.8560000000 * 0.5157131346,
                              718.8560000000 * 1.2765957447, 320,
                              185.2157000000 * 1.2765957447)  #640x480
image_loader = IMAGE_LOADER.ImageLoader(root_dir + images_dir + left_cam_dir)
vo = VO_PARSER.VoParserSynth(root_dir, sequence)
runtime_settings = SETTINGS.Settings(cm.focal_length[0], cm.focal_length[1],
                                     cm.principal_point[0],
                                     cm.principal_point[1], 40, 0.0001, 0.8,
                                     1.0, 0.05)

ref_count = 2
ref_list = []
for count in range(1, ref_count + 1):
    ref_list.append(key + count)

stereo_pair_builder = STEREO_PAIR_BUILDER.StereoPairBuilder(
    cm, image_loader, vo, 0, runtime_settings)
runner = RUNNER.Runner(stereo_pair_builder, runtime_settings, key, ref_list,
                       root_dir)

runner.run(VISUALIZE.visualize_enum.SHOW_DEPTH,
           normalize=True,
           regularize=True,