def load_imagenet(ckpt_path):
    """Initialize the network parameters for our xception-lite using ImageNet pretrained weight
    Args:
    Path to the checkpoint
    Returns:
    Function that takes a session and initializes the network

    # ckpt_path: the full path to the model checkpoint (pre-trained model)
    # vars_corresp: A list of `Variable` objects or a dictionary mapping names in the
    # checkpoint (pre-trained model) to the corresponding variables to initialize.
    """

    reader = tf.train.NewCheckpointReader(ckpt_path)
    var_to_shape_map = reader.get_variable_to_shape_map()

    vars_corresp = dict()

    for v in var_to_shape_map:
        # Skip BatchNorm gamma, depthwise BatchNorm and optimizer (Momentum)
        # slot variables; only the remaining weights are transferred.
        skip = 'gamma' in v or 'depthwise/BatchNorm' in v or 'Momentum' in v
        if "entry_flow" in v and not skip:
            vars_corresp[v] = slim.get_model_variables('xfcn/' + v)[0]
        elif "middle_flow" in v and not skip:
            # Only units 1 and 2 of the middle flow are transferred.
            for i in range(1, 3):
                if 'unit_{}/'.format(i) in v:
                    vars_corresp[v] = slim.get_model_variables('xfcn/' + v)[0]
        elif "exit_flow" in v and not skip:
            if 'block1/' in v:
                vars_corresp[v] = slim.get_model_variables('xfcn/' + v)[0]
        elif 'shortcut' in v and 'Momentum' not in v and 'gamma' not in v:
            vars_corresp[v] = slim.get_model_variables('xfcn/' + v)[0]

    init_fn = slim.assign_from_checkpoint_fn(ckpt_path, vars_corresp)

    return init_fn
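
The same renaming pattern generalizes: read the checkpoint's variable names, map each one to the (possibly re-scoped) in-graph variable it should fill, and hand the mapping to slim.assign_from_checkpoint_fn. A minimal sketch, assuming tensorflow.compat.v1 and tf_slim are available; make_init_fn and the 'xfcn/' prefix are illustrative names, not part of the snippet above:

import tensorflow.compat.v1 as tf
import tf_slim as slim

def make_init_fn(ckpt_path, scope_prefix='xfcn/'):
    # Map every checkpoint variable name to its re-scoped in-graph variable.
    reader = tf.train.NewCheckpointReader(ckpt_path)
    vars_corresp = {}
    for name in reader.get_variable_to_shape_map():
        matches = slim.get_model_variables(scope_prefix + name)
        if matches:  # skip checkpoint entries with no in-graph counterpart
            vars_corresp[name] = matches[0]
    return slim.assign_from_checkpoint_fn(ckpt_path, vars_corresp)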
Example #2
def _proc_initializer():
    """Runs initializer for the process. """
    logging.info('Proc initializer is called, pid=%i', os.getpid())

    config = load_model_proto(FLAGS.detection_pipeline_proto)
    model = model_builder.build(config, is_training=False)

    image = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)
    preprocessed_inputs, true_image_shapes = model.preprocess(
        tf.cast(tf.expand_dims(image, 0), tf.float32))
    predictions = model.predict(preprocessed_inputs=preprocessed_inputs,
                                true_image_shapes=true_image_shapes)
    num_proposals = tf.squeeze(predictions['num_proposals'])
    proposals = tf.squeeze(predictions['proposal_boxes_normalized'], 0)
    proposal_features = tf.reduce_mean(predictions['box_classifier_features'],
                                       [1, 2])
    init_fn = slim.assign_from_checkpoint_fn(FLAGS.detection_checkpoint_file,
                                             tf.global_variables())

    global tf_session
    global tf_inputs
    global tf_outputs

    session_config = tf.ConfigProto()  # avoid shadowing the model proto above
    session_config.allow_soft_placement = True
    session_config.gpu_options.allow_growth = True

    tf_session = tf.Session(config=session_config)
    tf_inputs = image
    tf_outputs = [num_proposals, proposals, proposal_features]

    tf_session.run(tf.global_variables_initializer())
    init_fn(tf_session)
    uninitialized_variable_names = tf.report_uninitialized_variables()
    assert len(tf_session.run(uninitialized_variable_names)) == 0
Example #3
def _get_init_fn(checkpoint_path, train_dir):
    if checkpoint_path is None:
        return None
    if tf.train.latest_checkpoint(train_dir):
        tf.compat.v1.logging.info(
            'Ignoring --checkpoint_path because a checkpoint already exists in %s'
            % train_dir)
        return None

    exclusions = []
    if checkpoint_exclude_scopes:  # assumed to be defined at module level
        exclusions = [
            scope.strip() for scope in checkpoint_exclude_scopes.split(',')
        ]

    # TODO(sguada) variables.filter_variables()
    variables_to_restore = []
    for var in slim.get_model_variables():
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                break
        else:
            variables_to_restore.append(var)

    if tf.io.gfile.isdir(checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)

    tf.compat.v1.logging.info('Fine-tuning from %s' % checkpoint_path)

    return slim.assign_from_checkpoint_fn(checkpoint_path,
                                          variables_to_restore,
                                          ignore_missing_vars=True)
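
The for/else idiom above keeps a variable only when no exclusion scope matched (the else clause runs when the inner loop finishes without a break). The same filter as a standalone sketch, assuming tf_slim; filter_by_scope and the example scopes are illustrative:

import tf_slim as slim

def filter_by_scope(variables, exclude_scopes):
    kept = []
    for var in variables:
        for scope in exclude_scopes:
            if var.op.name.startswith(scope):
                break  # excluded; drop this variable
        else:  # no exclusion matched
            kept.append(var)
    return kept

# variables_to_restore = filter_by_scope(
#     slim.get_model_variables(), ['InceptionV3/Logits', 'InceptionV3/AuxLogits'])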
Example #4
def freeze_graph(model,
                 checkpoint_path,
                 tensor_shape,
                 moving_average_decay=0.9999):
    """Converts model ckpts."""
    logging.info('Processing ckpt=%s, tensor_shape=%s', checkpoint_path,
                 tensor_shape)
    out_node = 'InceptionV3/Predictions/Reshape_1'
    in_node = 'input'

    inp = tf.compat.v1.placeholder(shape=[1] + tensor_shape,
                                   dtype=tf.float32,
                                   name=in_node)
    _ = model.create(inp, num_classes=3, is_training=False)

    ema = tf.train.ExponentialMovingAverage(moving_average_decay)
    variables_to_restore = ema.variables_to_restore()

    load_ema = slim.assign_from_checkpoint_fn(checkpoint_path,
                                              variables_to_restore,
                                              ignore_missing_vars=True)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        load_ema(sess)

        graph_def = sess.graph.as_graph_def()
        graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess, graph_def, [out_node])
        graph_def = optimize_for_inference_lib.optimize_for_inference(
            graph_def, [in_node], [out_node], tf.float32.as_datatype_enum)

        with tf.io.gfile.GFile('model.pb', 'wb') as f:
            f.write(graph_def.SerializeToString())
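
Once frozen, model.pb can be served without any checkpoint machinery. A hedged sketch of loading it back, reusing the in_node/out_node names from above; the 299x299 input is an assumption based on the InceptionV3 default:

import numpy as np
import tensorflow.compat.v1 as tf

graph_def = tf.GraphDef()
with tf.io.gfile.GFile('model.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    inp = graph.get_tensor_by_name('input:0')
    out = graph.get_tensor_by_name('InceptionV3/Predictions/Reshape_1:0')
    with tf.Session(graph=graph) as sess:
        dummy = np.zeros([1, 299, 299, 3], np.float32)  # must match tensor_shape
        probs = sess.run(out, feed_dict={inp: dummy})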
Example #5
def _get_init_fn():
  """Returns a function to initialize model from a checkpoint."""
  if FLAGS.checkpoint_path is None:
    return None

  # Warn the user if a checkpoint exists in the train_dir. Then we'll be
  # ignoring the checkpoint anyway.
  if tf.train.latest_checkpoint(FLAGS.train_dir):
    tf.logging.info(
        'Ignoring --checkpoint_path because a checkpoint already exists in %s',
        FLAGS.train_dir)
    return None

  exclusions = []
  if FLAGS.checkpoint_exclude_scopes:
    exclusions = [scope.strip()
                  for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

  variables_to_restore = []
  for var in slim.get_model_variables():
    for exclusion in exclusions:
      if var.op.name.startswith(exclusion):
        break
    else:
      variables_to_restore.append(var)

  if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
  else:
    checkpoint_path = FLAGS.checkpoint_path

  tf.logging.info('Fine-tuning from %s', checkpoint_path)

  return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)
Example #6
def build_deeplabv3(inputs, num_classes, preset_model='DeepLabV3-Res50', weight_decay=1e-5, is_training=True, pretrained_dir="models"):
    """
    Builds the DeepLabV3 model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      DeepLabV3 model
    """

    if preset_model == 'DeepLabV3-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope='resnet_v2_50'
            # DeepLabV3 requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'DeepLabV3-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope='resnet_v2_101'
            # DeepLabV3 requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'DeepLabV3-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope='resnet_v2_152'
            # DeepLabV3 requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))


    label_size = tf.shape(inputs)[1:3]

    net = AtrousSpatialPyramidPoolingModule(end_points['pool4'])

    net = Upsampling(net, label_size)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')

    return net, init_fn
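
A usage sketch for the builder above: initialize all variables first, then let init_fn overwrite the backbone with the pre-trained weights. The input size and class count are illustrative:

import tensorflow.compat.v1 as tf

inputs = tf.placeholder(tf.float32, [None, 512, 512, 3])
net, init_fn = build_deeplabv3(inputs, num_classes=21,
                               preset_model='DeepLabV3-Res101')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)  # restores only the resnet_v2_101 scope

Note that, unlike build_frontend further below, this builder does not pass ignore_missing_vars=True, so every backbone variable must be present in the checkpoint.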
Example #7
def _get_init_fn():
    """Returns a function run by the chief worker to warm-start the training.

  Note that the init_fn is only run when initializing the model during the very
  first global step.

  Returns:
    An init function run by the supervisor.
  """
    if FLAGS.checkpoint_path is None:
        return None

    # Warn the user if a checkpoint exists in the train_dir. Then we'll be
    # ignoring the checkpoint anyway.
    if tf.train.latest_checkpoint(FLAGS.train_dir):
        if not FLAGS.continue_training:
            raise ValueError(
                'continue_training set to False but there is a checkpoint in the training_dir.'
            )
        tf.compat.v1.logging.info(
            'Ignoring --checkpoint_path because a checkpoint already exists in %s'
            % FLAGS.train_dir)
        return None

    exclusions = []
    if FLAGS.checkpoint_exclude_scopes:
        exclusions = [
            scope.strip()
            for scope in FLAGS.checkpoint_exclude_scopes.split(',')
        ]

    # TODO(sguada) variables.filter_variables()
    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)

    if tf.io.gfile.isdir(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path

    tf.compat.v1.logging.info('Fine-tuning from %s' % checkpoint_path)

    return slim.assign_from_checkpoint_fn(
        checkpoint_path,
        variables_to_restore,
        ignore_missing_vars=FLAGS.ignore_missing_vars)
Example #8
        def scaffold_fn():
            """Scaffold function."""
            warm_start_hparams = self.hparams.warm_start
            if FLAGS.reference_ckpt:
                with tf.name_scope('warm_start'):
                    include_pattern_list = []
                    if warm_start_hparams.warm_start_encoder:
                        include_pattern_list.append('ContrastiveModel/Encoder')
                    if warm_start_hparams.warm_start_projection_head:
                        include_pattern_list.append(
                            'ContrastiveModel/ProjectionHead')
                    if warm_start_hparams.warm_start_classifier:
                        include_pattern_list.append(
                            'ContrastiveModel/ClassificationHead')
                    # This needs to be updated if new optimizers are added.
                    exclude_pattern_list = [
                        'Optimizer', 'Momentum', 'RMSProp', 'LARSOptimizer'
                    ]
                    variables = filter(
                        lambda v: var_matches_patterns(v,
                                                       include_pattern_list),
                        tf.global_variables())
                    variables = filter(
                        lambda v: not var_matches_patterns(
                            v, exclude_pattern_list), variables)
                    var_init_fn = slim.assign_from_checkpoint_fn(
                        tf.train.latest_checkpoint(FLAGS.reference_ckpt),
                        list(variables),
                        ignore_missing_vars=(
                            warm_start_hparams.ignore_missing_checkpoint_vars),
                        reshape_variables=True)

            def init_fn(scaffold, sess):
                del scaffold  # unused.

                if FLAGS.reference_ckpt:
                    var_init_fn(sess)

            return tf.train.Scaffold(init_fn=init_fn)
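
var_matches_patterns is not shown in this snippet. A plausible stand-in, labeled hypothetical since the real project may match names differently, is a simple substring test:

def var_matches_patterns(var, patterns):
    # Hypothetical helper: true if any pattern occurs in the variable's name.
    return any(pattern in var.name for pattern in patterns)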
Example #9
def get_checkpoint_init_fn():
    """Returns the checkpoint init_fn if the checkpoint is provided."""
    if FLAGS.fine_tune_checkpoint:
        variables_to_restore = slim.get_variables_to_restore()
        global_step_reset = tf.assign(tf.train.get_or_create_global_step(), 0)
        # When restoring from a floating point model, the min/max values for
        # quantized weights and activations are not present.
        # We instruct slim to ignore variables that are missing during restoration
        # by setting ignore_missing_vars=True
        slim_init_fn = slim.assign_from_checkpoint_fn(
            FLAGS.fine_tune_checkpoint,
            variables_to_restore,
            ignore_missing_vars=True)

        def init_fn(sess):
            slim_init_fn(sess)
            # If we are restoring from a floating point model, we need to initialize
            # the global step to zero for the exponential decay to result in
            # reasonable learning rates.
            sess.run(global_step_reset)

        return init_fn
    else:
        return None
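
A minimal usage sketch for get_checkpoint_init_fn: run it once after global initialization so the restored weights take effect and the global step reset is applied:

import tensorflow.compat.v1 as tf

init_fn = get_checkpoint_init_fn()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if init_fn is not None:
        init_fn(sess)  # restores weights, then zeroes the global step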
Example #10
    def __init__(self,
                 network_name,
                 checkpoint_path,
                 batch_size,
                 image_size=None):
        self._network_name = network_name
        self._checkpoint_path = checkpoint_path
        self._batch_size = batch_size
        self._image_size = image_size
        self._layer = {}

        self._global_step = tf.train.get_or_create_global_step()

        # Retrieve the function that returns logits and endpoints.
        # (`num_classes` is assumed to be defined at module level.)
        self._network_fn = nets_factory.get_network_fn(self._network_name,
                                                       num_classes=num_classes,
                                                       is_training=False)

        # Retrieve the model scope from network factory
        self._model_scope = nets_factory.arg_scopes_map[self._network_name]

        # Fetch the default image size
        self._image_size = self._network_fn.default_image_size
        self._filename_queue = tf.FIFOQueue(100000, [tf.string],
                                            shapes=[[]],
                                            name="filename_queue")
        self._pl_image_files = tf.placeholder(tf.string,
                                              shape=[None],
                                              name="image_file_list")
        self._enqueue_op = self._filename_queue.enqueue_many(
            [self._pl_image_files])
        self._num_in_queue = self._filename_queue.size()

        self._batch_from_queue, self._batch_filenames = self._preproc_image_batch(
            self._batch_size, num_threads=4)

        #self._image_batch = tf.placeholder_with_default(
        #        self._batch_from_queue, shape=[self._batch_size, _STRIDE, self._image_size, self._image_size, 3])
        self._image_batch = tf.placeholder(
            tf.float32, [batch_size, _STRIDE, image_size, image_size, 3])

        # Retrieve the logits and network endpoints (for extracting activations)
        # Note: endpoints is a dictionary with endpoints[name] = tf.Tensor
        self._logits, self._endpoints = self._network_fn(self._image_batch)

        # Find the checkpoint file
        checkpoint_path = self._checkpoint_path
        if tf.gfile.IsDirectory(self._checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(self._checkpoint_path)

        # Load pre-trained weights into the model
        variables_to_restore = slim.get_variables_to_restore()
        restore_fn = slim.assign_from_checkpoint_fn(checkpoint_path,
                                                    variables_to_restore)

        # Start the session and load the pre-trained weights
        self._sess = tf.Session()
        restore_fn(self._sess)

        # Local variables initializer, needed for queues etc.
        self._sess.run(tf.local_variables_initializer())

        # Managing the queues and threads
        self._coord = tf.train.Coordinator()
        self._threads = tf.train.start_queue_runners(coord=self._coord,
                                                     sess=self._sess)
Example #11
def main(unused_argv=None):
    with tf.Graph().as_default():
        # Force all input processing onto CPU in order to reserve the GPU for the
        # forward inference and back-propagation.
        device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
        with tf.device(
                tf.train.replica_device_setter(FLAGS.ps_tasks,
                                               worker_device=device)):
            inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                    FLAGS.image_size)
            # Load style images and select one at random (for each graph execution, a
            # new random selection occurs)
            _, style_labels, style_gram_matrices = image_utils.style_image_inputs(
                os.path.expanduser(FLAGS.style_dataset_file),
                batch_size=FLAGS.batch_size,
                image_size=FLAGS.image_size,
                square_crop=True,
                shuffle=True)

        with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
            # Process style and weight flags
            num_styles = FLAGS.num_styles
            if FLAGS.style_coefficients is None:
                style_coefficients = [1.0 for _ in range(num_styles)]
            else:
                style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
            if len(style_coefficients) != num_styles:
                raise ValueError(
                    'number of style coefficients differs from number of styles'
                )
            content_weights = ast.literal_eval(FLAGS.content_weights)
            style_weights = ast.literal_eval(FLAGS.style_weights)

            # Rescale style weights dynamically based on the current style image
            style_coefficient = tf.gather(tf.constant(style_coefficients),
                                          style_labels)
            style_weights = dict((key, style_coefficient * value)
                                 for key, value in style_weights.items())

            # Define the model
            stylized_inputs = model.transform(inputs,
                                              alpha=FLAGS.alpha,
                                              normalizer_params={
                                                  'labels': style_labels,
                                                  'num_categories': num_styles,
                                                  'center': True,
                                                  'scale': True
                                              })

            # Compute losses.
            total_loss, loss_dict = learning.total_loss(
                inputs, stylized_inputs, style_gram_matrices, content_weights,
                style_weights)
            for key, value in loss_dict.items():
                tf.summary.scalar(key, value)

            instance_norm_vars = [
                var for var in slim.get_variables('transformer')
                if 'InstanceNorm' in var.name
            ]
            other_vars = [
                var for var in slim.get_variables('transformer')
                if 'InstanceNorm' not in var.name
            ]

            # Function to restore VGG16 parameters.
            init_fn_vgg = slim.assign_from_checkpoint_fn(
                vgg.checkpoint_file(), slim.get_variables('vgg_16'))

            # Function to restore N-styles parameters.
            init_fn_n_styles = slim.assign_from_checkpoint_fn(
                os.path.expanduser(FLAGS.checkpoint), other_vars)

            def init_fn(session):
                init_fn_vgg(session)
                init_fn_n_styles(session)

            # Set up training.
            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
            train_op = slim.learning.create_train_op(
                total_loss,
                optimizer,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                variables_to_train=instance_norm_vars,
                summarize_gradients=False)

            # Run training.
            slim.learning.train(train_op=train_op,
                                logdir=os.path.expanduser(FLAGS.train_dir),
                                master=FLAGS.master,
                                is_chief=FLAGS.task == 0,
                                number_of_steps=FLAGS.train_steps,
                                init_fn=init_fn,
                                save_summaries_secs=FLAGS.save_summaries_secs,
                                save_interval_secs=FLAGS.save_interval_secs)
Example #12
model = get_model('inception_v3')

out_node = 'InceptionV3/Predictions/Reshape_1'
in_node = 'input'

inp = tf.compat.v1.placeholder(
    shape=[1, args.height, args.width, args.channels],
    dtype=tf.float32,
    name=in_node)
b = model.create(inp, num_classes=3, is_training=False)

ema = tf.train.ExponentialMovingAverage(args.moving_average_decay)
variables_to_restore = ema.variables_to_restore()

load_ema = slim.assign_from_checkpoint_fn(args.checkpoint,
                                          variables_to_restore,
                                          ignore_missing_vars=True)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    load_ema(sess)

    graph_def = sess.graph.as_graph_def()
    graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, graph_def, [out_node])
    graph_def = optimize_for_inference_lib.optimize_for_inference(
        graph_def, [in_node], [out_node], tf.float32.as_datatype_enum)

    with tf.io.gfile.GFile(args.output, 'wb') as f:
        f.write(graph_def.SerializeToString())
Example #13
def build_frontend(inputs,
                   frontend,
                   is_training=True,
                   pretrained_dir="models",
                   num_classes=None):
    if frontend == 'ResNet50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, end_points = resnet_v2.resnet_v2_50(
                inputs,
                is_training=is_training,
                scope='resnet_v2_50',
                num_classes=num_classes)
            frontend_scope = 'resnet_v2_50'
            init_fn = slim.assign_from_checkpoint_fn(
                model_path=os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'),
                var_list=slim.get_model_variables('resnet_v2_50'),
                ignore_missing_vars=True)
    elif frontend == 'ResNet101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, end_points = resnet_v2.resnet_v2_101(
                inputs,
                is_training=is_training,
                scope='resnet_v2_101',
                num_classes=num_classes)
            frontend_scope = 'resnet_v2_101'
            init_fn = slim.assign_from_checkpoint_fn(
                model_path=os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'),
                var_list=slim.get_model_variables('resnet_v2_101'),
                ignore_missing_vars=True)
    elif frontend == 'ResNet152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, end_points = resnet_v2.resnet_v2_152(
                inputs,
                is_training=is_training,
                scope='resnet_v2_152',
                num_classes=num_classes)
            frontend_scope = 'resnet_v2_152'
            init_fn = slim.assign_from_checkpoint_fn(
                model_path=os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'),
                var_list=slim.get_model_variables('resnet_v2_152'),
                ignore_missing_vars=True)
    elif frontend == 'MobileNetV2':
        with slim.arg_scope(mobilenet_v2.training_scope()):
            logits, end_points = mobilenet_v2.mobilenet(
                inputs,
                is_training=is_training,
                scope='mobilenet_v2',
                base_only=True)
            frontend_scope = 'mobilenet_v2'
            init_fn = slim.assign_from_checkpoint_fn(
                model_path=os.path.join(pretrained_dir, 'mobilenet_v2.ckpt'),
                var_list=slim.get_model_variables('mobilenet_v2'),
                ignore_missing_vars=True)
    elif frontend == 'InceptionV4':
        with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
            logits, end_points = inception_v4.inception_v4(
                inputs, is_training=is_training, scope='inception_v4')
            frontend_scope = 'inception_v4'
            init_fn = slim.assign_from_checkpoint_fn(
                model_path=os.path.join(pretrained_dir, 'inception_v4.ckpt'),
                var_list=slim.get_model_variables('inception_v4'),
                ignore_missing_vars=True)
    else:
        raise ValueError(
            "Unsupported frontend model '%s'. This function only supports ResNet50, ResNet101, ResNet152, MobileNetV2, and InceptionV4"
            % (frontend))

    return logits, end_points, frontend_scope, init_fn
Example #14
def build_icnet(inputs, label_size, num_classes, preset_model='ICNet', pooling_type = "MAX",
    frontend="ResNet101", weight_decay=1e-5, is_training=True, pretrained_dir="models"):
    """
    Builds the ICNet model. 

    Arguments:
      inputs: The input tensor
      label_size: Size of the final label tensor. We need to know this for proper upscaling 
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction 
      num_classes: Number of classes
      pooling_type: Max or Average pooling

    Returns:
      ICNet model
    """

    inputs_4 = tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*4,  tf.shape(inputs)[2]*4])   
    inputs_2 = tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*2,  tf.shape(inputs)[2]*2])
    inputs_1 = inputs

    if frontend == 'Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits_32, end_points_32 = resnet_v2.resnet_v2_50(inputs_4, is_training=is_training, scope='resnet_v2_50')
            logits_16, end_points_16 = resnet_v2.resnet_v2_50(inputs_2, is_training=is_training, scope='resnet_v2_50')
            logits_8, end_points_8 = resnet_v2.resnet_v2_50(inputs_1, is_training=is_training, scope='resnet_v2_50')
            resnet_scope='resnet_v2_50'
            # ICNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif frontend == 'Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits_32, end_points_32 = resnet_v2.resnet_v2_101(inputs_4, is_training=is_training, scope='resnet_v2_101')
            logits_16, end_points_16 = resnet_v2.resnet_v2_101(inputs_2, is_training=is_training, scope='resnet_v2_101')
            logits_8, end_points_8 = resnet_v2.resnet_v2_101(inputs_1, is_training=is_training, scope='resnet_v2_101')
            resnet_scope='resnet_v2_101'
            # ICNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif frontend == 'Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits_32, end_points_32 = resnet_v2.resnet_v2_152(inputs_4, is_training=is_training, scope='resnet_v2_152')
            logits_16, end_points_16 = resnet_v2.resnet_v2_152(inputs_2, is_training=is_training, scope='resnet_v2_152')
            logits_8, end_points_8 = resnet_v2.resnet_v2_152(inputs_1, is_training=is_training, scope='resnet_v2_152')
            resnet_scope='resnet_v2_152'
            # ICNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (frontend))



    feature_map_shape = [int(x / 32.0) for x in label_size]
    block_32 = PyramidPoolingModule(end_points_32['pool3'], feature_map_shape=feature_map_shape, pooling_type=pooling_type)

    out_16, block_16 = CFFBlock(block_32, end_points_16['pool3'])
    out_8, block_8 = CFFBlock(block_16, end_points_8['pool3'])
    out_4 = Upsampling_by_scale(out_8, scale=2)
    out_4 = slim.conv2d(out_4, num_classes, [1, 1], activation_fn=None)

    out_full = Upsampling_by_scale(out_4, scale=2)

    out_full = slim.conv2d(out_full, num_classes, [1, 1], activation_fn=None, scope='logits')

    net = tf.concat([out_16, out_8, out_4, out_full], axis=-1)

    return net, init_fn
Example #15
def RCNN(inputs, proposals, options, is_training=True):
  """Runs RCNN model on the `inputs`.

  Args:
    inputs: Input image, a [batch, height, width, 3] uint8 tensor. The pixel
      values are in the range of [0, 255].
    proposals: Boxes used to crop the image features, using normalized
      coordinates. It should be a [batch, max_num_proposals, 4] float tensor
      denoting [y1, x1, y2, x2].
    options: A fast_rcnn_pb2.FastRCNN proto.
    is_training: If true, the model shall be executed in training mode.

  Returns:
    A [batch, max_num_proposals, feature_dims] tensor.

  Raises:
    ValueError if options is invalid.
  """
  if not isinstance(options, rcnn_pb2.RCNN):
    raise ValueError('options has to be a rcnn_pb2.RCNN proto!')
  if inputs.dtype != tf.uint8:
    raise ValueError('inputs has to be a tf.uint8 tensor.')

  net_fn = nets_factory.get_network_fn(name=options.feature_extractor_name,
                                       num_classes=1001)
  default_image_size = getattr(net_fn, 'default_image_size', 224)

  # Preprocess image.
  preprocess_fn = preprocessing_factory.get_preprocessing(
      options.feature_extractor_name, is_training=False)
  inputs = preprocess_fn(inputs,
                         output_height=None,
                         output_width=None,
                         crop_image=False)

  # Crop and resize images.
  batch = proposals.shape[0]
  max_num_proposals = tf.shape(proposals)[1]

  box_ind = tf.expand_dims(tf.range(batch), axis=-1)
  box_ind = tf.tile(box_ind, [1, max_num_proposals])

  cropped_inputs = tf.image.crop_and_resize(
      inputs,
      boxes=tf.reshape(proposals, [-1, 4]),
      box_ind=tf.reshape(box_ind, [-1]),
      crop_size=[default_image_size, default_image_size])

  # Run CNN.
  _, end_points = net_fn(cropped_inputs)
  outputs = end_points[options.feature_extractor_endpoint]
  outputs = tf.reshape(outputs, [batch, max_num_proposals, -1])

  init_fn = slim.assign_from_checkpoint_fn(
      options.feature_extractor_checkpoint,
      slim.get_model_variables(options.feature_extractor_scope))

  def _init_from_ckpt_fn(_, sess):
    return init_fn(sess)

  return outputs, _init_from_ckpt_fn
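
_init_from_ckpt_fn deliberately takes (scaffold, session), which is the signature tf.train.Scaffold expects. A hedged wiring sketch; the placeholder shapes are illustrative and options is assumed to be configured elsewhere:

import tensorflow.compat.v1 as tf

images = tf.placeholder(tf.uint8, [2, None, None, 3])
proposals = tf.placeholder(tf.float32, [2, None, 4])
options = ...  # a rcnn_pb2.RCNN proto, built elsewhere
outputs, init_from_ckpt_fn = RCNN(images, proposals, options)
scaffold = tf.train.Scaffold(init_fn=init_from_ckpt_fn)
# e.g. pass `scaffold` to a tf.estimator.EstimatorSpec or MonitoredTrainingSession.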
Example #16
def get_init_fn(train_dir=None,
                model_checkpoint=None,
                exclude_list=None,
                include_list=None,
                reset_global_step_if_necessary=True,
                ignore_missing_vars=True):
    """Gets model initializer function.

  The initialization logic is as follows:
    1. If a checkpoint is found in `train_dir`, initialize from it.
    2. Otherwise, if `model_checkpoint` is valid, initialize from it, and reset
       global step if necessary.
    3. Otherwise, do not initialize from any checkpoint.

  Args:
    train_dir: A string as the path to an existing training directory to resume.
      Use None to skip.
    model_checkpoint: A string as the path to an existing model checkpoint to
      initialize from. Use None to skip.
    exclude_list: A list of strings for the names of variables not to load.
    include_list: A list of strings for the names of variables to load. Use
      None to load all variables.
    reset_global_step_if_necessary: A boolean for whether to reset global step.
      Only used in the case of initializing from an existing checkpoint
      `model_checkpoint` rather than resuming training from `train_dir`.
    ignore_missing_vars: A boolean for whether to ignore missing variables. If
      False, errors will be raised if there is a missing variable.

  Returns:
    A model initializer function if an existing checkpoint is found, or None
      otherwise.
  """
    # Make sure the exclude list is a list.
    if not exclude_list:
        exclude_list = []

    if train_dir:
        train_checkpoint = tf.train.latest_checkpoint(train_dir)
        if train_checkpoint:
            model_checkpoint = train_checkpoint
            logging.info('Resume latest training checkpoint in: %s.',
                         train_dir)
        elif model_checkpoint:
            logging.info('Use initial checkpoint: %s.', model_checkpoint)
            if reset_global_step_if_necessary:
                exclude_list.append('global_step')
                logging.info('Reset global step.')
    elif model_checkpoint:
        logging.info('Use initial checkpoint: %s.', model_checkpoint)
        if reset_global_step_if_necessary:
            exclude_list.append('global_step')
            logging.info('Reset global step.')

    if not model_checkpoint:
        logging.info('Do not initialize from a checkpoint.')
        return None

    variables_to_restore = tf_slim.get_variables_to_restore(
        include=include_list, exclude=exclude_list)

    return tf_slim.assign_from_checkpoint_fn(
        model_checkpoint,
        variables_to_restore,
        ignore_missing_vars=ignore_missing_vars)
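
A usage sketch of the precedence the docstring describes; the paths and excluded scope are hypothetical:

import tensorflow.compat.v1 as tf

init_fn = get_init_fn(train_dir='/tmp/train',
                      model_checkpoint='/path/to/pretrained.ckpt',
                      exclude_list=['InceptionV3/Logits'])
if init_fn is not None:
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        init_fn(sess)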
Example #17
def predict(model_root, datasets_dir, model_name, test_image_name):
    with tf.Graph().as_default():
        tf_global_step = slim.get_or_create_global_step()

        test_image = os.path.join(datasets_dir, test_image_name)

        # dataset = convert_data.get_datasets('test',dataset_dir=datasets_dir)

        network_fn = net_select.get_network_fn(model_name,
                                               num_classes=20,
                                               is_training=False)
        batch_size = 1
        eval_image_size = network_fn.default_image_size

        # images, images_raw, labels = load_batch(datasets_dir,
        #                                         height=eval_image_size,
        #                                         width=eval_image_size)

        image_preprocessing_fn = preprocessing_select.get_preprocessing(
            model_name, is_training=False)

        image_data = tf.io.read_file(test_image)
        image_data = tf.image.decode_jpeg(image_data, channels=3)
        image_data = image_preprocessing_fn(image_data, eval_image_size,
                                            eval_image_size)
        image_data = tf.expand_dims(image_data, 0)

        logits_1, end_points_1 = network_fn(image_data)
        attention_maps = tf.reduce_mean(end_points_1['attention_maps'],
                                        axis=-1,
                                        keepdims=True)
        attention_maps = tf.image.resize(attention_maps,
                                         [eval_image_size, eval_image_size],
                                         method=tf.image.ResizeMethod.BILINEAR)
        bboxes = tf_v1.py_func(mask2bbox, [attention_maps], [tf.float32])
        bboxes = tf.reshape(bboxes, [batch_size, 4])
        # print(bboxes)
        box_ind = tf.range(batch_size, dtype=tf.int32)

        images = tf.image.crop_and_resize(
            image_data,
            bboxes,
            box_ind,
            crop_size=[eval_image_size, eval_image_size])
        logits_2, end_points_2 = network_fn(images, reuse=True)

        logits = tf.math.log(
            tf.nn.softmax(logits_1) * 0.5 + tf.nn.softmax(logits_2) * 0.5)

        checkpoint_path = os.path.join(model_root, model_name)

        if tf.io.gfile.isdir(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)

        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        # with tf_v1.Session() as sess:
        #     with slim.queues.QueueRunners(sess):
        #         sess.run(tf_v1.initialize_local_variables())
        #         init_fn(sess)
        #         np_probabilities, np_images_raw, np_labels = sess.run([logits, images_raw, labels])
        #
        #         for i in range(batch_size):
        #             image = np_images_raw[i, :, :, :]
        #             true_label = np_labels[i]
        #             predicted_label = np.argmax(np_probabilities[i, :])
        #             print('true is {}, predict is {}'.format(true_label, predicted_label))

        with tf_v1.Session() as sess:
            with slim.queues.QueueRunners(sess):
                sess.run(tf_v1.initialize_local_variables())
                init_fn(sess)
                np_images, np_probabilities = sess.run([image_data, logits])
                predicted_label = np.argmax(np_probabilities[0, :])
                print(predicted_label)
Example #18
def main(unused_argv=None):
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Forces all input processing onto CPU in order to reserve the GPU for the
        # forward inference and back-propagation.
        device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
        with tf.device(
                tf.train.replica_device_setter(FLAGS.ps_tasks,
                                               worker_device=device)):
            # Loads content images.
            content_inputs_, _ = image_utils.imagenet_inputs(
                FLAGS.batch_size, FLAGS.image_size)

            # Loads style images.
            [style_inputs_, _, _] = image_utils.arbitrary_style_image_inputs(
                FLAGS.style_dataset_file,
                batch_size=FLAGS.batch_size,
                image_size=FLAGS.image_size,
                shuffle=True,
                center_crop=FLAGS.center_crop,
                augment_style_images=FLAGS.augment_style_images,
                random_style_image_size=FLAGS.random_style_image_size)

        with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
            # Process style and content weight flags.
            content_weights = ast.literal_eval(FLAGS.content_weights)
            style_weights = ast.literal_eval(FLAGS.style_weights)

            # Define the model
            stylized_images, total_loss, loss_dict, \
                  _ = build_mobilenet_model.build_mobilenet_model(
                      content_inputs_,
                      style_inputs_,
                      mobilenet_trainable=False,
                      style_params_trainable=True,
                      transformer_trainable=True,
                      mobilenet_end_point='layer_19',
                      transformer_alpha=FLAGS.alpha,
                      style_prediction_bottleneck=100,
                      adds_losses=True,
                      content_weights=content_weights,
                      style_weights=style_weights,
                      total_variation_weight=FLAGS.total_variation_weight,
                  )

            # Adding scalar summaries to the tensorboard.
            for key in loss_dict:
                tf.summary.scalar(key, loss_dict[key])

            # Adding Image summaries to the tensorboard.
            tf.summary.image('image/0_content_inputs', content_inputs_, 3)
            tf.summary.image('image/1_style_inputs_aug', style_inputs_, 3)
            tf.summary.image('image/2_stylized_images', stylized_images, 3)

            # Set up training
            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
            train_op = slim.learning.create_train_op(
                total_loss,
                optimizer,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                summarize_gradients=False)

            # Function to restore VGG16 parameters.
            init_fn_vgg = slim.assign_from_checkpoint_fn(
                vgg.checkpoint_file(), slim.get_variables('vgg_16'))

            # Function to restore Mobilenet V2 parameters.
            mobilenet_variables_dict = {
                var.op.name: var
                for var in slim.get_model_variables('MobilenetV2')
            }
            init_fn_mobilenet = slim.assign_from_checkpoint_fn(
                FLAGS.mobilenet_checkpoint, mobilenet_variables_dict)

            # Function to restore VGG16 and Mobilenet V2 parameters.
            def init_sub_networks(session):
                init_fn_vgg(session)
                init_fn_mobilenet(session)

            # Run training
            slim.learning.train(train_op=train_op,
                                logdir=os.path.expanduser(FLAGS.train_dir),
                                master=FLAGS.master,
                                is_chief=FLAGS.task == 0,
                                number_of_steps=FLAGS.train_steps,
                                init_fn=init_sub_networks,
                                save_summaries_secs=FLAGS.save_summaries_secs,
                                save_interval_secs=FLAGS.save_interval_secs)
Example #19
def build_gcn(inputs,
              num_classes,
              preset_model='GCN-Res101',
              weight_decay=1e-5,
              is_training=True,
              upscaling_method="bilinear",
              pretrained_dir="models"):
    """
    Builds the GCN model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      GCN model
    """

    if preset_model == 'GCN-Res50':
        with slim.arg_scope(
                resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(
                inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope = 'resnet_v2_50'
            # GCN requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'),
                slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'GCN-Res101':
        with slim.arg_scope(
                resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(
                inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope = 'resnet_v2_101'
            # GCN requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'),
                slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'GCN-Res152':
        with slim.arg_scope(
                resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(
                inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope = 'resnet_v2_152'
            # GCN requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'),
                slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError(
            "Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152"
            % (preset_model))

    res = [
        end_points['pool5'], end_points['pool4'], end_points['pool3'],
        end_points['pool2']
    ]

    down_5 = GlobalConvBlock(res[0], n_filters=21, size=3)
    down_5 = BoundaryRefinementBlock(down_5, n_filters=21, kernel_size=[3, 3])
    down_5 = ConvUpscaleBlock(down_5,
                              n_filters=21,
                              kernel_size=[3, 3],
                              scale=2)

    down_4 = GlobalConvBlock(res[1], n_filters=21, size=3)
    down_4 = BoundaryRefinementBlock(down_4, n_filters=21, kernel_size=[3, 3])
    down_4 = tf.add(down_4, down_5)
    down_4 = BoundaryRefinementBlock(down_4, n_filters=21, kernel_size=[3, 3])
    down_4 = ConvUpscaleBlock(down_4,
                              n_filters=21,
                              kernel_size=[3, 3],
                              scale=2)

    down_3 = GlobalConvBlock(res[2], n_filters=21, size=3)
    down_3 = BoundaryRefinementBlock(down_3, n_filters=21, kernel_size=[3, 3])
    down_3 = tf.add(down_3, down_4)
    down_3 = BoundaryRefinementBlock(down_3, n_filters=21, kernel_size=[3, 3])
    down_3 = ConvUpscaleBlock(down_3,
                              n_filters=21,
                              kernel_size=[3, 3],
                              scale=2)

    down_2 = GlobalConvBlock(res[3], n_filters=21, size=3)
    down_2 = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    down_2 = tf.add(down_2, down_3)
    down_2 = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    down_2 = ConvUpscaleBlock(down_2,
                              n_filters=21,
                              kernel_size=[3, 3],
                              scale=2)

    net = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    net = ConvUpscaleBlock(net, n_filters=21, kernel_size=[3, 3], scale=2)
    net = BoundaryRefinementBlock(net, n_filters=21, kernel_size=[3, 3])

    net = slim.conv2d(net,
                      num_classes, [1, 1],
                      activation_fn=None,
                      scope='logits')

    return net, init_fn
Example #20
    def __init__(self,
                 net_name,
                 snapshot_path,
                 feature_norm_method=None,
                 should_restore_classifier=False,
                 gpu_memory_fraction=None,
                 vgg_16_heads=None):
        """
        Args:
            snapshot_path: path or dir with checkpoints
            feature_norm_method:
            should_restore_classifier: if None - do not restore last layer from the snapshot,
                         otherwise must be equal to the number of classes of the snapshot.
                         if vgg_16_heads is not None then the classifiers will be restored anyway.

        """
        self.net_name = net_name
        if net_name != 'vgg_16_multihead' and vgg_16_heads is not None:
            raise ValueError(
                'vgg_16_heads must be not None only for vgg_16_multihead')
        if net_name == 'vgg_16_multihead' and vgg_16_heads is None:
            raise ValueError(
                'vgg_16_heads must be not None for vgg_16_multihead')

        if tf.io.gfile.isdir(snapshot_path):
            snapshot_path = tf.train.latest_checkpoint(snapshot_path)

        if not isinstance(feature_norm_method, list):
            feature_norm_method = [feature_norm_method]
        acceptable_methods = [None, 'signed_sqrt', 'unit_norm']
        for method in feature_norm_method:
            if method not in acceptable_methods:
                raise ValueError(
                    'unknown norm method: {}. Use one of {}'.format(
                        method, acceptable_methods))
        self.feature_norm_method = feature_norm_method
        if vgg_16_heads is not None:
            should_restore_classifier = True

        if should_restore_classifier:
            if vgg_16_heads is None:
                reader = pywrap_tensorflow.NewCheckpointReader(snapshot_path)
                if net_name == 'inception_v1':
                    var_value = reader.get_tensor(
                        'InceptionV1/Logits/Conv2d_0c_1x1/weights')
                else:
                    var_value = reader.get_tensor('vgg_16/fc8/weights')
                num_classes = var_value.shape[3]
            else:
                num_classes = vgg_16_heads
        else:
            num_classes = 2 if vgg_16_heads is None else vgg_16_heads

        network_fn = nets_factory.get_network_fn(net_name,
                                                 num_classes=num_classes,
                                                 is_training=False)
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            net_name, is_training=False)

        eval_image_size = network_fn.default_image_size
        self.img_resize_shape = (eval_image_size, eval_image_size)  # (224, 224) for VGG

        with tf.Graph().as_default() as graph:
            self.graph = graph
            with tf.compat.v1.variable_scope('input'):
                input_pl = tf.compat.v1.placeholder(
                    tf.float32,
                    shape=[None, eval_image_size, eval_image_size, 3],
                    name='x')
                # not used
                is_phase_train_pl = tf.compat.v1.placeholder(
                    tf.bool, shape=tuple(), name='is_phase_train')

            function_to_map = lambda x: image_preprocessing_fn(
                x, eval_image_size, eval_image_size)
            images = tf.map_fn(function_to_map, input_pl)

            logits, self.end_points = network_fn(images)
            self.__dict__.update(self.end_points)
            if net_name == 'inception_v1':
                for tensor_name in [
                        'Branch_0/Conv2d_0a_1x1', 'Branch_1/Conv2d_0a_1x1',
                        'Branch_1/Conv2d_0b_3x3', 'Branch_2/Conv2d_0a_1x1',
                        'Branch_2/Conv2d_0b_3x3', 'Branch_3/MaxPool_0a_3x3',
                        'Branch_3/Conv2d_0b_1x1'
                ]:
                    full_tensor_name = 'InceptionV1/InceptionV1/Mixed_4d/' + tensor_name
                    if 'MaxPool' in tensor_name:
                        full_tensor_name += '/MaxPool:0'
                    else:
                        full_tensor_name += '/Relu:0'
                    short_name = 'Mixed_4d/' + tensor_name
                    self.__dict__[short_name] = tf.compat.v1.get_default_graph(
                    ).get_tensor_by_name(full_tensor_name)
                self.MaxPool_0a_7x7 = tf.compat.v1.get_default_graph(
                ).get_tensor_by_name(
                    "InceptionV1/Logits/MaxPool_0a_7x7/AvgPool:0")
            elif net_name in ['vgg_16', 'vgg_16_multihead']:
                for layer_name in ['fc6', 'fc7'] + \
                        ['conv{0}/conv{0}_{1}'.format(i, j) for i in range(3, 6) for j in range(1, 4)]:
                    self.__dict__['vgg_16/{}_prerelu'.format(layer_name)] = \
                        tf.compat.v1.get_default_graph().get_tensor_by_name("vgg_16/{}/BiasAdd:0".format(layer_name))
            config = tf.compat.v1.ConfigProto(
                gpu_options=tf.compat.v1.GPUOptions(
                    per_process_gpu_memory_fraction=gpu_memory_fraction))
            self.sess = tf.compat.v1.Session(config=config)

            if should_restore_classifier:
                variables_to_restore = slim.get_model_variables()
            else:
                variables_to_restore = [
                    var for var in slim.get_model_variables()
                    if not var.op.name.startswith(classifier_scope[net_name])
                ]

            init_fn = slim.assign_from_checkpoint_fn(snapshot_path,
                                                     variables_to_restore)
            init_fn(self.sess)
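
The classifier-restore branch above reads the classifier weights directly from the checkpoint to infer the class count instead of requiring it as an argument. The same trick in isolation; the path is hypothetical:

import tensorflow.compat.v1 as tf

reader = tf.train.NewCheckpointReader('/path/to/vgg_16.ckpt')
fc8_weights = reader.get_tensor('vgg_16/fc8/weights')  # shape [1, 1, 4096, num_classes]
num_classes = fc8_weights.shape[3]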
Example #21
gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.8
with tf.Session(config=gpu_config) as sess:
    summary_writer = tf.summary.FileWriter('../logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    train_data.init(sess)

    global_step = 0
    new_checkpoint = None
    if cfg_train_continue:
        new_checkpoint = tf.train.latest_checkpoint(
            '../checkpoints/checkpoints')
    if new_checkpoint:
        exclusions = ['global_step']
        net_except_logits = slim.get_variables_to_restore(exclude=exclusions)
        init_fn = slim.assign_from_checkpoint_fn(new_checkpoint,
                                                 net_except_logits,
                                                 ignore_missing_vars=True)
        init_fn(sess)
        print('load params from {}'.format(new_checkpoint))

    try:
        cur_epoch = 1
        while True:
            t0 = time.time()
            batch_x_img, batch_center_map, batch_scale_map, batch_offset_map, batch_landmark_map = train_data.batch(
                sess)
            t1 = time.time()
            debug_info_, train_loss_, loss_class_, loss_scale_, loss_offset_, loss_landmark_, loss_l2_, summary_, _ = sess.run(
                [
                    debug_info, loss_all_op, loss_class_op, loss_scale_op,
                    loss_offset_op, loss_landmark_op, loss_l2_op, summary_op,
Example #22
def build_refinenet(inputs, num_classes, preset_model='RefineNet-Res101', weight_decay=1e-5, is_training=True, upscaling_method="bilinear", pretrained_dir="models"):
    """
    Builds the RefineNet model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      RefineNet model
    """

    if preset_model == 'RefineNet-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'RefineNet-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'RefineNet-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))




    high = [end_points['pool5'], end_points['pool4'],
            end_points['pool3'], end_points['pool2']]

    low = [None, None, None, None]

    # Get the feature maps to the proper size with bottleneck
    high[0] = slim.conv2d(high[0], 512, 1)
    high[1] = slim.conv2d(high[1], 256, 1)
    high[2] = slim.conv2d(high[2], 256, 1)
    high[3] = slim.conv2d(high[3], 256, 1)

    # RefineNet
    low[0] = RefineBlock(high_inputs=high[0], low_inputs=None)  # Only input ResNet 1/32
    low[1] = RefineBlock(high[1], low[0])  # High input = ResNet 1/16, Low input = Previous 1/16
    low[2] = RefineBlock(high[2], low[1])  # High input = ResNet 1/8, Low input = Previous 1/8
    low[3] = RefineBlock(high[3], low[2])  # High input = ResNet 1/4, Low input = Previous 1/4

    # g[3]=Upsampling(g[3],scale=4)

    net = low[3]

    net = ResidualConvUnit(net)
    net = ResidualConvUnit(net)

    if upscaling_method.lower() == "conv":
        net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 128)
        net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 64)
    elif upscaling_method.lower() == "bilinear":
        net = Upsampling(net, scale=4)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')

    return net, init_fn
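
A minimal usage sketch (not part of the original example): the function returns both the logits and an init_fn, and the init_fn must run after the global initializer so the pre-trained backbone weights overwrite the random ones. The input shape and class count here are illustrative:

inputs = tf.placeholder(tf.float32, shape=[None, 512, 512, 3])
net, init_fn = build_refinenet(inputs, num_classes=21,
                               preset_model='RefineNet-Res101')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)  # restores only the resnet_v2_101 variables from the checkpoint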
Example #23
def main(unused_argv=None):
  tf.logging.set_verbosity(tf.logging.INFO)
  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MkDir(FLAGS.output_dir)

  with tf.Graph().as_default(), tf.Session() as sess:
    # Defines place holder for the style image.
    style_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
    if FLAGS.style_square_crop:
      style_img_preprocessed = image_utils.center_crop_resize_image(
          style_img_ph, FLAGS.style_image_size)
    else:
      style_img_preprocessed = image_utils.resize_image(style_img_ph,
                                                        FLAGS.style_image_size)

    # Defines place holder for the content image.
    content_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
    if FLAGS.content_square_crop:
      content_img_preprocessed = image_utils.center_crop_resize_image(
          content_img_ph, FLAGS.image_size)
    else:
      content_img_preprocessed = image_utils.resize_image(
          content_img_ph, FLAGS.image_size)

    # Defines the model.
    stylized_images, _, _, bottleneck_feat = build_model.build_model(
        content_img_preprocessed,
        style_img_preprocessed,
        trainable=False,
        is_training=False,
        inception_end_point='Mixed_6e',
        style_prediction_bottleneck=100,
        adds_losses=False)

    if tf.gfile.IsDirectory(FLAGS.checkpoint):
      checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint)
    else:
      checkpoint = FLAGS.checkpoint
    tf.logging.info('loading checkpoint file: {}'.format(checkpoint))

    init_fn = slim.assign_from_checkpoint_fn(checkpoint,
                                             slim.get_variables_to_restore())
    sess.run([tf.local_variables_initializer()])
    init_fn(sess)

    # Gets the list of the input style images.
    style_img_list = tf.gfile.Glob(FLAGS.style_images_paths)
    if len(style_img_list) > FLAGS.maximum_styles_to_evaluate:
      np.random.seed(1234)
      style_img_list = np.random.permutation(style_img_list)
      style_img_list = style_img_list[:FLAGS.maximum_styles_to_evaluate]

    # Gets list of input content images.
    content_img_list = tf.gfile.Glob(FLAGS.content_images_paths)

    for content_i, content_img_path in enumerate(content_img_list):
      content_img_np = image_utils.load_np_image_uint8(content_img_path)[:, :, :3]
      content_img_name = os.path.basename(content_img_path)[:-4]

      # Saves preprocessed content image.
      inp_img_cropped_resized_np = sess.run(
          content_img_preprocessed, feed_dict={
              content_img_ph: content_img_np
          })
      image_utils.save_np_image(
          inp_img_cropped_resized_np,
          os.path.join(FLAGS.output_dir, '%s.jpg' % content_img_name))

      # Computes bottleneck features of the style prediction network for the
      # identity transform.
      identity_params = sess.run(
          bottleneck_feat, feed_dict={style_img_ph: content_img_np})

      for style_i, style_img_path in enumerate(style_img_list):
        if style_i > FLAGS.maximum_styles_to_evaluate:
          break
        style_img_name = os.path.basename(style_img_path)[:-4]
        style_image_np = image_utils.load_np_image_uint8(style_img_path)[:, :, :3]

        if style_i % 10 == 0:
          tf.logging.info('Stylizing (%d) %s with (%d) %s' %
                          (content_i, content_img_name, style_i,
                           style_img_name))

        # Saves preprocessed style image.
        style_img_cropped_resized_np = sess.run(
            style_img_preprocessed, feed_dict={
                style_img_ph: style_image_np
            })
        image_utils.save_np_image(
            style_img_cropped_resized_np,
            os.path.join(FLAGS.output_dir, '%s.jpg' % style_img_name))

        # Computes bottleneck features of the style prediction network for the
        # given style image.
        style_params = sess.run(
            bottleneck_feat, feed_dict={style_img_ph: style_image_np})

        interpolation_weights = ast.literal_eval(FLAGS.interpolation_weights)
        # Interpolates between the parameters of the identity transform and
        # style parameters of the given style image.
        for interp_i, wi in enumerate(interpolation_weights):
          stylized_image_res = sess.run(
              stylized_images,
              feed_dict={
                  bottleneck_feat:
                      identity_params * (1 - wi) + style_params * wi,
                  content_img_ph:
                      content_img_np
              })

          # Saves stylized image.
          image_utils.save_np_image(
              stylized_image_res,
              os.path.join(FLAGS.output_dir, '%s_stylized_%s_%d.jpg' %
                           (content_img_name, style_img_name, interp_i)))
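
The interpolation loop above is a plain convex combination of two bottleneck embeddings; a standalone NumPy sketch of the same blend (shapes and weights here are illustrative):

import numpy as np

identity_params = np.zeros((1, 100), np.float32)  # bottleneck of the content image
style_params = np.ones((1, 100), np.float32)      # bottleneck of the style image
for wi in [0.0, 0.5, 1.0]:  # e.g. --interpolation_weights='[0.0, 0.5, 1.0]'
    blended = identity_params * (1 - wi) + style_params * wi
    # wi == 0 reproduces the content image; wi == 1 applies the full style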
Example #24
def main(unused_argv=None):
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Forces all input processing onto CPU in order to reserve the GPU for the
        # forward inference and back-propagation.
        device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
        with tf.device(
                tf.train.replica_device_setter(FLAGS.ps_tasks,
                                               worker_device=device)):
            # Load content images
            content_inputs_, _ = image_utils.imagenet_inputs(
                FLAGS.batch_size, FLAGS.image_size)

            # Loads style images.
            [style_inputs_, _,
             style_inputs_orig_] = image_utils.arbitrary_style_image_inputs(
                 FLAGS.style_dataset_file,
                 batch_size=FLAGS.batch_size,
                 image_size=FLAGS.image_size,
                 shuffle=True,
                 center_crop=FLAGS.center_crop,
                 augment_style_images=FLAGS.augment_style_images,
                 random_style_image_size=FLAGS.random_style_image_size)

        with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
            # Process style and content weight flags.
            content_weights = ast.literal_eval(FLAGS.content_weights)
            style_weights = ast.literal_eval(FLAGS.style_weights)

            # Define the model
            (stylized_images, true_loss, _,
             bottleneck_feat) = build_mobilenet_model.build_mobilenet_model(
                content_inputs_,
                style_inputs_,
                mobilenet_trainable=True,
                style_params_trainable=False,
                style_prediction_bottleneck=100,
                adds_losses=True,
                content_weights=content_weights,
                style_weights=style_weights,
                total_variation_weight=FLAGS.total_variation_weight,
            )

            _, inception_bottleneck_feat = build_model.style_prediction(
                style_inputs_,
                [],
                [],
                is_training=False,
                trainable=False,
                inception_end_point='Mixed_6e',
                style_prediction_bottleneck=100,
                reuse=None,
            )

            print('PRINTING TRAINABLE VARIABLES')
            for x in tf.trainable_variables():
                print(x)

            mse_loss = tf.losses.mean_squared_error(inception_bottleneck_feat,
                                                    bottleneck_feat)
            total_loss = mse_loss
            if FLAGS.use_true_loss:
                true_loss = FLAGS.true_loss_weight * true_loss
                total_loss += true_loss

            if FLAGS.use_true_loss:
                tf.summary.scalar('mse', mse_loss)
                tf.summary.scalar('true_loss', true_loss)
            tf.summary.scalar('total_loss', total_loss)
            tf.summary.image('image/0_content_inputs', content_inputs_, 3)
            tf.summary.image('image/1_style_inputs_orig', style_inputs_orig_,
                             3)
            tf.summary.image('image/2_style_inputs_aug', style_inputs_, 3)
            tf.summary.image('image/3_stylized_images', stylized_images, 3)

            mobilenet_variables_to_restore = slim.get_variables_to_restore(
                include=['MobilenetV2'], exclude=['global_step'])

            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
            train_op = slim.learning.create_train_op(
                total_loss,
                optimizer,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                summarize_gradients=False)

            init_fn = slim.assign_from_checkpoint_fn(
                FLAGS.initial_checkpoint,
                slim.get_variables_to_restore(
                    exclude=['MobilenetV2', 'mobilenet_conv', 'global_step']))
            init_pretrained_mobilenet = slim.assign_from_checkpoint_fn(
                FLAGS.mobilenet_checkpoint, mobilenet_variables_to_restore)

            def init_sub_networks(session):
                init_fn(session)
                init_pretrained_mobilenet(session)

            slim.learning.train(train_op=train_op,
                                logdir=os.path.expanduser(FLAGS.train_dir),
                                master=FLAGS.master,
                                is_chief=FLAGS.task == 0,
                                number_of_steps=FLAGS.train_steps,
                                init_fn=init_sub_networks,
                                save_summaries_secs=FLAGS.save_summaries_secs,
                                save_interval_secs=FLAGS.save_interval_secs)
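
init_sub_networks above chains two restore closures so slim.learning.train receives a single init_fn; a generic sketch of that pattern (checkpoint paths and variable lists are placeholders):

def make_chained_init_fn(ckpt_a, vars_a, ckpt_b, vars_b):
    """Returns one init_fn that runs two checkpoint restores in order."""
    restore_a = slim.assign_from_checkpoint_fn(ckpt_a, vars_a)
    restore_b = slim.assign_from_checkpoint_fn(ckpt_b, vars_b)

    def init_fn(session):
        restore_a(session)
        restore_b(session)

    return init_fn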
Example #25
def main(argv=None):
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
    if not tf.gfile.Exists(FLAGS.checkpoint_path):
        tf.gfile.MkDir(FLAGS.checkpoint_path)
    else:
        if not FLAGS.restore:
            tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
            tf.gfile.MkDir(FLAGS.checkpoint_path)

    input_images = tf.placeholder(tf.float32,
                                  shape=[None, None, None, 3],
                                  name='input_images')
    input_score_maps = tf.placeholder(tf.float32,
                                      shape=[None, None, None, 1],
                                      name='input_score_maps')
    if FLAGS.geometry == 'RBOX':
        input_geo_maps = tf.placeholder(tf.float32,
                                        shape=[None, None, None, 5],
                                        name='input_geo_maps')
    else:
        input_geo_maps = tf.placeholder(tf.float32,
                                        shape=[None, None, None, 8],
                                        name='input_geo_maps')
    input_training_masks = tf.placeholder(tf.float32,
                                          shape=[None, None, None, 1],
                                          name='input_training_masks')

    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                               global_step,
                                               decay_steps=10000,
                                               decay_rate=0.94,
                                               staircase=True)
    # add summary
    tf.summary.scalar('learning_rate', learning_rate)
    opt = tf.train.AdamOptimizer(learning_rate)
    # opt = tf.train.MomentumOptimizer(learning_rate, 0.9)

    # split
    input_images_split = tf.split(input_images, len(gpus))
    input_score_maps_split = tf.split(input_score_maps, len(gpus))
    input_geo_maps_split = tf.split(input_geo_maps, len(gpus))
    input_training_masks_split = tf.split(input_training_masks, len(gpus))

    tower_grads = []
    reuse_variables = None
    for i, gpu_id in enumerate(gpus):
        with tf.device('/gpu:%d' % gpu_id):
            with tf.name_scope('model_%d' % gpu_id) as scope:
                iis = input_images_split[i]
                isms = input_score_maps_split[i]
                igms = input_geo_maps_split[i]
                itms = input_training_masks_split[i]
                total_loss, model_loss = tower_loss(iis, isms, igms, itms,
                                                    reuse_variables)
                batch_norm_updates_op = tf.group(
                    *tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
                reuse_variables = True

                grads = opt.compute_gradients(total_loss)
                tower_grads.append(grads)

    grads = average_gradients(tower_grads)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    summary_op = tf.summary.merge_all()
    # save moving average
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # batch norm updates
    with tf.control_dependencies(
        [variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
        train_op = tf.no_op(name='train_op')

    saver = tf.train.Saver(tf.global_variables())
    summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path,
                                           tf.get_default_graph())

    init = tf.global_variables_initializer()

    if FLAGS.pretrained_model_path is not None:
        variable_restore_op = slim.assign_from_checkpoint_fn(
            FLAGS.pretrained_model_path,
            slim.get_trainable_variables(),
            ignore_missing_vars=True)
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        if FLAGS.restore:
            print('continue training from previous checkpoint')
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
            saver.restore(sess, ckpt)
        else:
            sess.run(init)
            if FLAGS.pretrained_model_path is not None:
                variable_restore_op(sess)

        data_generator = icdar.get_batch(num_workers=FLAGS.num_readers,
                                         input_size=FLAGS.input_size,
                                         batch_size=FLAGS.batch_size_per_gpu *
                                         len(gpus))

        start = time.time()
        for step in range(FLAGS.max_steps):
            data = next(data_generator)
            ml, tl, _ = sess.run(
                [model_loss, total_loss, train_op],
                feed_dict={
                    input_images: data[0],
                    input_score_maps: data[2],
                    input_geo_maps: data[3],
                    input_training_masks: data[4]
                })
            if np.isnan(tl):
                print('Loss diverged, stop training')
                break

            if step % 10 == 0:
                avg_time_per_step = (time.time() - start) / 10
                avg_examples_per_second = (10 * FLAGS.batch_size_per_gpu *
                                           len(gpus)) / (time.time() - start)
                start = time.time()
                print(
                    'Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'
                    .format(step, ml, tl, avg_time_per_step,
                            avg_examples_per_second))

            if step % FLAGS.save_checkpoint_steps == 0:
                saver.save(sess,
                           os.path.join(FLAGS.checkpoint_path, 'model.ckpt'),
                           global_step=global_step)

            if step % FLAGS.save_summary_steps == 0:
                _, tl, summary_str = sess.run(
                    [train_op, total_loss, summary_op],
                    feed_dict={
                        input_images: data[0],
                        input_score_maps: data[2],
                        input_geo_maps: data[3],
                        input_training_masks: data[4]
                    })
                summary_writer.add_summary(summary_str, global_step=step)
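
average_gradients above is defined elsewhere in this repo; assuming it follows the standard multi-tower pattern, a minimal sketch:

def average_gradients(tower_grads):
    """Averages (gradient, variable) pairs across GPU towers.

    tower_grads: one compute_gradients() result per GPU, with variables
    appearing in the same order in every tower.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads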
Example #26
def main(argv=None):
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    now = datetime.datetime.now()
    StyleTime = now.strftime("%Y-%m-%d-%H-%M-%S")
    os.makedirs(FLAGS.logs_path + StyleTime)
    if not os.path.exists(FLAGS.checkpoint_path):
        os.makedirs(FLAGS.checkpoint_path)

    input_image = tf.placeholder(tf.float32,
                                 shape=[None, None, None, 3],
                                 name='input_image')
    input_bbox = tf.placeholder(tf.float32, shape=[None, 5], name='input_bbox')
    input_im_info = tf.placeholder(tf.float32,
                                   shape=[None, 3],
                                   name='input_im_info')

    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    learning_rate = tf.Variable(FLAGS.learning_rate, trainable=False)
    tf.summary.scalar('learning_rate', learning_rate)
    opt = tf.train.AdamOptimizer(learning_rate)

    gpu_id = int(FLAGS.gpu)
    with tf.device('/gpu:%d' % gpu_id):
        with tf.name_scope('model_%d' % gpu_id) as scope:
            bbox_pred, cls_pred, cls_prob = model.model(input_image)
            total_loss, model_loss, rpn_cross_entropy, rpn_loss_box = model.loss(
                bbox_pred, cls_pred, input_bbox, input_im_info)
            batch_norm_updates_op = tf.group(
                *tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
            grads = opt.compute_gradients(total_loss)

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    summary_op = tf.summary.merge_all()
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    with tf.control_dependencies(
        [variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
        train_op = tf.no_op(name='train_op')

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
    summary_writer = tf.summary.FileWriter(FLAGS.logs_path + StyleTime,
                                           tf.get_default_graph())

    init = tf.global_variables_initializer()

    if FLAGS.pretrained_model_path is not None:
        variable_restore_op = slim.assign_from_checkpoint_fn(
            FLAGS.pretrained_model_path,
            slim.get_trainable_variables(),
            ignore_missing_vars=True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        if FLAGS.restore:
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
            restore_step = int(ckpt.split('.')[0].split('_')[-1])  # e.g. 'ctpn_50000.ckpt' -> 50000
            print("continue training from previous checkpoint {}".format(
                restore_step))
            saver.restore(sess, ckpt)
        else:
            sess.run(init)
            restore_step = 0
            if FLAGS.pretrained_model_path is not None:
                variable_restore_op(sess)

        data_generator = data_provider.get_batch(num_workers=FLAGS.num_readers)
        start = time.time()
        for step in range(restore_step, FLAGS.max_steps):
            data = next(data_generator)
            ml, tl, _, summary_str = sess.run(
                [model_loss, total_loss, train_op, summary_op],
                feed_dict={
                    input_image: data[0],
                    input_bbox: data[1],
                    input_im_info: data[2]
                })

            summary_writer.add_summary(summary_str, global_step=step)

            if step != 0 and step % FLAGS.decay_steps == 0:
                sess.run(
                    tf.assign(learning_rate,
                              learning_rate.eval() * FLAGS.decay_rate))

            if step % 10 == 0:
                avg_time_per_step = (time.time() - start) / 10
                start = time.time()
                print(
                    'Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, LR: {:.6f}'
                    .format(step, ml, tl, avg_time_per_step,
                            learning_rate.eval()))

            if (step + 1) % FLAGS.save_checkpoint_steps == 0:
                filename = ('ctpn_{:d}'.format(step + 1) + '.ckpt')
                filename = os.path.join(FLAGS.checkpoint_path, filename)
                saver.save(sess, filename)
                print('Write model to: {:s}'.format(filename))
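
As a sanity check on the manual decay above: multiplying the rate by FLAGS.decay_rate every FLAGS.decay_steps steps is the staircase schedule that the previous example builds with tf.train.exponential_decay; in closed form (a sketch, ignoring the restore offset):

def lr_at(step):
    """Learning rate the manual decay loop reaches by a given step."""
    return FLAGS.learning_rate * FLAGS.decay_rate ** (step // FLAGS.decay_steps)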