Example #1
 def model_fn(self, features, labels, mode, params, config=None):
     src = features["input"]
     src_len = features["input_length"]
     # embedding source input
     with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
         sequence_inputs, input_ids = self.embedding.encoder_embedding_input(
             src)
         encoder_outputs, encoder_states = self.encoder.encode(
             sequence_inputs=sequence_inputs,
             sequence_length=src_len,
             mode=mode)
         if params["time_major"]:
             encoder_outputs = tf.transpose(encoder_outputs, perm=[1, 0, 2])
         # embedding target sequence
         tgt_in, tgt_in_ids = self.embedding.encoder_embedding_input(
             labels["output_in"])
         tgt_out, tgt_out_ids = self.embedding.encoder_embedding_input(
             labels["output_out"])
         tgt_len = labels["output_length"]
         new_labels = {
             "output_in": tgt_in,
             "output_out": tgt_out,
             "output_length": tgt_len
         }
         # decoder
         logits, predict_ids, des_states = self.decoder.decode(
             mode=mode,
             encoder_outputs=encoder_outputs,
             encoder_state=encoder_states,
             labels=new_labels,
             src_seq_len=src_len)
     if mode == tf.estimator.ModeKeys.PREDICT:
         predictions = self._build_predictions(params, predict_ids)
         tf.add_to_collections("predictions", predictions)
         key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
         export_outputs = {key: predictions}
         prediction_hooks = self._build_prediction_hooks()
         return tf.estimator.EstimatorSpec(
             mode=mode,
             predictions=predictions,
             export_outputs=export_outputs,
             prediction_hooks=prediction_hooks)
     loss = self.compute_loss(logits, new_labels, params)
     if mode == tf.estimator.ModeKeys.TRAIN:
         train_op = self._build_train_op(mode, params, loss)
         training_hooks = []
         return tf.estimator.EstimatorSpec(mode=mode,
                                           loss=loss,
                                           train_op=train_op,
                                           training_hooks=training_hooks)
     if mode == tf.estimator.ModeKeys.EVAL:
         eval_metric_ops = self._build_eval_metric(predict_ids, labels,
                                                   src_len)
         evaluation_hooks = []
         return tf.estimator.EstimatorSpec(
             mode=mode,
             loss=loss,
             eval_metric_ops=eval_metric_ops,
             evaluation_hooks=evaluation_hooks)
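
Every example on this page revolves around the TF 1.x collections API. As a quick reference, a minimal sketch of its contract (illustrative names, graph mode assumed): tf.add_to_collections(names, value) accepts either a single collection name or a list of names and appends value to each, and tf.get_collection(name) reads the entries back.

import tensorflow as tf  # TF 1.x graph-mode API assumed

g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0, name="x")
    tf.add_to_collections(["my_outputs", "debug"], x)  # one value, two collections
    assert tf.get_collection("my_outputs") == [x]
    assert tf.get_collection("debug") == [x]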
Example #2
File: misc.py Project: Li-Ming-Fan/KerBS
def advanced_add_to_collections(name, value, name_value):
    name_name = name + '_name'
    if value not in tf.get_collection(
            name) and name_value not in tf.get_collection(name_name):
        tf.add_to_collections(name, value)
        tf.add_to_collections(name_name, name_value)
        return True
    return False
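
A minimal usage sketch of the helper above (tensor and names illustrative): the first call registers the value under both collections, the second is rejected as a duplicate.

t = tf.constant(0.5)
assert advanced_add_to_collections("scores", t, "score_a") is True   # added
assert advanced_add_to_collections("scores", t, "score_a") is False  # duplicate, skipped
assert len(tf.get_collection("scores")) == 1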
Example #3
    def __call__(self, image):

        with tf.name_scope(self._scope_name):
            image = self._max_pool(image,
                                   ksize=self._ksize,
                                   strides=self._strides,
                                   padding=self._padding)
        tf.add_to_collections([tf.GraphKeys.ACTIVATIONS, 'MaxPool'], image)
        return image
Example #4
def conv2d(x, k=3, in_num=1, out_num=32, strides=1, padding='SAME', activ=None, bias=True, name='conv'):
    w = tf.Variable(tf.random_normal([k, k, in_num, out_num]), name=name+'_w')
    x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding=padding)
    tf.add_to_collections('vars', w)
    if bias:
        b = tf.Variable(tf.random_normal([out_num]), name=name+'_b')
        tf.add_to_collections('vars', b)
        x = tf.nn.bias_add(x, b)
    if activ is not None:
        x = activ(x)
    return x
Example #5
def fc(x, in_num=100, out_num=100, bias=True, activ=None, name='fc'):
    w = tf.Variable(tf.random_normal([in_num, out_num]), name=name+'_w')
    x = tf.matmul(x, w)
    tf.add_to_collections('vars', w)
    if bias:
        b = tf.Variable(tf.random_normal([out_num]), name=name+'_b')
        tf.add_to_collections('vars', b)
        x = tf.add(x, b)
    if activ is not None:
        x = activ(x)
    return x
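
A usage sketch chaining the two helpers above (shapes illustrative): because every weight and bias was registered under 'vars', the full set can be fetched in one call, e.g. to build a Saver over exactly those variables.

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
h = conv2d(images, k=3, in_num=1, out_num=32, activ=tf.nn.relu, name='conv1')
h = tf.reshape(h, [-1, 28 * 28 * 32])
logits = fc(h, in_num=28 * 28 * 32, out_num=10, name='fc1')

collected = tf.get_collection('vars')  # [conv1_w, conv1_b, fc1_w, fc1_b]
saver = tf.train.Saver(var_list=collected)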
Example #6
	def __init__(self, in_channels, filters, kernel_size=1, padding='same', dilation_rate=1, use_bias=True, **kwargs):
		super(Conv1d1x1, self).__init__(
			filters=filters,
			kernel_size=kernel_size,
			padding=padding,
			dilation_rate=dilation_rate,
			use_bias=use_bias,
			**kwargs)
		self.in_channels = in_channels
		self.input_buffer = None
		self._linearizer_weight = None
		tf.add_to_collections(tf.GraphKeys.UPDATE_OPS, self._clear_linearized_weight)
Example #7
 def __call__(self, images):
     with tf.variable_scope(name_or_scope=self._conv_scope + '/'):
         with tf.variable_scope('conv2d'):
             images = self._conv2d(
                 images,
                 self._W,
                 strides=[1, self._strides, self._strides, 1],
                 padding=self._padding)
         tf.summary.histogram(self._name + '/conv2d', images)
         tf.add_to_collections([tf.GraphKeys.ACTIVATIONS, 'Conv2D'], images)
         images = BiasAdd(self._B)(images)
         tf.summary.histogram(self._name + '/bias_add', images)
     # print(self._conv_scope)
     return images
Example #8
def conv2d(x, k=3, in_num=1, out_num=32, strides=1, activ=None, bias=True, name='conv'):
    w = tf.Variable(tf.random_normal([k, k, in_num, out_num]), name=name+'_w')  # define a variable holding the initial weights
    x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='SAME')    # convolution; the 1st and 4th stride entries are N and C, so they stay 1; 'SAME' pads so the output size matches the input
    tf.add_to_collections('vars', w)    # store the variable in a named collection
    tf.summary.histogram('conv_weights', w)
    if bias:
        b = tf.Variable(tf.random_normal([out_num]), name=name+'_b')
        tf.add_to_collections('vars', b)
        x = tf.nn.bias_add(x, b)    # add the bias, broadcasting across channels
        tf.summary.histogram('conv_bias', b)    # kept inside the guard so `b` always exists
    if activ is not None:
        x = activ(x)    # apply the activation function object passed in
    return x
Example #9
def load_variables_to_tf_graph(g: 'graph.Graph'):
    """
  Convenience function to load all variables present in a `gde.Graph` into
  the current default TensorFlow graph, without generating a MetaGraphDef.
  Also adds those variables to the appropriate TensorFlow collections.

  Args:
    g: `gde.Graph` object from which all variables and variable collections
      should be loaded
  """
    for var_name in g.variable_names:
        var = g.name_to_variable(var_name)
        tf_var = tf.Variable.from_proto(var.to_proto())
        tf.add_to_collections(var.collection_names, tf_var)
Example #10
    def __init__(self,
                 variable_op,
                 initial_value,
                 assignment_op,
                 collections=None,
                 caching_device=None,
                 constraint=None,
                 trainable=True):
        if collections is None:
            collections = [tf.GraphKeys.GLOBAL_VARIABLES]
        if not isinstance(collections, (list, tuple, set)):
            raise ValueError(
                "collections argument to Variable constructor must be a list, tuple, "
                "or set. Got %s of type %s" % (collections, type(collections)))
        self._graph_key = tf.get_default_graph()._graph_key  # pylint: disable=protected-access

        if isinstance(initial_value, checkpointable.CheckpointInitialValue):
            self._maybe_initialize_checkpointable()
            self._update_uid = initial_value.checkpoint_position.restore_uid
            initial_value = initial_value.wrapped_value

        if constraint is not None and not callable(constraint):
            raise ValueError("The `constraint` argument must be a callable.")

        self._trainable = trainable
        if trainable and tf.GraphKeys.TRAINABLE_VARIABLES not in collections:
            collections = list(collections) + [
                tf.GraphKeys.TRAINABLE_VARIABLES
            ]

        self._initial_value = initial_value
        shape = self._initial_value.shape
        self._variable_op = variable_op
        self._variable = self._variable_op.outputs[0]
        self._initializer_op = assignment_op

        if caching_device is not None:
            with tf.device(caching_device):
                self._snapshot = array_ops.identity(self._variable,
                                                    name="read")
        else:
            with tf.colocate_with(self._variable_op):
                self._snapshot = array_ops.identity(self._variable,
                                                    name="read")
        tf.add_to_collections(collections, self)

        self._caching_device = caching_device
        self._save_slice_info = None
        self._constraint = constraint
Example #11
def _log_summaries(input_image, label, num_of_classes, output, ignore_label):
    """Logs the summaries for the model.

  Args:
    input_image: Input image of the model. Its shape is [batch_size, height,
      width, channel].
    label: Label of the image. Its shape is [batch_size, height, width].
    num_of_classes: The number of classes of the dataset.
    output: Output of the model. Its shape is [batch_size, height, width].
  """
    # Add summaries for model variables.
    for model_var in tf.model_variables():
        tf.summary.histogram(model_var.op.name, model_var)

    logits = tf.image.resize_bilinear(
        output,
        preprocess_utils.resolve_shape(label, 4)[1:3])
    pred = tf.argmax(logits, 3)
    label = tf.squeeze(label, [3])

    weights = tf.to_float(tf.not_equal(label, ignore_label))
    label_valid = tf.where(tf.equal(label, ignore_label), tf.zeros_like(label),
                           label)

    acc = tf.metrics.accuracy(label_valid, pred, weights=weights)
    miou, update_op = tf.metrics.mean_iou(label_valid,
                                          pred,
                                          num_of_classes,
                                          weights=weights)
    tf.add_to_collections(tf.GraphKeys.UPDATE_OPS, update_op)

    miou = tf.Print(miou, [miou], 'mIoU :')
    acc = tf.Print(acc[1], [acc[1]], 'ACC is :')

    tf.summary.scalar('px_accuracy/train_px_accuracy', acc)
    tf.summary.scalar('mean_iou/train_mean_iou', miou)

    # Add summaries for images, labels, semantic predictions.
    if FLAGS.save_summaries_images:
        img = tf.cast(input_image, tf.uint8)

        summary_label = get_dataset_colormap.label_to_color2(
            label, FLAGS.dataset)
        summary_predictions = get_dataset_colormap.label_to_color2(
            pred, FLAGS.dataset)

        img = tf.concat(axis=2,
                        values=[img, summary_label, summary_predictions])
        tf.summary.image('samples', img, max_outputs=6)
Example #12
def mIOU(y_true,
         y_pred,
         metrics_collections=None,
         updates_collections=None,
         name="iou"):

    with tf.variable_scope(name):

        def iou_internal(y_true_internal, y_pred_internal):

            cm = tf.confusion_matrix(
                tf.reshape(y_true_internal, (-1, )),  # flatten matrix
                tf.reshape(y_pred_internal, (-1, )),  # flatten matrix
                num_classes=2,
                dtype=tf.float32,
                name="confusion_matrix",
            )

            # denominator = TP + FP + FN
            denominator = tf.reduce_sum(
                tf.gather_nd(cm, [[1, 1], [0, 1], [1, 0]]))

            # score = TP / (TP + FP + FN)
            # if TP + FP + FN is 0 then return 1 (correctly identified an image with a constant 0 mask)
            score = tf.expand_dims(
                tf.cond(tf.greater(denominator, 0),
                        lambda: tf.gather_nd(cm, [1, 1]) / denominator,
                        lambda: 1.0), 0)

            return tf.reduce_mean(score * tf.to_float(
                tf.greater(tf.tile(score, [len(IOU_THRESHOLDS)]),
                           IOU_THRESHOLDS)))

        scores = tf.map_fn(lambda x: iou_internal(x[0], x[1]),
                           elems=(y_true, y_pred),
                           dtype=tf.float32)

        # return tf.reduce_mean(scores)

        # based on https://stackoverflow.com/questions/47753736/custom-metrics-with-tf-estimator
        iou, update_op = tf.metrics.mean(scores)

        if metrics_collections:
            tf.add_to_collections(metrics_collections, iou)

        if updates_collections:
            tf.add_to_collections(updates_collections, update_op)

        return iou, update_op
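
The (value, update_op) pair returned above is exactly what tf.estimator expects, so the metric drops straight into an EVAL spec; a hedged sketch, with mode, loss, y_true and y_pred assumed to be defined in the surrounding model_fn:

# inside a model_fn, when mode == tf.estimator.ModeKeys.EVAL
eval_metric_ops = {"mIOU": mIOU(y_true, y_pred)}
spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)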
Example #13
 def __call__(self, images):
     with tf.variable_scope(name_or_scope=self._final_fully_connect_scope +
                            '/'):
         with tf.variable_scope(name_or_scope='finalfullyconnect'):
             with tf.variable_scope(name_or_scope='mat_mul'):
                 images = tf.matmul(images, self._W)
                 tf.summary.histogram(self._name + '/finalfullyconnect',
                                      images)
                 tf.add_to_collections(
                     [tf.GraphKeys.ACTIVATIONS, 'FinalFullyConnect'],
                     images)
             images = BiasAdd(self._B)(images)
             tf.summary.histogram(self._name + '/bias_add', images)
     # print(self._final_fully_connect_scope)
     return images
Example #14
    def create_model_parameters(
            self, var_collections=extension.GraphKeys.METAPARAMETERS):
        """
        :param var_collections: name of collections to store the created variables.
        :return: dictionary to index the created variables.
        """
        if self.use_t:
            # hyper parameters of transformation layer
            for i in range(len(self.dim_hidden)):
                self.model_param_dict["conv" + str(i) +
                                      "_z"] = network_utils.get_identity(
                                          self.dim_hidden[0],
                                          name="conv" + str(i) + "_z",
                                          conv=True)
        elif self.use_warp:
            for i in range(len(self.dim_hidden)):
                self.model_param_dict["conv" + str(i) +
                                      "_z"] = network_utils.get_warp_weight(
                                          self, i, self.conv_initializer)
                self.model_param_dict["bias" + str(i) +
                                      "_z"] = network_utils.get_warp_bias(
                                          self, i, self.bias_initializer)
        for model_param in self.model_param_dict.values():
            tf.add_to_collections(var_collections, model_param)

        return self.model_param_dict
Example #15
File: meta_init_v1.py Project: xuyu67/BOML
 def create_model_parameters(self,
                             var_collections=GraphKeys.METAPARAMETERS):
     if self.use_T:
         # hyper parameters of transformation layer
         for i in range(len(self.dim_hidden)):
             self.model_param_dict["conv" + str(i) +
                                   "_z"] = network_utils.get_identity(
                                       self.dim_hidden[0],
                                       name="conv" + str(i) + "_z",
                                       conv=True)
         self.model_param_dict["w" + str(len(self.dim_hidden)) +
                               "_z"] = network_utils.get_identity(
                                   self.dims[-1],
                                   name="w" + str(len(self.dim_hidden)) +
                                   "_z",
                                   conv=False)
     elif self.use_Warp:
         for i in range(len(self.dim_hidden)):
             self.model_param_dict["conv" + str(i) +
                                   "_z"] = network_utils.get_warp_weight(
                                       self,
                                       layer=i,
                                       initializer=self.conv_initializer)
             self.model_param_dict["bias" + str(i) +
                                   "_z"] = network_utils.get_warp_bias(
                                       self,
                                       layer=i,
                                       initializer=self.bias_initializer)
     for model_param in self.model_param_dict.values():
         tf.add_to_collections(var_collections, model_param)
     return self.model_param_dict
Example #16
def _bias_on_cpu(name, shape, initializer, collect):
    """Helper to create a Variable stored on CPU memory.

    Args:
        name: name of the variable
        shape: list of ints
        initializer: initializer for Variable
        collect: collection name(s) to add the variable to

    Returns:
        Variable Tensor
    """
    dtype = tf.float32
    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    tf.add_to_collections(collect, var)

    return var
Example #17
    def create_outer_parameters(self):
        """
        :param var_collections: name of collections to store the created variables.
        :return: dictionary to index the created variables.
        """
        for i in range(len(self.dim_hidden)):
            self.outer_param_dict["conv" +
                                  str(i)] = network_utils.get_conv_weight(
                                      self,
                                      i=i,
                                      initializer=self.conv_initializer)

            self.outer_param_dict["bias" +
                                  str(i)] = network_utils.get_bias_weight(
                                      self,
                                      i=i,
                                      initializer=self.bias_initializer)
        for hyper in self.outer_param_dict.values():
            tf.add_to_collections(extension.GraphKeys.METAPARAMETERS, hyper)

        if len(self.model_param_dict) == 0 and callable(
                getattr(self, "create_model_parameters", None)):
            self.create_model_parameters()

        return self.outer_param_dict
Example #18
def collect_named_outputs(collections, alias, outputs):
  """Add `Tensor` outputs tagged with alias to collections.
  It is useful to collect end-points or tags for summaries. Example of usage:
  logits = collect_named_outputs('end_points', 'inception_v3/logits', logits)
  assert 'inception_v3/logits' in logits.aliases
  Args:
    collections: A collection or list of collections. If None skip collection.
    alias: String to append to the list of aliases of outputs, for example,
           'inception_v3/conv1'.
    outputs: Tensor, an output tensor to collect
  Returns:
    The outputs Tensor to allow inline call.
  """
  if collections:
    append_tensor_alias(outputs, alias)
    tf.add_to_collections(collections, outputs)
  return outputs
Example #19
    def __call__(self, images):
        with tf.variable_scope(name_or_scope=self._fully_connect_scope + '/'):
            reshape_filter = [-1, self._W.shape[0]]

            with tf.variable_scope(name_or_scope='reshape_convoluted_images'):
                images = tf.reshape(images, reshape_filter)
                tf.summary.histogram(self._name + '/reshape', images)
            with tf.variable_scope(name_or_scope='fullyconnect'):
                with tf.variable_scope(name_or_scope='mat_multiply'):
                    images = tf.matmul(images, self._W)
                tf.summary.histogram(self._name + '/FullyConnect', images)
                tf.add_to_collections(
                    [tf.GraphKeys.ACTIVATIONS, 'FullyConnect'], images)
                images = BiasAdd(self._B)(images)
                tf.summary.histogram(self._name + '/bias_add', images)
        # print(self._fully_connect_scope)
        return images
Example #20
def euclideanDistance(input_batch, weights, verbose):
    """
    Calculates the Euclidean distance between the given weights and the input.
    :param input_batch: (Tensor) The input_batch to calculate the distance with.
                        Shape: [batch_size, 1 or num_maps, 1 or num_som_neurons, weight_vector_size]
    :param weights: (Tensor) The weights to calculate the distance with.
                    Shape: [1, num_maps, num_som_neurons, som_neuron_weight_vector_size]
    :param verbose: (Boolean) If true, the sum of distances will be added to the collection sum_distance_measure
    :return: bmus_per_map_batch: (Tensor) The BMU for each map from each input sample in the batch
                                 Shape: [batch_size, num_maps]
    :return: euclidean_distances_per_map_batch: (Tensor) The Euclidean distance for each map from each input sample
                                                in the batch to the given weights.
                                                Shape: [batch_size, num_maps, num_som_neurons]
    """
    with tf.variable_scope('euclideanDistance'):
        # Compute the distance_vectors between each weight (or SOM neuron) and the input_batch per map.
        # Input Tensor Shape weights: [1, num_maps, num_som_neurons, weight_vector_size]
        # Input Tensor Shape input_batch: [batch_size, 1 or num_maps, 1 or num_som_neurons, weight_vector_size]
        # Output Tensor Shape: [batch_size, num_maps, num_som_neurons, weight_vector_size]
        distance_vectors_per_map_batch = tf.subtract(weights, input_batch)

        # Compute the Euclidean distance between each weight (or SOM neuron) and the input_batch per map.
        # Input Tensor Shape: [batch_size, num_maps, num_som_neurons, weight_vector_size]
        # Output Tensor Shape: [batch_size, num_maps, num_som_neurons]
        euclidean_distances_per_map_batch = tf.norm(
            distance_vectors_per_map_batch, axis=3)

        # Compute the BMUs with the minimum distance to the input_batches for each map.
        # Input Tensor Shape: [batch_size, num_maps, num_som_neurons]
        # Output Tensor Shape: [batch_size, num_maps]
        bmus_per_map_batch = tf.argmin(euclidean_distances_per_map_batch,
                                       axis=2)

        # Input Tensor Shape: [batch_size, num_maps, num_som_neurons]
        # Output Tensor Shape: [1]
        if verbose:
            min_distances_per_map = tf.reduce_min(
                euclidean_distances_per_map_batch, axis=2)
            min_distance_sum = tf.reduce_sum(min_distances_per_map)
            tf.add_to_collections("sum_distance_measure", min_distance_sum)

        # Output Tensor Shape: [batch_size, num_maps]
        # Output Tensor Shape: [batch_size, num_maps, num_som_neurons]
        return bmus_per_map_batch, euclidean_distances_per_map_batch
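
A brief sketch of consuming the collection the verbose branch populates (input_batch and weights assumed to be defined): summing its entries gives one scalar that can be logged per training step.

bmus, distances = euclideanDistance(input_batch, weights, verbose=True)
total_min_distance = tf.add_n(tf.get_collection("sum_distance_measure"))
tf.summary.scalar("total_min_distance", total_min_distance)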
Example #21
File: algo.py Project: houluy/Gymer
 def _build_layer(self, ipt_layer, opt_layer):
     with tf.variable_scope(opt_layer.name, reuse=tf.AUTO_REUSE):
         ipt_layer = tf.layers.Flatten()(ipt_layer)
         ipt_size = ipt_layer.get_shape()[-1]
         weight_shape = [ipt_size, opt_layer.shape]
         weights, biases = self.gen_weights(
             opt_layer.name,
             opt_layer.name + str(opt_layer.layer),
             weight_shape,
             bias_shape=[opt_layer.shape],
             regularizer=opt_layer.regularizer,
             wl=self.regularizer_weight,
         )
         tf.add_to_collections(opt_layer.name, [weights, biases])
         clayer = tf.add(tf.matmul(ipt_layer, weights), biases)
         if opt_layer.activation is not None:
             clayer = opt_layer.activation(clayer)
             clayer = tf.nn.dropout(clayer, rate=self.dropout_rate)
     return clayer
Example #22
    def create_outer_parameters(self,
                                var_collections=GraphKeys.METAPARAMETERS):
        """
        :param var_collections: name of collections to store the created variables.
        :return: dictionary to index the created variables.
        """
        for i in range(len(self.dim_hidden)):
            self.outer_param_dict["conv" +
                                  str(i)] = network_utils.get_conv_weight(
                                      self,
                                      i=i,
                                      initializer=self.conv_initializer)
            self.outer_param_dict["bias" +
                                  str(i)] = network_utils.get_bias_weight(
                                      self,
                                      i=i,
                                      initializer=self.bias_initializer)
        if self.max_pool:
            self.outer_param_dict[
                "w" + str(len(self.dim_hidden))] = tf.get_variable(
                    "w" + str(len(self.dim_hidden)),
                    [self.dim_hidden[-1] * 5 * 5, self.dims[-1]],
                    initializer=self.output_weight_initializer,
                )
            self.outer_param_dict["bias" +
                                  str(len(self.dim_hidden))] = tf.get_variable(
                                      "bias" + str(len(self.dim_hidden)),
                                      [self.dims[-1]],
                                      initializer=self.bias_initializer,
                                      dtype=self.datatype,
                                  )
        else:
            self.outer_param_dict["w" +
                                  str(len(self.dim_hidden))] = tf.get_variable(
                                      "w" + str(len(self.dim_hidden)),
                                      [self.dim_hidden[-1], self.dims[-1]],
                                      initializer=tf.random_normal_initializer,
                                  )
            self.outer_param_dict["bias" +
                                  str(len(self.dim_hidden))] = tf.get_variable(
                                      "bias" + str(len(self.dim_hidden)),
                                      [self.dims[-1]],
                                      initializer=self.bias_initializer,
                                      dtype=self.datatype,
                                  )
        for hyper in self.outer_param_dict.values():
            tf.add_to_collections(var_collections, hyper)

        if len(self.model_param_dict) == 0 and callable(
                getattr(self, "create_model_parameters", None)):
            self.create_model_parameters()

        return self.outer_param_dict
Example #23
def mean_accuracy(y_true,
                  y_pred,
                  metrics_collections=None,
                  updates_collections=None,
                  name="acc"):
    with tf.variable_scope(name):

        scores = tf.reduce_mean(tf.to_float(tf.equal(y_true, y_pred)),
                                axis=[1, 2, 3])

        # return tf.reduce_mean(scores)
        acc, update_op = tf.metrics.mean(scores)

        if metrics_collections:
            tf.add_to_collections(metrics_collections, acc)

        if updates_collections:
            tf.add_to_collections(updates_collections, update_op)

        return acc, update_op
Example #24
def convolutionDistance(input_batch, weights, verbose):
    """
    Calculates the convolution distance between the given weights and the input.
    :param input_batch: (Tensor) The input_batch to calculate the distance with.
                        Shape: [batch_size, 1 or num_maps, 1 or num_som_neurons, weight_vector_size]
    :param weights: (Tensor) The weights to calculate the distance with.
                    Shape: [1, num_maps, num_som_neurons, som_neuron_weight_vector_size]
    :param verbose: (Boolean) If true, the sum of distances will be added to the collection sum_distance_measure
    :return: bmus_per_map_batch: (Tensor) The BMU for each map from each input sample in the batch
                                 Shape: [batch_size, num_maps]
    :return: convolution_distance_per_map_batch: (Tensor) The convolution correlation for each map from each input
                                                sample in the batch to the given weights.
                                                Shape: [batch_size, num_maps, num_som_neurons]
    """
    with tf.variable_scope('convolutionDistance'):
        # Compute the convolution distance between each weight (or SOM neuron) and the input_batch per map.
        # Input Tensor Shape weights:  [1, num_maps, num_som_neurons, weight_vector_size]
        # Input Tensor Shape input_batch: [batch_size, 1 or num_maps, 1 or num_som_neurons, weight_vector_size]
        # Tensor Shape after multiply: [batch_size, num_maps, num_som_neurons, weight_vector_size]
        # Output Tensor Shape: [batch_size, num_maps, num_som_neurons]
        convolution_distance_per_map_batch = tf.reduce_sum(tf.multiply(
            weights, input_batch),
                                                           axis=3)

        # Compute the BMUs with the maximum distance to the input_batches for each map.
        # Input Tensor Shape: [batch_size, num_maps, num_som_neurons]
        # Output Tensor Shape: [batch_size, num_maps]
        bmus_per_map_batch = tf.argmax(convolution_distance_per_map_batch,
                                       axis=2)

        # Input Tensor Shape: [batch_size, num_maps, num_som_neurons]
        # Output Tensor Shape: [1]
        if verbose:
            max_distances_per_map = tf.reduce_max(
                convolution_distance_per_map_batch, axis=2)
            max_distance_sum = tf.reduce_sum(max_distances_per_map)
            tf.add_to_collections("sum_distance_measure", max_distance_sum)

        # Output Tensor Shape: [batch_size, num_maps]
        # Output Tensor Shape: [batch_size, num_maps, num_som_neurons]
        return bmus_per_map_batch, convolution_distance_per_map_batch
Example #25
def _variable_with_weight_decay(name, shape, stddev, wd, collect):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
        collect: collection name(s) to add the variable to

    Returns:
        Variable Tensor
    """
    #dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = cifar10._variable_on_cpu(
        name, shape, initializer=tf.contrib.layers.xavier_initializer_conv2d())
    tf.add_to_collections(collect, var)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
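
These helpers feed the classic cifar10-style total-loss pattern; a hedged sketch, with cross_entropy_mean assumed to be defined: the data loss joins the same 'losses' collection as the per-variable L2 terms, and the optimizer minimizes their sum.

tf.add_to_collection('losses', cross_entropy_mean)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')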
Example #26
def center_loss(features,
                labels,
                num_classes,
                alpha=0.5,
                updates_collections=tf.GraphKeys.UPDATE_OPS,
                scope=None):
    # modified from https://github.com/EncodeTS/TensorFlow_Center_Loss/blob/master/center_loss.py

    assert features.shape.ndims == 2, 'The rank of `features` should be 2!'
    assert 0 <= alpha <= 1, '`alpha` should be in [0, 1]!'

    with tf.variable_scope(scope, 'center_loss', [features, labels]):
        centers = tf.get_variable(
            'centers',
            shape=[num_classes, features.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0),
            trainable=False)

        centers_batch = tf.gather(centers, labels)
        diff = centers_batch - features
        _, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])
        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = alpha * diff
        update_centers = tf.scatter_sub(centers, labels, diff)

        center_loss = 0.5 * tf.reduce_mean(
            tf.reduce_sum((centers_batch - features)**2, axis=-1))

        if updates_collections is None:
            with tf.control_dependencies([update_centers]):
                center_loss = tf.identity(center_loss)
        else:
            tf.add_to_collections(updates_collections, update_centers)

    return center_loss, centers
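
With updates_collections left at its tf.GraphKeys.UPDATE_OPS default, the center update must be attached to the train op explicitly; a minimal sketch (features, labels and the optimizer choice are assumptions):

loss, centers = center_loss(features, labels, num_classes=10)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)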
Example #27
def conv2d_t_sn(inputs,
                num_outputs,
                kernel_size,
                stride=1,
                activation_fn=tf.nn.leaky_relu,
                normalizer_fn=None,
                normalizer_params=None,
                weights_initializer=tf.random_normal_initializer(stddev=0.02),
                weights_regularizer=None,
                biases_initializer=tf.zeros_initializer(),
                biases_regularizer=None,
                reuse=None,
                variables_collections=None,
                outputs_collections=None,
                trainable=True,
                scope=None):

    with tf.variable_scope(scope, default_name="dconv", reuse=reuse):
        x_shape = inputs.get_shape().as_list()
        output_shape = [
            x_shape[0], x_shape[1] * stride, x_shape[2] * stride, num_outputs
        ]

        w = tf.get_variable("weights",
                            shape=[
                                kernel_size, kernel_size,
                                inputs.get_shape()[-1], num_outputs
                            ],
                            initializer=weights_initializer,
                            regularizer=weights_regularizer)
        if variables_collections:
            tf.add_to_collections(variables_collections, w)

        net = tf.nn.conv2d_transpose(inputs,
                                     filter=spectral_norm(w),
                                     output_shape=output_shape,
                                     strides=[1, stride, stride, 1],
                                     padding='SAME')

        if biases_initializer is not None:
            b = tf.get_variable("biases", [num_outputs],
                                initializer=biases_initializer,
                                regularizer=biases_regularizer)
            if variables_collections:
                tf.add_to_collections(variables_collections, b)
            net = tf.nn.bias_add(net, b)

        if normalizer_fn is not None:
            normalizer_params = normalizer_params or {}
            net = normalizer_fn(net, **normalizer_params)

        if activation_fn:
            net = activation_fn(net)

        if outputs_collections:
            tf.add_to_collections(outputs_collections, net)

    return net
Example #28
File: cifar10.py Project: deepuhb/cifar10
def _variable_with_weight_decay(name, shape, stddev, wd):
    """ Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    name: name of the variable
    shape: lists of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2 loss decay multiplied by this float. If none, weight
        decay is not added for this Variable.

    Returns:variable tensor
    """

    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name, shape, tf.truncated_normal_initializer(stddev=stddev,
                                                     dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collections('losses', weight_decay)
    return var
Example #29
 def create_initial_parameter(self):
     self.task_parameter = OrderedDict()
     self.task_parameter["fc_weight"] = tf.get_variable(
         "fc_weight",
         shape=[self.layers[-1].shape.as_list()[-1], self.dims[-1]],
         initializer=self.output_weight_initializer,
         dtype=self.data_type,
     )
     self.task_parameter["fc_bias"] = tf.get_variable(
         "fc_bias",
         [self.dims[-1]],
         initializer=tf.zeros_initializer(tf.float32),
         dtype=self.data_type,
     )
     for initial_param in self.task_parameter.values():
         tf.add_to_collections(self.var_collections, initial_param)
     remove_from_collection(GraphKeys.GLOBAL_VARIABLES,
                            *self.task_parameter.values())
Example #30
 def regularizer(self):
     with tf.variable_scope('Model',reuse=True):
         # Regularization in the model
         if self.regs_user:
             tf.add_to_collections(['reg1', 'reg2'],
                                     tf.contrib.layers.apply_regularization(
                                     tf.contrib.layers.l2_regularizer(scale=self.regs_user),
                                     [tf.get_variable('user_embed')]))
         if self.dom1_regs_item:
             tf.add_to_collections(['reg1'],
                                     tf.contrib.layers.apply_regularization(
                                     tf.contrib.layers.l2_regularizer(scale=self.dom1_regs_item),
                                     [tf.get_variable('item1_embed')]))
         if self.dom2_regs_item:
             tf.add_to_collections(['reg2'],
                                     tf.contrib.layers.apply_regularization(
                                     tf.contrib.layers.l2_regularizer(scale=self.dom2_regs_item),
                                     [tf.get_variable('item2_embed')]))
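
A closing sketch of how the per-domain collections built above are typically consumed (the base loss tensors are assumptions): each domain's objective adds up only its own regularizers, while the shared user regularizer lands in both.

reg1 = tf.get_collection('reg1')
reg2 = tf.get_collection('reg2')
loss_dom1 = base_loss_dom1 + (tf.add_n(reg1) if reg1 else 0.0)
loss_dom2 = base_loss_dom2 + (tf.add_n(reg2) if reg2 else 0.0)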