import numpy as np
import tensorflow as tf  # the examples below use TensorFlow 1.x APIs

def triplet_loss(y_true, y_pred, alpha=0.2):
    """
    Implementation of the triplet loss as defined by formula (3)

    Arguments:
    y_true -- true labels; required by the Keras loss signature but unused in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)

    Returns:
    loss -- real number, value of the loss
    """

    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]

    ### START CODE HERE ### (≈ 4 lines)
    # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
    ### END CODE HERE ###

    return loss
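
A minimal usage sketch (assuming TensorFlow 1.x; the random 128-d encodings are synthetic, for illustration only):

with tf.Session() as sess:
    rng = np.random.RandomState(0)
    anchor = tf.constant(rng.randn(4, 128), dtype=tf.float32)
    positive = tf.constant(rng.randn(4, 128), dtype=tf.float32)
    negative = tf.constant(rng.randn(4, 128), dtype=tf.float32)
    loss = triplet_loss(None, [anchor, positive, negative], alpha=0.2)
    print(sess.run(loss))  # non-negative scalar
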
def custom_my_loss(y_true, y_pred):
    # Sum of per-module mean-squared errors between features of y_true and
    # y_pred (mod1..mod4 are feature extractors defined elsewhere), followed
    # by a square root.
    m1 = mod1(y_true)
    m2 = mod2(y_true)
    m3 = mod3(y_true)
    m4 = mod4(y_true)

    n1 = mod1(y_pred)
    n2 = mod2(y_pred)
    n3 = mod3(y_pred)
    n4 = mod4(y_pred)

    l1 = tf.square(tf.subtract(m1, n1))
    l2 = tf.square(tf.subtract(m2, n2))
    l3 = tf.square(tf.subtract(m3, n3))
    l4 = tf.square(tf.subtract(m4, n4))

    # Reuse m1..m4 to hold per-module element counts (safe: l1..l4 were computed above).
    _, a1, b1, c1 = m1.shape
    _, a2, b2, c2 = m2.shape
    _, a3, b3, c3 = m3.shape
    _, a4, b4, c4 = m4.shape
    m1 = a1 * b1 * c1
    m2 = a2 * b2 * c2
    m3 = a3 * b3 * c3
    m4 = a4 * b4 * c4
    m1 = tf.cast(m1, tf.float32)
    m2 = tf.cast(m2, tf.float32)
    m3 = tf.cast(m3, tf.float32)
    m4 = tf.cast(m4, tf.float32)

    loss = tf.sqrt(
        tf.reduce_sum(l1 / m1) + tf.reduce_sum(l2 / m2) +
        tf.reduce_sum(l3 / m3) + tf.reduce_sum(l4 / m4))

    return loss
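
mod1..mod4 are defined elsewhere in the original project. A hedged sketch with hypothetical stand-ins (simple channel slices of an NHWC tensor) shows how the loss evaluates:

def _make_mod(i):
    # hypothetical stand-in for modN: select one channel as a (None, H, W, 1) map
    return lambda t: t[..., i:i + 1]

mod1, mod2, mod3, mod4 = (_make_mod(i) for i in range(4))

with tf.Session() as sess:
    y_true = tf.constant(np.random.rand(2, 8, 8, 4), dtype=tf.float32)
    y_pred = tf.constant(np.random.rand(2, 8, 8, 4), dtype=tf.float32)
    print(sess.run(custom_my_loss(y_true, y_pred)))  # scalar >= 0
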
def make_relu6(output_name, input_name, const6_name='const6'):
    """Build a GraphDef computing relu6(x) = relu(x) - relu(x - 6), a form
    some converters handle better than a native Relu6 op."""
    graph = tf.Graph()
    with graph.as_default():
        tf_x = tf.placeholder(tf.float32, [10, 10], name=input_name)
        tf_6 = tf.constant(dtype=tf.float32, value=6.0, name=const6_name)
        with tf.name_scope(output_name):
            tf_y1 = tf.nn.relu(tf_x, name='relu1')
            tf_y2 = tf.nn.relu(tf.subtract(tf_x, tf_6, name='sub1'), name='relu2')

        tf_y = tf.subtract(tf_y1, tf_y2, name=output_name)
        
    graph_def = graph.as_graph_def()
    graph_def.node[-1].name = output_name

    # remove the placeholder and const nodes; their inputs are remapped when
    # this GraphDef is grafted into another graph (each loop removes at most
    # one node, so mutating the list during iteration is harmless here)
    for node in graph_def.node:
        if node.name == input_name:
            graph_def.node.remove(node)
    for node in graph_def.node:
        if node.name == const6_name:
            graph_def.node.remove(node)
    for node in graph_def.node:
        if node.op == '_Neg':
            node.op = 'Neg'
            
    return graph_def
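
A usage sketch (assuming TF 1.x): graft the returned GraphDef into a fresh graph, remapping the removed placeholder and constant via input_map, then evaluate the renamed output node.

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [10, 10], name='x_in')
    y, = tf.import_graph_def(
        make_relu6('relu6_out', 'x'),
        input_map={'x:0': x, 'const6:0': tf.constant(6.0)},
        return_elements=['relu6_out:0'],
        name='')
    with tf.Session() as sess:
        out = sess.run(y, feed_dict={x: np.linspace(-2., 10., 100).reshape(10, 10)})
        assert out.min() >= 0.0 and out.max() <= 6.0  # behaves like relu6
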
Example #4
    def _link(self, pre_states, x, **kwargs):
        self._check_state(pre_states, 2)
        h, c = pre_states

        # Get neuron activations
        if self.lottery_activated:
            f_tilde, i_tilde_bar, f, i, o, c_hat = self._get_neurons(x, h)
        else:
            f_tilde, i_tilde_bar, f, i, o, c_hat = self._get_neurons_fast(x, h)
        f_tilde_bar = tf.subtract(1., f_tilde)
        i_tilde = tf.subtract(1., i_tilde_bar)

        # Update
        # omega = tf.multiply(f_tilde, i_tilde)
        f_hat = tf.multiply(f_tilde,
                            tf.add(tf.multiply(f, i_tilde), i_tilde_bar))
        i_hat = tf.multiply(i_tilde,
                            tf.add(tf.multiply(f_tilde, i), f_tilde_bar))
        new_c = tf.add(tf.multiply(f_hat, c), tf.multiply(i_hat, c_hat))
        new_h = tf.multiply(o, tf.tanh(new_c))

        # Register gates and return
        self._gate_dict['master_forget_gate'] = f_tilde
        self._gate_dict['master_input_gate'] = i_tilde
        self._gate_dict['forget_gate'] = f
        self._gate_dict['input_gate'] = i
        self._gate_dict['output_gate'] = o
        return new_h, (new_h, new_c)
Example #5
def triplet_loss(y_true, y_pred, alpha=0.5):
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
    return loss
Example #6
def r2_op(predictions, targets):
    """ r2_op.

    An op that calculates the coefficient of determination (R²).

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        r2 = r2_op(y_pred, y_true)

        # Calculate the R² score by feeding data X and labels Y
        r2_score = sess.run(r2, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.

    Returns:
        `Float`. The R² score.

    """
    with tf.name_scope('StandardError'):
        a = tf.reduce_sum(tf.square(tf.subtract(targets, predictions)))
        b = tf.reduce_sum(
            tf.square(tf.subtract(targets, tf.reduce_mean(targets))))
        return tf.subtract(1.0, tf.divide(a, b))
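
A sanity check (sketch, TF 1.x): perfect predictions score R² = 1, while a constant-zero predictor scores well below it.

with tf.Session() as sess:
    t = tf.constant([1.0, 2.0, 3.0, 4.0])
    print(sess.run(r2_op(t, t)))        # 1.0
    print(sess.run(r2_op(t * 0.0, t)))  # -5.0 for this data
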
Example #7
def create_logistic(m, optimize_model):
    """Export one gradient step of logistic regression to ONNX.
    Requires tf2onnx, onnx.optimizer and onnxsim.simplify, imported
    elsewhere in the original module."""
    batch_size = 1
    model_name = f"logistic{m}"
    with tf.Session() as sess:
        x = tf.placeholder(tf.float32, shape=(m,), name='x')
        y = tf.placeholder(tf.float32, shape=(1,), name='y')

        w = tf.placeholder(tf.float32, shape=(m,), name='W')
        input_shapes = {"x:0": x.shape, "W:0": w.shape, "y:0": y.shape}

        mu = tf.constant(1, dtype=tf.float32, name="mu")
        h = tf.reduce_sum(tf.multiply(w, x))
        h = tf.math.sigmoid(h)
        d = tf.subtract(h, y)
        g = tf.multiply(d, x)

        g = tf.multiply(mu, g)
        w = tf.subtract(w, g, name='w_out')

        input_names = ['x:0', 'y:0']
        output_names = ['w_out:0']

        onnx_graph = tf2onnx.tfonnx.process_tf_graph(sess.graph, input_names=input_names, output_names=output_names)

        model_proto = onnx_graph.make_model(model_name)

        model_proto = optimizer.optimize(model_proto, ['eliminate_identity'])
        if optimize_model:
            model_proto, check = simplify(model_proto, input_shapes=input_shapes)
            assert check
        with open(f"./{model_name}.onnx", "wb") as f:
            f.write(model_proto.SerializeToString())
Example #8
    def z_q_neighbors(self):
        """Aggregates the respective neighbors in the SOM for every embedding in z_q."""
        k_1 = self.k // self.som_dim[1]
        k_2 = self.k % self.som_dim[1]
        k_stacked = tf.stack([k_1, k_2], axis=1)

        k1_not_top = tf.less(k_1, tf.constant(self.som_dim[0]-1, dtype=tf.int64))
        k1_not_bottom = tf.greater(k_1, tf.constant(0, dtype=tf.int64))
        k2_not_right = tf.less(k_2, tf.constant(self.som_dim[1]-1, dtype=tf.int64))
        k2_not_left = tf.greater(k_2, tf.constant(0, dtype=tf.int64))

        k1_up = tf.where(k1_not_top, tf.add(k_1, 1), k_1)
        k1_down = tf.where(k1_not_bottom, tf.subtract(k_1, 1), k_1)
        k2_right = tf.where(k2_not_right, tf.add(k_2, 1), k_2)
        k2_left = tf.where(k2_not_left, tf.subtract(k_2, 1), k_2)

        z_q_up = tf.where(k1_not_top, tf.gather_nd(self.embeddings, tf.stack([k1_up, k_2], axis=1)),
                          tf.zeros([self.batch_size, self.latent_dim]))
        z_q_down = tf.where(k1_not_bottom, tf.gather_nd(self.embeddings, tf.stack([k1_down, k_2], axis=1)),
                          tf.zeros([self.batch_size, self.latent_dim]))
        z_q_right = tf.where(k2_not_right, tf.gather_nd(self.embeddings, tf.stack([k_1, k2_right], axis=1)),
                          tf.zeros([self.batch_size, self.latent_dim]))
        z_q_left = tf.where(k2_not_left, tf.gather_nd(self.embeddings, tf.stack([k_1, k2_left], axis=1)),
                          tf.zeros([self.batch_size, self.latent_dim]))

        z_q_neighbors = tf.stack([self.z_q, z_q_up, z_q_down, z_q_right, z_q_left], axis=1)
        return z_q_neighbors
def _flip_boxes_left_right(boxes):
    ymin, xmin, ymax, xmax = tf.split(value=boxes,
                                      num_or_size_splits=4,
                                      axis=1)
    flipped_xmin = tf.subtract(1.0, xmax)
    flipped_xmax = tf.subtract(1.0, xmin)
    flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
    return flipped_boxes
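
For example (sketch, TF 1.x), with boxes in normalized [ymin, xmin, ymax, xmax] coordinates:

with tf.Session() as sess:
    boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]])
    print(sess.run(_flip_boxes_left_right(boxes)))
    # [[0.1, 0.4, 0.5, 0.8]]: xmin' = 1 - xmax, xmax' = 1 - xmin
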
Example #10
    def loss_som(self):
        """Computes the SOM loss."""
        k = tf.range(self.som_dim[0] * self.som_dim[1])
        k_1 = k // self.som_dim[0]
        k_2 = k % self.som_dim[1]

        k1_not_top = tf.less(k_1,
                             tf.constant(self.som_dim[0] - 1, dtype=tf.int32))
        k1_not_bottom = tf.greater(k_1, tf.constant(0, dtype=tf.int32))
        k2_not_right = tf.less(
            k_2, tf.constant(self.som_dim[1] - 1, dtype=tf.int32))
        k2_not_left = tf.greater(k_2, tf.constant(0, dtype=tf.int32))

        k1_up = tf.where(k1_not_top, tf.add(k_1, 1),
                         tf.zeros(tf.shape(k_1), dtype=tf.dtypes.int32))
        k1_down = tf.where(
            k1_not_bottom, tf.subtract(k_1, 1),
            tf.ones(tf.shape(k_1), dtype=tf.dtypes.int32) *
            (self.som_dim[0] - 1))
        k2_right = tf.where(k2_not_right, tf.add(k_2, 1),
                            tf.zeros(tf.shape(k_2), dtype=tf.dtypes.int32))
        k2_left = tf.where(
            k2_not_left, tf.subtract(k_2, 1),
            tf.ones(tf.shape(k_2), dtype=tf.dtypes.int32) *
            (self.som_dim[0] - 1))

        k_up = k1_up * self.som_dim[0] + k_2
        k_down = k1_down * self.som_dim[0] + k_2
        k_right = k_1 * self.som_dim[0] + k2_right
        k_left = k_1 * self.som_dim[0] + k2_left

        q_t = tf.transpose(self.q_ng)
        q_up = tf.transpose(
            tf.gather_nd(
                q_t, tf.reshape(k_up, [self.som_dim[0] * self.som_dim[1], 1])))
        q_down = tf.transpose(
            tf.gather_nd(
                q_t, tf.reshape(k_down,
                                [self.som_dim[0] * self.som_dim[1], 1])))
        q_right = tf.transpose(
            tf.gather_nd(
                q_t, tf.reshape(k_right,
                                [self.som_dim[0] * self.som_dim[1], 1])))
        q_left = tf.transpose(
            tf.gather_nd(
                q_t, tf.reshape(k_left,
                                [self.som_dim[0] * self.som_dim[1], 1])))
        q_neighbours = tf.stack([q_up, q_down, q_right, q_left], axis=2)
        q_neighbours = tf.reduce_sum(tf.math.log(q_neighbours), axis=-1)
        # threshold
        #maxx = 0.1
        #mask = tf.greater_equal(self.q, maxx * tf.ones_like(self.q))
        #new_q = tf.multiply(self.q, tf.cast(mask, tf.float32))
        new_q = self.q
        q_n = tf.math.multiply(q_neighbours, tf.stop_gradient(new_q))
        q_n = tf.reduce_sum(q_n, axis=-1)
        qq = -tf.reduce_mean(q_n)
        return qq
Example #11
def triplet_loss(anchor, positive, negative, alpha):
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Example #12
def _softmax_cross_entropy_mme(logits, label):
    """Helper function to compute softmax cross entropy loss."""
    with tf.name_scope("softmax_cross_entropy"):
        batch_size = tf.shape(logits)[0]
        num_boxes = tf.shape(logits)[1]
        num_classes = tf.shape(logits)[2]
        reduce_sum_filter = tf.fill([1, 1, num_classes, 1],
                                    1.0,
                                    name="reduce_sum_filter")

        logits = tf.reshape(logits, [1, batch_size, num_boxes, num_classes])
        logits_t = tf.transpose(logits, perm=(0, 1, 3, 2), name="logits_t")
        reduce_max = tf.reduce_max(logits_t, 2, name="reduce_max")

        max_logits = tf.reshape(reduce_max, [1, batch_size, num_boxes, 1],
                                name="max_logits")

        shifted_logits = tf.subtract(logits, max_logits, name="shifted_logits")
        exp_shifted_logits = tf.math.exp(shifted_logits,
                                         name="exp_shifted_logits")

        # MME was idle during classification_loss computation.
        # In this case, conv2d is equivalent to reduce_sum, but reduce_sum runs on the TPC while conv2d runs on the MME.
        sum_exp = tf.nn.conv2d(exp_shifted_logits,
                               reduce_sum_filter,
                               strides=1,
                               padding="VALID",
                               name="sum_exp")

        log_sum_exp = tf.math.log(sum_exp, name="log_sum_exp")
        one_hot_label = tf.one_hot(label, num_classes, name="one_hot_label")

        # MME was idle during classification_loss computation.
        # In this case, conv2d is equivalent to reduce_sum, but reduce_sum runs on the TPC while conv2d runs on the MME.
        shifted_logits2 = tf.nn.conv2d(shifted_logits * one_hot_label,
                                       reduce_sum_filter,
                                       strides=1,
                                       padding="VALID",
                                       name="shifted_logits2")

        loss = tf.subtract(log_sum_exp, shifted_logits2, name="loss/sub")
        loss = tf.reshape(loss, [batch_size, -1], name="loss")

    def grad(dy):
        with tf.name_scope("gradients/softmax_cross_entropy"):
            dy_reshaped = tf.reshape(dy, [1, batch_size, num_boxes, 1],
                                     name="dy/Reshape")
            div = tf.math.truediv(exp_shifted_logits, sum_exp, name="div")
            sub = tf.math.subtract(div, one_hot_label, name="sub")
            ret = tf.math.multiply(sub, dy_reshaped, name="mul")
            reshaped_ret = tf.reshape(ret,
                                      [batch_size, num_boxes, num_classes],
                                      name="Reshape")
        return reshaped_ret, dy

    return loss, grad
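
In its original context this helper is presumably wrapped in @tf.custom_gradient, hence the (loss, grad) return. A hedged numeric check against the built-in op (assuming TF 1.x):

with tf.Session() as sess:
    logits = tf.constant(np.random.randn(2, 3, 5), dtype=tf.float32)
    labels = tf.constant(np.random.randint(0, 5, size=(2, 3)))
    mme_loss, _ = _softmax_cross_entropy_mme(logits, labels)
    ref = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                         logits=logits)
    print(sess.run(tf.reduce_max(tf.abs(mme_loss - ref))))  # ~0
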
Example #13
def _read_and_decode(filename_queue, image_pixel=96, distort=0):
  """Read a norb tf record file."""
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
          'height': tf.FixedLenFeature([], tf.int64),
          'width': tf.FixedLenFeature([], tf.int64),
          'depth': tf.FixedLenFeature([], tf.int64),
          'meta': tf.FixedLenFeature([4], tf.int64),
      })

  # Convert from a scalar string tensor (whose single string has
  # length image_pixels) to a uint8 tensor with shape
  # [image_pixels].
  image = tf.decode_raw(features['image_raw'], tf.uint8)
  height = tf.cast(features['height'], tf.int32)
  depth = tf.cast(features['depth'], tf.int32)
  image = tf.reshape(image, tf.stack([depth, height, height]))
  image = tf.transpose(image, [1, 2, 0])
  image = tf.cast(image, tf.float32)
  if image_pixel < 96:
    print('image resizing to {}'.format(image_pixel))
    image = tf.image.resize_images(image, [image_pixel, image_pixel])
  # keep a copy before distortion; defined unconditionally so the return
  # below never hits an undefined name
  orig_images = image

  if image_pixel == 48:
    new_dim = 32
  elif image_pixel == 32:
    new_dim = 22
  else:
    new_dim = image_pixel  # fallback so new_dim is always defined
  if distort == 1:
    image = tf.image.random_brightness(image, max_delta=63)
    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
    image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))
    # 0.26179938779 is 15 degrees in radians (note: no rotation is applied below)
    image = tf.image.per_image_standardization(image)
    image_pixel = new_dim
  elif distort == 2:
    image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)
    image = tf.image.per_image_standardization(image)
    image_pixel = new_dim
  else:
    image = image * (1.0 / 255.0)
    image = tf.div(
        tf.subtract(image, tf.reduce_min(image)),
        tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))

  # Convert label from a scalar uint8 tensor to an int32 scalar.
  label = tf.cast(features['label'], tf.int32)

  return image, label, image_pixel, orig_images
Example #14
def cindex_score(y_true, y_pred):

    g = tf.subtract(tf.expand_dims(y_pred, -1), y_pred)
    # use tf.equal, not `==`: Tensor.__eq__ is not an elementwise op in TF 1.x
    g = tf.cast(tf.equal(g, 0.0), tf.float32) * 0.5 + tf.cast(tf.greater(g, 0.0), tf.float32)

    f = tf.subtract(tf.expand_dims(y_true, -1), y_true) > 0.0
    f = tf.matrix_band_part(tf.cast(f, tf.float32), -1, 0)

    g = tf.reduce_sum(tf.multiply(g, f))
    f = tf.reduce_sum(f)

    return tf.where(tf.equal(g, 0), 0.0, g / f)  # avoid 0/0 when no admissible pairs exist
Example #15
def rescale_input(x):
    """Rescales image input to be in range [0,1]."""

    current_min = tf.reduce_min(x)
    current_max = tf.reduce_max(x)

    # we add an epsilon value to prevent division by zero
    epsilon = 1e-5
    rescaled_x = tf.div(
        tf.subtract(x, current_min),
        tf.maximum(tf.subtract(current_max, current_min), epsilon))
    return rescaled_x
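
For instance (sketch, TF 1.x):

with tf.Session() as sess:
    x = tf.constant([[-1.0, 0.0, 3.0]])
    print(sess.run(rescale_input(x)))  # [[0.0, 0.25, 1.0]]
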
Example #16
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
    """Returns result of eq # 24 of http://arxiv.org/abs/1308.0850."""
    norm1 = tf.subtract(x1, mu1)
    norm2 = tf.subtract(x2, mu2)
    s1s2 = tf.multiply(s1, s2)
    # eq 25
    z = (tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2)) -
         2 * tf.div(tf.multiply(rho, tf.multiply(norm1, norm2)), s1s2))
    neg_rho = 1 - tf.square(rho)
    result = tf.exp(tf.div(-z, 2 * neg_rho))
    denom = 2 * np.pi * tf.multiply(s1s2, tf.sqrt(neg_rho))
    result = tf.div(result, denom)
    return result
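
At the mean, z = 0 and the density reduces to 1 / (2π s1 s2 sqrt(1 - rho²)); a quick check (sketch, TF 1.x):

with tf.Session() as sess:
    d = tf_2d_normal(x1=0., x2=0., mu1=0., mu2=0., s1=1., s2=1., rho=0.)
    print(sess.run(d))  # ~0.15915 = 1 / (2 * pi)
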
Example #17
    def _gast(self, pre_s, s_bar, update_gate=None, forget_gate=None):
        """Gated Additive State Transition."""
        assert not all([update_gate is None, forget_gate is None])
        # Couple gates if necessary
        if forget_gate is None: forget_gate = tf.subtract(1.0, update_gate)
        elif update_gate is None: update_gate = tf.subtract(1.0, forget_gate)
        # Apply recurrent dropout without memory loss if necessary
        if self._dropout_rate > 0:
            s_bar = self.dropout(s_bar, self._dropout_rate)
        # Update states
        with tf.name_scope('GAST'):
            return tf.add(tf.multiply(forget_gate, pre_s),
                          tf.multiply(update_gate, s_bar))
Example #18
    def contrastiveLoss(self, margin=5.0):  # not used in triplet
        with tf.variable_scope("triplet") as scope:
            labels = self.tf_Y
            # Euclidean distance squared
            dist = tf.pow(tf.subtract(self.outputA, self.outputB), 2, name='Dw')
            Dw = tf.reduce_sum(dist, 1)
            # add 1e-6 to increase the stability of calculating the gradients
            Dw2 = tf.sqrt(Dw + 1e-6, name='Dw2')
            # Loss function
            lossSimilar = tf.multiply(labels, tf.pow(Dw2, 2), name='contrastiveLoss_1')
            lossDissimilar = tf.multiply(tf.subtract(1.0, labels),
                                         tf.pow(tf.maximum(tf.subtract(margin, Dw2), 0), 2),
                                         name='contrastiveLoss_2')
            loss = tf.reduce_mean(tf.add(lossSimilar, lossDissimilar), name='contrastiveLoss')
        return loss
Example #19
    def loss_with_step(self):
        margin = 5.0
        labels_t = self.y_
        labels_f = tf.subtract(1.0, self.y_, name="1-yi")  # labels_ = !labels
        eucd2 = tf.pow(tf.subtract(self.o1, self.o2), 2)
        eucd2 = tf.reduce_sum(eucd2, 1)
        eucd = tf.sqrt(eucd2 + 1e-6, name="eucd")
        C = tf.constant(margin, name="C")
        pos = tf.multiply(labels_t, eucd, name="y_x_eucd")
        neg = tf.multiply(labels_f,
                          tf.maximum(0.0, tf.subtract(C, eucd)),
                          name="Ny_C-eucd")
        losses = tf.add(pos, neg, name="losses")
        loss = tf.reduce_mean(losses, name="loss")
        return loss
Example #20
def ls(x):
    # Appears to implement (approximately) the general robust loss of
    # Barron (2019) with alpha = -0.001; constants kept from the original.
    # 0.8
    threshold = 1 / 256 / 2
    x_coef = 2 / threshold
    a = tf.constant(-0.001)
    # a = tf.constant(1.0)

    nom = tf.abs(tf.subtract(tf.constant(2.0), a))
    mul1 = tf.divide(nom, a)
    x = tf.multiply(x, tf.constant(x_coef))
    x = tf.multiply(x, x)
    mul2 = tf.subtract(
        tf.pow(tf.add(tf.divide(tf.multiply(x, x), nom), tf.constant(1.0)),
               tf.div(a, tf.constant(2.0))), tf.constant(1.0))

    return tf.multiply(mul1, mul2)
Example #21
    def __init__(self, name, input_size, output_size, size_layer,
                 learning_rate):
        with tf.variable_scope(name):
            self.X = tf.placeholder(tf.float32, (None, None, input_size))
            self.Y = tf.placeholder(tf.float32, (None, output_size))
            self.hidden_layer = tf.placeholder(tf.float32,
                                               (None, 2 * size_layer))
            self.REWARD = tf.placeholder(tf.float32, (None, 1))
            # NOTE: this first dense layer's output is overwritten below and
            # never used.
            feed_critic = tf.layers.dense(self.X,
                                          size_layer,
                                          activation=tf.nn.relu)
            cell = tf.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple=False)
            self.rnn, self.last_state = tf.nn.dynamic_rnn(
                inputs=self.X,
                cell=cell,
                dtype=tf.float32,
                initial_state=self.hidden_layer)
            tensor_action, tensor_validation = tf.split(self.rnn[:, -1], 2, 1)
            feed_action = tf.layers.dense(tensor_action, output_size)
            feed_validation = tf.layers.dense(tensor_validation, 1)
            # duelling head: value + (advantage - mean advantage)
            feed_critic = feed_validation + tf.subtract(
                feed_action, tf.reduce_mean(
                    feed_action, axis=1, keep_dims=True))
            feed_critic = tf.nn.relu(feed_critic) + self.Y
            feed_critic = tf.layers.dense(feed_critic,
                                          size_layer // 2,
                                          activation=tf.nn.relu)
            self.logits = tf.layers.dense(feed_critic, 1)
            self.cost = tf.reduce_mean(tf.square(self.REWARD - self.logits))
            self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
                self.cost)
Example #22
    def read_tensor_from_image_file(self,
                                    file_name,
                                    input_height=299,
                                    input_width=299,
                                    input_mean=0,
                                    input_std=255):
        input_name = "file_reader"
        output_name = "normalized"
        file_reader = tf.read_file(file_name, input_name)
        if file_name.endswith(".png"):
            image_reader = tf.image.decode_png(file_reader,
                                               channels=3,
                                               name='png_reader')
        elif file_name.endswith(".gif"):
            image_reader = tf.squeeze(
                tf.image.decode_gif(file_reader, name='gif_reader'))
        elif file_name.endswith(".bmp"):
            image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
        else:
            image_reader = tf.image.decode_jpeg(file_reader,
                                                channels=3,
                                                name='jpeg_reader')
        float_caster = tf.cast(image_reader, tf.float32)
        dims_expander = tf.expand_dims(float_caster, 0)
        resized = tf.image.resize_bilinear(dims_expander,
                                           [input_height, input_width])
        normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
        with tf.Session() as sess:
            result = sess.run(normalized)

        return result
Example #23
    def _make_activity_op(self, input_tensor):
        """ Creates the op for calculating the activity of a SOM
        :param input_tensor: A tensor to calculate the activity of. Must be of shape `[batch_size, dim]` where `dim` is
        the dimensionality of the SOM's weights.
        :return A handle to the newly created activity op:
        """
        with self._graph.as_default():
            with tf.name_scope("Activity"):
                # This constant controls the width of the gaussian.
                # The closer to 0 it is, the wider it is.
                c = tf.constant(self._c, dtype="float32")
                # Get the euclidean distance between each neuron and the input vectors
                dist = tf.norm(tf.subtract(
                    tf.expand_dims(self._weights, axis=0),
                    tf.expand_dims(input_tensor, axis=1)),
                               name="Distance",
                               axis=2)  # [batch_size, neurons]

                # Calculate the Gaussian of the activity. Units with distances closer to 0 will have activities
                # closer to 1.
                activity = tf.exp(tf.multiply(tf.pow(dist, 2), c),
                                  name="Gaussian")

                # Convert the activity into a softmax probability distribution
                if self._softmax_activity:
                    activity = tf.divide(tf.exp(activity),
                                         tf.expand_dims(tf.reduce_sum(
                                             tf.exp(activity), axis=1),
                                                        axis=-1),
                                         name="Softmax")

                return tf.identity(activity, name="Output")
Example #24
    def _build_model(self):
        self.graph_built = True
        tf.set_random_seed(self.seed)
        self.labels = tf.placeholder(tf.float32, shape=[None])
        self.is_training = tf.placeholder_with_default(False, shape=[])
        self.linear_embed, self.pairwise_embed, self.deep_embed = [], [], []

        self._build_user_item()
        if self.sparse:
            self._build_sparse()
        if self.dense:
            self._build_dense()

        linear_embed = tf.concat(self.linear_embed, axis=1)
        pairwise_embed = tf.concat(self.pairwise_embed, axis=1)
        deep_embed = tf.concat(self.deep_embed, axis=1)

        linear_term = tf.layers.dense(linear_embed, units=1, activation=None)
        pairwise_term = 0.5 * tf.subtract(
            tf.square(tf.reduce_sum(pairwise_embed, axis=1)),
            tf.reduce_sum(tf.square(pairwise_embed), axis=1)
        )
        deep_term = dense_nn(deep_embed,
                             self.hidden_units,
                             use_bn=self.use_bn,
                             dropout_rate=self.dropout_rate,
                             is_training=self.is_training)

        concat_layer = tf.concat(
            [linear_term, pairwise_term, deep_term], axis=1)
        self.output = tf.squeeze(
            tf.layers.dense(concat_layer, units=1, activation=None))
        count_params()
Example #25
    def hinge_loss(self, aff, neg_aff):
        """Maximum-margin optimization using the hinge loss."""
        diff = tf.nn.relu(tf.subtract(neg_aff,
                                      tf.expand_dims(aff, 0) - self.margin),
                          name='diff')
        loss = tf.reduce_sum(diff)
        return loss
Example #26
def cifar_process(image, augmentation=True):
    """Map function for cifar dataset.

  Args:
    image: An image tensor.
    augmentation: If True, process train images.

  Returns:
    A processed image tensor.
  """
    # label = tf.cast(label, dtype=tf.int32)
    image = tf.math.divide(tf.cast(image, dtype=tf.float32), 255.0)

    if augmentation:
        image = tf.image.resize_image_with_crop_or_pad(image, 32 + 4, 32 + 4)
        # Randomly crop a [HEIGHT, WIDTH] section of the image.
        image = tf.image.random_crop(image, [32, 32, 3])
        # Randomly flip the image horizontally.
        image = tf.image.random_flip_left_right(image)
        image = tf.clip_by_value(image, 0, 1)

    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

    return image
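
Typically applied as a tf.data map function (sketch, TF 1.x; the synthetic uint8 batch is made up for illustration):

images = np.random.randint(0, 256, size=(8, 32, 32, 3)).astype(np.uint8)
ds = tf.data.Dataset.from_tensor_slices(images)
ds = ds.map(lambda img: cifar_process(img, augmentation=True)).batch(4)
batch = ds.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(batch).shape)  # (4, 32, 32, 3), values in [-1, 1]
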
Example #27
def preprocess_image(image,
                     output_height,
                     output_width,
                     is_training=False,
                     resize_side_min=_RESIZE_SIDE_MIN,
                     resize_side_max=_RESIZE_SIDE_MAX):
    """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.
    resize_side_min: The lower bound for the smallest side of the image for
      aspect-preserving resizing. If `is_training` is `False`, then this value
      is used for rescaling.
    resize_side_max: The upper bound for the smallest side of the image for
      aspect-preserving resizing. If `is_training` is `False`, this value is
      ignored. Otherwise, the resize side is sampled from
        [resize_size_min, resize_size_max].
  Returns:
    A preprocessed image.
  """
    if is_training:
        image = preprocess_for_train(image, output_height, output_width,
                                     resize_side_min, resize_side_max)
    else:
        image = preprocess_for_eval(image, output_height, output_width,
                                    resize_side_min)
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
Example #28
    def read_tensor_from_image_file(self,
                                    url,
                                    input_height=299,
                                    input_width=299,
                                    input_mean=0,
                                    input_std=255):
        input_name = "file_reader"
        output_name = "normalized"
        imageData = requests.get(url).content

        if ".png" in url:
            image_reader = tf.image.decode_png(imageData,
                                               channels=3,
                                               name='png_reader')
        elif ".gif" in url:
            image_reader = tf.squeeze(
                tf.image.decode_gif(imageData, name='gif_reader'))
        elif ".bmp" in url:
            image_reader = tf.image.decode_bmp(imageData, name='bmp_reader')
        else:
            image_reader = tf.image.decode_jpeg(imageData,
                                                channels=3,
                                                name='jpeg_reader')
        float_caster = tf.cast(image_reader, tf.float32)
        dims_expander = tf.expand_dims(float_caster, 0)
        resized = tf.image.resize_bilinear(dims_expander,
                                           [input_height, input_width])
        normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
        with tf.Session() as sess:
            result = sess.run(normalized)

        return result
Example #29
def TSS(y_true, y_pred):
    """
    TSS

    import tensorflow as tf
    sess = tf.Session()
    a=tf.contrib.metrics.confusion_matrix([1, 0, 0, 0, 0], [1, 0, 1, 0, 0])
    a.eval(session=sess)
    array([[3, 1],
          [0, 1]], dtype=int32)
    a[0][0].eval(session=sess)
    3 -> tn
    a[0][1].eval(session=sess)
    1 -> fp
    """
    confusion_matrix = tf.confusion_matrix(labels=tf.argmax(y_true, 1),
                                           predictions=tf.argmax(y_pred, 1),
                                           num_classes=2,
                                           dtype=tf.float32)
    tp = confusion_matrix[1][1]
    fn = confusion_matrix[1][0]
    fp = confusion_matrix[0][1]
    tn = confusion_matrix[0][0]
    tmp1 = tf.divide(tp, tf.add(tp, fn))
    tmp2 = tf.divide(fp, tf.add(fp, tn))
    tss = tf.subtract(tmp1, tmp2)
    return tss
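
Usage sketch (TF 1.x), reproducing the docstring's confusion matrix with one-hot inputs:

with tf.Session() as sess:
    y_true = tf.constant(np.eye(2)[[1, 0, 0, 0, 0]], dtype=tf.float32)
    y_pred = tf.constant(np.eye(2)[[1, 0, 1, 0, 0]], dtype=tf.float32)
    print(sess.run(TSS(y_true, y_pred)))  # 1/1 - 1/4 = 0.75
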
    def _hinge_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        diff = tf.nn.relu(tf.subtract(neg_aff, tf.expand_dims(aff, 1) - self.margin),
                          name='diff')
        loss = tf.reduce_sum(diff)
        self.neg_shape = tf.shape(neg_aff)
        return loss