def bound_layer(val_in, bound_val, name="bound_scale"):
    with tf.name_scope(name):        
        # Bound val_in to (-pi/2, pi/2) with atan, then scale by bound_val
        activation = tf.multiply(tf.atan(val_in), bound_val)
        # Add summaries for helping debug        
        tf.summary.histogram("val_in", val_in)
        tf.summary.histogram("activation", activation)
        return activation
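A quick usage sketch (an editorial addition, assuming the TF 1.x graph API used above): with bound_val=2 the activation is confined to (-pi, pi), since tf.atan itself saturates at +/-pi/2.

import tensorflow as tf

logits = tf.constant([-10.0, 0.0, 10.0])
steering = bound_layer(logits, bound_val=2.0)
with tf.Session() as sess:
    print(sess.run(steering))  # ~[-2.94, 0.0, 2.94]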
Example #2
def scharr_edges(image, magnitude):
    """
    Returns a tensor holding modified Scharr edge maps.
    Arguments:
    image: Image tensor with shape [batch_size, h, w, d] and type float32.
    The image(s) must be 2x2 or larger.
    magnitude: Boolean to determine if the edge magnitude or edge direction is returned
    Returns:
    Tensor holding edge maps for each channel. Returns a tensor with shape
    [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]],
    [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Scharr filter.
    """

    # Define vertical and horizontal Scharr filters.
    static_image_shape = image.get_shape()
    image_shape = tf.shape(image)

    # modified 3x3 Scharr
    # kernels = [[[-17.0, -61.0, -17.0], [0.0, 0.0, 0.0], [17.0, 61.0, 17.0]],
    #         [[-17.0, 0.0, 17.0], [-61.0, 0.0, 61.0], [-17.0, 0.0, 17.0]]]

    # 5x5 Scharr
    kernels = [[[-1.0, -2.0, -3.0, -2.0, -1.0],
                [-1.0, -2.0, -6.0, -2.0, -1.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [1.0, 2.0, 6.0, 2.0, 1.0],
                [1.0, 2.0, 3.0, 2.0, 1.0]],
               [[-1.0, -1.0, 0.0, 1.0, 1.0],
                [-2.0, -2.0, 0.0, 2.0, 2.0],
                [-3.0, -6.0, 0.0, 6.0, 3.0],
                [-2.0, -2.0, 0.0, 2.0, 2.0],
                [-1.0, -1.0, 0.0, 1.0, 1.0]]]
    num_kernels = len(kernels)
    kernels = np.transpose(np.asarray(kernels), (1, 2, 0))
    kernels = np.expand_dims(kernels, -2) / np.sum(np.abs(kernels))
    kernels_tf = tf.constant(kernels, dtype=image.dtype)
    kernels_tf = tf.tile(kernels_tf, [1, 1, image_shape[-1], 1], name='scharr_filters')

    # Use depth-wise convolution to calculate edge maps per channel.
    pad_sizes = [[0, 0], [2, 2], [2, 2], [0, 0]]
    padded = tf.pad(image, pad_sizes, mode='REFLECT')

    # Output tensor has shape [batch_size, h, w, d * num_kernels].
    strides = [1, 1, 1, 1]
    output = tf.nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID')

    # Reshape to [batch_size, h, w, d, num_kernels].
    shape = tf.concat([image_shape, [num_kernels]], 0)
    output = tf.reshape(output, shape=shape)
    output.set_shape(static_image_shape.concatenate([num_kernels]))

    if magnitude:  # magnitude of edges
        output = tf.sqrt(tf.reduce_sum(tf.square(output), axis=-1))
    else:  # direction of edges
        output = tf.atan(tf.squeeze(tf.div(output[:, :, :, :, 0], output[:, :, :, :, 1])))

    return output
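A minimal smoke test for the function above (a sketch, assuming TF 1.x graph mode with numpy available): with magnitude=True the per-kernel axis is reduced away, so the output shape matches the input.

import numpy as np
import tensorflow as tf

image = tf.placeholder(tf.float32, [None, 32, 32, 3])
edges = scharr_edges(image, magnitude=True)
with tf.Session() as sess:
    maps = sess.run(edges, {image: np.random.rand(4, 32, 32, 3)})
    print(maps.shape)  # (4, 32, 32, 3): one edge-magnitude map per channel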
Example #3
def get_angle(page):
  img = tf.cast(page.image, tf.float32)
  square = get_square(img)
  f = tf.complex_abs(tf.fft2d(tf.cast(square, tf.complex64))[:MAX_SIZE//2, :])
  x_arr = (
      tf.cast(tf.concat(0,
                        [tf.range(MAX_SIZE // 2),
                         tf.range(1, MAX_SIZE // 2 + 1)[::-1]]),
              tf.float32))[None, :]
  y_arr = tf.cast(tf.range(MAX_SIZE // 2), tf.float32)[:, None]
  f = tf.select(x_arr * x_arr + y_arr * y_arr < 32 * 32, tf.zeros_like(f), f)
  m = tf.argmax(tf.reshape(f, [-1]), dimension=0)
  x = tf.cast((m + MAX_SIZE // 4) % (MAX_SIZE // 2) - (MAX_SIZE // 4), tf.float32)
  y = tf.cast(tf.floordiv(m, MAX_SIZE // 2), tf.float32)
  return (tf.cond(
      y > 0, lambda: tf.atan(x / y), lambda: tf.constant(np.nan, tf.float32)),
      square)
Example #4
    def call(self, inputs, mask=None):
        # Import graph tensors
        # scalar_features = (samples, max_atoms, atom_feat)
        # vector_features = (samples, max_atoms, coor_dims, atom_feat)
        scalar_features, vector_features = inputs

        # Get parameters
        coor_dims = int(vector_features.shape[2])
        atom_feat = int(vector_features.shape[-1])

        # Integrate over atom axis
        if self.pooling == "sum":
            scalar_features = tf.reduce_sum(scalar_features, axis=1)
            vector_features = tf.reduce_sum(vector_features, axis=1)

        elif self.pooling == "avg":
            scalar_features = tf.reduce_mean(scalar_features, axis=1)
            vector_features = tf.reduce_mean(vector_features, axis=1)

        elif self.pooling == "max":
            scalar_features = tf.reduce_max(scalar_features, axis=1)

            vector_features = tf.transpose(vector_features, perm=[0, 2, 3, 1])
            size = tf.sqrt(tf.reduce_sum(tf.square(vector_features), axis=1))
            idx = tf.reshape(tf.argmax(size, axis=-1, output_type=tf.int32),
                             [-1, 1, atom_feat, 1])
            idx = tf.tile(idx, [1, coor_dims, 1, 1])
            vector_features = tf.reshape(tf.batch_gather(vector_features, idx),
                                         [-1, coor_dims, atom_feat])

        # Activation
        scalar_features = self.activation(scalar_features)
        vector_features = self.activation(vector_features)

        if self.system == "spherical":
            x, y, z = tf.unstack(vector_features, axis=1)
            r = tf.sqrt(tf.square(x) + tf.square(y) + tf.square(z))
            t = tf.acos(tf.divide(z, r + tf.cast(tf.equal(r, 0), dtype=float)))
            p = tf.atan(tf.divide(y, x + tf.cast(tf.equal(x, 0), dtype=float)))
            vector_features = tf.stack([r, t, p], axis=1)

        return [scalar_features, vector_features]
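One caveat about the spherical branch above (an editorial note, not from the original): tf.atan(y / x) folds the azimuth into (-pi/2, pi/2), discarding the quadrant. When the full angle matters, tf.atan2 is the usual alternative. A standalone illustration (TF 2.x eager):

import tensorflow as tf

print(tf.atan(tf.constant(1.0) / -1.0))               # -pi/4: quadrant folded
print(tf.atan2(tf.constant(1.0), tf.constant(-1.0)))  # 3*pi/4: quadrant kept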
Example #5
def get_field_of_view(focal_length, sensor_length, dtype=tf.float32):
    """
    Get the field of view based on focal length and sensor length.

    Inputs must be same size.

    Args:
        focal_length: focal length of camera.
        sensor_length: length of sensor in the appropriate dimension.

    Returns:
        field of view in radians.
    """
    with tf.name_scope('field_of_view'):
        focal_length = tf.convert_to_tensor(focal_length, dtype=dtype)
        sensor_length = tf.convert_to_tensor(sensor_length, dtype=dtype)
        if sensor_length.shape.ndims == focal_length.shape.ndims + 1:
            focal_length = tf.expand_dims(focal_length, axis=-1)
        fov = 2 * tf.atan(sensor_length / (2*focal_length))
    return fov
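A worked example (a sketch with hypothetical numbers): a 36 mm-wide full-frame sensor behind a 50 mm lens gives a horizontal field of view of 2 * atan(36 / 100) ≈ 0.691 rad, about 39.6 degrees.

import tensorflow as tf

fov = get_field_of_view(50.0, 36.0)
with tf.Session() as sess:
    print(sess.run(fov))  # ~0.691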
Example #6
def create_src_field(opt, d):

    zz = tf.cast(d, tf.float32)
    x = tf.constant((np.arange(opt.M) - (opt.M - 1) / 2) * opt.dx,
                    shape=[opt.M, 1],
                    dtype=tf.float32)
    y = tf.constant((np.arange(opt.N) - (opt.N - 1) / 2) * opt.dy,
                    shape=[1, opt.N],
                    dtype=tf.float32)
    r2 = tf.matmul(tf.square(x), tf.ones_like(y)) + tf.matmul(
        tf.ones_like(x), tf.square(y))
    Rz = zz * (1 + (opt.zr / zz)**2)
    gouy = tf.atan(zz / opt.zr)
    wz = opt.w0 * tf.sqrt(1 + (zz / opt.zr)**2)
    k = 2 * np.pi / opt.wlength
    amp_term = tf.exp(-r2 / (wz**2))
    phase_arg = -(k * zz + k * r2 / (2 * Rz) - gouy)
    Gz = tf.complex(amp_term * tf.cos(phase_arg), amp_term * tf.sin(phase_arg))

    return Gz
Example #7
    def body(i, feat_scores, feat_x, feat_y, feat_w, feat_h, feat_theta):
        """Body: update feature labels, scores and bboxes.
        Follow the original SSD paper for that purpose:
          - assign values when jaccard > 0.5;
          - only update if beating the score of other bboxes.
        """
        # Jaccard score.

        bbox = cord[i]
        angle = tf.atan(
            (bbox[3, 1] - bbox[2, 1]) / tf.abs(bbox[2, 0] - bbox[3, 0]))
        height = tf.sqrt(
            tf.square(bbox[3, 1] - bbox[0, 1]) +
            tf.square(bbox[3, 0] - bbox[0, 0]))
        rotate_matrix = tf.stack([-tf.sin(angle), tf.cos(angle)])
        a_cord = tf.transpose(tf.stack([bbox[0, 0] - xref, bbox[0, 1] - yref]),
                              perm=(1, 2, 0))
        d_cord = tf.transpose(tf.stack([bbox[3, 0] - xref, bbox[3, 1] - yref]),
                              perm=(1, 2, 0))
        y_a = tf.reduce_sum(a_cord * rotate_matrix, axis=-1) + yref
        y_d = tf.reduce_sum(d_cord * rotate_matrix, axis=-1) + yref
        ys = (y_a + y_d) / 2

        mask = positive_anchors(bbox)
        score = height / href
        mask = tf.logical_and(mask, tf.greater(score, feat_scores))
        imask = tf.cast(mask, tf.int64)
        fmask = tf.cast(mask, dtype)
        feat_scores = tf.where(mask,
                               tf.ones_like(feat_scores, dtype=dtype) * score,
                               feat_scores)
        feat_theta = tf.where(mask,
                              tf.ones_like(feat_scores, dtype=dtype) * angle,
                              feat_theta)
        feat_h = tf.where(
            mask,
            tf.ones_like(feat_scores, dtype=dtype) * tf.log(height / gamma),
            feat_h)
        feat_y = tf.where(mask, ys, feat_y)

        return [i + 1, feat_scores, feat_x, feat_y, feat_w, feat_h, feat_theta]
Example #8
def Phase(x):
    '''
    Compute the phase of the input if the input is an HDA object.
        - If the input is real-valued, the phase is 0
    '''

    if type(x) is TFHDA:
        # phase = tf.angle(c)
        # phase = tf.atan2(x.i, x.r)
        # x.r = tf.maximum(abs(x.r), 1e-15)

        phase = tf.div(x.i, tf.maximum(abs(x.r), 1e-15))
        # phase = tf.div(x.i, abs(x.r)+1e-15)
        phase = tf.atan(phase)

        # phase = x.i
        # phase = tf.concat([x.r, x.i], axis=1)  # shape(batch, 4)
    else:
        phase = 0

    return phase
Example #9
    def _atan2(y, x):
        """ My implementation of atan2 in tensorflow.  Returns in -pi .. pi."""
        tan = tf.atan(y / (x + 1e-8))  # this returns in -pi/2 .. pi/2

        one_map = tf.ones_like(tan)

        # correct quadrant error
        correction = tf.where(tf.less(x + 1e-8, 0.0),
                              3.141592653589793 * one_map, 0.0 * one_map)
        tan_c = tan + correction  # this returns in -pi/2 .. 3pi/2

        # bring to positive values
        correction = tf.where(tf.less(tan_c, 0.0),
                              2 * 3.141592653589793 * one_map, 0.0 * one_map)
        tan_zero_2pi = tan_c + correction  # this returns in 0 .. 2pi

        # make symmetric
        correction = tf.where(tf.greater(tan_zero_2pi, 3.141592653589793),
                              -2 * 3.141592653589793 * one_map, 0.0 * one_map)
        tan_final = tan_zero_2pi + correction  # this returns in -pi .. pi
        return tan_final
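Note that current TensorFlow ships tf.atan2, which handles the quadrants (and x = 0) natively, so the hand-rolled version above is mainly needed on very old releases. A quick comparison sketch, assuming _atan2 is in scope and eager execution:

import tensorflow as tf

y = tf.constant([1.0, 1.0, -1.0])
x = tf.constant([1.0, -1.0, -1.0])
print(tf.atan2(y, x))  # built-in: [pi/4, 3*pi/4, -3*pi/4]
print(_atan2(y, x))    # manual version above, matching up to the 1e-8 offset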
Example #10
def RNN(x):
    batch_x = tf.reshape(x, [BATCH_SIZE, N_STEPS, N_INPUT])

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x_unstack = tf.unstack(batch_x, N_STEPS, 1)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(N_HIDDEN, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x_unstack, dtype=tf.float32)

    W_fc1_opt = weight_variable('lstm_weights_1', [N_HIDDEN, 1],
                                float(N_HIDDEN))
    b_fc1_opt = bias_variable('lstm_bias_1', [1])

    outputs = tf.matmul(outputs[-1], W_fc1_opt) + b_fc1_opt

    # radians in the range [-pi/2, pi/2], scaled by 2 to cover the full 360 range
    y_opt = tf.multiply(tf.atan(outputs), 2)

    return y_opt
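The atan activation is what bounds the regression output here: tf.atan maps any real input into (-pi/2, pi/2), so multiplying by 2 yields angles in (-pi, pi). A standalone check of the saturation limit (a sketch, assuming eager execution):

import tensorflow as tf

print(tf.multiply(tf.atan(1e6), 2))  # ~3.1416, approaching pi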
Example #11
def main():

    with tf.Session() as sess:
        #tf.assign(x,y)

        print(sess.run(x))
        print(sess.run(y))

        print(sess.run(tf.add(x, y)))
        print(sess.run(tf.subtract(x, y)))  # subtraction
        print(sess.run(tf.multiply(x, y)))  # multiplication
        print(sess.run(tf.divide(x, y)))  # division
        print(sess.run(tf.mod(x, y)))  # modulo: the remainder of x / y
        print(sess.run(tf.abs(tf.constant(-4))))  # absolute value
        print(sess.run(tf.negative(y)))  # negation
        print(sess.run(tf.sign(3)))  # sign of the input: -1 for negative, 0 for zero, 1 for positive
        print(sess.run(tf.sign(0)))
        print(sess.run(tf.sign(-3)))
        #print(sess.run(tf.inv(y)))          # reciprocal
        print(sess.run(tf.square(x)))  # square
        print(sess.run(tf.round([0.9, 2.5, 2.3, 1.5,
                                 -4.5])))  # round to the nearest integer, ties to even: [1.0, 2.0, 2.0, 2.0, -4.0]
        print(sess.run(tf.sqrt(0.04)))  # square root, float
        a = [[2, 2], [3, 3]]
        b = [[8, 16], [2, 3]]
        print(sess.run(tf.pow(a, b)))  # element-wise power: [[2**8, 2**16], [3**2, 3**3]]
        print(sess.run(tf.exp(2.0)))  # e raised to the given power, float
        print(sess.run(tf.log(10.0)))  # natural logarithm (ln), float
        #print(sess.run(tf.log(3.0,9.0)))
        print(sess.run(tf.maximum(x, y)))  # element-wise maximum
        print(sess.run(tf.minimum(x, y)))  # element-wise minimum
        print(sess.run(tf.cos(0.0)))  # trigonometric functions
        print(sess.run(tf.sin(0.0)))
        print(sess.run(tf.tan(45.0)))
        print(sess.run(tf.atan(1.0)))  # arctangent
        print(sess.run(tf.cond(tf.less(x, y), f1, f2)))  # run f1 if the condition holds, otherwise f2
        #print(sess.run(tf()))

    pass
Example #12
def inference_nvidianet(images):
    with tf.variable_scope('conv1'):
        conv1 = common.activation(common.conv(images, 24, ksize=5, stride=2,
                                              padding='VALID'))
    with tf.variable_scope('conv2'):
        conv2 = common.activation(common.conv(conv1, 36, ksize=5, stride=2,
                                              padding='VALID'))
    with tf.variable_scope('conv3'):
        conv3 = common.activation(common.conv(conv2, 48, ksize=5, stride=2,
                                              padding='VALID'))
    with tf.variable_scope('conv4'):
        conv4 = common.activation(common.conv(conv3, 64, ksize=3, stride=1,
                                              padding='VALID'))
    with tf.variable_scope('conv5'):
        conv5 = common.activation(common.conv(conv4, 64, ksize=3, stride=1,
                                              padding='VALID'))
    with tf.variable_scope('conv6'):
        conv6 = common.activation(common.conv(conv5, 64, ksize=3, stride=1,
                                              padding='VALID'))
    with tf.variable_scope('conv7'):
        conv7 = common.activation(common.conv(conv6, 64, ksize=3, stride=1,
                                              padding='VALID'))
    conv7_flat = common.flatten(conv7)
    with tf.variable_scope('fc1'):
        fc1 = common.dropout(common.activation(common.fc(conv7_flat, 512)),
                             0.5)
    with tf.variable_scope('fc2'):
        fc2 = common.dropout(common.activation(common.fc(fc1, 128)),
                             0.625)
    with tf.variable_scope('fc3'):
        fc3 = common.dropout(common.activation(common.fc(fc2, 32)),
                             0.75)
    with tf.variable_scope('fc4'):
        fc4 = common.dropout(common.activation(common.fc(fc3, 8)),
                             0.875)
    with tf.variable_scope('fc5'):
        fc5 = tf.atan(common.fc(fc4, 1))
    return fc5
Example #13
def smooth_l1_loss_atan(targets, preds, anchor_state, sigma=3.0, weight=None):

    sigma_squared = sigma**2
    indices = tf.reshape(tf.where(tf.equal(anchor_state, 1)), [
        -1,
    ])
    preds = tf.gather(preds, indices)
    targets = tf.gather(targets, indices)

    # compute smooth L1 loss
    # f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma / sigma
    #        |x| - 0.5 / sigma / sigma    otherwise
    regression_diff = preds - targets
    regression_diff = tf.abs(regression_diff)

    regression_diff = tf.reshape(regression_diff, [-1, 5])
    dx, dy, dw, dh, dtheta = tf.unstack(regression_diff, axis=-1)
    dtheta = tf.atan(dtheta)
    regression_diff = tf.transpose(tf.stack([dx, dy, dw, dh, dtheta]))

    regression_loss = tf.where(
        tf.less(regression_diff, 1.0 / sigma_squared),
        0.5 * sigma_squared * tf.pow(regression_diff, 2),
        regression_diff - 0.5 / sigma_squared)

    if weight is not None:
        regression_loss = tf.reduce_sum(regression_loss, axis=-1)
        weight = tf.gather(weight, indices)
        regression_loss *= weight

    normalizer = tf.stop_gradient(tf.where(tf.equal(anchor_state, 1)))
    normalizer = tf.cast(tf.shape(normalizer)[0], tf.float32)
    normalizer = tf.maximum(1.0, normalizer)

    # normalizer = tf.stop_gradient(tf.cast(tf.equal(anchor_state, 1), tf.float32))
    # normalizer = tf.maximum(tf.reduce_sum(normalizer), 1)

    return tf.reduce_sum(regression_loss) / normalizer
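A numeric sanity check of the piecewise definition in the comment above (a sketch, assuming eager execution): with sigma = 3 the quadratic/linear switch sits at |x| = 1/9.

import tensorflow as tf

x = tf.constant([0.05, 0.5])
sigma_squared = 9.0
loss = tf.where(x < 1.0 / sigma_squared,
                0.5 * sigma_squared * tf.pow(x, 2),
                x - 0.5 / sigma_squared)
print(loss)  # ~[0.01125, 0.44444]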
Example #14
def Dense(X, size, init, name, activation):
    w = get_weights(shape=size, name='W_' + name, init=init)
    b = get_weights(shape=[size[-1]], name='b_' + name, init=init)

    dense = tf.matmul(X, w) + b
    print(name, size, size[-1])
    ## Applying activation

    if activation == 'relu':
        h_fc = tf.nn.relu(dense)
    elif activation == 'sigmoid':
        h_fc = tf.nn.sigmoid(dense)
    elif activation == 'leaky_relu':
        h_fc = tf.nn.leaky_relu(dense)
    elif activation == 'tanh':
        h_fc = tf.nn.tanh(dense)
    elif activation == 'atan':
        h_fc = tf.atan(dense)
    else:
        raise ValueError('Unsupported activation: ' + activation)


#     if dropout >= 0.0 and dropout < 1.0:
#         return tf.nn.dropout(h_fc, keep_prob=dropout)
    return h_fc
Example #15
    def mlp_rest_2048(self, h):
        # FCL 3
        W_fc3 = weight_variable([1024, 256])
        b_fc3 = bias_variable([256])
        h_fc3 = tf.nn.relu(tf.matmul(h, W_fc3) + b_fc3)
        h_fc3_drop = tf.nn.dropout(h_fc3, self.keep_prob)

        # FCL 4
        W_fc4 = weight_variable([256, 128])
        b_fc4 = bias_variable([128])
        h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)
        h_fc4_drop = tf.nn.dropout(h_fc4, self.keep_prob)

        # Output
        W_fc5 = weight_variable([128, 16])
        b_fc5 = bias_variable([16])
        h_fc5 = tf.nn.relu(tf.matmul(h_fc4_drop, W_fc5) + b_fc5)
        h_fc5_drop = tf.nn.dropout(h_fc5, self.keep_prob)

        W_fc6 = weight_variable([16, 2])
        b_fc6 = bias_variable([2])

        return tf.multiply(tf.atan(tf.matmul(h_fc5_drop, W_fc6) + b_fc6), 2)
Example #16
    def merge_mlp(self, h1, h2):
        # FCL 1

        W_fc1 = weight_variable([1152 * 2, 1164])
        b_fc1 = bias_variable_const([1164])
        h_flat1 = tf.reshape(h1, [-1, 1152])
        h_flat2 = tf.reshape(h2, [-1, 1152])
        print(h_flat1)
        print(h_flat2)
        h_flat = tf.reshape(tf.stack((h_flat1, h_flat2), axis=2), [-1, 1152 * 2])
        print(h_flat)
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # FCL 2
        W_fc2 = weight_variable([1164, 100])
        b_fc2 = bias_variable([100])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
        h_fc2_drop = tf.nn.dropout(h_fc2, self.keep_prob)

        # FCL 3
        W_fc3 = weight_variable([100, 50])
        b_fc3 = bias_variable([50])
        h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
        h_fc3_drop = tf.nn.dropout(h_fc3, self.keep_prob)

        # FCL 4
        W_fc4 = weight_variable([50, 10])
        b_fc4 = bias_variable([10])
        h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)
        h_fc4_drop = tf.nn.dropout(h_fc4, self.keep_prob)

        # Output
        W_fc5 = weight_variable([10, 2])
        b_fc5 = bias_variable([2])

        return tf.multiply(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5), 2)
Example #17
def autopilot(x, weights, biases, dropout):
    # Layer 1 - 66*200*3 to 31*98*24
    conv1 = conv2d(x, weights['wc1'], biases['bc1'], 2)

    # Layer 2 - 31*98*24 to 14*47*36
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], 2)

    # Layer 3 - 14*47*36 to 5*22*48
    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'], 2)

    # Layer 4 - 5*22*48 to 3*20*64
    conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])

    # Layer 5 - 3*20*64 to 1*18*64
    conv5 = conv2d(conv4, weights['wc5'], biases['bc5'])

    # Flatten feature map
    flat1 = flatten(conv5)

    # Fully connected layer - 1*18*64 to 100
    fc1 = tf.add(tf.matmul(flat1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)

    # Fully connected layer - From 100 to 50
    fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
    fc2 = tf.nn.relu(fc2)
    #fc2 = tf.nn.dropout(fc2, dropout)

    # Fully connected layer - From 50 to 10
    fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])
    fc3 = tf.nn.relu(fc3)

    # Fully connected layer - Output Layer - class prediction - 10 to 1
    out = tf.multiply(
        tf.atan(tf.add(tf.matmul(fc3, weights['out']), biases['out'])), 2)
    return out
Example #18
    def CreateGraph(self, x, activationFunction):
        y = tf.concat(x, axis=1)
        for i in range(len(self.layers) - 2):
            w = self.weights[i]
            b = self.biases[i]

            if activationFunction == ActivationFunction.Tanh:
                y = tf.nn.tanh(tf.add(tf.matmul(y, w), b))

            elif activationFunction == ActivationFunction.Sigmoid:
                y = tf.nn.sigmoid(tf.add(tf.matmul(y, w), b))

            elif activationFunction == ActivationFunction.Sin:
                y = tf.sin(tf.add(tf.matmul(y, w), b))

            elif activationFunction == ActivationFunction.Cos:
                y = tf.cos(tf.add(tf.matmul(y, w), b))

            elif activationFunction == ActivationFunction.Atan:
                y = tf.atan(tf.add(tf.matmul(y, w), b))

        w = self.weights[-1]
        b = self.biases[-1]
        return tf.add(tf.matmul(y, w), b)
Example #19
    def test_Atan(self):
        t = tf.atan(self.random(4, 3))
        self.check(t)
Example #20
def atan(x):
    return tf.atan(x) * 1
Example #21
    def _log_cdf(self, x):
        return self._extend_support_with_default_value(
            x,
            lambda x: np.log(2 / np.pi) + tf.math.log(tf.atan(self._z(x))),
            default_value=-np.inf)
Example #22
def bbox_ciou(boxes1, boxes2):
    '''
    ciou = iou - p2/c2 - av
    :param boxes1: (8, 13, 13, 3, 4)   pred_xywh
    :param boxes2: (8, 13, 13, 3, 4)   label_xywh
    :return:
    '''
    boxes1_x0y0x1y1 = tf.concat([
        boxes1[..., :2] - boxes1[..., 2:] * 0.5,
        boxes1[..., :2] + boxes1[..., 2:] * 0.5
    ],
                                axis=-1)
    boxes2_x0y0x1y1 = tf.concat([
        boxes2[..., :2] - boxes2[..., 2:] * 0.5,
        boxes2[..., :2] + boxes2[..., 2:] * 0.5
    ],
                                axis=-1)
    boxes1_x0y0x1y1 = tf.concat([
        tf.minimum(boxes1_x0y0x1y1[..., :2], boxes1_x0y0x1y1[..., 2:]),
        tf.maximum(boxes1_x0y0x1y1[..., :2], boxes1_x0y0x1y1[..., 2:])
    ],
                                axis=-1)
    boxes2_x0y0x1y1 = tf.concat([
        tf.minimum(boxes2_x0y0x1y1[..., :2], boxes2_x0y0x1y1[..., 2:]),
        tf.maximum(boxes2_x0y0x1y1[..., :2], boxes2_x0y0x1y1[..., 2:])
    ],
                                axis=-1)

    # area
    boxes1_area = (boxes1_x0y0x1y1[..., 2] - boxes1_x0y0x1y1[..., 0]) * (
        boxes1_x0y0x1y1[..., 3] - boxes1_x0y0x1y1[..., 1])
    boxes2_area = (boxes2_x0y0x1y1[..., 2] - boxes2_x0y0x1y1[..., 0]) * (
        boxes2_x0y0x1y1[..., 3] - boxes2_x0y0x1y1[..., 1])

    # top-left and bottom-right coord, shape: (8, 13, 13, 3, 2)
    left_up = tf.maximum(boxes1_x0y0x1y1[..., :2], boxes2_x0y0x1y1[..., :2])
    right_down = tf.minimum(boxes1_x0y0x1y1[..., 2:], boxes2_x0y0x1y1[..., 2:])

    # intersection area and iou
    inter_section = tf.maximum(right_down - left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]
    union_area = boxes1_area + boxes2_area - inter_area
    iou = inter_area / (union_area + 1e-9)

    # top-left and bottom-right coord of the enclosing rectangle, shape: (8, 13, 13, 3, 2)
    enclose_left_up = tf.minimum(boxes1_x0y0x1y1[..., :2],
                                 boxes2_x0y0x1y1[..., :2])
    enclose_right_down = tf.maximum(boxes1_x0y0x1y1[..., 2:],
                                    boxes2_x0y0x1y1[..., 2:])

    # diagonal ** 2
    enclose_wh = enclose_right_down - enclose_left_up
    enclose_c2 = K.pow(enclose_wh[..., 0], 2) + K.pow(enclose_wh[..., 1], 2)

    # center distances between two rectangles
    p2 = K.pow(boxes1[..., 0] - boxes2[..., 0], 2) + K.pow(
        boxes1[..., 1] - boxes2[..., 1], 2)

    # add av
    atan1 = tf.atan(boxes1[..., 2] / (boxes1[..., 3] + 1e-9))
    atan2 = tf.atan(boxes2[..., 2] / (boxes2[..., 3] + 1e-9))
    v = 4.0 * K.pow(atan1 - atan2, 2) / (math.pi**2)
    a = v / (1 - iou + v)

    ciou = iou - 1.0 * p2 / enclose_c2 - 1.0 * a * v
    return ciou
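A degenerate-case check for the routine above (a sketch, assuming eager execution and the module's tf, K and math imports): identical boxes give iou ~= 1, zero center distance and a zero aspect-ratio penalty, so ciou should come out at almost exactly 1.

box = tf.constant([[5.0, 5.0, 4.0, 2.0]])  # xywh
print(bbox_ciou(box, box))  # ~[1.0]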
Example #23
def compute_ciou(target, output):
    '''
    takes in a list of bounding boxes
    but can work for a single bounding box too
    all the boundary cases such as bounding boxes of size 0 are handled.
    '''
    mask = tf.cast(tf.not_equal(target, 0), target.dtype)
    target = target * mask
    output = output * mask

    x1g, y1g, x2g, y2g = tf.split(value=target, num_or_size_splits=4, axis=1)
    x1, y1, x2, y2 = tf.split(value=output, num_or_size_splits=4, axis=1)

    w_pred = x2 - x1
    h_pred = y2 - y1
    w_gt = x2g - x1g
    h_gt = y2g - y1g

    x_center = (x2 + x1) / 2
    y_center = (y2 + y1) / 2
    x_center_g = (x1g + x2g) / 2
    y_center_g = (y1g + y2g) / 2

    xc1 = tf.minimum(x1, x1g)
    yc1 = tf.minimum(y1, y1g)
    xc2 = tf.maximum(x2, x2g)
    yc2 = tf.maximum(y2, y2g)

    ###iou term###
    xA = tf.maximum(x1g, x1)
    yA = tf.maximum(y1g, y1)
    xB = tf.minimum(x2g, x2)
    yB = tf.minimum(y2g, y2)

    interArea = tf.maximum(0.0, (xB - xA + 1)) * tf.maximum(0.0, yB - yA + 1)

    boxAArea = (x2g - x1g + 1) * (y2g - y1g + 1)
    boxBArea = (x2 - x1 + 1) * (y2 - y1 + 1)

    iouk = interArea / (boxAArea + boxBArea - interArea + 1e-10)
    ###

    ###distance term###
    c = ((xc2 - xc1)**2) + ((yc2 - yc1)**2) + 1e-7
    d = ((x_center - x_center_g)**2) + ((y_center - y_center_g)**2)
    u = d / c
    ###

    ###aspect-ratio term###
    arctan = tf.atan(w_gt / (h_gt + 1e-10)) - tf.atan(w_pred /
                                                      (h_pred + 1e-10))
    v = (4 / (math.pi**2)) * tf.pow(
        (tf.atan(w_gt / (h_gt + 1e-10)) - tf.atan(w_pred /
                                                  (h_pred + 1e-10))), 2)
    S = 1 - iouk
    alpha = v / (S + v + 1e-10)
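    # note (editorial): w_temp = 2 * w_pred makes (w_pred - w_temp) equal -w_pred in the ar term below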
    w_temp = 2 * w_pred
    ar = (8 / (math.pi**2)) * arctan * ((w_pred - w_temp) * h_pred)
    ###

    ###calculate diou###
    #diouk = iouk - u
    #diouk = (1 - diouk)
    ###

    ###calculate ciou###
    ciouk = iouk - (u + alpha * ar)
    ciouk = (1 - ciouk)
    ###

    return ciouk
Example #24
#FCL 2
W_fc2 = weight_variable([1164, 100])
b_fc2 = bias_variable([100])

h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

#FCL 3
W_fc3 = weight_variable([100, 50])
b_fc3 = bias_variable([50])

h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)

h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)

#FCL 4
W_fc4 = weight_variable([50, 10])
b_fc4 = bias_variable([10])

h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)

h_fc4_drop = tf.nn.dropout(h_fc4, keep_prob)

#Output
W_fc5 = weight_variable([10, 1])
b_fc5 = bias_variable([1])

y = tf.multiply(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5), 2)  # scale the atan output
Example #25
    def __init__(self, drop_out=False, relu=True, is_training=True):
        self.x = tf.placeholder(tf.float32, shape=[None, 66, 200, 3])
        self.y_ = tf.placeholder(tf.float32, shape=[None, 1])

        self.x_image = self.x

        self.keep_prob = tf.placeholder(tf.float32)

        # first convolutional layer
        self.W_conv1 = weight_variable([5, 5, 3, 24])
        self.b_conv1 = bias_variable([24])

        self.h_conv1 = conv2d(self.x_image, binarize(self.W_conv1),
                              2) + self.b_conv1

        if relu:
            self.h_conv1 = tf.nn.relu(self.h_conv1)

        self.batch_norm1 = tf.contrib.layers.batch_norm(
            self.h_conv1, is_training=is_training, trainable=True)
        self.out_feature1 = HardTanh(self.batch_norm1, is_training)

        # second convolutional layer
        self.W_conv2 = weight_variable([5, 5, 24, 36])
        self.b_conv2 = bias_variable([36])

        self.h_conv2 = conv2d(binarize(self.out_feature1),
                              binarize(self.W_conv2), 2) + self.b_conv2
        if relu:
            self.h_conv2 = tf.nn.relu(self.h_conv2)

        self.batch_norm2 = tf.contrib.layers.batch_norm(
            self.h_conv2, is_training=is_training, trainable=True)

        self.out_feature2 = HardTanh(self.batch_norm2, is_training)

        # third convolutional layer
        self.W_conv3 = weight_variable([5, 5, 36, 48])
        self.b_conv3 = bias_variable([48])

        self.h_conv3 = conv2d(binarize(self.out_feature2),
                              binarize(self.W_conv3), 2) + self.b_conv3

        if relu:
            self.h_conv3 = tf.nn.relu(self.h_conv3)

        self.batch_norm3 = tf.contrib.layers.batch_norm(
            self.h_conv3, is_training=is_training, trainable=True)

        self.out_feature3 = HardTanh(self.batch_norm3, is_training)

        # fourth convolutional layer
        self.W_conv4 = weight_variable([3, 3, 48, 64])
        self.b_conv4 = bias_variable([64])

        self.h_conv4 = conv2d(binarize(self.out_feature3),
                              binarize(self.W_conv4), 1) + self.b_conv4

        if relu:
            self.h_conv4 = tf.nn.relu(self.h_conv4)

        self.batch_norm4 = tf.contrib.layers.batch_norm(
            self.h_conv4, is_training=is_training, trainable=True)

        self.out_feature4 = HardTanh(self.batch_norm4, is_training)

        # fifth convolutional layer
        self.W_conv5 = weight_variable([3, 3, 64, 64])
        self.b_conv5 = bias_variable([64])

        self.h_conv5 = conv2d(binarize(self.out_feature4),
                              binarize(self.W_conv5), 1) + self.b_conv5

        if relu:
            self.h_conv5 = tf.nn.relu(self.h_conv5)

        self.batch_norm5 = tf.contrib.layers.batch_norm(
            self.h_conv5, is_training=is_training, trainable=True)

        self.out_feature5 = HardTanh(self.batch_norm5, is_training)

        # FCL 1
        self.W_fc1 = weight_variable([1152, 1164])
        self.b_fc1 = bias_variable([1164])

        self.out_feature5_flat = tf.reshape(binarize(self.out_feature5),
                                            [-1, 1152])
        self.h_fc1 = tf.matmul(self.out_feature5_flat, binarize(
            self.W_fc1)) + self.b_fc1

        if relu:
            self.h_fc1 = tf.nn.relu(self.h_fc1)

        self.batch_norm6 = tf.contrib.layers.batch_norm(
            self.h_fc1, is_training=is_training, trainable=True)

        self.out_feature6 = HardTanh(self.batch_norm6, is_training)
        if drop_out & is_training:
            self.out_feature6 = tf.nn.dropout(self.out_feature6,
                                              self.keep_prob)

        # FCL 2
        self.W_fc2 = weight_variable([1164, 100])
        self.b_fc2 = bias_variable([100])

        self.h_fc2 = tf.matmul(binarize(self.out_feature6), binarize(
            self.W_fc2)) + self.b_fc2

        if relu:
            self.h_fc2 = tf.nn.relu(self.h_fc2)

        self.batch_norm7 = tf.contrib.layers.batch_norm(
            self.h_fc2, is_training=is_training, trainable=True)

        self.out_feature7 = HardTanh(self.batch_norm7, is_training)
        if drop_out & is_training:
            self.out_feature7 = tf.nn.dropout(self.out_feature7,
                                              self.keep_prob)

        # FCL 3
        self.W_fc3 = weight_variable([100, 50])
        self.b_fc3 = bias_variable([50])

        self.h_fc3 = tf.matmul(binarize(self.out_feature7), binarize(
            self.W_fc3)) + self.b_fc3

        if relu:
            self.h_fc3 = tf.nn.relu(self.h_fc3)

        self.batch_norm8 = tf.contrib.layers.batch_norm(
            self.h_fc3, is_training=is_training, trainable=True)

        self.out_feature8 = HardTanh(self.batch_norm8, is_training)
        if drop_out & is_training:
            self.out_feature8 = tf.nn.dropout(self.out_feature8,
                                              self.keep_prob)

        # FCL 4
        self.W_fc4 = weight_variable([50, 10])
        self.b_fc4 = bias_variable([10])

        self.h_fc4 = tf.matmul(binarize(self.out_feature8), binarize(
            self.W_fc4)) + self.b_fc4

        if relu:
            self.h_fc4 = tf.nn.relu(self.h_fc4)

        self.batch_norm9 = tf.contrib.layers.batch_norm(
            self.h_fc4, is_training=is_training, trainable=True)

        self.out_feature9 = HardTanh(self.batch_norm9, is_training)

        if drop_out & is_training:
            self.out_feature9 = tf.nn.dropout(self.out_feature9,
                                              self.keep_prob)

        # Output
        self.W_fc5 = weight_variable([10, 1])
        self.b_fc5 = bias_variable([1])

        # scale the atan output
        self.y = tf.multiply(
            tf.atan(tf.matmul(self.out_feature9, self.W_fc5) + self.b_fc5), 2)
Example #26
    def __init__(self):
        self.image_input = tf.placeholder(tf.float32, shape=[None, 66, 200, 3])
        # use for training loss computation
        self.y_ = tf.placeholder(tf.float32, shape=[None, 1])
        self.keep_prob = tf.placeholder(tf.float32)

        # model parameters
        # self.model_params = []

        # first convolutional layer
        W_conv1 = _weight_variable([5, 5, 3, 24])
        b_conv1 = _bias_variable([24])

        h_conv1 = tf.nn.relu(_conv2d(self.image_input, W_conv1, 2) + b_conv1)

        # second convolutional layer
        W_conv2 = _weight_variable([5, 5, 24, 36])
        b_conv2 = _bias_variable([36])

        h_conv2 = tf.nn.relu(_conv2d(h_conv1, W_conv2, 2) + b_conv2)

        # third convolutional layer
        W_conv3 = _weight_variable([5, 5, 36, 48])
        b_conv3 = _bias_variable([48])

        h_conv3 = tf.nn.relu(_conv2d(h_conv2, W_conv3, 2) + b_conv3)

        # fourth convolutional layer
        W_conv4 = _weight_variable([3, 3, 48, 64])
        b_conv4 = _bias_variable([64])

        h_conv4 = tf.nn.relu(_conv2d(h_conv3, W_conv4, 1) + b_conv4)

        # fifth convolutional layer
        W_conv5 = _weight_variable([3, 3, 64, 64])
        b_conv5 = _bias_variable([64])

        h_conv5 = tf.nn.relu(_conv2d(h_conv4, W_conv5, 1) + b_conv5)

        # FCL 1
        W_fc1 = _weight_variable([1152, 1164])
        b_fc1 = _bias_variable([1164])

        h_conv5_flat = tf.reshape(h_conv5, [-1, 1152])
        h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # FCL 2
        W_fc2 = _weight_variable([1164, 100])
        b_fc2 = _bias_variable([100])

        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

        h_fc2_drop = tf.nn.dropout(h_fc2, self.keep_prob)

        # FCL 3
        W_fc3 = _weight_variable([100, 50])
        b_fc3 = _bias_variable([50])

        h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)

        h_fc3_drop = tf.nn.dropout(h_fc3, self.keep_prob)

        # FCL 4
        W_fc4 = _weight_variable([50, 10])
        b_fc4 = _bias_variable([10])

        h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)

        h_fc4_drop = tf.nn.dropout(h_fc4, self.keep_prob)

        # Output
        W_fc5 = _weight_variable([10, 1])
        b_fc5 = _bias_variable([1])

        self.steering = tf.multiply(
            tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5),
            2)  # scale the atan output
Example #27
    def _log_cdf(self, x):
        return tf.log1p(2 / np.pi * tf.atan(self._z(x))) - np.log(2)
Example #28
    def __init__(self,
                 params,
                 restore=None,
                 session=None,
                 use_softmax=False,
                 image_size=28,
                 image_channel=1,
                 activation='relu',
                 activation_param=0.3,
                 l2_reg=0.0,
                 dropout_rate=0.0,
                 flatten=True,
                 out_dim=10):

        global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
        if 'Sequential' not in globals():
            print('importing Keras from tensorflow...')
            from tensorflow.keras.models import Sequential
            from tensorflow.keras.layers import InputLayer, Dense, Dropout, Activation, Flatten, Lambda
            from tensorflow.keras.layers import Conv2D, MaxPooling2D
            from tensorflow.keras.layers import LeakyReLU
            from tensorflow.keras.models import load_model
            from tensorflow.keras import regularizers
            from tensorflow.keras import backend as K

        self.image_size = image_size
        self.num_channels = image_channel
        self.num_labels = out_dim

        model = Sequential()
        if flatten:
            model.add(
                Flatten(input_shape=(image_size, image_size, image_channel)))
        first = True
        # list of all hidden units weights
        self.U = []
        n = 0
        for param in params:
            n += 1
            # add each dense layer, and save a reference to list U
            if first:
                self.U.append(
                    Dense(param,
                          input_shape=(image_size, ),
                          kernel_initializer='he_uniform',
                          kernel_regularizer=regularizers.l2(l2_reg)))
                first = False
            else:
                self.U.append(
                    Dense(param,
                          kernel_initializer='he_uniform',
                          kernel_regularizer=regularizers.l2(l2_reg)))
            model.add(self.U[-1])
            # ReLU activation
            # model.add(Activation(activation))
            if activation == "arctan":
                model.add(
                    Lambda(lambda x: tf.atan(x),
                           name=activation + "_" + str(n)))
            elif activation == "leaky":
                print("Leaky ReLU slope: {:.3f}".format(activation_param))
                model.add(
                    LeakyReLU(alpha=activation_param,
                              name=activation + "_" + str(n)))
            else:
                model.add(
                    Activation(activation, name=activation + "_" + str(n)))
            if dropout_rate > 0.0:
                model.add(Dropout(dropout_rate))
        self.W = Dense(out_dim,
                       kernel_initializer='he_uniform',
                       kernel_regularizer=regularizers.l2(l2_reg))
        model.add(self.W)
        # output log probability, used for black-box attack
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        # save the output of intermediate layers
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        # a tensor to get gradients
        self.gradients = []
        for i in range(model.output.shape[1]):
            output_tensor = model.output[:, i]
            self.gradients.append(K.gradients(output_tensor, model.input)[0])

        self.layer_outputs = layer_outputs
        self.model = model
        model.summary()
Example #29
    def _scharr_edges(cls, image, magnitude):
        """ Returns a tensor holding modified Scharr edge maps.

        Parameters
        ----------
        image: tensor
            Image tensor with shape [batch_size, h, w, d] and type float32. The image(s) must be
            2x2 or larger.
        magnitude: bool
            Boolean to determine if the edge magnitude or edge direction is returned

        Returns
        -------
        tensor
            Tensor holding edge maps for each channel. Returns a tensor with shape `[batch_size, h,
            w, d, 2]` where the last two dimensions hold `[[dy[0], dx[0]], [dy[1], dx[1]], ...,
            [dy[d-1], dx[d-1]]]` calculated using the Scharr filter.
        """

        # Define vertical and horizontal Scharr filters.
        static_image_shape = image.get_shape()
        image_shape = K.shape(image)

        # 5x5 modified Scharr kernel ( reshape to (5,5,1,2) )
        matrix = np.array([[[[0.00070, 0.00070]], [[0.00520, 0.00370]],
                            [[0.03700, 0.00000]], [[0.00520, -0.0037]],
                            [[0.00070, -0.0007]]],
                           [[[0.00370, 0.00520]], [[0.11870, 0.11870]],
                            [[0.25890, 0.00000]], [[0.11870, -0.1187]],
                            [[0.00370, -0.0052]]],
                           [[[0.00000, 0.03700]], [[0.00000, 0.25890]],
                            [[0.00000, 0.00000]], [[0.00000, -0.2589]],
                            [[0.00000, -0.0370]]],
                           [[[-0.0037, 0.00520]], [[-0.1187, 0.11870]],
                            [[-0.2589, 0.00000]], [[-0.1187, -0.1187]],
                            [[-0.0037, -0.0052]]],
                           [[[-0.0007, 0.00070]], [[-0.0052, 0.00370]],
                            [[-0.0370, 0.00000]], [[-0.0052, -0.0037]],
                            [[-0.0007, -0.0007]]]])
        num_kernels = [2]
        kernels = K.constant(matrix, dtype='float32')
        kernels = K.tile(kernels, [1, 1, image_shape[-1], 1])

        # Use depth-wise convolution to calculate edge maps per channel.
        # Output tensor has shape [batch_size, h, w, d * num_kernels].
        pad_sizes = [[0, 0], [2, 2], [2, 2], [0, 0]]
        padded = tf.pad(
            image,  # pylint:disable=unexpected-keyword-arg,no-value-for-parameter
            pad_sizes,
            mode='REFLECT')
        output = K.depthwise_conv2d(padded, kernels)

        if not magnitude:  # direction of edges
            # Reshape to [batch_size, h, w, d, num_kernels].
            shape = K.concatenate([image_shape, num_kernels], axis=0)
            output = K.reshape(output, shape=shape)
            output.set_shape(static_image_shape.concatenate(num_kernels))
            output = tf.atan(
                K.squeeze(output[:, :, :, :, 0] / output[:, :, :, :, 1],
                          axis=None))
        # magnitude of edges -- unified x & y edges don't work well with Neural Networks
        return output
Example #30
    def _cdf(self, x):
        return tf.atan(self._z(x)) / np.pi + 0.5
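Examples #21, #27, #30 and #37 are all pieces of the Cauchy CDF, F(x) = arctan(z) / pi + 1/2 with z = (x - loc) / scale. A standalone spot check at z = 1:

import numpy as np

print(np.arctan(1.0) / np.pi + 0.5)  # 0.75, the standard Cauchy CDF at x = 1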
Example #31
# fully connected layer 2
W_fc2 = weight_variable("fc2", [1152, 100])
b_fc2 = bias_variable([100])

h_fc2 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc2) + b_fc2)

h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

# fully connected layer 3
W_fc3 = weight_variable("fc3", [100, 50])
b_fc3 = bias_variable([50])

h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)

h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)

# fully connected layer 4
W_fc4 = weight_variable("fc4", [50, 10])
b_fc4 = bias_variable([10])

h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)

h_fc4_drop = tf.nn.dropout(h_fc4, keep_prob)

# output
W_fc5 = weight_variable("fc5", [10, 1])
b_fc5 = bias_variable([1])

y = tf.multiply(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5),
                2)  #scale the atan output
Example #32
def atan_layer(x):
    return tf.multiply(tf.atan(x), 2)
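A typical way to use such a helper (a sketch, not from the original source) is as a Keras Lambda activation on a regression head, bounding predictions to (-pi, pi):

from tensorflow.keras.layers import Dense, Lambda
from tensorflow.keras.models import Sequential

model = Sequential([Dense(1, input_shape=(10,)), Lambda(atan_layer)])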
Example #33
    def owent(h, a):
        h = tf.abs(h)
        term1 = tf.atan(a) / (2 * np.pi)
        term2 = tf.exp((-1 / 2) * (tf.multiply(tf.square(h), (tf.square(a) + 1))))
        return tf.multiply(term1, term2)
Example #34
def custom(y):
    return tf.atan(y, name='output')
Example #35
    def __init__(self, dropout_prob=0.2, batch_norm=False, whitening=False, is_training=True):
        x = tf.placeholder(tf.float32, shape=[None, 66, 200, 3], name='x')
        y_ = tf.placeholder(tf.float32, shape=[None, 1])
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        x_image = x

        self.W_conv1 = weight_variable([5, 5, 3, 24])
        self.b_conv1 = bias_variable([24])
        self.h_conv1 = tf.nn.relu(conv2d(x_image, self.W_conv1, 2) + self.b_conv1)
        if batch_norm:
            self.h_conv1 = tf.contrib.layers.batch_norm(self.h_conv1, is_training=is_training, trainable=True)

        self.W_conv2 = weight_variable([5, 5, 24, 36])
        self.b_conv2 = bias_variable([36])
        self.h_conv2 = tf.nn.relu(conv2d(self.h_conv1, self.W_conv2, 2) + self.b_conv2)

        self.W_conv3 = weight_variable([5, 5, 36, 48])
        self.b_conv3 = bias_variable([48])
        self.h_conv3 = tf.nn.relu(conv2d(self.h_conv2, self.W_conv3, 2) + self.b_conv3)
        if batch_norm:
            self.h_conv3 = tf.contrib.layers.batch_norm(self.h_conv3, is_training=is_training, trainable=True)

        self.W_conv4 = weight_variable([3, 3, 48, 64])
        self.b_conv4 = bias_variable([64])
        self.h_conv4 = tf.nn.relu(conv2d(self.h_conv3, self.W_conv4, 1) + self.b_conv4)

        self.W_conv5 = weight_variable([3, 3, 64, 64])
        self.b_conv5 = bias_variable([64])
        self.h_conv5 = tf.nn.relu(conv2d(self.h_conv4, self.W_conv5, 1) + self.b_conv5)
        if batch_norm:
            self.h_conv5 = tf.contrib.layers.batch_norm(self.h_conv5, is_training=is_training, trainable=True)

        self.W_fc1 = weight_variable([1152, 1164])
        self.b_fc1 = bias_variable([1164])

        self.h_conv5_flat = tf.reshape(self.h_conv5, [-1, 1152])
        self.h_fc1 = tf.nn.relu(tf.matmul(self.h_conv5_flat, self.W_fc1) + self.b_fc1)
        if batch_norm:
            self.h_fc1 = tf.contrib.layers.batch_norm(self.h_fc1, is_training=is_training, trainable=True)
        self.h_fc1_drop = tf.nn.dropout(self.h_fc1, keep_prob)

        self.W_fc2 = weight_variable([1164, 100])
        self.b_fc2 = bias_variable([100])
        self.h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1_drop, self.W_fc2) + self.b_fc2, name='fc2')
        if batch_norm:
            self.h_fc2 = tf.contrib.layers.batch_norm(self.h_fc2, is_training=is_training, trainable=True)
        self.h_fc2_drop = tf.nn.dropout(self.h_fc2, keep_prob)

        self.W_fc3 = weight_variable([100, 50])
        self.b_fc3 = bias_variable([50])
        self.h_fc3 = tf.nn.relu(tf.matmul(self.h_fc2_drop, self.W_fc3) + self.b_fc3, name='fc3')
        if batch_norm:
            self.h_fc3 = tf.contrib.layers.batch_norm(self.h_fc3, is_training=is_training, trainable=True)
        self.h_fc3_drop = tf.nn.dropout(self.h_fc3, keep_prob)

        self.W_fc4 = weight_variable([50, 10])
        self.b_fc4 = bias_variable([10])
        self.h_fc4 = tf.nn.relu(tf.matmul(self.h_fc3_drop, self.W_fc4) + self.b_fc4, name='fc4')
        if batch_norm:
            self.h_fc4 = tf.contrib.layers.batch_norm(self.h_fc4, is_training=is_training, trainable=True)
        self.h_fc4_drop = tf.nn.dropout(self.h_fc4, keep_prob)

        self.W_fc5 = weight_variable([10, 1])
        self.b_fc5 = bias_variable([1])
        y = tf.multiply(tf.atan(tf.matmul(self.h_fc4_drop, self.W_fc5) + self.b_fc5), 2, name='y')

        self.x = x
        self.y_ = y_
        self.y = y
        self.keep_prob = keep_prob
        self.fc2 = self.h_fc2
        self.fc3 = self.h_fc3
Example #36
	def Render_block(self,face_shape,face_norm,face_color,camera_scale,f_scale,facemodel,batchsize,is_train=True):
		if is_train and is_windows:
			raise ValueError('Training is not supported in a Windows environment.')

		if is_windows:
			return [],[],[]

		# render reconstruction images 
		n_vex = int(facemodel.idBase.shape[0].value/3)
		fov_y = 2*tf.atan(112./(1015.*f_scale))*180./m.pi
		fov_y = tf.reshape(fov_y,[batchsize])
		# full face region
		face_shape = tf.reshape(face_shape,[batchsize,n_vex,3])
		face_norm = tf.reshape(face_norm,[batchsize,n_vex,3])
		face_color = tf.reshape(face_color,[batchsize,n_vex,3])

		# pre-defined cropped face region
		mask_face_shape = tf.gather(face_shape,tf.cast(facemodel.front_mask_render-1,tf.int32),axis = 1)
		mask_face_norm = tf.gather(face_norm,tf.cast(facemodel.front_mask_render-1,tf.int32),axis = 1)
		mask_face_color = tf.gather(face_color,tf.cast(facemodel.front_mask_render-1,tf.int32),axis = 1)

		# set up camera settings
		camera_position = tf.constant([[0,0,10.0]])*tf.reshape(camera_scale,[-1,1])
		camera_lookat = tf.constant([0,0,0.0])
		camera_up = tf.constant([0,1.0,0])

		# setting light source position(intensities are set to 0 because we have computed the vertex color)
		light_positions = tf.tile(tf.reshape(tf.constant([0,0,1e5]),[1,1,3]),[batchsize,1,1])
		light_intensities = tf.tile(tf.reshape(tf.constant([0.0,0.0,0.0]),[1,1,3]),[batchsize,1,1])
		ambient_color = tf.tile(tf.reshape(tf.constant([1.0,1,1]),[1,3]),[batchsize,1])

		#using tf_mesh_renderer for rasterization (https://github.com/google/tf_mesh_renderer)
		# img: [batchsize,224,224,3] images in RGB order (0-255)
		# mask:[batchsize,224,224,1] transparency for img ({0,1} value)
		img_rgba = mesh_renderer.mesh_renderer(face_shape,
			tf.cast(facemodel.face_buf-1,tf.int32),
			face_norm,
			face_color,
			camera_position = camera_position,
			camera_lookat = camera_lookat,
			camera_up = camera_up,
			light_positions = light_positions,
			light_intensities = light_intensities,
			image_width = 224,
			image_height = 224,
			fov_y = fov_y,
			near_clip = 0.01,
			far_clip = 50.0,
			ambient_color = ambient_color)

		img = img_rgba[:,:,:,:3]
		mask = img_rgba[:,:,:,3:]

		img = tf.cast(img[:,:,:,::-1],tf.float32) # convert RGB to BGR
		mask = tf.cast(mask,tf.float32) # full face region

		if is_train:
			# compute mask for small face region
			img_crop_rgba = mesh_renderer.mesh_renderer(mask_face_shape,
				tf.cast(facemodel.mask_face_buf-1,tf.int32),
				mask_face_norm,
				mask_face_color,
				camera_position = camera_position,
				camera_lookat = camera_lookat,
				camera_up = camera_up,
				light_positions = light_positions,
				light_intensities = light_intensities,
				image_width = 224,
				image_height = 224,
				fov_y = fov_y,
				near_clip = 0.01,
				far_clip = 50.0,
				ambient_color = ambient_color)

			mask_f = img_crop_rgba[:,:,:,3:]
			mask_f = tf.cast(mask_f,tf.float32) # small face region
			return img,mask,mask_f

		img_rgba = tf.cast(tf.clip_by_value(img_rgba,0,255),tf.float32)

		return img_rgba,mask,mask
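The fov_y line above converts a focal length given in pixels (1015 px for a 224 px image, whose half-height is 112 px) into a vertical field of view in degrees. A standalone check with f_scale = 1:

import numpy as np

print(2 * np.arctan(112. / 1015.) * 180. / np.pi)  # ~12.6 degrees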
Example #37
    def _log_cdf(self, x):
        return self._extend_support_with_default_value(
            x,
            lambda x: np.log(2 / np.pi) + tf.log(tf.atan(self._z(x))),
            default_value=-np.inf)