Example #1
 def call(self, inputs):
     # Encoder head: stacked ReLU dense layers whose final output is
     # split into a latent mean and a softplus-constrained log-variance.
     x = relu(self.linear1(inputs))
     x = relu(self.linear2(x))
     x = relu(self.linear3(x))
     x = self.linear4(x)
     z_mean = x[:, :self.latent_dim]
     z_log_var = softplus(x[:, self.latent_dim:])
     return z_mean, z_log_var
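Because this encoder returns `z_mean` and `z_log_var`, the usual consumer is the reparameterization trick. A minimal sketch, assuming TensorFlow 2.x; `sample_z` is a hypothetical helper, not part of the example above:

import tensorflow as tf

def sample_z(z_mean, z_log_var):
    # z = mean + sigma * eps with eps ~ N(0, I), which keeps sampling differentiable.
    eps = tf.random.normal(tf.shape(z_mean))
    return z_mean + tf.exp(0.5 * z_log_var) * eps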
Example #2
def get_unet(arg1, height, width, channel, trainable=True):
    inputs = Input(shape=(height, width, channel))
    conv1 = Conv2D(3, (1, 1), kernel_initializer='random_normal', activation='relu', trainable=trainable)(inputs)

    conv2 = Conv2D(3, (3, 3), kernel_initializer='random_normal', activation='relu', padding='same', trainable=trainable)(conv1)

    concat1 = concatenate([conv1, conv2], axis=-1)

    conv3 = Conv2D(3, (5, 5), activation='relu', kernel_initializer='truncated_normal', padding='same', trainable=trainable)(concat1)

    concat2 = concatenate([conv2, conv3], axis=-1)

    conv4 = Conv2D(3, (7, 7), activation='relu', kernel_initializer='random_normal', padding='same', trainable=trainable)(concat2)

    concat3 = concatenate([conv1, conv2, conv3, conv4], axis=-1)

    K = Conv2D(3, (3, 3), activation='relu', kernel_initializer='truncated_normal', padding='same', trainable=True)(concat3)

    # The output is computed element-wise as relu(K * x - K + 1).
    product = keras.layers.Multiply()([K, inputs])
    sum1 = keras.layers.Subtract()([product, K])
    sum2 = Lambda(lambda x: 1 + x)(sum1)
    out_layer = Lambda(lambda x: relu(x))(sum2)

    if arg1 == 1:
        model = Model(inputs=inputs, outputs=out_layer)
    else:
        model = Model(inputs=inputs, outputs=conv1)

    return model
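A minimal usage sketch, assuming the Keras names the function relies on (`Input`, `Conv2D`, `concatenate`, `Lambda`, `Model`, `relu`, `keras`) are imported; the input size below is arbitrary since the network is fully convolutional:

# arg1 == 1 selects the full relu(K * x - K + 1) output head;
# any other value returns a model truncated at conv1.
model = get_unet(arg1=1, height=128, width=128, channel=3, trainable=True)
model.summary()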
Example #3
 def call(self, inputs):
     # activations.relu names the leaky-ReLU slope `alpha`
     # rather than `negative_slope`.
     return activations.relu(inputs,
                             alpha=self.negative_slope,
                             max_value=self.max_value,
                             threshold=self.threshold)
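These keywords map directly onto `tf.keras.activations.relu(x, alpha, max_value, threshold)`: below `threshold` the slope is `alpha` (applied to `x - threshold`), between `threshold` and `max_value` the function is the identity, and above `max_value` the output is clipped. A small standalone check with arbitrary sample values:

import tensorflow as tf
from tensorflow.keras import activations

x = tf.constant([-2.0, -0.5, 0.5, 4.0, 8.0])
y = activations.relu(x, alpha=0.1, max_value=6.0, threshold=0.5)
print(y.numpy())  # leaky below 0.5, identity in the middle, clipped at 6.0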
Example #4
def fc_layer(layer, num_neurons):
    new_layer = Dense(num_neurons, kernel_initializer='he_normal')(layer)
    new_layer = batch_norm(new_layer,
                           updates_collections=None,
                           center=True,
                           scale=True)
    return activations.relu(new_layer)
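The `batch_norm` call with `updates_collections` looks like the TF1-era `tf.contrib.layers.batch_norm`. A rough sketch of the same Dense → BatchNorm → ReLU pattern written purely with `tf.keras`, under the assumption that an explicit `training` flag replaces the collections mechanism:

from tensorflow.keras import layers, activations

def fc_layer_v2(layer, num_neurons, training=False):
    # Dense -> BatchNorm -> ReLU, with Keras managing the moving statistics.
    new_layer = layers.Dense(num_neurons, kernel_initializer='he_normal')(layer)
    new_layer = layers.BatchNormalization(center=True, scale=True)(new_layer, training=training)
    return activations.relu(new_layer)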
Example #6
 def call(self, x, sigmoid_activation=True):
     x = self.conv1(x)
     x = self.conv2(x)
     x = self.flatten(x)
     x = self.fully_connected1(x)
     x = self.fully_connected2(x)
     if sigmoid_activation:
         return sigmoid(x)
     return relu(x)
Example #7
    def call(self, x):
        """
        Input:
            xyz1: input points position data, [B, C, N]
            xyz2: sampled input points position data, [B, C, S]
            points1: input points data, [B, D, N]
            points2: input points data, [B, D, S]
        Return:
            new_points: upsampled points data, [B, D', N]
        """
        xyz1, xyz2, points1, points2 = x
        xyz1 = tf.transpose(xyz1, (0, 2, 1))
        xyz2 = tf.transpose(xyz2, (0, 2, 1))

        points2 = tf.transpose(points2, (0, 2, 1))

        b, n, c = xyz1.shape
        _, s, _ = xyz2.shape
        B = b.value
        N = n.value
        S = s.value
        if S == 1:
            # Only one source point: broadcast it to every target point.
            interpolated_points = tf.tile(points2, (1, N, 1))
        else:
            # Inverse-distance-weighted interpolation over the three
            # nearest sampled points.
            dists = square_distance(xyz1, xyz2)
            idx = tf.argsort(dists, axis=-1)
            dists = tf.sort(dists, axis=-1)
            dists = dists[:, :, :3]
            idx = idx[:, :, :3]  # [B, N, 3]
            # Clamp tiny distances to avoid division by zero
            # (the PyTorch original does dists[dists < 1e-10] = 1e-10).
            dists = tf.maximum(dists, 1e-10)
            weight = 1.0 / dists  # [B, N, 3]
            weight = weight / tf.reduce_sum(weight, -1, keepdims=True)  # [B, N, 3]
            interpolated_points = tf.math.reduce_sum(
                tf.multiply(index_points(points2, idx),
                            tf.reshape(weight, (B, N, 3, 1))), 2)
        if points1 is not None:
            points1 = tf.transpose(points1, (0, 2, 1))
            new_points = tf.concat([points1, interpolated_points], -1)
        else:
            new_points = interpolated_points

        new_points = tf.transpose(new_points, (0, 2, 1))
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = relu(bn(conv(new_points)))
        return new_points
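The interpolation core above is plain inverse-distance weighting over the three nearest sampled points. A self-contained numeric sketch of just that weighting step, assuming TensorFlow 2.x eager mode:

import tensorflow as tf

# Squared distances from one target point to its three nearest sampled points.
dists = tf.constant([[0.1, 0.4, 0.5]])
weight = 1.0 / tf.maximum(dists, 1e-10)           # closer points weigh more
weight = weight / tf.reduce_sum(weight, -1, keepdims=True)
print(weight.numpy())  # each row sums to 1, with the nearest point dominating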
Example #8
  def test_relu(self):
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.relu(x)])
    positive_values = np.random.random((2, 5))
    result = f([positive_values])[0]
    self.assertAllClose(result, positive_values, rtol=1e-05)

    negative_values = np.random.uniform(-1, 0, (2, 5))
    result = f([negative_values])[0]
    expected = np.zeros((2, 5))
    self.assertAllClose(result, expected, rtol=1e-05)
Example #9
def ConvBlock(x, in_planes, out_planes, var_scope="ConvBlock", is_train=True):
    # bottleneck blocks
    if in_planes != out_planes:
        residual = downsample(x,
                              out_planes=out_planes,
                              var_scope=var_scope + '_downsample')
    else:
        residual = x

    out1 = tf.layers.batch_normalization(name=var_scope + '_bn1',
                                         trainable=True,
                                         inputs=x,
                                         training=is_train)
    out1 = activations.relu(out1)
    out1 = conv3x3(out_planes=int(out_planes / 2),
                   var_scope=var_scope + '_out1')(out1)

    out2 = tf.layers.batch_normalization(name=var_scope + '_bn2',
                                         trainable=True,
                                         inputs=out1,
                                         training=is_train)
    out2 = activations.relu(out2)
    out2 = conv3x3(out_planes=int(out_planes / 4),
                   var_scope=var_scope + '_out2')(out2)

    out3 = tf.layers.batch_normalization(name=var_scope + '_bn3',
                                         trainable=True,
                                         inputs=out2,
                                         training=is_train)
    out3 = activations.relu(out3)
    out3 = conv3x3(out_planes=int(out_planes / 4),
                   var_scope=var_scope + '_out3')(out3)

    # The branch widths are out_planes/2, out_planes/4, and out_planes/4,
    # so concatenating them restores exactly out_planes channels.
    out3 = layers.concatenate([out1, out2, out3],
                              axis=-1,
                              name=var_scope + '_concatenate')

    out3 += residual

    return out3
Example #10
def downsample(x, out_planes, var_scope="downsample", is_train=True):

    residual = tf.layers.batch_normalization(name=var_scope + '_bn',
                                             trainable=True,
                                             inputs=x,
                                             training=is_train)
    residual = activations.relu(residual)
    residual = layers.Conv2D(filters=out_planes,
                             kernel_size=1,
                             strides=1,
                             use_bias=False,
                             name=var_scope + '_conv1')(residual)
    return residual
Example #11
    def call(self, x):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        xyz, points = x
        xyz = tf.transpose(xyz, (0, 2, 1))
        if points is not None:
            points = tf.transpose(points, (0, 2, 1))

        b, n, c = xyz.shape
        B = b.value
        C = c.value
        S = self.npoint
        # Multi-scale grouping: sample S centroids, then group and
        # convolve neighborhoods at each radius before concatenating.
        new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
        new_points_list = []
        for i, radius in enumerate(self.radius_list):
            K = self.nsample_list[i]
            group_idx = query_ball_point(radius, K, xyz, new_xyz)
            grouped_xyz = index_points(xyz, group_idx)
            # Express grouped coordinates relative to their centroid.
            grouped_xyz = tf.subtract(grouped_xyz, tf.reshape(new_xyz, (B, S, 1, C)))
            if points is not None:
                grouped_points = index_points(points, group_idx)
                grouped_points = tf.concat([grouped_points, grouped_xyz], -1)
            else:
                grouped_points = grouped_xyz

            grouped_points = tf.transpose(grouped_points, (0, 3, 2, 1))  # [B, D, K, S]
            for j in range(len(self.conv_blocks[i])):
                conv = self.conv_blocks[i][j]
                bn = self.bn_blocks[i][j]
                grouped_points = relu(bn(conv(grouped_points)))
            new_points = tf.reduce_max(grouped_points, 2)  # [B, D', S]
            new_points_list.append(new_points)

        new_xyz = tf.transpose(new_xyz, (0, 2, 1))
        new_points_concat = tf.concat(new_points_list, -1)
        return new_xyz, new_points_concat
Example #12
    def call(self, inputs, **kwargs):
        # Pre-activation residual block: ReLU precedes each convolution,
        # and an optional projection aligns the skip connection.
        outputs = inputs

        for i in range(self.depth):
            outputs = activations.relu(outputs)
            outputs = self.conv_layers[i](outputs)

        outputs = self.residual_multiplier(outputs)

        if self.projection_layer is not None:
            inputs = self.projection_layer(inputs)

        outputs = inputs + outputs

        return outputs
Example #13
def conv_layer(layer,
               num_filters,
               k_size=(3, 3),
               shape=(-1, 28, 28, 1),
               padding="same"):
    new_layer = Conv2D(num_filters,
                       kernel_size=k_size,
                       strides=(1, 1),
                       padding=padding,
                       kernel_initializer='he_normal',
                       input_shape=shape)(layer)
    new_layer = batch_norm(new_layer,
                           updates_collections=None,
                           center=True,
                           scale=True)
    return activations.relu(new_layer)
Example #14
 def call(self, inputs):
   return activations.relu(inputs, max_value=self.max_value)
Example #15
 def call(self, inputs):
     x = relu(self.linear1(inputs))
     x = relu(self.linear2(x))
     x = relu(self.linear3(x))
     x = sigmoid(self.linear4(x))
     return x
Example #16
    def forward(self, x, is_train=True):
        '''
        Forward pass through the FAN network.
        '''
        x = activations.relu(
            self.bn1(self.conv1(x),
                     training=is_train,
                     name='FAN_begin_bn1',
                     trainable=True))
        with tf.compat.v1.variable_scope("FAN_start"):
            x = layers.AveragePooling2D(pool_size=(2, 2),
                                        strides=2,
                                        name='AvgPool2D_layer1')(ConvBlock(
                                            x,
                                            64,
                                            128,
                                            var_scope="ConvBlock_layer1",
                                            is_train=is_train))

            x = ConvBlock(x,
                          128,
                          128,
                          var_scope="ConvBlock_layer2",
                          is_train=is_train)
            x = ConvBlock(x,
                          128,
                          256,
                          var_scope="ConvBlock_layer3",
                          is_train=is_train)

        previous = x

        outputs = []
        self.last_feature = []
        for i in range(self.num_modules):
            hg = self._modules['m' + str(i)].forward(previous,
                                                     var_scope='HG_%d' % i,
                                                     is_train=is_train)

            ll = hg
            ll = self._modules['top_m_' + str(i)](
                ll,
                256,
                256,
                var_scope="ConvBlock_top_m_HG_%d" % i,
                is_train=is_train)

            ll = activations.relu(self._modules['bn_end' + str(i)](
                self._modules['conv_last' + str(i)](ll),
                training=is_train,
                name='bn_end_HG_%d' % i,
                trainable=True))

            # Predict heatmaps; tmp_out is the output of this stage.
            tmp_out = self._modules['l' + str(i)](ll)
            outputs.append(tmp_out)
            self.last_feature.append(ll)
            if i < self.num_modules - 1:
                # Form `previous`, the input to the next hourglass (HG) stage.
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                previous = previous + ll + tmp_out_

        self.logits = outputs
        return outputs
Example #17
def relu6(x):
    return relu(x, max_value=6)
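Assuming `relu` is `tf.keras.activations.relu`, a quick check of the cap:

import tensorflow as tf

x = tf.constant([-3.0, 2.0, 5.0, 9.0])
print(relu6(x).numpy())  # [0. 2. 5. 6.] -- negatives zeroed, values above 6 clipped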
Example #18
 def _correlation(args):
     x1 = args[0]
     x2 = args[1]
     x = relu(correlation(x1, x2, 1, 40, 1, 2, 40))
     return x