Example #1
    def _calculate_features(self, xy, wh, objectiveness, classes, anchors):
        shape = K.shape(xy)[1:3]  # width, height

        xy_sig = K.sigmoid(xy)
        # TODO rethink logic here, grid needs to be calculated just once after model initialization
        col = K.reshape(K.tile(K.arange(0, shape[0]), shape[0:1]),
                        (-1, shape[0]))
        row = K.reshape(K.tile(K.arange(0, shape[1]), shape[1:2]),
                        (-1, shape[1]))
        row = K.transpose(row)
        col = K.repeat_elements(K.reshape(col, (shape[0], shape[1], 1, 1)),
                                rep=len(anchors),
                                axis=-2)
        row = K.repeat_elements(K.reshape(row, (shape[0], shape[1], 1, 1)),
                                rep=len(anchors),
                                axis=-2)
        grid = K.concatenate((col, row), axis=-1)
        # TODO same thing for the anchors
        anchors_tensor = K.reshape(K.constant(anchors),
                                   [1, 1, 1, len(anchors), 2])

        # Normalize box centres by the grid size (cast to float to avoid an
        # int/float dtype mismatch in the division).
        box_xy = (xy_sig + K.cast(grid, K.dtype(xy_sig))) / K.cast(
            K.stack([shape[0], shape[1]]), K.dtype(xy_sig))

        box_wh = K.exp(wh) * anchors_tensor / K.cast(self.input_image_dims,
                                                     K.dtype(wh))

        obj_sig = K.sigmoid(objectiveness)
        class_sig = K.sigmoid(classes)

        return box_xy, box_wh, obj_sig, class_sig
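
A minimal sketch of how this decoder might be driven, assuming the raw detection head has already been reshaped to (batch, grid_h, grid_w, num_anchors, 5 + num_classes); the split indices and the caller below are assumptions, not part of the example above.

    def _decode_raw_output(self, raw, anchors):
        # Hypothetical caller: split the last axis into the pieces expected above.
        xy = raw[..., 0:2]             # raw box-centre offsets
        wh = raw[..., 2:4]             # raw width/height (log scale)
        objectiveness = raw[..., 4:5]  # objectness logit
        classes = raw[..., 5:]         # per-class logits
        return self._calculate_features(xy, wh, objectiveness, classes, anchors)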
Example #2
 def call(self, inputs):
     # Input = [X^H, X^L]
     assert len(inputs) == 2
     high_input, low_input = inputs
     # Transpose Convolution: High Channels -> High Channels
     high_to_high = K.conv2d_transpose(high_input, self.high_to_high_kernel, output_shape=self.high_out_shape,
                                       strides=self.strides, padding=self.padding,
                                       data_format="channels_last")
     # Transpose Convolution: High Channels -> Low Channels
     high_to_low = K.pool2d(high_input, (2, 2), strides=(2, 2), pool_mode="avg")
     high_to_low = K.conv2d_transpose(high_to_low, self.high_to_low_kernel, output_shape=self.low_out_shape,
                                      strides=self.strides, padding=self.padding,
                                      data_format="channels_last")
     # Transpose Convolution: Low Channels -> High Channels
     # Note: the transpose convolution first produces an intermediate output at the
     # low-frequency spatial size (with the high-frequency channel count), which is
     # then upsampled below.
     high_out_channels = self.high_out_shape[3]
     low_out_N, low_out_H, low_out_W = self.low_out_shape[:3]
     intermediate_shape = (low_out_N, low_out_H, low_out_W, high_out_channels)
     low_to_high = K.conv2d_transpose(low_input, self.low_to_high_kernel, output_shape=intermediate_shape,
                                      strides=self.strides, padding=self.padding,
                                      data_format="channels_last")
     low_to_high = K.repeat_elements(low_to_high, 2, axis=1)  # Nearest Neighbor Upsampling
     low_to_high = K.repeat_elements(low_to_high, 2, axis=2)
     # Transpose Convolution: Low Channels -> Low Channels
     low_to_low = K.conv2d_transpose(low_input, self.low_to_low_kernel, output_shape=self.low_out_shape,
                                     strides=self.strides, padding=self.padding,
                                     data_format="channels_last")
     # Cross Add
     high_add = high_to_high + low_to_high
     low_add = high_to_low + low_to_low
     return [high_add, low_add]
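
The pair of K.repeat_elements calls above is a nearest-neighbour 2x upsampling; the idiom can be checked in isolation (shapes below are only illustrative):

import numpy as np
from keras import backend as K

x = K.constant(np.arange(4.0).reshape(1, 2, 2, 1))  # (batch, H, W, C)
up = K.repeat_elements(x, 2, axis=1)                # -> (1, 4, 2, 1)
up = K.repeat_elements(up, 2, axis=2)               # -> (1, 4, 4, 1)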
Example #3
def merge_action_crossed_fn(x):
    # x = [action values, crossed flag, collision ("carpisma") timer]
    input = x[0]
    is_crossed_orig = x[1]
    # Binarize the crossed flag: anything above 0.1 becomes 1, otherwise 0.
    is_crossed_orig = K.switch(is_crossed_orig > 0.1,
                               K.maximum(is_crossed_orig, 1),
                               is_crossed_orig * 0)
    action_count = input.shape[1]
    # Spread the flag into a (batch, action_count) row: the flag value lands in
    # slot 0 and the remaining slots are zero.
    is_crossed = K.expand_dims(is_crossed_orig, axis=1)
    is_crossed = K.expand_dims(is_crossed, axis=1)
    is_crossed = K.temporal_padding(is_crossed, (0, action_count - 1))
    is_crossed = K.squeeze(is_crossed, axis=2)
    # Broadcast the flag across all actions to form a mask.
    is_crossed_mask = K.expand_dims(is_crossed_orig, axis=1)
    is_crossed_mask = K.repeat_elements(is_crossed_mask, action_count, axis=1)
    # When crossed, zero out the original action values and force action 0 on.
    res_crossed = (1 - is_crossed_mask) * input + is_crossed
    carpisma_timer_orig = x[2]
    carpisma_timer_orig = K.squeeze(carpisma_timer_orig, axis=2)
    # A collision ("carpisma") is active if any entry of the timer is non-zero.
    is_carpisma = K.sum(carpisma_timer_orig, axis=1)
    is_carpisma = K.switch(is_carpisma > 0.1, K.maximum(is_carpisma, 1),
                           is_carpisma * 0)
    not_carpisma = 1 - is_carpisma
    print("carpisma timer", carpisma_timer_orig)  # debug output
    print("is carpisma", is_carpisma.shape)
    print("not carpisma", not_carpisma.shape)
    # Suppress all action values while a collision timer is running.
    not_carpisma = K.expand_dims(not_carpisma, axis=1)
    not_carpisma = K.repeat_elements(not_carpisma, action_count, axis=1)
    res_crossed = res_crossed * not_carpisma
    res = K.concatenate([res_crossed, carpisma_timer_orig], axis=1)
    return res
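
The expand/pad/squeeze sequence above places the flag in slot 0 of an action-sized row; a standalone check, with action_count = 5 chosen only for illustration:

import numpy as np
from keras import backend as K

flag = K.constant(np.array([1.0, 0.0]))       # per-sample crossed flags, shape (batch,)
v = K.expand_dims(K.expand_dims(flag, 1), 1)  # (batch, 1, 1)
v = K.temporal_padding(v, (0, 4))             # (batch, 5, 1): flag in slot 0, zeros after
v = K.squeeze(v, axis=2)                      # (batch, 5)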
Example #4
 def call(self, inputs):
     # Input = [X^H, X^L]
     assert len(inputs) == 2
     high_input, low_input = inputs
     # Convolution: High Channels -> High Channels
     high_to_high = K.conv2d(high_input, self.high_to_high_kernel,
                             strides=self.strides, padding=self.padding,
                             data_format="channels_last")
     # Convolution: High Channels -> Low Channels
     high_to_low = K.pool2d(high_input, (2, 2), strides=(2, 2), pool_mode="avg")
     high_to_low = K.conv2d(high_to_low, self.high_to_low_kernel,
                            strides=self.strides, padding=self.padding,
                            data_format="channels_last")
     # Convolution: Low Channels -> High Channels
     low_to_high = K.conv2d(low_input, self.low_to_high_kernel,
                            strides=self.strides, padding=self.padding,
                            data_format="channels_last")
     low_to_high = K.repeat_elements(low_to_high, 2, axis=1)  # Nearest Neighbor Upsampling
     low_to_high = K.repeat_elements(low_to_high, 2, axis=2)
     # Convolution: Low Channels -> Low Channels
     low_to_low = K.conv2d(low_input, self.low_to_low_kernel,
                           strides=self.strides, padding=self.padding,
                           data_format="channels_last")
     # Cross Add
     high_add = high_to_high + low_to_high
     low_add = high_to_low + low_to_low
     return [high_add, low_add]
Example #5
 def reshape_function(self, lambdas):
     # Broadcast per-sample lambda values (assumed shape (batch, 1)) up to a full
     # image-sized tensor of shape (batch, width, height, channels).
     (width, height, channels) = 224, 224, 3
     c = K.repeat_elements(lambdas, width, axis=1)    # (batch, width)
     d = K.expand_dims(c)                             # (batch, width, 1)
     e = K.repeat_elements(d, height, axis=2)         # (batch, width, height)
     f = K.expand_dims(e)                             # (batch, width, height, 1)
     g = K.repeat_elements(f, channels, axis=3)       # (batch, width, height, channels)
     return g
 def organize_slices_for_feature_extraction(self, slab):
     if self.single_slice_out:
         return K.repeat_elements(slab, 3, 3)
     shape = K.shape(slab)
     B, H, W, C = shape[0], shape[1], shape[2], shape[3]
     slab = K.permute_dimensions(slab, (0, 3, 1, 2)) # (B, H, W, C) --> (B, C, H, W)
     slab = K.reshape(slab, K.stack([B*C, H, W])) # (B * C, H, W)
     slab = K.expand_dims(slab, axis=-1) # (B * C, H, W, 1)
     slab = K.repeat_elements(slab, 3, 3) # (B * C, H, W, 3)
     return slab
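
A standalone trace of the slab reshaping above, assuming self.single_slice_out is False and an 8-slice slab (all sizes are illustrative only):

import numpy as np
from keras import backend as K

slab = K.constant(np.zeros((2, 128, 128, 8), dtype="float32"))  # (B, H, W, C)
out = K.permute_dimensions(slab, (0, 3, 1, 2))                  # (B, C, H, W)
out = K.reshape(out, (2 * 8, 128, 128))                         # (B * C, H, W)
out = K.expand_dims(out, axis=-1)                               # (B * C, H, W, 1)
out = K.repeat_elements(out, 3, 3)                              # (B * C, H, W, 3): pseudo-RGB per slice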
Example #7
def concat_label(args):
    '''
    Tiles 2-D labels across the spatial dimensions of x and concatenates them to x
    along the channel axis, e.g. [[0, 1]] -> [[[0, 0], [0, 0]], [[1, 1], [1, 1]]]
    '''
    x, labels = args
    x_shape = K.int_shape(x)
    label_shape = K.int_shape(labels)
    output = K.reshape(labels, (K.shape(x)[0], 1, 1, label_shape[1]))
    output = K.repeat_elements(output, x_shape[1], axis=1)
    output = K.repeat_elements(output, x_shape[2], axis=2)

    return K.concatenate([x, output], axis=-1)
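
A hedged usage sketch: concat_label is typically wrapped in a Lambda layer so that labels can condition a convolutional model; image_input and label_input below are hypothetical Input tensors, not defined in the example above.

from keras.layers import Input, Lambda

image_input = Input(shape=(64, 64, 3))
label_input = Input(shape=(10,))
conditioned = Lambda(concat_label)([image_input, label_input])  # (batch, 64, 64, 3 + 10)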
Example #8
    def get_output(self, train=False):
        X = self.get_input(train)
        if self.dim_ordering == 'th':
            output = K.repeat_elements(X, self.size, axis=2)
#             output = K.repeat_elements(output, self.size[1], axis=3)
        elif self.dim_ordering == 'tf':
            output = K.repeat_elements(X, self.size, axis=1)
#             output = K.repeat_elements(output, self.size[1], axis=2)
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        # Multiply the upsampled tensor by the gradient of the pooling output with
        # respect to its input (T is assumed to be theano.tensor here).
        f = T.grad(T.sum(self._pool1d_layer.get_output(train)),
                   wrt=self._pool1d_layer.get_input(train)) * output

        return f
Example #9
 def call(self, x, training=None):
     # Draw one uniform value per position and share it across the channel axis.
     mask = K.random_uniform(K.shape(x)[:-1], 0.0, 1.0)
     mask = K.expand_dims(mask, -1)
     mask = K.repeat_elements(mask, K.int_shape(x)[-1], -1)
     # Where the mask falls below self.rate, replace the input with unit Gaussian noise.
     rand_x = K.switch(K.less(mask, self.rate),
                       K.random_normal(K.shape(x), 0.0, 1.0), x)
     # Apply the corruption only in the training phase.
     return K.in_train_phase(rand_x, x, training=training)
Example #10
 def selective_loss(y_true, y_pred):
     # The last column of y_pred is the selection head; the remaining columns are
     # the class probabilities.
     em_c = K.mean(y_pred[:, -1])
     # Cross-entropy weighted by the selection score, plus a penalty that keeps the
     # empirical coverage em_c above the target coverage c.
     loss = K.categorical_crossentropy(
         K.repeat_elements(y_pred[:, -1:], self.num_classes, axis=1) *
         y_true[:, :],
         y_pred[:, :-1]) + lamda * K.maximum(-em_c + c, 0)**2
     return loss
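
A hedged sketch of how a loss like this is typically attached; model, lamda, c and self.num_classes come from the enclosing SelectiveNet-style code and are assumed here, not defined above. The prediction head is expected to output num_classes class probabilities plus one selection score.

model.compile(optimizer='adam', loss=selective_loss)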
Example #11
 def pred_rc_recursive(input):
     ki = K.repeat_elements(K.expand_dims(input[1][:, :, 0], axis=-1),
                            input[0].shape[2], 2)
     temp = (input[0] - ki * K.reverse(input[0], axes=2)) / (1 -
                                                             ki * ki)
     temp = Concatenate(axis=2)([temp, input[1]])
     return temp
Example #12
    def call(self, inputs):
        """
        Method for the forward function of the layer.
        :param inputs: Input tensor
        :param kwargs: Additional keyword arguments for the base method
        :return: A tensor
        """
        #assert input_shape is not None and len(input_shape) >= 2

        expert_outputs, gate_outputs, final_outputs = [], [], []
        for expert_layer in self.expert_layers:
            expert_output = expand_dims(expert_layer(inputs), axis=2)
            expert_outputs.append(expert_output)
        expert_outputs = tf.concat(expert_outputs, 2)

        for gate_layer in self.gate_layers:
            gate_outputs.append(gate_layer(inputs))

        for gate_output in gate_outputs:
            expanded_gate_output = expand_dims(gate_output, axis=1)
            weighted_expert_output = expert_outputs * repeat_elements(
                expanded_gate_output, self.units, axis=1)
            final_outputs.append(sum(weighted_expert_output, axis=2))
        # The returned list has dimensions num_tasks * batch * units
        return final_outputs
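
A hedged usage sketch (mmoe_layer and shared_input below are assumptions): each tensor in the returned list has shape (batch, units) and feeds one task-specific tower.

from tensorflow.keras.layers import Dense

task_outputs = []
for mixture in mmoe_layer(shared_input):          # one (batch, units) tensor per task
    task_outputs.append(Dense(1, activation='sigmoid')(mixture))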
Example #13
def vgg16(input_shape: Tuple[int, ...], output_shape: Tuple[int,
                                                            ...]) -> Model:
    """Return VGG16 Keras model."""
    num_classes = output_shape[0]
    image_input = Input(shape=input_shape)
    resized_input = image_input

    if len(input_shape) < 3:
        resized_input = Lambda(lambda x: K.repeat_elements(
            tf.expand_dims(x, -1), 3, -1))(resized_input)
        input_shape = (input_shape[0], input_shape[1], 3)

    if image_is_smaller_than_vgg_min(resized_input.shape):
        resized_input = Lambda(
            lambda x: tf.image.resize(x, VGG16_MIN_IMAGE_SIZE))(resized_input)
        input_shape = VGG16_MIN_IMAGE_SIZE + (3, )

    vgg = VGG16(include_top=False, input_shape=input_shape)
    for layer in vgg.layers:
        layer.trainable = False

    extracted_features = vgg(resized_input)
    flattened_features = Flatten()(extracted_features)
    class1 = Dense(1024, activation='relu')(flattened_features)
    output = Dense(num_classes, activation='softmax')(class1)
    model = Model(inputs=image_input, outputs=output)

    return model
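
A hedged usage example: a 28x28 single-channel input exercises both branches above, first repeating the grey channel three times and then resizing to VGG16's minimum input size (image_is_smaller_than_vgg_min and VGG16_MIN_IMAGE_SIZE are assumed to be defined alongside this function).

model = vgg16(input_shape=(28, 28), output_shape=(10,))
model.summary()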
Example #14
def tile_context_feature(feat, max_sequence_size):
    """
    Tile context features to max_sequence_size; sequence features are returned unchanged.

    Parameters
    ----------
    feat: Tensor
        Feature tensor to be tiled
        Shape: [batch_size, max_len] or [batch_size, sequence_size, max_len]
    max_sequence_size: int
        Maximum number of records per query

    Returns
    -------
    Tensor
        Tiled tensor of shape [batch_size, max_sequence_size, max_len]

    """
    return tf.cond(
        tf.equal(tf.rank(feat), tf.constant(2)),
        true_fn=lambda: K.repeat_elements(
            tf.expand_dims(feat, axis=1), rep=max_sequence_size, axis=1
        ),
        false_fn=lambda: feat,
    )
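
A hedged example (TF2 eager execution assumed, with K imported from the Keras backend as in the original module): a 2-D context feature is tiled across the sequence axis, while a feature that is already 3-D passes through unchanged.

import tensorflow as tf

context = tf.zeros((32, 16))                                  # [batch_size, max_len]
tiled = tile_context_feature(context, max_sequence_size=25)   # [32, 25, 16]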