Code example #1
def sam_vgg(data):
    # conv_1
    trainable = True
    conv_1_out = Conv2D(64, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block1_conv1',
                        trainable=trainable)(data)
    conv_1_out = Conv2D(64, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block1_conv2',
                        trainable=trainable)(conv_1_out)

    ds_conv_1_out = MaxPooling2D((2, 2), strides=(2, 2),
                                 name='block1_pool')(conv_1_out)

    # conv_2
    conv_2_out = Conv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block2_conv1',
                        trainable=trainable)(ds_conv_1_out)
    conv_2_out = Conv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block2_conv2',
                        trainable=trainable)(conv_2_out)

    ds_conv_2_out = MaxPooling2D((2, 2), strides=(2, 2),
                                 name='block2_pool')(conv_2_out)

    # conv_3
    conv_3_out = Conv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block3_conv1',
                        trainable=trainable)(ds_conv_2_out)
    conv_3_out = Conv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block3_conv2',
                        trainable=trainable)(conv_3_out)
    conv_3_out = Conv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block3_conv3',
                        trainable=trainable)(conv_3_out)

    ds_conv_3_out = MaxPooling2D((2, 2),
                                 strides=(2, 2),
                                 name='block3_pool',
                                 padding='same')(conv_3_out)

    # conv_4
    conv_4_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block4_conv1',
                        trainable=trainable)(ds_conv_3_out)
    conv_4_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block4_conv2',
                        trainable=trainable)(conv_4_out)
    conv_4_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block4_conv3',
                        trainable=trainable)(conv_4_out)

    ds_conv_4_out = MaxPooling2D((2, 2),
                                 strides=(2, 2),
                                 name='block4_pool',
                                 padding='same')(conv_4_out)

    # conv_5
    conv_5_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block5_conv1',
                        trainable=trainable)(ds_conv_4_out)
    conv_5_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block5_conv2',
                        trainable=trainable)(conv_5_out)
    conv_5_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block5_conv3',
                        trainable=trainable)(conv_5_out)

    conv_6_out = MaxPooling2D((2, 2),
                              strides=(2, 2),
                              name='block5_pool',
                              padding='same')(conv_5_out)

    # salconv_6 #
    salconv_6_out = Conv2D(512, (5, 5),
                           padding='same',
                           activation='relu',
                           trainable=trainable)(conv_6_out)
    salconv_6_out = Conv2D(512, (5, 5),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_6_out)

    salconv_5_out = Conv2D(512, (3, 3),
                           padding='same',
                           activation='relu',
                           trainable=trainable)(conv_5_out)
    salconv_5_out = Conv2D(512, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_5_out)

    edgeconv_5_out = Conv2D(512, (3, 3),
                            padding='same',
                            activation='relu',
                            trainable=trainable)(conv_5_out)
    edgeconv_5_out = Conv2D(512, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_5_out)

    salconv_4_out = Conv2D(256, (3, 3),
                           padding='same',
                           activation='relu',
                           trainable=trainable)(conv_4_out)
    salconv_4_out = Conv2D(256, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_4_out)

    edgeconv_4_out = Conv2D(256, (3, 3),
                            padding='same',
                            activation='relu',
                            trainable=trainable)(conv_4_out)
    edgeconv_4_out = Conv2D(256, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_4_out)

    salconv_3_out = Conv2D(256, (3, 3),
                           padding='same',
                           activation='relu',
                           trainable=trainable)(conv_3_out)
    salconv_3_out = Conv2D(256, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_3_out)

    edgeconv_3_out = Conv2D(256, (3, 3),
                            padding='same',
                            activation='relu',
                            trainable=trainable)(conv_3_out)
    edgeconv_3_out = Conv2D(256, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_3_out)

    salconv_2_out = Conv2D(128, (3, 3),
                           padding='same',
                           activation='relu',
                           trainable=trainable)(conv_2_out)
    salconv_2_out = Conv2D(128, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_2_out)

    edgeconv_2_out = Conv2D(128, (3, 3),
                            padding='same',
                            activation='relu',
                            trainable=trainable)(conv_2_out)
    edgeconv_2_out = Conv2D(128, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_2_out)

    salconv_1_out = Conv2D(64, (3, 3),
                           padding='same',
                           activation='relu',
                           trainable=trainable)(conv_1_out)
    salconv_1_out = Conv2D(64, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_1_out)

    edgeconv_1_out = Conv2D(64, (3, 3),
                            padding='same',
                            activation='relu',
                            trainable=trainable)(conv_1_out)
    edgeconv_1_out = Conv2D(64, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_1_out)

    # saliency from conv_6 #
    saliency6 = Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       trainable=trainable)(salconv_6_out)
    saliency6_up = UpsampleLike()([saliency6, conv_1_out])

    # saliency from conv_5 #
    edge5 = Conv2D(1, (1, 1),
                   padding='same',
                   activation='sigmoid',
                   trainable=trainable)(edgeconv_5_out)
    edge5_up = UpsampleLike()([edge5, conv_1_out])

    salconv_5_out = Concatenate()(
        [salconv_5_out,
         UpsampleLike()([saliency6, salconv_5_out])])
    salconv_5_out = Conv2D(256, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_5_out)
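
    # Multi-scale spatial attention: each branch below collapses the features to
    # a single-channel map, applies a softmax over all spatial positions, and the
    # averaged map is broadcast back to (H, W, C) via RepeatVector/Permute/Reshape
    # to gate the features, followed by a residual Add. The hard-coded Reshape
    # sizes (14, 28, 56, 112, 224) assume a 224x224 input; note that
    # dilation_rate has no effect on a 1x1 kernel.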

    attention_5a = Conv2D(1, (1, 1), padding='same',
                          activation='sigmoid')(salconv_5_out)
    attention_5a = Flatten()(attention_5a)
    attention_5a = Softmax()(attention_5a)

    attention_5b = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3))\
        (MaxPooling2D((2, 2), strides=(2, 2), padding='same')(salconv_5_out))
    attention_5b = UpsampleLike()([attention_5b, salconv_5_out])
    attention_5b = Flatten()(attention_5b)
    attention_5b = Softmax()(attention_5b)

    attention_5 = Average()([attention_5a, attention_5b])

    attention_5 = RepeatVector(256)(attention_5)
    attention_5 = Permute((2, 1))(attention_5)
    attention_5 = Reshape((14, 14, 256), name='att_conv5')(attention_5)
    attention_5 = Multiply()([attention_5, salconv_5_out])
    salconv_5_out = Add()([attention_5, salconv_5_out])

    salconv_5_out = Concatenate()([salconv_5_out, edge5])
    salconv_5_out = Conv2D(256, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_5_out)
    saliency5 = Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       trainable=trainable)(salconv_5_out)
    saliency5_up = UpsampleLike()([saliency5, conv_1_out])

    # saliency from conv_4 #
    edgeconv_4_out = Concatenate()(
        [edgeconv_4_out,
         UpsampleLike()([edge5, salconv_4_out])])
    edgeconv_4_out = Conv2D(128, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_4_out)
    edge4 = Conv2D(1, (1, 1),
                   padding='same',
                   activation='sigmoid',
                   trainable=trainable)(edgeconv_4_out)
    edge4_up = UpsampleLike()([edge4, conv_1_out])

    salconv_4_out = Concatenate()([
        salconv_4_out,
        UpsampleLike()([saliency6, salconv_4_out]),
        UpsampleLike()([saliency5, salconv_4_out])
    ])
    salconv_4_out = Conv2D(128, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_4_out)

    attention_4a = Conv2D(1, (1, 1), padding='same',
                          activation='sigmoid')(salconv_4_out)
    attention_4a = Flatten()(attention_4a)
    attention_4a = Softmax()(attention_4a)

    attention_4b = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((2, 2), strides=(2, 2), padding='same')(salconv_4_out))
    attention_4b = UpsampleLike()([attention_4b, salconv_4_out])
    attention_4b = Flatten()(attention_4b)
    attention_4b = Softmax()(attention_4b)

    attention_4c = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((4, 4), strides=(4, 4), padding='same')(salconv_4_out))
    attention_4c = UpsampleLike()([attention_4c, salconv_4_out])
    attention_4c = Flatten()(attention_4c)
    attention_4c = Softmax()(attention_4c)

    attention_4 = Average()([attention_4a, attention_4b, attention_4c])

    attention_4 = RepeatVector(128)(attention_4)
    attention_4 = Permute((2, 1))(attention_4)
    attention_4 = Reshape((28, 28, 128), name='att_conv4')(attention_4)
    attention_4 = Multiply()([attention_4, salconv_4_out])
    salconv_4_out = Add()([attention_4, salconv_4_out])

    salconv_4_out = Concatenate()([salconv_4_out, edge4])
    salconv_4_out = Conv2D(128, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_4_out)
    saliency4 = Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       trainable=trainable)(salconv_4_out)
    saliency4_up = UpsampleLike()([saliency4, conv_1_out])

    # saliency from conv_3 #
    edgeconv_3_out = Concatenate()([
        edgeconv_3_out,
        UpsampleLike()([edge4, salconv_3_out]),
        UpsampleLike()([edge5, salconv_3_out])
    ])
    edgeconv_3_out = Conv2D(128, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_3_out)
    edge3 = Conv2D(1, (1, 1),
                   padding='same',
                   activation='sigmoid',
                   trainable=trainable)(edgeconv_3_out)
    edge3_up = UpsampleLike()([edge3, conv_1_out])

    salconv_3_out = Concatenate()([
        salconv_3_out,
        UpsampleLike()([saliency6, salconv_3_out]),
        UpsampleLike()([saliency5, salconv_3_out]),
        UpsampleLike()([saliency4, salconv_3_out])
    ])
    salconv_3_out = Conv2D(128, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_3_out)

    attention_3a = Conv2D(1, (1, 1), padding='same',
                          activation='sigmoid')(salconv_3_out)
    attention_3a = Flatten()(attention_3a)
    attention_3a = Softmax()(attention_3a)

    attention_3b = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((2, 2), strides=(2, 2), padding='same')(salconv_3_out))
    attention_3b = UpsampleLike()([attention_3b, salconv_3_out])
    attention_3b = Flatten()(attention_3b)
    attention_3b = Softmax()(attention_3b)

    attention_3c = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((4, 4), strides=(4, 4), padding='same')(salconv_3_out))
    attention_3c = UpsampleLike()([attention_3c, salconv_3_out])
    attention_3c = Flatten()(attention_3c)
    attention_3c = Softmax()(attention_3c)

    attention_3d = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((8, 8), strides=(8, 8), padding='same')(salconv_3_out))
    attention_3d = UpsampleLike()([attention_3d, salconv_3_out])
    attention_3d = Flatten()(attention_3d)
    attention_3d = Softmax()(attention_3d)

    attention_3 = Average()(
        [attention_3a, attention_3b, attention_3c, attention_3d])

    attention_3 = RepeatVector(128)(attention_3)
    attention_3 = Permute((2, 1))(attention_3)
    attention_3 = Reshape((56, 56, 128), name='att_conv3')(attention_3)
    attention_3 = Multiply()([attention_3, salconv_3_out])
    salconv_3_out = Add()([attention_3, salconv_3_out])

    salconv_3_out = Concatenate()([salconv_3_out, edge3])
    salconv_3_out = Conv2D(128, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_3_out)
    saliency3 = Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       trainable=trainable)(salconv_3_out)
    saliency3_up = UpsampleLike()([saliency3, conv_1_out])

    # saliency from conv_2 #
    edgeconv_2_out = Concatenate()([
        edgeconv_2_out,
        UpsampleLike()([edge5, salconv_2_out]),
        UpsampleLike()([edge4, salconv_2_out]),
        UpsampleLike()([edge3, salconv_2_out])
    ])
    edgeconv_2_out = Conv2D(64, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_2_out)
    edge2 = Conv2D(1, (1, 1),
                   padding='same',
                   activation='sigmoid',
                   trainable=trainable)(edgeconv_2_out)
    edge2_up = UpsampleLike()([edge2, conv_1_out])

    salconv_2_out = Concatenate()([
        salconv_2_out,
        UpsampleLike()([saliency6, salconv_2_out]),
        UpsampleLike()([saliency5, salconv_2_out]),
        UpsampleLike()([saliency4, salconv_2_out]),
        UpsampleLike()([saliency3, salconv_2_out])
    ])
    salconv_2_out = Conv2D(64, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_2_out)

    attention_2a = Conv2D(1, (1, 1), padding='same',
                          activation='sigmoid')(salconv_2_out)
    attention_2a = Flatten()(attention_2a)
    attention_2a = Softmax()(attention_2a)

    attention_2b = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((2, 2), strides=(2, 2), padding='same')(salconv_2_out))
    attention_2b = UpsampleLike()([attention_2b, salconv_2_out])
    attention_2b = Flatten()(attention_2b)
    attention_2b = Softmax()(attention_2b)

    attention_2c = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((4, 4), strides=(4, 4), padding='same')(salconv_2_out))
    attention_2c = UpsampleLike()([attention_2c, salconv_2_out])
    attention_2c = Flatten()(attention_2c)
    attention_2c = Softmax()(attention_2c)

    attention_2d = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((8, 8), strides=(8, 8), padding='same')(salconv_2_out))
    attention_2d = UpsampleLike()([attention_2d, salconv_2_out])
    attention_2d = Flatten()(attention_2d)
    attention_2d = Softmax()(attention_2d)

    attention_2e = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((16, 16), strides=(16, 16), padding='same')(salconv_2_out))
    attention_2e = UpsampleLike()([attention_2e, salconv_2_out])
    attention_2e = Flatten()(attention_2e)
    attention_2e = Softmax()(attention_2e)

    attention_2 = Average()(
        [attention_2a, attention_2b, attention_2c, attention_2d, attention_2e])

    attention_2 = RepeatVector(64)(attention_2)
    attention_2 = Permute((2, 1))(attention_2)
    attention_2 = Reshape((112, 112, 64), name='att_conv2')(attention_2)
    attention_2 = Multiply()([attention_2, salconv_2_out])
    salconv_2_out = Add()([attention_2, salconv_2_out])

    salconv_2_out = Concatenate()([salconv_2_out, edge2])
    salconv_2_out = Conv2D(64, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_2_out)
    saliency2 = Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       trainable=trainable)(salconv_2_out)
    saliency2_up = UpsampleLike()([saliency2, conv_1_out])

    # saliency from conv_1 #
    edgeconv_1_out = Concatenate()([
        edgeconv_1_out,
        UpsampleLike()([edge5, salconv_1_out]),
        UpsampleLike()([edge4, salconv_1_out]),
        UpsampleLike()([edge3, salconv_1_out]),
        UpsampleLike()([edge2, salconv_1_out])
    ])
    edgeconv_1_out = Conv2D(32, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            trainable=trainable)(edgeconv_1_out)
    edge1 = Conv2D(1, (1, 1),
                   padding='same',
                   activation='sigmoid',
                   trainable=trainable)(edgeconv_1_out)

    salconv_1_out = Concatenate()([
        salconv_1_out,
        UpsampleLike()([saliency6, salconv_1_out]),
        UpsampleLike()([saliency5, salconv_1_out]),
        UpsampleLike()([saliency4, salconv_1_out]),
        UpsampleLike()([saliency3, salconv_1_out]),
        UpsampleLike()([saliency2, salconv_1_out])
    ])
    salconv_1_out = Conv2D(32, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_1_out)

    attention_1a = Conv2D(1, (1, 1), padding='same',
                          activation='sigmoid')(salconv_1_out)
    attention_1a = Flatten()(attention_1a)
    attention_1a = Softmax()(attention_1a)

    attention_1b = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((2, 2), strides=(2, 2), padding='same')(salconv_1_out))
    attention_1b = UpsampleLike()([attention_1b, salconv_1_out])
    attention_1b = Flatten()(attention_1b)
    attention_1b = Softmax()(attention_1b)

    attention_1c = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((4, 4), strides=(4, 4), padding='same')(salconv_1_out))
    attention_1c = UpsampleLike()([attention_1c, salconv_1_out])
    attention_1c = Flatten()(attention_1c)
    attention_1c = Softmax()(attention_1c)

    attention_1d = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((8, 8), strides=(8, 8), padding='same')(salconv_1_out))
    attention_1d = UpsampleLike()([attention_1d, salconv_1_out])
    attention_1d = Flatten()(attention_1d)
    attention_1d = Softmax()(attention_1d)

    attention_1e = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((16, 16), strides=(16, 16), padding='same')(salconv_1_out))
    attention_1e = UpsampleLike()([attention_1e, salconv_1_out])
    attention_1e = Flatten()(attention_1e)
    attention_1e = Softmax()(attention_1e)

    attention_1f = Conv2D(1, (1, 1), padding='same', activation='sigmoid', dilation_rate=(3, 3)) \
        (MaxPooling2D((32, 32), strides=(32, 32), padding='same')(salconv_1_out))
    attention_1f = UpsampleLike()([attention_1f, salconv_1_out])
    attention_1f = Flatten()(attention_1f)
    attention_1f = Softmax()(attention_1f)

    attention_1 = Average()([
        attention_1a, attention_1b, attention_1c, attention_1d, attention_1e,
        attention_1f
    ])

    attention_1 = RepeatVector(32)(attention_1)
    attention_1 = Permute((2, 1))(attention_1)
    attention_1 = Reshape((224, 224, 32), name='att_conv1')(attention_1)
    attention_1 = Multiply()([attention_1, salconv_1_out])
    salconv_1_out = Add()([attention_1, salconv_1_out])

    salconv_1_out = Concatenate()([salconv_1_out, edge1])
    salconv_1_out = Conv2D(32, (3, 3),
                           padding='same',
                           activation='sigmoid',
                           trainable=trainable)(salconv_1_out)
    saliency1 = Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       trainable=trainable)(salconv_1_out)

    return [
        saliency6_up, saliency5_up, edge5_up, saliency4_up, edge4_up,
        saliency3_up, edge3_up, saliency2_up, edge2_up, saliency1, edge1
    ]
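
Note: UpsampleLike is not a stock Keras layer. A minimal sketch of such a layer,
assuming a TensorFlow 2.x backend (the original project may implement it
differently; keras-retinanet, for example, also resizes via tf.image):

import tensorflow as tf
from tensorflow.keras.layers import Layer

class UpsampleLike(Layer):
    """Resize the first input to the spatial size of the second input."""

    def call(self, inputs):
        source, target = inputs
        target_shape = tf.shape(target)
        # Nearest-neighbour resize to the target's (height, width).
        return tf.image.resize(source,
                               (target_shape[1], target_shape[2]),
                               method='nearest')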
Code example #2
    def build_extractor_29layers_v2(self, name, block, layers):

        in_img = Input(shape=(*self.in_size_hw, 1))

        X = self._mfm(in_img,
                      name=name + '_mfm1',
                      out_channels=48,
                      kernel_size=5,
                      strides=1)
        X = Average()([
            MaxPooling2D(pool_size=2, padding='same')(X),
            AveragePooling2D(pool_size=2, padding='same')(X)
        ])
        X = self._make_layer(X,
                             name=name + '_layers1',
                             block=block,
                             num_blocks=layers[0],
                             out_channels=48)
        X = self._group(X,
                        name=name + '_group1',
                        in_channels=48,
                        out_channels=96,
                        kernel_size=3,
                        strides=1)
        X = Average()([
            MaxPooling2D(pool_size=2, padding='same')(X),
            AveragePooling2D(pool_size=2, padding='same')(X)
        ])
        X = self._make_layer(X,
                             name=name + '_layers2',
                             block=block,
                             num_blocks=layers[1],
                             out_channels=96)
        X = self._group(X,
                        name=name + '_group2',
                        in_channels=96,
                        out_channels=192,
                        kernel_size=3,
                        strides=1)
        X = Average()([
            MaxPooling2D(pool_size=2, padding='same')(X),
            AveragePooling2D(pool_size=2, padding='same')(X)
        ])
        X = self._make_layer(X,
                             name=name + '_layers3',
                             block=block,
                             num_blocks=layers[2],
                             out_channels=192)
        X = self._group(X,
                        name=name + '_group3',
                        in_channels=192,
                        out_channels=128,
                        kernel_size=3,
                        strides=1)
        X = self._make_layer(X,
                             name=name + '_layers4',
                             block=block,
                             num_blocks=layers[3],
                             out_channels=128)
        X = self._group(X,
                        name=name + '_group4',
                        in_channels=128,
                        out_channels=128,
                        kernel_size=3,
                        strides=1)
        X = Average()([
            MaxPooling2D(pool_size=2, padding='same')(X),
            AveragePooling2D(pool_size=2, padding='same')(X)
        ])
        feat = Dense(256,
                     name=name + '_dense1',
                     kernel_regularizer=regularizers.l2(0.0005))(Flatten()(X))

        ret_extractor = Model(inputs=in_img, outputs=feat, name=name)
        ret_extractor.summary()

        return ret_extractor
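
The Average of MaxPooling2D and AveragePooling2D above is a hybrid pooling
scheme repeated throughout this extractor. A small helper capturing the pattern
(hypothetical name, assuming the same Keras layers are in scope):

def hybrid_pool(x, pool_size=2):
    # Element-wise mean of max- and average-pooled feature maps.
    return Average()([
        MaxPooling2D(pool_size=pool_size, padding='same')(x),
        AveragePooling2D(pool_size=pool_size, padding='same')(x),
    ])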
Code example #3
                             weights=None)(embedded_sequences)
elif model_choice == 2:
    gru_kata = Bidirectional(GRU(EMBEDDING_DIM,
                                 return_sequences=True,
                                 dropout=dropout,
                                 recurrent_dropout=rec_dropout),
                             merge_mode=merge_m,
                             weights=None)(rtwo)
else:
    combine = 1  # input('Enter 1 for Add, 2 for Subtract, 3 for Multiply, 4 for Average, 5 for Maximum: ')
    if combine == 2:
        merge = Subtract()([embedded_sequences, rtwo])
    elif combine == 3:
        merge = Multiply()([embedded_sequences, rtwo])
    elif combine == 4:
        merge = Average()([embedded_sequences, rtwo])
    elif combine == 5:
        merge = Maximum()([embedded_sequences, rtwo])
    else:
        merge = Add()([embedded_sequences, rtwo])
    gru_kata = Bidirectional(GRU(EMBEDDING_DIM,
                                 return_sequences=True,
                                 dropout=dropout,
                                 recurrent_dropout=rec_dropout,
                                 trainable=gtrainable),
                             merge_mode=merge_m,
                             weights=None)(merge)

crf = CRF(len(label.index) + 1, learn_mode='marginal')(gru_kata)

preds = Dense(len(label.index) + 1, activation='softmax')(gru_kata)
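
The combine dispatch above can be written more compactly; a sketch assuming the
same merge layers are imported (Add remains the default, as in the else branch):

MERGE_LAYERS = {1: Add, 2: Subtract, 3: Multiply, 4: Average, 5: Maximum}
merge = MERGE_LAYERS.get(combine, Add)()([embedded_sequences, rtwo])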
Code example #4
    y = mobilenet.mobilenet_by_me(
        name='temporal',
        inputs=input_y,
        input_shape=(224, 224, 20),
        classes=classes,
        weight='weights/mobilenet_temporal{}_{}e.h5'.format(opt_size, tem_epochs))
else:
    y = mobilenet.mobilenet_by_me(
        name='temporal',
        inputs=input_y,
        input_shape=(224, 224, 20),
        classes=classes)

# Fusion
if fusion == 'avg':
    z = Average()([x, y])
elif fusion == 'max':
    z = Maximum()([x, y])
elif fusion == 'concat':
    z = Concatenate()([x, y])
elif fusion == 'conv':
    z = Concatenate()([x, y])
    z = Conv2D(256, (1, 1), use_bias=True)(z)
else:
    z = Multiply()([x, y])

z = GlobalAveragePooling2D()(z)
if fusion == 'concat':
    z = Reshape((1, 1, 2048))(z)
elif fusion == 'conv':
    z = Reshape((1, 1, 256))(z)
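
Note on the reshapes above: Average, Maximum and Multiply all require x and y to
have identical shapes, while 'concat' stacks the two (presumably 1024-channel)
streams into 2048 channels and 'conv' projects the concatenation down to 256,
hence the differing Reshape targets.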
Code example #5
File: rn.py Project: IMVDsports/inter-rel-net
def create_relationships(rel_type,
                         g_theta_model,
                         p1_joints,
                         p2_joints,
                         use_attention=False,
                         use_relations=True,
                         attention_proj_size=None,
                         return_attention=False):
    g_theta_outs = []

    if not use_relations:
        for object_i in p1_joints:
            g_theta_outs.append(g_theta_model([object_i]))

        if use_attention:
            # Output may be a tuple if return_attention is True; the second element is the attention vector
            return IRNAttention(
                projection_size=attention_proj_size,
                return_attention=return_attention)(g_theta_outs)
        else:
            rel_out = Average()(g_theta_outs)

        return rel_out

    if rel_type == 'inter' or rel_type == 'p1_p2_all_bidirectional':
        # All joints from person1 connected to all joints of person2, and back
        for object_i in p1_joints:
            for object_j in p2_joints:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        for object_i in p2_joints:
            for object_j in p1_joints:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        rel_out = Average()(g_theta_outs)
    elif rel_type == 'intra' or rel_type == 'indivs':

        indiv1_avg = create_relationships('p1_p1_all', g_theta_model,
                                          p1_joints, p2_joints)

        indiv2_avg = create_relationships('p2_p2_all', g_theta_model,
                                          p1_joints, p2_joints)

        rel_out = Concatenate()([indiv1_avg, indiv2_avg])
    elif rel_type == 'inter_and_indivs':
        # All joints from person1 connected to all joints of person2, and back
        for object_i in p1_joints:
            for object_j in p2_joints:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        for object_i in p2_joints:
            for object_j in p1_joints:
                g_theta_outs.append(g_theta_model([object_i, object_j]))

        # All joints from person1 connected to all other joints of itself
        for idx, object_i in enumerate(p1_joints):
            for object_j in p1_joints[idx + 1:]:
                # for object_j in p1_joints[idx:]:
                g_theta_outs.append(g_theta_model([object_i, object_j]))

        # All joints from person2 connected to all other joints of itself
        for idx, object_i in enumerate(p2_joints):
            for object_j in p2_joints[idx + 1:]:
                # for object_j in p2_joints[idx:]:
                g_theta_outs.append(g_theta_model([object_i, object_j]))

        rel_out = Average()(g_theta_outs)
    elif rel_type == 'p1_p2_all':
        # All joints from person1 connected to all joints of person2
        for object_i in p1_joints:
            for object_j in p2_joints:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        rel_out = Average()(g_theta_outs)
    elif rel_type == 'p1_p1_all':
        # All joints from person1 connected to all other joints of itself
        for idx, object_i in enumerate(p1_joints):
            for object_j in p1_joints[idx + 1:]:
                # for object_j in p1_joints[idx:]:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        if use_attention:
            return IRNAttention(
                projection_size=attention_proj_size,
                return_attention=return_attention)(g_theta_outs)
        else:
            rel_out = Average()(g_theta_outs)
    elif rel_type == 'p2_p2_all':
        # All joints from person2 connected to all other joints of itself
        rel_out = create_relationships('p1_p1_all', g_theta_model, p2_joints,
                                       p1_joints)
    elif rel_type == 'p1_p1_all_bidirectional':
        # All joints from person1 connected to all other joints of itself, and back
        rel_out = create_relationships('p1_p2_all_bidirectional',
                                       g_theta_model, p1_joints, p1_joints)
    elif rel_type == 'p2_p2_all_bidirectional':
        # All joints from person2 connected to all other joints of itself, and back
        rel_out = create_relationships('p1_p2_all_bidirectional',
                                       g_theta_model, p2_joints, p2_joints)
    elif rel_type == 'p1_p1_all-p2_p2_all':
        # All joints from person1 connected to all other joints of itself
        for idx, object_i in enumerate(p1_joints):
            for object_j in p1_joints[idx + 1:]:
                # for object_j in p1_joints[idx:]:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        for idx, object_i in enumerate(p2_joints):
            for object_j in p2_joints[idx + 1:]:
                # for object_j in p1_joints[idx:]:
                g_theta_outs.append(g_theta_model([object_i, object_j]))
        rel_out = Average()(g_theta_outs)
    else:
        raise ValueError('Invalid rel_type: {}'.format(rel_type))

    return rel_out
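
A hypothetical call, averaging all pairwise inter-person relations (the names
follow the function signature above):

rel_features = create_relationships('inter', g_theta_model, p1_joints, p2_joints)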
Code example #6
def main():
    try:
        os.mkdir('./img')
    except OSError:
        pass

    # face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')
    # detector = MTCNN()

    # load model and weights
    img_size = 64
    img_idx = 0
    detected = ''  # TODO: make this not a local variable
    time_detection = 0
    time_network = 0
    time_plot = 0
    skip_frame = 1  # run detection and the network forward pass every skip_frame frames
    ad = 0.6

    # Parameters
    num_capsule = 3
    dim_capsule = 16
    routings = 2
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    num_classes = 3
    image_size = 64
    num_primcaps = 7 * 3
    m_dim = 5
    S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

    model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d,
                             S_set)()
    model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d,
                                 S_set)()

    num_primcaps = 8 * 8 * 3
    S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

    model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d,
                                 S_set)()

    print('Loading models ...')

    weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
    model1.load_weights(weight_file1)
    print('Finished loading model 1.')

    weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
    model2.load_weights(weight_file2)
    print('Finished loading model 2.')

    weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
    model3.load_weights(weight_file3)
    print('Finished loading model 3.')

    inputs = Input(shape=(64, 64, 3))
    x1 = model1(inputs)  #1x1
    x2 = model2(inputs)  #var
    x3 = model3(inputs)  #w/o
    avg_model = Average()([x1, x2, x3])
    model = Model(inputs=inputs, outputs=avg_model)
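    # Average() forms a simple ensemble here: all three FSA-Net variants take the
    # same 64x64x3 input and emit predictions of identical shape, so their
    # outputs can be averaged element-wise.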

    # load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
    modelPath = os.path.sep.join(
        ["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"])
    net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    # capture video
    #cap = cv2.VideoCapture(0)
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024*1)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768*1)

    print('Start detecting pose ...')
    detected_pre = np.empty((1, 1, 1))

    while True:
        # get video frame
        #ret, input_img = cap.read()
        #input_img = cv2.imread('HELEN_147110327_1_4.jpg')
        photo_path = '/media/nuaa301/OS/师兄程序/资料整理/资料整理/论文程序/演示系统程序/python_test3/img1/103.jpg'
        input_img = cv2.imread(photo_path)

        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            time_detection = 0
            time_network = 0
            time_plot = 0

            # detect faces using LBP detector
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            # detected = face_cascade.detectMultiScale(gray_img, 1.1)
            # detected = detector.detect_faces(input_img)
            # pass the blob through the network and obtain the detections and
            # predictions
            blob = cv2.dnn.blobFromImage(cv2.resize(input_img,
                                                    (300, 300)), 1.0,
                                         (300, 300), (104.0, 177.0, 123.0))
            net.setInput(blob)
            detected = net.forward()

            if detected_pre.shape[2] > 0 and detected.shape[2] == 0:
                detected = detected_pre

            faces = np.empty((detected.shape[2], img_size, img_size, 3))

            input_img = draw_results_ssd(detected, input_img, faces, ad,
                                         img_size, img_w, img_h, model,
                                         time_detection, time_network,
                                         time_plot)
            # cv2.imwrite('img/'+str(img_idx)+'.png',input_img)

        else:
            input_img = draw_results_ssd(detected, input_img, faces, ad,
                                         img_size, img_w, img_h, model,
                                         time_detection, time_network,
                                         time_plot)

        if detected.shape[2] > detected_pre.shape[2] or img_idx % (skip_frame *
                                                                   3) == 0:
            detected_pre = detected

        key = cv2.waitKey(0)
Code example #7
def carnn(embedding_matrix,
          config,
          compare_out_size=CARNN_COMPARE_LAYER_OUTSIZE,
          rnn_size=CARNN_RNN_SIZE,
          rnn_dropout=CARNN_AGGREATION_DROPOUT):
    q1 = Input(shape=(config['max_length'], ), dtype='int32', name='q1_input')
    q2 = Input(shape=(config['max_length'], ), dtype='int32', name='q2_input')
    activation = 'elu'
    compare_dim = 500
    compare_dropout = 0.2
    embedding_layer = Embedding(embedding_matrix.shape[0],
                                embedding_matrix.shape[1],
                                trainable=config['embed_trainable'],
                                weights=[embedding_matrix]
                                # mask_zero=True
                                )

    q1_embed = embedding_layer(q1)
    q2_embed = embedding_layer(q2)  # (batch, max_length, emb_dims)
    q1_embed = BatchNormalization(axis=2)(q1_embed)
    q2_embed = BatchNormalization(axis=2)(q2_embed)
    q1_embed = SpatialDropout1D(config['spatial_dropout_rate'])(q1_embed)
    q2_embed = SpatialDropout1D(config['spatial_dropout_rate'])(q2_embed)

    highway_encoder = TimeDistributed(Highway(activation='relu'))
    self_attention = SelfAttention(d_model=embedding_matrix.shape[1])

    q1_encoded = highway_encoder(q1_embed)
    q2_encoded = highway_encoder(q2_embed)

    s1_encoded = self_attention(q1, q1_encoded)
    s2_encoded = self_attention(q2, q2_encoded)

    # Attention
    q1_aligned, q2_aligned = soft_attention_alignment(q1_encoded, q2_encoded)

    # Compare
    q1_combined1 = Concatenate()([
        q1_encoded,
        q2_aligned,
        interaction(q1_encoded, q2_aligned),
    ])
    q1_combined2 = Concatenate()([
        q2_aligned,
        q1_encoded,
        interaction(q1_encoded, q2_aligned),
    ])

    q2_combined1 = Concatenate()([
        q2_encoded,
        q1_aligned,
        interaction(q2_encoded, q1_aligned),
    ])
    q2_combined2 = Concatenate()([
        q1_aligned,
        q2_encoded,
        interaction(q2_encoded, q1_aligned),
    ])

    s1_combined1 = Concatenate()([
        q1_encoded,
        s1_encoded,
        interaction(q1_encoded, s1_encoded),
    ])
    s1_combined2 = Concatenate()([
        s1_encoded,
        q1_encoded,
        interaction(q1_encoded, s1_encoded),
    ])

    s2_combined1 = Concatenate()([
        q2_encoded,
        s2_encoded,
        interaction(q2_encoded, s2_encoded),
    ])
    s2_combined2 = Concatenate()([
        s2_encoded,
        q2_encoded,
        interaction(q2_encoded, s2_encoded),
    ])

    compare_layers_d = [
        Dense(compare_dim, activation=activation),
        Dropout(compare_dropout),
        Dense(compare_out_size, activation=activation),
        Dropout(compare_dropout),
    ]

    compare_layers_g = [
        Dense(compare_dim, activation=activation),
        Dropout(compare_dropout),
        Dense(compare_out_size, activation=activation),
        Dropout(compare_dropout),
    ]

    # NOTE these can be optimized
    q1_compare1 = time_distributed(q1_combined1, compare_layers_d)
    q1_compare2 = time_distributed(q1_combined2, compare_layers_d)
    q1_compare = Average()([q1_compare1, q1_compare2])

    q2_compare1 = time_distributed(q2_combined1, compare_layers_d)
    q2_compare2 = time_distributed(q2_combined2, compare_layers_d)
    q2_compare = Average()([q2_compare1, q2_compare2])

    s1_compare1 = time_distributed(s1_combined1, compare_layers_g)
    s1_compare2 = time_distributed(s1_combined2, compare_layers_g)
    s1_compare = Average()([s1_compare1, s1_compare2])

    s2_compare1 = time_distributed(s2_combined1, compare_layers_g)
    s2_compare2 = time_distributed(s2_combined2, compare_layers_g)
    s2_compare = Average()([s2_compare1, s2_compare2])

    # Aggregate
    q1_encoded = Concatenate()([q1_encoded, q1_compare, s1_compare])
    q2_encoded = Concatenate()([q2_encoded, q2_compare, s2_compare])

    aggreate_rnn = CuDNNGRU(rnn_size, return_sequences=True)
    q1_aggreated = aggreate_rnn(q1_encoded)
    q1_aggreated = Dropout(rnn_dropout)(q1_aggreated)
    q2_aggreated = aggreate_rnn(q2_encoded)
    q2_aggreated = Dropout(rnn_dropout)(q2_aggreated)

    # Pooling
    q1_rep = apply_multiple(q1_aggreated, [
        GlobalAvgPool1D(),
        GlobalMaxPool1D(),
    ])
    q2_rep = apply_multiple(q2_aggreated, [
        GlobalAvgPool1D(),
        GlobalMaxPool1D(),
    ])

    q_diff = Lambda(lambda x: K.abs(x[0] - x[1]))([q1_rep, q2_rep])
    q_multi = Lambda(lambda x: x[0] * x[1])([q1_rep, q2_rep])

    feature_input = Input(shape=(config['feature_length'], ))
    feature_dense = BatchNormalization()(feature_input)
    feature_dense = Dense(config['dense_dim'],
                          activation='relu')(feature_dense)
    h_all1 = Concatenate()([q1_rep, q2_rep, q_diff, q_multi, feature_dense])
    h_all2 = Concatenate()([q2_rep, q1_rep, q_diff, q_multi, feature_dense])
    h_all1 = Dropout(0.5)(h_all1)
    h_all2 = Dropout(0.5)(h_all2)

    dense = Dense(256, activation='relu')

    h_all1 = dense(h_all1)
    h_all2 = dense(h_all2)
    h_all = Average()([h_all1, h_all2])
    predictions = Dense(1, activation='sigmoid')(h_all)
    model = Model(inputs=[q1, q2, feature_input], outputs=predictions)
    opt = optimizers.get(config['optimizer'])
    K.set_value(opt.lr, config['learning_rate'])
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[f1])
    return model
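
Hypothetical sketches of the time_distributed and apply_multiple helpers used
above (they are not stock Keras, and the original project may differ):

from keras.layers import Concatenate, TimeDistributed

def time_distributed(x, layers):
    # Apply a list of layers in sequence, each wrapped in TimeDistributed.
    for layer in layers:
        x = TimeDistributed(layer)(x)
    return x

def apply_multiple(x, layers):
    # Apply several pooling layers to one input and concatenate the results.
    return Concatenate()([layer(x) for layer in layers])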
Code example #8
def make_model(arch):
    n = 2  # 9 will give 58 weight layers (downsample separate)
    x_in = Input(shape=x_train.shape[1:], name='Input')
    # Convolution before first block.
    x = Conv2D(16, (3, 3), padding='same', use_bias=False,
               name='Conv_Start')(x_in)
    x = BatchNormalization(name='BN_Start')(x)
    x = Activation('relu', name='ReLU_Start')(x)

    # Block A
    # Make feature map depth match
    x_ = Conv2D(128, (1, 1), name='Conv_1x1_A')(x)
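    # Note: this 1x1 projection is computed once and reused as the shortcut for
    # every residual pair in Block A, whereas Blocks B and C refresh x_ on each
    # loop iteration.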
    for i in range(0, n):
        # First conv
        x = Conv2D(128, (3, 3),
                   padding='same',
                   use_bias=False,
                   name='Conv_A{}'.format(2 * i + 1))(x)
        x = BatchNormalization(name='BN_A{}'.format(2 * i + 1))(x)
        x = Activation('relu', name='ReLU_A{}'.format(2 * i + 1))(x)
        x = Dropout(0.25, name='Dropout_A{}'.format(2 * i + 1))(x)
        # Second conv
        x = Conv2D(128, (3, 3),
                   padding='same',
                   use_bias=False,
                   name='Conv_A{}'.format(2 * i + 2))(x)
        x = BatchNormalization(name='BN_A{}'.format(2 * i + 2))(x)
        # Merge
        x = Add(name='Merge_A{}'.format(2 * i + 2))([x, x_])
        x = Activation('relu', name='ReLU_A{}'.format(2 * i + 2))(x)
        x = Dropout(0.25, name='Dropout_A{}'.format(2 * i + 2))(x)

    # First branch (difference) classifier
    if arch != 'baseline':
        y_1 = Flatten(name='Flatten_Alpha')(x)
    if arch == 'digest' or arch == 'avg' or arch == 'aux':
        y_1 = Dense(512, name='Dense_Alpha')(y_1)
        y_1 = BatchNormalization(name='BN_Alpha')(y_1)
        y_1 = Activation('relu', name='ReLU_Alpha')(y_1)
        y_1 = Dropout(0.5, name='Dropout_Alpha')(y_1)
        y_1 = Dense(num_classes, activation='softmax',
                    name='Classifier_Alpha')(y_1)

    # Block B
    # Downsample with convolution of stride 2
    x = Conv2D(256, (3, 3),
               strides=2,
               padding='same',
               use_bias=False,
               name='Downsample_Conv_B')(x)
    for i in range(0, n):
        x_ = x
        x = Conv2D(256, (3, 3),
                   padding='same',
                   use_bias=False,
                   name='Conv_B{}'.format(2 * i + 1))(x)
        x = BatchNormalization(name='BN_B{}'.format(2 * i + 1))(x)
        x = Activation('relu', name='ReLU_B{}'.format(2 * i + 1))(x)
        x = Dropout(0.25, name='Dropout_B{}'.format(2 * i + 1))(x)
        x = Conv2D(256, (3, 3),
                   padding='same',
                   use_bias=False,
                   name='Conv_B{}'.format(2 * i + 2))(x)
        x = BatchNormalization(name='BN_B{}'.format(2 * i + 2))(x)
        x = Add(name='Merge_B{}'.format(2 * i + 2))([x, x_])
        x = Activation('relu', name='ReLU_B{}'.format(2 * i + 2))(x)
        x = Dropout(0.25, name='Dropout_B{}'.format(2 * i + 2))(x)

    # Second branch (difference) classifier
    if arch != 'baseline':
        y_2 = Flatten(name='Flatten_Beta')(x)
    if arch == 'digest' or arch == 'avg' or arch == 'aux':
        y_2 = Dense(512, name='Dense_Beta')(y_2)
        y_2 = BatchNormalization(name='BN_Beta')(y_2)
        y_2 = Activation('relu', name='ReLU_Beta')(y_2)
        y_2 = Dropout(0.5, name='Dropout_Beta')(y_2)
        y_2 = Dense(num_classes, activation='softmax',
                    name='Classifier_Beta')(y_2)

    # Block C
    # Downsample with convolution of stride 2
    x = Conv2D(512, (3, 3),
               strides=2,
               padding='same',
               use_bias=False,
               name='Downsample_Conv_C')(x)
    for i in range(0, n):
        x_ = x
        x = Conv2D(512, (3, 3),
                   padding='same',
                   use_bias=False,
                   name='Conv_C{}'.format(2 * i + 1))(x)
        x = BatchNormalization(name='BN_C{}'.format(2 * i + 1))(x)
        x = Activation('relu', name='ReLU_C{}'.format(2 * i + 1))(x)
        x = Dropout(0.25, name='Dropout_C{}'.format(2 * i + 1))(x)
        x = Conv2D(512, (3, 3),
                   padding='same',
                   use_bias=False,
                   name='Conv_C{}'.format(2 * i + 2))(x)
        x = BatchNormalization(name='BN_C{}'.format(2 * i + 2))(x)
        x = Add(name='Merge_C{}'.format(2 * i + 2))([x, x_])
        x = Activation('relu', name='ReLU_C{}'.format(2 * i + 2))(x)
        x = Dropout(0.25, name='Dropout_C{}'.format(2 * i + 2))(x)

    # Top
    x = GlobalAveragePooling2D(name='Global_Average')(x)

    # Third branch (difference) classifier
    if arch != 'baseline':
        y_3 = Flatten(name='Flatten_Gamma')(x)
    if arch == 'digest' or arch == 'avg':
        y_3 = Dense(512, name='Dense_Gamma')(y_3)
        y_3 = BatchNormalization(name='BN_Gamma')(y_3)
        y_3 = Activation('relu', name='ReLU_Gamma')(y_3)
        y_3 = Dropout(0.5, name='Dropout_Gamma')(y_3)
        y_3 = Dense(num_classes, activation='softmax',
                    name='Classifier_Gamma')(y_3)

    # Main classifier
    if arch == 'digest':
        x = Concatenate(name='Classifications_Differance')([y_1, y_2, y_3])
        x = Dense(512, name='Dense_Gestalt')(x)
        x = BatchNormalization(name='BN_Gestalt')(x)
        x = Activation('relu', name='ReLU_Gestalt')(x)
        x = Dropout(0.5, name='Dropout_Gestalt')(x)
        y = Dense(num_classes, activation='softmax',
                  name='Classifier_Gestalt')(x)

    elif arch == 'avg':
        y = Average(name='Classification_Average')([y_1, y_2, y_3])

    elif arch == 'aux':
        x = Dense(512, name='Dense_FC')(y_3)
        x = BatchNormalization(name='BN_FC')(x)
        x = Activation('relu', name='ReLU_FC')(x)
        x = Dropout(0.5, name='Dropout_FC')(x)
        y = Dense(num_classes, activation='softmax', name='Classifier_Main')(x)

    elif arch == 'skip':
        x = Concatenate(name='Concat_Layer')([y_1, y_2, y_3])
        x = Dense(512, name='Dense_Top')(x)
        x = BatchNormalization(name='BN_Top')(x)
        x = Activation('relu', name='ReLU_Top')(x)
        x = Dropout(0.5, name='Dropout_Top')(x)
        y = Dense(num_classes, activation='softmax', name='Classifier')(x)

    elif arch == 'baseline':
        #x = Flatten(name="Flatten_Top")(x)
        #x = Dense(512, name='Dense_Top')(x)
        #x = BatchNormalization(name='BN_Top')(x)
        #x = Activation('relu', name='ReLU_Top')(x)
        #x = Dropout(0.5, name='Dropout_Top')(x)
        y = Dense(num_classes, activation='softmax', name='Classifier')(x)

    if arch == 'digest' or arch == 'avg':
        model = Model(inputs=x_in, outputs=[y, y_3, y_2, y_1])
    elif arch == 'aux':
        model = Model(inputs=x_in, outputs=[y, y_2, y_1])
    else:
        model = Model(inputs=x_in, outputs=y)

    # Halve learning rate every 10 epochs
    #decay = 1 / ((train_samples * augment_factor / batch_size) * 10)
    optimizer = keras.optimizers.SGD(lr=0.1, momentum=0.9, nesterov=True)

    return model, optimizer
Code example #9
from keras.layers import Average, Input
from keras.models import Model


def ensembleModels(models, model_input):
    # taken from https://medium.com/@twt446/ensemble-and-store-models-in-keras-2-x-b881a6d7693f
    yModels = [model(model_input) for model in models]
    yAvg = Average()(yModels)
    modelEns = Model(inputs=model_input, outputs=yAvg, name='ensemble')
    return modelEns
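
A hypothetical usage, assuming `models` holds trained Keras models with
identical input and output shapes:

model_input = Input(shape=models[0].input_shape[1:])
ensemble = ensembleModels(models, model_input)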
Code example #10
def run(dataset_path, vocab_path, model_dir):

    print('Load vocabulary')
    vocab = Dictionary.load(vocab_path)
    vocab_size = len(vocab.token2id)
    print('vocab_size:', vocab_size)

    print('Load', dataset_path)
    f = h5py.File(dataset_path, 'r')

    X_train = f['x_train'][()]  # Dataset.value is deprecated in h5py; use [()]
    y_train = f['y_train'][()]

    print('X_train.shape:', X_train.shape)
    print('y_train.shape:', y_train.shape)

    max_train_size = 10000000
    print('Cutoff train data to %d examples' % max_train_size)
    X_train = X_train[:max_train_size, :]
    y_train = y_train[:max_train_size]

    print('X_train.shape:', X_train.shape)
    print('y_train.shape:', y_train.shape)

    print('Shuffle dataset')
    indices = np.arange(X_train.shape[0])
    np.random.shuffle(indices)
    X_train = X_train[indices]
    y_train = y_train[indices]

    vec_dims = 100
    aggregation_type = 'concat'  # 'average', 'sum' or 'concat'
    win_size = X_train.shape[1]
    print('win_size:', win_size)

    # Define input shape
    inputs = Input(shape=(win_size, ), dtype='int32')

    # The embedding layer maps each token id in the window to a `vec_dims`-dim
    # vector. Output shape: (-1, win_size, vec_dims)
    word_vectors = Embedding(vocab_size, vec_dims)(inputs)

    # The embedding layer output is fed into `win_size` lambda layers that
    # each outputs a word vector.
    sliced_word_vector = [
        Lambda(lambda x, i=i: x[:, i, :],  # bind i early to avoid the late-binding closure pitfall
               output_shape=(vec_dims, ))(word_vectors)
        for i in range(win_size)
    ]

    # Aggregate the word vectors
    if aggregation_type == 'concat':
        h = Concatenate()(sliced_word_vector)
    elif aggregation_type == 'average':
        h = Average()(sliced_word_vector)
    elif aggregation_type == 'sum':
        h = Add()(sliced_word_vector)
    else:
        raise ValueError('Invalid row aggregation')
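    # Shape note: 'concat' yields a (win_size * vec_dims)-dim vector, while
    # 'average' and 'sum' stay at vec_dims; the Dense layer below infers its
    # input size either way.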

    # Feed the aggregated word vectors into a dense layer that returns
    # a probability for each word in the vocabulary
    probs = Dense(vocab_size, activation='softmax')(h)

    model = Model(inputs, probs)

    # Compile the model with SGD optimizer and cross entropy loss
    model.compile(optimizer=SGD(lr=0.05),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    epochs_per_fit = 1

    for i in range(100):

        start = current_time_ms()
        h = model.fit(X_train,
                      y_train,
                      batch_size=256,
                      epochs=epochs_per_fit,
                      verbose=0)
        mean_epoch_time = (current_time_ms() - start) / epochs_per_fit

        epoch = i * epochs_per_fit
        loss = h.history['loss'][-1]
        acc = h.history['acc'][-1]

        print('epoch %d: loss=%f acc=%f time=%f' %
              (epoch, loss, acc, mean_epoch_time))

        checkpoint_dir = os.path.join(model_dir, ts_rand())
        os.makedirs(checkpoint_dir, exist_ok=True)
        checkpoint_path = os.path.join(checkpoint_dir, 'w2v_model.h5')
        print('Save model:', checkpoint_path)
        model.save(checkpoint_path)
Code example #11
File: processvid.py Project: johndpope/Dr.Face
model1.load_weights(weight_file1)
print('Finished loading model 1.')

weight_file2 = '/content/FSA-Net/pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
model2.load_weights(weight_file2)
print('Finished loading model 2.')

weight_file3 = '/content/FSA-Net/pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
model3.load_weights(weight_file3)
print('Finished loading model 3.')

inputs = Input(shape=(64, 64, 3))
x1 = model1(inputs)  # 1x1
x2 = model2(inputs)  # var
x3 = model3(inputs)  # w/o
avg_model = Average()([x1, x2, x3])

model = Model(inputs=inputs, outputs=avg_model)

detector = face_detection.build_detector(
  "RetinaNetResNet50", confidence_threshold=.5, nms_iou_threshold=.3)

"""
    SORT: A Simple, Online and Realtime Tracker
    Copyright (C) 2016 Alex Bewley [email protected]
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
Code example #12
File: Headpose.py Project: dqhuy140598/PyFace
  def __init__(self,fsanet_model_path,option=None):

      if option is None:

         self.option = {
              'image_size': 64,
              'num_capsule': 3,
              'dim_capsule': 16,
              'routings': 2,
              'stage_num': [3, 3, 3],
              'lambda_d': 1,
              'num_classes': 3,
              'num_primcaps': 7 * 3,
              'm_dim': 5,
              'ad': 0.6
         }
      else:
          self.option = option

      self.image_size = self.option['image_size']
      self.channels = 3

      S_set = [self.option['num_capsule'],
               self.option['dim_capsule'],
               self.option['routings'],
               self.option['num_primcaps'],
               self.option['m_dim']]

      model1 = FSA_net_Capsule(self.option['image_size'], self.option['num_classes'],
                               self.option['stage_num'], self.option['lambda_d'], S_set)()

      model2 = FSA_net_Var_Capsule(self.option['image_size'], self.option['num_classes'],
                                   self.option['stage_num'], self.option['lambda_d'], S_set)()

      self.option['num_primcaps'] = 8*8*3

      S_set = [self.option['num_capsule'], 
                    self.option['dim_capsule'], 
                    self.option['routings'], 
                    self.option['num_primcaps'], 
                    self.option['m_dim']
                   ]

      model3 = FSA_net_noS_Capsule(self.image_size, self.option['num_classes'], self.option['stage_num'], self.option['lambda_d'], S_set)()

      print('Loading models ...')

      weight_file1 = fsanet_model_path[0]
      model1.load_weights(weight_file1)
      print('Finished loading model 1.')

      weight_file2 = fsanet_model_path[1]
      model2.load_weights(weight_file2)
      print('Finished loading model 2.')

      weight_file3 = fsanet_model_path[2]
      model3.load_weights(weight_file3)
      print('Finished loading model 3.')

      inputs = Input(shape=(self.image_size,self.image_size,self.channels))

      x1 = model1(inputs) #1x1
      x2 = model2(inputs) #var
      x3 = model3(inputs) #w/o

      avg_model = Average()([x1,x2,x3])

      self.model = Model(inputs=inputs, outputs=avg_model)
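
A hedged sketch of the inference helper such a wrapper typically pairs with; the method name, preprocessing, and cv2/numpy imports are assumptions, not part of PyFace:

  def predict(self, face_bgr):
      # resize the already-cropped face and add a batch axis
      face = cv2.resize(face_bgr, (self.image_size, self.image_size))
      face = np.expand_dims(face.astype('float32'), axis=0)
      yaw, pitch, roll = self.model.predict(face)[0]
      return yaw, pitch, roll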
Code Example #13
0
File: octopus.py Project: LordHui/octopus
    def __init__(self, num=8, img_size=1080):
        self.num = num
        self.img_size = img_size
        self.inputs = []
        self.poses = []
        self.ts = []

        images = [
            Input(shape=(self.img_size, self.img_size, 3),
                  name='image_{}'.format(i)) for i in range(self.num)
        ]
        Js = [
            Input(shape=(25, 3), name='J_2d_{}'.format(i))
            for i in range(self.num)
        ]

        self.inputs.extend(images)
        self.inputs.extend(Js)

        pose_raw = np.load(
            os.path.join(os.path.dirname(__file__),
                         '../assets/mean_a_pose.npy'))
        pose_raw[:3] = 0.
        pose = tf.reshape(
            batch_rodrigues(pose_raw.reshape(-1, 3).astype(np.float32)),
            (-1, ))
        trans = np.array([0., 0.2, -2.3])

        batch_size = tf.shape(images[0])[0]

        conv2d_0 = Conv2D(8, (3, 3),
                          strides=(2, 2),
                          activation='relu',
                          kernel_initializer='he_normal',
                          trainable=False)
        maxpool_0 = MaxPool2D((2, 2))

        conv2d_1 = Conv2D(16, (3, 3),
                          activation='relu',
                          kernel_initializer='he_normal',
                          trainable=False)
        maxpool_1 = MaxPool2D((2, 2))

        conv2d_2 = Conv2D(32, (3, 3),
                          activation='relu',
                          kernel_initializer='he_normal',
                          trainable=False)
        maxpool_2 = MaxPool2D((2, 2))

        conv2d_3 = Conv2D(64, (3, 3),
                          activation='relu',
                          kernel_initializer='he_normal',
                          trainable=False)
        maxpool_3 = MaxPool2D((2, 2))

        conv2d_4 = Conv2D(128, (3, 3), trainable=False)
        maxpool_4 = MaxPool2D((2, 2))

        flat = Flatten()
        self.image_features = flat

        latent_code = Dense(20, name='latent_shape')

        pose_trans = tf.tile(
            tf.expand_dims(tf.concat((trans, pose), axis=0), 0),
            (batch_size, 1))
        posetrans_init = Input(tensor=pose_trans, name='posetrans_init')
        self.inputs.append(posetrans_init)

        J_flat = Flatten()
        concat_pose = Concatenate()

        latent_pose_from_I = Dense(200,
                                   name='latent_pose_from_I',
                                   activation='relu',
                                   trainable=False)
        latent_pose_from_J = Dense(200,
                                   name='latent_pose_from_J',
                                   activation='relu',
                                   trainable=False)
        latent_pose = Dense(100, name='latent_pose')
        posetrans_res = Dense(24 * 3 * 3 + 3,
                              name='posetrans_res',
                              kernel_initializer=RandomNormal(stddev=0.01),
                              trainable=False)
        posetrans = Add(name='posetrans')

        dense_layers = []

        for i, (J, image) in enumerate(zip(Js, images)):
            conv2d_0_i = conv2d_0(image)
            maxpool_0_i = maxpool_0(conv2d_0_i)

            conv2d_1_i = conv2d_1(maxpool_0_i)
            maxpool_1_i = maxpool_1(conv2d_1_i)

            conv2d_2_i = conv2d_2(maxpool_1_i)
            maxpool_2_i = maxpool_2(conv2d_2_i)

            conv2d_3_i = conv2d_3(maxpool_2_i)
            maxpool_3_i = maxpool_3(conv2d_3_i)

            conv2d_4_i = conv2d_4(maxpool_3_i)
            maxpool_4_i = maxpool_4(conv2d_4_i)

            # shape
            flat_i = flat(maxpool_4_i)

            latent_code_i = latent_code(flat_i)
            dense_layers.append(latent_code_i)

            # pose
            J_flat_i = J_flat(J)
            latent_pose_from_I_i = latent_pose_from_I(flat_i)
            latent_pose_from_J_i = latent_pose_from_J(J_flat_i)

            concat_pose_i = concat_pose(
                [latent_pose_from_I_i, latent_pose_from_J_i])
            latent_pose_i = latent_pose(concat_pose_i)
            posetrans_res_i = posetrans_res(latent_pose_i)
            posetrans_i = posetrans([posetrans_res_i, posetrans_init])

            self.poses.append(
                Lambda(lambda x: tf.reshape(x[:, 3:], (-1, 24, 3, 3)),
                       name='pose_{}'.format(i))(posetrans_i))
            self.ts.append(
                Lambda(lambda x: x[:, :3],
                       name='trans_{}'.format(i))(posetrans_i))

        if self.num > 1:
            self.dense_merged = Average(
                name='merged_latent_shape')(dense_layers)
        else:
            self.dense_merged = NameLayer(name='merged_latent_shape')(
                dense_layers[0])

        # betas
        self.betas = Dense(10, name='betas',
                           trainable=False)(self.dense_merged)

        with open(
                os.path.join(os.path.dirname(__file__),
                             '../assets/smpl_sampling.pkl'), 'rb') as f:
            sampling = pkl.load(f)

        M = sampling['meshes']
        U = sampling['up']
        D = sampling['down']
        A = sampling['adjacency']

        self.faces = M[0].f.astype(np.int32)

        low_res = D[-1].shape[0]
        tf_U = [sparse_to_tensor(u) for u in U]
        tf_A = [list(map(sparse_to_tensor, chebyshev_polynomials(a, 3)))
                for a in A]  # materialize so tf_A[i] is indexable in Python 3

        shape_features_dense = Dense(
            low_res * 64,
            kernel_initializer=RandomNormal(stddev=0.003),
            name='shape_features_flat')(self.dense_merged)
        shape_features = Reshape((low_res, 64),
                                 name="shape_features")(shape_features_dense)

        conv_l3 = GraphConvolution(32,
                                   tf_A[3],
                                   activation='relu',
                                   name='conv_l3',
                                   trainable=False)(shape_features)
        unpool_l2 = Lambda(lambda v: sparse_dot_adj_batch(tf_U[2], v),
                           name='unpool_l2')(conv_l3)
        conv_l2 = GraphConvolution(16,
                                   tf_A[2],
                                   activation='relu',
                                   name='conv_l2',
                                   trainable=False)(unpool_l2)
        unpool_l1 = Lambda(lambda v: sparse_dot_adj_batch(tf_U[1], v),
                           name='unpool_l1')(conv_l2)
        conv_l1 = GraphConvolution(16,
                                   tf_A[1],
                                   activation='relu',
                                   name='conv_l1',
                                   trainable=False)(unpool_l1)
        unpool_l0 = Lambda(lambda v: sparse_dot_adj_batch(tf_U[0], v),
                           name='unpool_l0')(conv_l1)
        conv_l0 = GraphConvolution(3,
                                   tf_A[0],
                                   activation='tanh',
                                   name='offsets_pre')(unpool_l0)

        self.offsets = Lambda(lambda x: x / 10., name='offsets')(conv_l0)

        smpl = SmplTPoseLayer(theta_in_rodrigues=False,
                              theta_is_perfect_rotmtx=False)
        smpls = [
            NameLayer('smpl_{}'.format(i))(smpl(
                [p, self.betas, t, self.offsets]))
            for i, (p, t) in enumerate(zip(self.poses, self.ts))
        ]

        self.vertices = [
            Lambda(lambda s: s[0], name='vertices_{}'.format(i))(smpl)
            for i, smpl in enumerate(smpls)
        ]

        # we only need one instance per batch for laplace
        self.vertices_tposed = Lambda(lambda s: s[1],
                                      name='vertices_tposed')(smpls[0])
        vertices_naked = Lambda(lambda s: s[2],
                                name='vertices_naked')(smpls[0])

        # tuple-parameter lambdas are Python-2-only syntax; unpack explicitly
        self.laplacian = Lambda(
            lambda v: compute_laplacian_diff(v[0], v[1], self.faces),
            name='laplacian')([self.vertices_tposed, vertices_naked])
        self.symmetry = NameLayer('symmetry')(self.vertices_tposed)

        l = SmplBody25FaceLayer(theta_in_rodrigues=False,
                                theta_is_perfect_rotmtx=False)
        kps = [
            NameLayer('kps_{}'.format(i))(l([p, self.betas, t]))
            for i, (p, t) in enumerate(zip(self.poses, self.ts))
        ]

        self.Js = [
            Lambda(lambda jj: jj[:, :25], name='J_reproj_{}'.format(i))(j)
            for i, j in enumerate(kps)
        ]
        self.face_kps = [
            Lambda(lambda jj: jj[:, 25:], name='face_reproj_{}'.format(i))(j)
            for i, j in enumerate(kps)
        ]

        self.repr_loss = reprojection([self.img_size, self.img_size],
                                      [self.img_size / 2., self.img_size / 2.],
                                      self.img_size, self.img_size)

        renderer = RenderLayer(self.img_size,
                               self.img_size,
                               1,
                               np.ones((6890, 1)),
                               np.zeros(1),
                               self.faces, [self.img_size, self.img_size],
                               [self.img_size / 2., self.img_size / 2.],
                               name='render_layer')
        self.rendered = [
            NameLayer('rendered_{}'.format(i))(renderer(v))
            for i, v in enumerate(self.vertices)
        ]

        self.inference_model = Model(
            inputs=self.inputs,
            outputs=[self.vertices_tposed] + self.vertices +
            [self.betas, self.offsets] + self.poses + self.ts)

        self.opt_pose_model = Model(inputs=self.inputs, outputs=self.Js)

        opt_pose_loss = {
            'J_reproj_{}'.format(i): self.repr_loss
            for i in range(self.num)
        }
        self.opt_pose_model.compile(loss=opt_pose_loss, optimizer='adam')

        self.opt_shape_model = Model(inputs=self.inputs,
                                     outputs=self.Js + self.face_kps +
                                     self.rendered +
                                     [self.symmetry, self.laplacian])

        opt_shape_loss = {
            'laplacian': laplace_mse,
            'symmetry': symmetry_mse,
        }
        opt_shape_weights = {
            'laplacian': 100. * self.num,
            'symmetry': 50. * self.num,
        }

        for i in range(self.num):
            opt_shape_loss['rendered_{}'.format(i)] = 'mse'
            opt_shape_weights['rendered_{}'.format(i)] = 1.

            opt_shape_loss['J_reproj_{}'.format(i)] = self.repr_loss
            opt_shape_weights['J_reproj_{}'.format(i)] = 50.

            opt_shape_loss['face_reproj_{}'.format(i)] = self.repr_loss
            opt_shape_weights['face_reproj_{}'.format(i)] = 10. * self.num

        self.opt_shape_model.compile(loss=opt_shape_loss,
                                     loss_weights=opt_shape_weights,
                                     optimizer='adam')
Code Example #14
0
    def __init__(self,
                 dim,
                 encode_dim,
                 num_clones,
                 initial_growth=0.10,
                 shrink=0.75,
                 **kwargs):

        self.layer_dims = []
        self.encoded_layer = 0
        encode_data = Input(shape=(dim, ))
        decode_data = Input(shape=(encode_dim, ))

        ################################################
        ###build the encoder and decoder architecture###
        ################################################

        #initial expansion layer
        start_dim = int(dim * (1.0 + initial_growth))
        encoded = Dense(start_dim, **kwargs)(encode_data)
        self.layer_dims.append(start_dim)
        self.encoded_layer += 1

        #compression layers
        current_dim = int(start_dim * shrink)
        while current_dim > encode_dim:
            encoded = Dense(current_dim, **kwargs)(encoded)
            self.layer_dims.append(current_dim)
            self.encoded_layer += 1
            current_dim = int(current_dim * shrink)

        #final encoding layer
        #encoded = Dense(encode_dim, **kwargs)(encoded)
        encoded = Dense(encode_dim, activation='linear')(encoded)
        self.layer_dims.append(encode_dim)
        self.encoded_layer += 1

        #first expansion layer
        reversed_dims = self.layer_dims[::-1][1:]
        decoded = Dense(reversed_dims[0], **kwargs)(decode_data)
        self.layer_dims.append(reversed_dims[0])
        self.encoded_layer += 1

        #remaining expansion layers
        for current_dim in reversed_dims[1:]:
            decoded = Dense(current_dim, **kwargs)(decoded)
            self.layer_dims.append(current_dim)

        #final linear layer
        decoded = Dense(dim, activation='linear')(decoded)
        self.layer_dims.append(dim)

        #create encoder and decoder models
        self.encoder = Model(encode_data, encoded)
        self.decoder = Model(decode_data, decoded)

        #create the joint model
        _encoded_ = self.encoder(encode_data)
        _decoded_ = self.decoder(_encoded_)
        self.autoencoder = Model(encode_data, _decoded_)

        #########################################
        ###build the coupled autoencoder model###
        #########################################

        #################
        ###the encoder###
        #################

        #read in joint data and split into the columns
        joint_encode_data = Input(shape=(dim * num_clones, ))
        split_encode_data = []
        for i in range(num_clones):
            split_encode_data.append(
                # bind i via a default argument; a plain closure would make
                # every Lambda slice with the final value of i
                Lambda(lambda x, i=i: x[:, i * dim:(i + 1) * dim],
                       output_shape=(dim, ))(joint_encode_data))

        #output from tied encoders (avoid shadowing the builtin `input`)
        split_encoded = [self.encoder(inp) for inp in split_encode_data]

        #average the output from each encoder
        if num_clones == 1:
            average_encoded = split_encoded[0]
        else:
            average_encoded = Average()(split_encoded)

        #create the full encoder
        self.joint_encoder = Model(joint_encode_data, average_encoded)

        #################
        ###the decoder###
        #################

        average_encoded = Input(shape=(encode_dim, ))

        #decode num_clones copies of the code and concatenate
        split_decoded = [
            self.decoder(average_encoded) for i in range(num_clones)
        ]
        if num_clones == 1:
            joint_decode_data = split_decoded[0]
        else:
            joint_decode_data = concatenate(split_decoded, axis=-1)

        #final model
        self.joint_decoder = Model(average_encoded, joint_decode_data)

        #####################
        ###the autoencoder###
        #####################

        encoded = self.joint_encoder(joint_encode_data)
        joint_decode_data = self.joint_decoder(encoded)
        self.joint_autoencoder = Model(joint_encode_data, joint_decode_data)

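A usage sketch, assuming the enclosing class (not shown in this snippet) is called CoupledAutoencoder; data shapes and hyperparameters are illustrative:

cae = CoupledAutoencoder(dim=128, encode_dim=16, num_clones=4,
                         activation='relu')
cae.joint_autoencoder.compile(optimizer='adam', loss='mse')
# X has shape (n_samples, 128 * 4): the clone columns are concatenated
cae.joint_autoencoder.fit(X, X, epochs=10, batch_size=64)
codes = cae.joint_encoder.predict(X)  # (n_samples, 16) averaged codes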
Code Example #15
0
def make_preds(X_test, y_test):
    img_rows, img_cols = 28, 28
    f = 0

    input_a = Input(shape=(img_rows, img_cols, 1))
    input_b = Input(shape=(img_rows, img_cols, 1))
    input_c = Input(shape=(img_rows, img_cols, 1))

    # because we re-use the same instance `cnet`,
    # the weights of the network
    # will be shared across the three branches
    #vec_a = create_base_network2(input_a)
    #vec_b = create_base_network2(input_b)
    #vec_c = create_base_network2(input_c)

    cnet = convnet((img_rows, img_cols, 1))
    vec_a = cnet(input_a)
    vec_b = cnet(input_b)
    vec_c = cnet(input_c)
    #vec_a = Activation('selu')(vec_a)
    #vec_b = Activation('selu')(vec_b)
    #vec_c = Activation('selu')(vec_c)

    #vec_a2 = GaussianNoise(50.0)(vec_a)
    #vec_b2 = GaussianNoise(50.0)(vec_b)
    #vec_c2 = GaussianNoise(50.0)(vec_c)
    #vec_a2 = Lambda(lambda v: K.tanh(v[0] - v[1]))([vec_b, vec_c])
    #vec_b2 = Lambda(lambda v: K.tanh(v[0] - v[1]))([vec_a, vec_c])
    #vec_c2 = Lambda(lambda v: K.tanh(v[0] - v[1]))([vec_a, vec_b])
    #vec_a2 = GaussianNoise(0.01)(vec_a2)
    #vec_b2 = GaussianNoise(0.01)(vec_b2)
    #vec_c2 = GaussianNoise(0.01)(vec_c2)
    #vec_a2 = Lambda(lambda v: K.clip(v, -1.0, 1.0))(vec_a2)
    #vec_b2 = Lambda(lambda v: K.clip(v, -1.0, 1.0))(vec_b2)
    #vec_c2 = Lambda(lambda v: K.clip(v, -1.0, 1.0))(vec_c2)

    #vec_a3 = Dot(axes=-1, normalize=False)([vec_b, vec_c])
    #vec_b3 = Dot(axes=-1, normalize=False)([vec_c, vec_a])
    #vec_c3 = Dot(axes=-1, normalize=False)([vec_a, vec_b])

    #x = Lambda(lambda v : K.stack([K.abs(v[1]-v[2]), K.abs(v[2]-v[0]), K.abs(v[0]-v[1])], axis=-1))([vec_a, vec_b, vec_c])
    #x = Lambda(lambda v : K.stack([K.mean(K.square(v[1]-v[2]), axis=-1), K.mean(K.square(v[2]-v[0]), axis=-1), K.mean(K.square(v[0]-v[1]), axis=-1)], axis=-1))([vec_a, vec_b, vec_c])
    #x = Lambda(lambda v : K.stack([K.log(K.epsilon()+K.mean(K.square(v[1]-v[2]), axis=-1)), K.log(K.mean(K.epsilon()+K.square(v[2]-v[0]), axis=-1)), K.log(K.mean(K.epsilon()+K.square(v[0]-v[1]), axis=-1))], axis=-1))([vec_a, vec_b, vec_c])

    #x = Lambda(lambda v : K.stack([K.mean(K.square(v[1]-v[2]), axis=-1), K.mean(K.square(v[2]-v[0]), axis=-1), K.mean(K.square(v[0]-v[1]), axis=-1)], axis=-1))([vec_a, vec_b, vec_c])
    #x = Lambda(lambda v : K.stack([v[1]*v[2], v[2]*v[0], v[0]*v[1]], axis=-1))([vec_a, vec_b, vec_c])
    #x = Lambda(lambda v : K.stack(v, axis=-1))([vec_a3, vec_b3, vec_c3])
    #x = Activation('selu')(x)
    #x = TimeDistributed(Dense(3, activation='selu'))(x)
    #x = TimeDistributed(Dense(3, activation='sigmoid'))(x)
    #x = Flatten()(x)
    #x = Dense(3*nb_filters, activation='selu')(x)
    #x = Dense(nb_filters, activation='selu')(x)

    #x = Convolution1D(2*nb_filters, 3, strides=1, activation='selu', padding='same')(x)
    #x = Convolution1D(2*nb_filters, 3, strides=2, activation='selu', padding='same')(x)
    #x = Convolution1D(3, 1, strides=1, activation='sigmoid', padding='same')(x)
    #x = GlobalAveragePooling1D()(x)

    #probs = Activation('softmax')(x)
    #x = Convolution1D(int(nb_filters/2), 2, strides=2, activation='selu', padding='same')(x)

    #x = Reshape((-1,1))(x)
    #x = TimeDistributed(Dense(nb_filters, activation='selu'))(x)
    #x = TimeDistributed(Dense(nb_filters, activation='selu'))(x)
    #x = TimeDistributed(Dense(1, activation='sigmoid'))(x)
    #x = Flatten()(x)
    #probs = Lambda(lambda x : x/(K.repeat_elements(K.expand_dims(K.sum(x, axis=-1)), 3, axis=-1)+K.epsilon()))(x)

    #x = Reshape((-1,1))(x)
    #x = TimeDistributed(Dense(1, activation='sigmoid'))(x)
    #x = Flatten()(x)
    #x = Dense(int(nb_filters/2), activation='selu')(x)
    #x = Dense(int(nb_filters/2), activation='selu')(x)
    #probs = Dense(3, activation='softmax')(x)

    #input_u = Input(shape=(3,))
    #y = Dense(int(nb_filters/2), activation='selu')(input_u)
    #y = Dense(int(nb_filters/2), activation='selu')(y)
    #p = Dense(3, activation='softmax')(y)

    #unit = Model(inputs=input_u, outputs=p)
    #probs = unit(x)

    #x = Reshape((1,-1))(x)
    #x = Convolution1D(2*nb_filters, 1, strides=1, activation='selu', padding='same')(x)
    #x = Convolution1D(2*nb_filters, 1, strides=1, activation='selu', padding='same')(x)
    #x = Convolution1D(3, 1, strides=1, activation='softmax', padding='same')(x)
    #probs = Flatten()(x)

    #probs = Lambda(lambda x : x/(K.repeat_elements(K.expand_dims(K.sum(x, axis=-1)), 3, axis=-1)+K.epsilon()))(x)
    #probs = Lambda(lambda x : 1.0-x/(K.repeat_elements(K.expand_dims(K.sum(x, axis=-1)), 3, axis=-1)+K.epsilon()))(x)

    input_u = Input(shape=(3, ))
    y = Dense(6, activation='selu')(input_u)
    y = Dense(6, activation='selu')(y)
    p = Dense(3, activation='softmax')(y)

    ntop = int(nb_filters / 2)
    unit = Model(inputs=input_u, outputs=p)
    x0 = Lambda(lambda v: K.stack([
        K.mean(K.square(v[1] - v[2]), axis=-1),
        K.mean(K.square(v[2] - v[0]), axis=-1),
        K.mean(K.square(v[0] - v[1]), axis=-1)
    ],
                                  axis=-1))([vec_a, vec_b, vec_c])
    x1 = Lambda(lambda v: K.stack([
        K.mean(K.square(v[2] - v[0]), axis=-1),
        K.mean(K.square(v[0] - v[1]), axis=-1),
        K.mean(K.square(v[1] - v[2]), axis=-1)
    ],
                                  axis=-1))([vec_a, vec_b, vec_c])
    x2 = Lambda(lambda v: K.stack([
        K.mean(K.square(v[0] - v[1]), axis=-1),
        K.mean(K.square(v[1] - v[2]), axis=-1),
        K.mean(K.square(v[2] - v[0]), axis=-1)
    ],
                                  axis=-1))([vec_a, vec_b, vec_c])
    #x0 = Lambda(lambda v : K.stack([K.mean(tf.nn.top_k(K.square(v[1]-v[2]), k=ntop)[0], axis=-1), K.mean(tf.nn.top_k(K.square(v[2]-v[0]), k=ntop)[0], axis=-1), K.mean(tf.nn.top_k(K.square(v[0]-v[1]), k=ntop)[0], axis=-1)], axis=-1))([vec_a, vec_b, vec_c])
    #x1 = Lambda(lambda v : K.stack([K.mean(tf.nn.top_k(K.square(v[2]-v[0]), k=ntop)[0], axis=-1), K.mean(tf.nn.top_k(K.square(v[0]-v[1]), k=ntop)[0], axis=-1), K.mean(tf.nn.top_k(K.square(v[1]-v[2]), k=ntop)[0], axis=-1)], axis=-1))([vec_a, vec_b, vec_c])
    #x2 = Lambda(lambda v : K.stack([K.mean(tf.nn.top_k(K.square(v[0]-v[1]), k=ntop)[0], axis=-1), K.mean(tf.nn.top_k(K.square(v[1]-v[2]), k=ntop)[0], axis=-1), K.mean(tf.nn.top_k(K.square(v[2]-v[0]), k=ntop)[0], axis=-1)], axis=-1))([vec_a, vec_b, vec_c])

    x0 = unit(x0)
    x1 = unit(x1)
    x2 = unit(x2)

    x1 = Lambda(lambda v: K.stack([v[:, 2], v[:, 0], v[:, 1]], axis=-1))(x1)
    x2 = Lambda(lambda v: K.stack([v[:, 1], v[:, 2], v[:, 0]], axis=-1))(x2)

    probs = Average()([x0, x1, x2])
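    # the three stacks above feed the same shared `unit` with cyclic
    # permutations of the pairwise squared distances; x1 and x2 are then
    # rotated back so all three predictions align with the (a, b, c)
    # ordering before averaging, making `probs` invariant to the cyclic
    # order of the triplet inputs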

    #x = Reshape((-1,1))(x)
    #x = TimeDistributed(Dense(nb_filters, activation='selu'))(x)
    #x = TimeDistributed(Dense(nb_filters, activation='selu'))(x)
    #x = TimeDistributed(Dense(1, activation='linear'))(x)
    #x = Flatten()(x)
    #probs = Lambda(lambda y : K.exp(-y)/(K.repeat_elements(K.expand_dims(K.epsilon() + K.sum(K.exp(-y), axis=-1)), 3, axis=-1)))(x)

    model = Model(inputs=[input_a, input_b, input_c], outputs=probs)
    optimizer = Adam(lr=0.0003)  #RMSprop()
    model.compile(loss=pick_best_loss,
                  optimizer=optimizer,
                  metrics=['accuracy'])

    model.load_weights('autoenc' + str(f) + '.h5')

    #model = [model.layers[0], model.layers[-2]]

    batch_size = 256
    test_gen = create_triplets(X_test, None, 1.0, batch_size, mode='val')

    print('log loss, accuracy')
    print(model.evaluate_generator(test_gen, steps=20))

    # avoid shadowing the builtin input()
    inp = Input(shape=(img_rows, img_cols, 1))
    output = model.layers[3](inp)
    model2 = Model(inputs=inp, outputs=output)

    vecs = model2.predict(1.0 * X_test)

    print(np.std(vecs, axis=0))

    pca = PCA(n_components=3, svd_solver='arpack', copy=True, whiten=True)
    pca.fit(vecs[:, :])

    pca_vecs = pca.transform(vecs[:, :])

    kmeans_dims = 11

    clf = KMeans(n_clusters=kmeans_dims, random_state=0,
                 max_iter=1000).fit(vecs)
    preds = clf.predict(vecs)
    #preds = DBSCAN(eps=5.0).fit_predict(vecs)
    #preds = MeanShift().fit_predict(pca_vecs)
    #preds = KMeans(n_clusters=kmeans_dims, random_state=0).fit_predict(vecs)
    #gmm = BayesianGaussianMixture(n_components=4).fit(pca_vecs)
    #preds = gmm.predict(pca_vecs)
    #preds = AgglomerativeClustering(n_clusters=kmeans_dims, linkage='average', affinity='euclidean').fit_predict(vecs)

    print(len(set(preds)))

    # dump the first 40 cluster assignments for each digit class
    for digit in range(10):
        print(digit)
        idx = np.where(y_test == digit)
        print(preds[idx][0:40])

    plt.close('all')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # scatter a few digit classes in the 3-component PCA space
    for digit, color in [(1, 'k'), (7, 'r'), (4, 'b'), (9, 'g'), (0, 'm')]:
        ax.scatter(xs=pca_vecs[y_test == digit][:, 0],
                   ys=pca_vecs[y_test == digit][:, 1],
                   zs=pca_vecs[y_test == digit][:, 2],
                   zdir='z',
                   s=5,
                   c=color,
                   depthshade=True,
                   label=str(digit))
    plt.legend()
Code Example #16
0
count = 0
for layer in modelVGG02_all.layers:
    layer.name='VGG02_all_'+ str(count)
    count = count + 1
modelVGG02_all.get_layer(name='batch_normalization_1_input').name='VGG02_all_norm'

count = 0
for layer in modelVGG_CNN.layers:
    layer.name='VGG_CNN_'+ str(count)
    count = count + 1
modelVGG_CNN.get_layer(name='batch_normalization_1_input').name='VGG_CNN_norm'

# join the models with an average layer
#inputs = Input(shape=(timesteps, data_dim))
X_1 = modelVGG02_all.output
X_2 = modelVGG.output
X_3 = modelVGG02.output
X_4 = modelVGG_CNN.output
out = Average()([X_1, X_2, X_3, X_4])

ensemble_VGG = Model(inputs=[modelVGG02_all.input, modelVGG.input, modelVGG02.input, modelVGG_CNN.input], outputs=out)

ensemble_VGG.compile(loss='binary_crossentropy',
                     optimizer='Adam',
                     metrics=['binary_accuracy', f1])

# evaluate on the mock test set (evaluate returns loss plus the two metrics)
loss, acc, fscore = ensemble_VGG.evaluate([X_test, X_test, X_test, X_test], Y_test)
print(loss, acc, fscore)

# save the ensemble model
# save the model to the cloud storage bucket
filename = 'ensemble_Audio'
path = 'gs://mediaeval_data_storage/ensembles'
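
The snippet stops before the actual save; a hedged sketch of finishing it with the google-cloud-storage client (the bucket and prefix are read off `path` above, the rest is an assumption):

from google.cloud import storage

local_file = filename + '.h5'
ensemble_VGG.save(local_file)

client = storage.Client()
bucket = client.bucket('mediaeval_data_storage')
bucket.blob('ensembles/' + local_file).upload_from_filename(local_file)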
Code Example #17
0
File: test_surgeon.py Project: audatic/keras-surgeon
def test_delete_channels_merge_others(channel_index, data_format):
    layer_test_helper_merge_2d(Add(), channel_index, data_format)
    layer_test_helper_merge_2d(Multiply(), channel_index, data_format)
    layer_test_helper_merge_2d(Average(), channel_index, data_format)
    layer_test_helper_merge_2d(Maximum(), channel_index, data_format)
Code Example #18
0
ef = Embedding(vocab_size_fr,
               300,
               weights=[trans_embedding_matrix_fr],
               input_length=max_words,
               trainable=False)(trans_input)

e = Embedding(vocab_size,
              300,
              weights=[embedding_matrix],
              input_length=max_words,
              trainable=False)(a)
f = Embedding(vocab_size_fr,
              300,
              weights=[embedding_matrix_fr],
              input_length=max_words,
              trainable=False)(b)

merge = Average()([e, f, ef])

x = merge

x = Conv1D(filters=64, kernel_size=3, padding='valid', activation='relu')(x)
x = MaxPooling1D(pool_size=5)(x)
x = LSTM(80, dropout=0.5)(x)

x = concatenate([x, meta_input])
x = Dense(32, activation="relu")(x)
x = Dropout(0.50)(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.50)(x)
x = Dense(4, activation="relu")(x)
x = Dropout(0.50)(x)
x = Dense(2, activation="relu")(x)
Code Example #19
0
def get_extract_model():
    """
    构建事件抽取模型结构,加载模型参数,返回模型对象
    1、使用bert输出预测动词下标
    2、使用bert输出融合动词下标预测事件时间、地点、主语、宾语、否定词
    :return: 各个部分的模型对象
    """
    with extract_sess.as_default():
        with extract_sess.graph.as_default():
            # build the BERT backbone
            bert_model = build_transformer_model(
                config_path=bert_config.config_path,
                return_keras_model=False,
                model=bert_config.model_type
            )

            # assemble the model
            # trigger span inputs
            trigger_start_in = Input(shape=(None,))
            trigger_end_in = Input(shape=(None,))
            # trigger index inputs
            trigger_index_start_in = Input(shape=(1,))
            trigger_index_end_in = Input(shape=(1,))
            # object inputs
            object_start_in = Input(shape=(None,))
            object_end_in = Input(shape=(None,))
            # subject inputs
            subject_start_in = Input(shape=(None,))
            subject_end_in = Input(shape=(None,))
            # location inputs
            loc_start_in = Input(shape=(None,))
            loc_end_in = Input(shape=(None,))
            # time inputs
            time_start_in = Input(shape=(None,))
            time_end_in = Input(shape=(None,))
            # negation-word inputs
            negative_start_in = Input(shape=(None,))
            negative_end_in = Input(shape=(None,))
            # copy the externally supplied indices into internal variables
            # (only to separate in-model usage from the Model inputs)
            trigger_index_start, trigger_index_end = trigger_index_start_in, trigger_index_end_in

            trigger_start_out = Dense(1, activation='sigmoid')(bert_model.model.output)
            trigger_end_out = Dense(1, activation='sigmoid')(bert_model.model.output)
            # model that predicts the trigger verb
            trigger_model = Model(bert_model.model.inputs, [trigger_start_out, trigger_end_out])

            # gather token vectors at the trigger start/end indices
            k1v = Lambda(seq_gather)([bert_model.model.output, trigger_index_start])
            k2v = Lambda(seq_gather)([bert_model.model.output, trigger_index_end])
            kv = Average()([k1v, k2v])
            # fuse the trigger vectors with the sentence tensor via conditional layer normalization
            t = LayerNormalization(conditional=True)([bert_model.model.output, kv])

            # object outputs
            object_start_out = Dense(1, activation='sigmoid')(t)
            object_end_out = Dense(1, activation='sigmoid')(t)
            # subject outputs
            subject_start_out = Dense(1, activation='sigmoid')(t)
            subject_end_out = Dense(1, activation='sigmoid')(t)
            # location outputs
            loc_start_out = Dense(1, activation='sigmoid')(t)
            loc_end_out = Dense(1, activation='sigmoid')(t)
            # time outputs
            time_start_out = Dense(1, activation='sigmoid')(t)
            time_end_out = Dense(1, activation='sigmoid')(t)
            # negation-word outputs
            negative_start_out = Dense(1, activation='sigmoid')(t)
            negative_end_out = Dense(1, activation='sigmoid')(t)

            # given text and trigger, predict the object
            object_model = Model(bert_model.model.inputs + [trigger_index_start_in, trigger_index_end_in],
                                 [object_start_out, object_end_out])
            # given text and trigger, predict the subject
            subject_model = Model(bert_model.model.inputs + [trigger_index_start_in, trigger_index_end_in],
                                  [subject_start_out, subject_end_out])
            # given text and trigger, predict the location
            loc_model = Model(bert_model.model.inputs + [trigger_index_start_in, trigger_index_end_in],
                              [loc_start_out, loc_end_out])
            # given text and trigger, predict the time
            time_model = Model(bert_model.model.inputs + [trigger_index_start_in, trigger_index_end_in],
                               [time_start_out, time_end_out])
            # given text and trigger, predict the negation word
            negative_model = Model(bert_model.model.inputs + [trigger_index_start_in, trigger_index_end_in],
                                   [negative_start_out, negative_end_out])

            # main training model
            train_model = Model(
                bert_model.model.inputs + [trigger_start_in, trigger_end_in, trigger_index_start_in, trigger_index_end_in,
                                           object_start_in, object_end_in, subject_start_in, subject_end_in, loc_start_in,
                                           loc_end_in, time_start_in, time_end_in, negative_start_in, negative_end_in],
                [trigger_start_out, trigger_end_out, object_start_out, object_end_out, subject_start_out, subject_end_out,
                 loc_start_out, loc_end_out, time_start_out, time_end_out, negative_start_out, negative_end_out])
            # load the event-extraction model weights
            logger.info("Loading event-extraction model weights ...")
            train_model.load_weights(pre_config.event_extract_model_path)
            logger.info("Finished loading event-extraction model weights!")

    return trigger_model, object_model, subject_model, loc_model, time_model, negative_model
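
A hedged sketch of querying the returned trigger model; `tokenizer` is assumed to be a bert4keras Tokenizer built from the same vocabulary (it is not shown in this snippet):

import numpy as np

token_ids, segment_ids = tokenizer.encode(text)
start_probs, end_probs = trigger_model.predict(
    [np.array([token_ids]), np.array([segment_ids])])
# each output has shape (1, seq_len, 1): per-token start/end probabilities
trigger_span = (int(start_probs[0, :, 0].argmax()),
                int(end_probs[0, :, 0].argmax()))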
Code Example #20
0
	def create_layer(self):
		if self.type == "Dense":
			self.block = Dense(units=int(self.gff("units")), activation=self.gff("activation"))
		elif self.type == "Input":
			self.block = Input(shape=self.get_int(self.gff("shape")))
		elif self.type == "Dropout":
			self.block = Dropout(rate=float(self.gff("rate")))
		elif self.type == "Flatten":
			self.block = Flatten()
		elif self.type == "Reshape":
			self.block = Reshape(target_shape=int(self.gff("rate")))
		elif self.type == "Permute":
			self.block = Permute(dims=self.get_int(self.gff("dims")))
		elif self.type == "RepeatVector":
			self.block = RepeatVector(n=int(self.gff("dims")))
		#CONVOLUTIONAL
		elif self.type == "Conv1D":
			self.block = Conv1D(filters=int(self.gff("filters")), kernel_size=self.get_int(self.gff("kernel_size")),
								activation=self.get_act(self.gff("activation")), strides=self.get_int(self.gff("strides")),
								padding=self.gff("padding"))
		elif self.type == "Conv2D":
			self.block = Conv2D(filters=int(self.gff("filters")), kernel_size=self.get_int(self.gff("kernel_size")),
								activation=self.get_act(self.gff("activation")), strides=self.get_int(self.gff("strides")),
								padding=self.gff("padding"))
		elif self.type == "Conv2DTranspose":
			self.block = Conv2DTranspose(filters=int(self.gff("filters")), kernel_size=self.get_int(self.gff("kernel_size")),
								activation=self.get_act(self.gff("activation")), strides=self.get_int(self.gff("strides")),
								padding=self.gff("padding"))
		elif self.type == "Conv3D":
			self.block = Conv3D(filters=int(self.gff("filters")), kernel_size=self.get_int(self.gff("kernel_size")),
								activation=self.get_act(self.gff("activation")), strides=self.get_int(self.gff("strides")),
								padding=self.gff("padding"))
		elif self.type == "UpSampling1D":
			self.block = UpSampling1D(size=self.get_int(self.gff("size")))
		elif self.type == "UpSampling2D":
			self.block = UpSampling2D(size=self.get_int(self.gff("size")))
		elif self.type == "UpSampling3D":
			self.block = UpSampling3D(size=self.get_int(self.gff("size")))
		#POOLING
		elif self.type == "MaxPooling1D":
			self.block = MaxPooling1D(pool_size=self.get_int(self.gff("pool_size")),
									  strides=self.get_int(self.gff("strides")), padding=self.gff("padding"))
		elif self.type == "MaxPooling2D":
			self.block = MaxPooling2D(pool_size=self.get_int(self.gff("pool_size")),
									  strides=self.get_int(self.gff("strides")), padding=self.gff("padding"))
		elif self.type == "MaxPooling3D":
			self.block = MaxPooling3D(pool_size=self.get_int(self.gff("pool_size")),
									  strides=self.get_int(self.gff("strides")), padding=self.gff("padding"))
		elif self.type == "AveragePooling1D":
			self.block = AveragePooling1D(pool_size=self.get_int(self.gff("pool_size")),
									  strides=self.get_int(self.gff("strides")), padding=self.gff("padding"))
		elif self.type == "AveragePooling2D":
			self.block = AveragePooling2D(pool_size=self.get_int(self.gff("pool_size")),
									  strides=self.get_int(self.gff("strides")), padding=self.gff("padding"))
		elif self.type == "AveragePooling3D":
			self.block = AveragePooling3D(pool_size=self.get_int(self.gff("pool_size")),
									  strides=self.get_int(self.gff("strides")), padding=self.gff("padding"))
		elif self.type == "GlobalMaxPooling1D":
			self.block = GlobalMaxPooling1D()
		elif self.type == "GlobalMaxPooling2D":
			self.block = GlobalMaxPooling2D()
		elif self.type == "GlobalAveragePooling1D":
			self.block = GlobalAveragePooling1D()
		elif self.type == "GlobalAveragePooling2D":
			self.block = GlobalAveragePooling2D()
		#Locally Connected
		elif self.type == "LocallyConnected1D":
			self.block = LocallyConnected1D(filters=int(self.gff("filters")), kernel_size=self.get_int(self.gff("kernel_size")),
								activation=self.get_act(self.gff("activation")), strides=self.get_int(self.gff("strides")),
								padding=self.gff("padding"))
		elif self.type == "LocallyConnected2D":
			self.block = LocallyConnected2D(filters=int(self.gff("filters")), kernel_size=self.get_int(self.gff("kernel_size")),
								activation=self.get_act(self.gff("activation")), strides=self.get_int(self.gff("strides")),
								padding=self.gff("padding"))
		#MERGE LAYERS
		elif self.type == "Add":
			self.block = Add()
		elif self.type == "Subtract":
			self.block = Subtract()
		elif self.type == "Multiply":
			self.block = Multiply()
		elif self.type == "Average":
			self.block = Average()
		elif self.type == "Maximum":
			self.block = Maximum()
		elif self.type == "Concatenate":
			self.block = Concatenate()
		elif self.type == "Dot":
			self.block = Dot()

		#NORMALISATION LAYER
		elif self.type == "BatchNormalization":
			self.block = BatchNormalization(axis=int(self.gff("axis")), center=bool(self.gff("center")),
											momentum=float(self.gff("momentum")), epsilon=float(self.gff("epsilon")),
											scale=bool(self.gff("scale")))
		#NOISE LAYERS
		elif self.type == "GaussianNoise":
			self.block = GaussianNoise(stddev=self.get_float(self.gff("stddev")))
		elif self.type == "GaussianDropout":
			self.block = GaussianDropout(rate=self.get_float(self.gff("rate")))
		elif self.type == "AlphaDropout":
			# NOTE: the seed is read from the "rate" field, which looks like a copy-paste slip
			self.block = AlphaDropout(rate=self.get_float(self.gff("rate")), seed=int(self.gff("rate")))
Code Example #21
0
# model
X_input = Input(shape=(X.shape[1], X.shape[2]))
graph_conv_filters_input = Input(shape=(graph_conv_filters.shape[1],
                                        graph_conv_filters.shape[2]))

layer_gcnn1 = MultiGraphCNN(
    8, num_filters, activation='elu')([X_input, graph_conv_filters_input])
# layer_gcnn1 = Dropout(0.2)(layer_gcnn1)
layer_gcnn2 = MultiGraphCNN(
    8, num_filters, activation='elu')([layer_gcnn1, graph_conv_filters_input])
# layer_gcnn2 = Dropout(0.2)(layer_gcnn2)
layer_gcnn3 = MultiGraphCNN(
    8, num_filters, activation='elu')([layer_gcnn2, graph_conv_filters_input])
# layer_gcnn3 = Dropout(0.2)(layer_gcnn3)
layer_gcnn4 = Average()([layer_gcnn1, layer_gcnn2, layer_gcnn3])
# add new Graph layer with cnn
layer_gcnn4 = MultiGraphCNN(
    1, num_filters, activation='elu')([layer_gcnn4, graph_conv_filters_input])
# layer_gcnn3 = Dropout(0.2)(layer_gcnn3)
# layer_gcnn5 = Reshape((layer_gcnn4.shape[1]*layer_gcnn4.shape[2],))(layer_gcnn4)
layer_gcnn5 = Flatten()(layer_gcnn4)
# layer_gcnn5 = Dropout(0.2)(layer_gcnn5)
# # layer_conv5 = AveragePooling2D(pool_size=(2, 1), strides=None, padding='valid', data_format=None)(layer_conv5)
layer_dense1 = Dense(V, activation='linear')(layer_gcnn5)

model = Model(inputs=[X_input, graph_conv_filters_input], outputs=layer_dense1)
model.summary()

# model = cnn_tfmodel.egrmodel2(A, X, graph_conv_filters,2)
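
The snippet builds and summarizes the model but never compiles or fits it; a minimal training sketch, assuming a regression target `y` of shape `(n_samples, V)`:

model.compile(optimizer='adam', loss='mse')
model.fit([X, graph_conv_filters], y, batch_size=32, epochs=50, verbose=1)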
Code Example #22
0
File: MainTransfer.py Project: joshuakosasih/TA
     gru_kata = Bidirectional(GRU(EMBEDDING_DIM * 3,
                                  return_sequences=True,
                                  dropout=dropout_gru,
                                  recurrent_dropout=rec_dropout),
                              merge_mode=merge_m,
                              weights=None)(merge)
 else:
     # input() returns a string in Python 3; cast so the comparisons below work
     combine = int(input(
         'Enter 1 for Add, 2 for Subtract, 3 for Multiply, 4 for Average, '
         '5 for Maximum, 6 for Concatenate: '))
     if combine == 2:
         merge = Subtract()([dropout, rtwo])
     elif combine == 3:
         merge = Multiply()([dropout, rtwo])
     elif combine == 4:
         merge = Average()([dropout, rtwo])
     elif combine == 5:
         merge = Maximum()([dropout, rtwo])
     elif combine == 6:
         merge = Concatenate()([dropout, rtwo])
     else:
         merge = Add()([dropout, rtwo])
     if combine == 6:
         gru_kata = Bidirectional(GRU(EMBEDDING_DIM * 2,
                                      return_sequences=True,
                                      dropout=dropout_gru,
                                      recurrent_dropout=rec_dropout),
                                  merge_mode=merge_m,
                                  weights=None)(merge)
     else:
         gru_kata = Bidirectional(GRU(EMBEDDING_DIM,
Code Example #23
0
File: dagan.py Project: sjjdd/fxgan
def safe_average(list_inputs):
    if len(list_inputs) == 1:
        return list_inputs[0]
    return Average()(list_inputs)
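
`Average()` raises on a single-element list, which is why the helper special-cases it; a quick illustration (`Input` import assumed):

a = Input(shape=(8,))
b = Input(shape=(8,))
safe_average([a])     # returns the tensor `a` unchanged
safe_average([a, b])  # returns Average()([a, b])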
Code Example #24
0
def main():
    try:
        os.mkdir('./img')
    except OSError:
        pass

    # face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface_improved.xml')
    # detector = MTCNN()

    # load model and weights
    img_size = 64
    stage_num = [3, 3, 3]
    lambda_local = 1
    lambda_d = 1
    img_idx = 0
    detected = ''  # TODO: make this not a local variable
    time_detection = 0
    time_network = 0
    time_plot = 0
    skip_frame = 1  # run detection and the network forward pass once every skip_frame frames
    ad = 0.6

    #Parameters
    num_capsule = 3
    dim_capsule = 16
    routings = 2
    stage_num = [3, 3, 3]
    lambda_d = 1
    num_classes = 3
    image_size = 64
    num_primcaps = 7 * 3
    m_dim = 5
    S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

    model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d,
                             S_set)()
    model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d,
                                 S_set)()

    num_primcaps = 8 * 8 * 3
    # build the three model variants separately
    S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]

    model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d,
                                 S_set)()

    print('Loading models ...')

    # capsule model with 1x1-convolution scoring
    weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
    model1.load_weights(weight_file1)
    print('Finished loading model 1.')

    # variance-scoring model
    weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
    model2.load_weights(weight_file2)
    print('Finished loading model 2.')

    # model without the conv / variance scoring operations
    weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
    model3.load_weights(weight_file3)
    print('Finished loading model 3.')

    # merge the three models into one by averaging their outputs
    inputs = Input(shape=(64, 64, 3))
    x1 = model1(inputs)  #1x1
    x2 = model2(inputs)  #var
    x3 = model3(inputs)  #w/o
    avg_model = Average()([x1, x2, x3])
    model = Model(inputs=inputs, outputs=avg_model)

    # load the serialized face detector
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
    modelPath = os.path.sep.join(
        ["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"])
    net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    # capture video
    # cap = cv2.VideoCapture(0)

    cap = cv2.VideoCapture("LFPW_image_test_0005_0.jpg")
    print(cap)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024 * 1)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768 * 1)

    print('Start detecting pose ...')
    detected_pre = np.empty((1, 1, 1))

    while True:
        # get video frame
        ret, input_img = cap.read()

        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        if img_idx == 1 or img_idx % skip_frame == 0:
            time_detection = 0
            time_network = 0
            time_plot = 0

            # detect faces (the LBP / MTCNN detectors below are disabled)
            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            # detected = face_cascade.detectMultiScale(gray_img, 1.1)
            # detected = detector.detect_faces(input_img)
            # pass the blob through the network and collect the detections
            # predict
            blob = cv2.dnn.blobFromImage(cv2.resize(input_img,
                                                    (300, 300)), 1.0,
                                         (300, 300), (104.0, 177.0, 123.0))
            net.setInput(blob)
            detected = net.forward()

            if detected_pre.shape[2] > 0 and detected.shape[2] == 0:
                detected = detected_pre

            faces = np.empty((detected.shape[2], img_size, img_size, 3))

            input_img = draw_results_ssd(detected, input_img, faces, ad,
                                         img_size, img_w, img_h, model,
                                         time_detection, time_network,
                                         time_plot)
            cv2.imwrite('img/' + str(img_idx) + '.png', input_img)

        else:
            input_img = draw_results_ssd(detected, input_img, faces, ad,
                                         img_size, img_w, img_h, model,
                                         time_detection, time_network,
                                         time_plot)

        if detected.shape[2] > detected_pre.shape[2] or img_idx % (skip_frame *
                                                                   3) == 0:
            detected_pre = detected

        key = cv2.waitKey(1)
Code Example #25
0
File: rn.py Project: IMVDsports/inter-rel-net
def fuse_rn(output_size,
            new_arch,
            train_kwargs,
            models_kwargs,
            weights_filepaths,
            freeze_g_theta=False,
            fuse_at_fc1=False,
            avg_at_end=False):

    prunned_models = []
    for model_kwargs, weights_filepath in zip(models_kwargs,
                                              weights_filepaths):
        model = get_model(output_size=output_size, **model_kwargs)
        if weights_filepath != []:
            model.load_weights(weights_filepath)

        if not fuse_at_fc1 and not avg_at_end:
            # walk the layers in reverse, looking for the last pool/merge layer
            for layer in model.layers[::-1]:
                if layer.name.startswith(
                    ('average', 'concatenate', 'irn_attention')):
                    out_pool = layer.output
                    break
            prunned_model = Model(inputs=model.input, outputs=out_pool)
        elif fuse_at_fc1:  # Prune keeping dropout + f_phi_fc1
            # walk the layers in reverse, looking for the last f_phi_fc1 layer
            for layer in model.layers[::-1]:
                if layer.name.startswith('f_phi_fc1'):
                    out_f_phi_fc1 = layer.output
                    break
            prunned_model = Model(inputs=model.input, outputs=out_f_phi_fc1)
        elif avg_at_end:
            prunned_model = Model(inputs=model.input, outputs=model.output)

        if freeze_g_theta:
            for layer in prunned_model.layers:  # Freezing model
                layer.trainable = False
        prunned_models.append(prunned_model)

    ### Train params
    drop_rate = train_kwargs.get('drop_rate', 0.1)
    kernel_init_type = train_kwargs.get('kernel_init_type', 'TruncatedNormal')
    kernel_init_param = train_kwargs.get('kernel_init_param', 0.045)
    kernel_init_seed = train_kwargs.get('kernel_init_seed')

    kernel_init = get_kernel_init(kernel_init_type,
                                  param=kernel_init_param,
                                  seed=kernel_init_seed)

    if new_arch:
        # Building bottom
        joint_stream_objects = []
        temp_stream_objects = []

        for i in range(models_kwargs[0]['num_objs']):
            obj_joint = Input(shape=models_kwargs[0]['object_shape'],
                              name="joint_stream_object" + str(i))
            joint_stream_objects.append(obj_joint)

        for i in range(models_kwargs[1]['num_objs']):
            obj_temp = Input(shape=models_kwargs[1]['object_shape'],
                             name="temp_stream_object" + str(i))
            temp_stream_objects.append(obj_temp)

        inputs = joint_stream_objects + temp_stream_objects
        models_outs = [
            prunned_models[0](joint_stream_objects),
            prunned_models[1](temp_stream_objects)
        ]

    else:
        # Building bottom
        person1_joints = []
        person2_joints = []
        for i in range(model_kwargs['num_objs']):
            object_i = Input(shape=models_kwargs[0]['object_shape'],
                             name="person1_object" + str(i))
            object_j = Input(shape=models_kwargs[0]['object_shape'],
                             name="person2_object" + str(i))
            person1_joints.append(object_i)
            person2_joints.append(object_j)
        inputs = person1_joints + person2_joints

        models_outs = [m(inputs) for m in prunned_models]

    if not avg_at_end:
        x = Concatenate()(models_outs)

        # Building top and Model
        top_kwargs = get_relevant_kwargs(model_kwargs, create_top)
        x = create_top(x, kernel_init, **top_kwargs)
        out_rn = Dense(output_size,
                       activation='softmax',
                       kernel_initializer=kernel_init,
                       name='softmax')(x)
    else:
        # Model outputs already include individual soft max
        out_rn = Average()(models_outs)

    model = Model(inputs=inputs, outputs=out_rn, name="fused_rel_net")

    return model
Code Example #26
0
    reshapedPoolForSentence = Lambda(lambda x: K.reshape(x, shape=newShape),
                                     name='switch_axis_' + 'sentence' +
                                     str(i + 1) + 'winSize' +
                                     str(j + windowMin))(mergedPoolForSentence)
    densePoolForSentence = Dense(numDensePool,
                                 bias_regularizer=regularizers.l2(eta),
                                 kernel_regularizer=regularizers.l2(eta),
                                 activation='softmax',
                                 use_bias=True)(reshapedPoolForSentence)

    densePoolForSentence = Dropout(dr, name='DropDense' +
                                   str(i))(densePoolForSentence)
    maxPooledPerDoc.append(densePoolForSentence)

#Naive Approach
averaged = Average()(maxPooledPerDoc)
averaged = Lambda(lambda x: K.reshape(
    x, shape=(-1, int(averaged.shape[1]) * int(averaged.shape[2]))),
                  name='attend_output')(averaged)
out_avg = Dense(1, activation='sigmoid', use_bias=True)(averaged)

#Apply Attention
mergedPoolPerDoc = Concatenate(axis=1)(maxPooledPerDoc)
biRnn_ = Bidirectional(GRU(dimGRU, return_sequences=True),
                       merge_mode='concat')(mergedPoolPerDoc)
newShape = (-1, int(mergedPoolPerDoc.shape[1]), int(biRnn_.shape[2]))
biRnn = Lambda(lambda x: K.reshape(x, shape=newShape),
               name='biRnn_TF_Reminder1')(biRnn_)
#biRnn2 = Lambda(lambda x: K.reshape(x,shape=newShape), name ='biRnn_TF_Reminder2')(biRnn_[1])

Code Example #27
0
def background_est_cnn(image_frame):
    """
    implements:
        background estimation using temporal depth reduction

        inputs:
            image_fame: history image frame for background computation
        return: 
            background estimation model

    """
    tdr_layer_1_1 = Conv2D(32,
                           kernel_size=(1, 1),
                           strides=1,
                           padding='same',
                           name='tdr_layer_1_1',
                           data_format='channels_last')(image_frame)
    tdr_layer_1_1 = Activation('relu')(tdr_layer_1_1)
    tdr_layer_1_2 = Conv2D(32,
                           kernel_size=(3, 3),
                           strides=1,
                           padding='same',
                           name='tdr_layer_1_2',
                           data_format='channels_last')(image_frame)
    tdr_layer_1_2 = Activation('relu')(tdr_layer_1_2)
    tdr_layer_1_3 = Conv2D(32,
                           kernel_size=(5, 5),
                           strides=1,
                           padding='same',
                           name='tdr_layer_1_3',
                           data_format='channels_last')(image_frame)
    tdr_layer_1_3 = Activation('relu')(tdr_layer_1_3)

    tdr_layer_1 = Average()([tdr_layer_1_1, tdr_layer_1_2, tdr_layer_1_3])

    tdr_layer_2_1 = Conv2D(16,
                           kernel_size=(1, 1),
                           strides=1,
                           padding='same',
                           name='tdr_layer_2_1',
                           data_format='channels_last')(tdr_layer_1)
    tdr_layer_2_1 = Activation('relu')(tdr_layer_2_1)
    tdr_layer_2_2 = Conv2D(16,
                           kernel_size=(3, 3),
                           strides=1,
                           padding='same',
                           name='tdr_layer_2_2',
                           data_format='channels_last')(tdr_layer_1)
    tdr_layer_2_2 = Activation('relu')(tdr_layer_2_2)
    tdr_layer_2_3 = Conv2D(16,
                           kernel_size=(5, 5),
                           strides=1,
                           padding='same',
                           name='tdr_layer_2_3',
                           data_format='channels_last')(tdr_layer_1)
    tdr_layer_2_3 = Activation('relu')(tdr_layer_2_3)

    tdr_layer_2 = Average()([tdr_layer_2_1, tdr_layer_2_2, tdr_layer_2_3])

    tdr_layer_3_1 = Conv2D(8,
                           kernel_size=(1, 1),
                           strides=1,
                           padding='same',
                           name='tdr_layer_3_1',
                           data_format='channels_last')(tdr_layer_2)
    tdr_layer_3_1 = Activation('relu')(tdr_layer_3_1)
    tdr_layer_3_2 = Conv2D(8,
                           kernel_size=(3, 3),
                           strides=1,
                           padding='same',
                           name='tdr_layer_3_2',
                           data_format='channels_last')(tdr_layer_2)
    tdr_layer_3_2 = Activation('relu')(tdr_layer_3_2)
    tdr_layer_3_3 = Conv2D(8,
                           kernel_size=(5, 5),
                           strides=1,
                           padding='same',
                           name='tdr_layer_3_3',
                           data_format='channels_last')(tdr_layer_2)
    tdr_layer_3_3 = Activation('relu')(tdr_layer_3_3)

    tdr_layer_3 = Average()([tdr_layer_3_1, tdr_layer_3_2, tdr_layer_3_3])

    tdr_layer_4 = Conv2D(1,
                         kernel_size=(3, 3),
                         padding='same',
                         name="TDR_block",
                         data_format='channels_last')(tdr_layer_3)
    model = Activation('relu')(tdr_layer_4)

    return model
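
Despite the local name `model`, `background_est_cnn` returns a tensor, not a Model; a hedged sketch of wrapping it into a trainable estimator (the frame size and 10-frame history depth are assumptions):

history = Input(shape=(240, 320, 10))     # H x W x temporal depth (assumed)
background = background_est_cnn(history)  # (240, 320, 1) estimated background
bg_model = Model(inputs=history, outputs=background)
bg_model.compile(optimizer='adam', loss='mse')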
Code Example #28
0
def train(X_train, X_test):
    batch_size = 512
    same_frac = 0.125  # other values tried: 0.0625, 0.25
    epochs = 40
    train_steps_per_epoch = 50

    f = 0

    # the data arrive already split into train and test sets

    # input image dimensions
    img_rows, img_cols = 28, 28
    input_dim = (img_rows, img_cols, 1)

    # network definition

    input_a = Input(shape=input_dim)
    input_b = Input(shape=input_dim)
    input_c = Input(shape=input_dim)

    # because we re-use the same network instance `cnet`, its weights are
    # shared across all three branches

    cnet = convnet(input_dim)
    vec_a = cnet(input_a)
    vec_b = cnet(input_b)
    vec_c = cnet(input_c)

    # Several alternative ways of combining the three embeddings were
    # prototyped at this point: selu activations and Gaussian noise on the
    # vectors, clipped tanh differences, pairwise dot products, stacked
    # absolute / squared / log-squared differences, TimeDistributed dense
    # stacks, and 1-D convolutions over the stacked features feeding a
    # softmax head.

    # small shared scorer that maps the three pairwise distances to a 3-way
    # softmax
    input_u = Input(shape=(3,))
    y = Dense(6, activation='selu')(input_u)
    y = Dense(6, activation='selu')(y)
    p = Dense(3, activation='softmax')(y)

    unit = Model(inputs=input_u, outputs=p)
    x0 = Lambda(lambda v: K.stack(
        [K.mean(K.square(v[1] - v[2]), axis=-1),
         K.mean(K.square(v[2] - v[0]), axis=-1),
         K.mean(K.square(v[0] - v[1]), axis=-1)],
        axis=-1))([vec_a, vec_b, vec_c])
    x1 = Lambda(lambda v: K.stack(
        [K.mean(K.square(v[2] - v[0]), axis=-1),
         K.mean(K.square(v[0] - v[1]), axis=-1),
         K.mean(K.square(v[1] - v[2]), axis=-1)],
        axis=-1))([vec_a, vec_b, vec_c])
    x2 = Lambda(lambda v: K.stack(
        [K.mean(K.square(v[0] - v[1]), axis=-1),
         K.mean(K.square(v[1] - v[2]), axis=-1),
         K.mean(K.square(v[2] - v[0]), axis=-1)],
        axis=-1))([vec_a, vec_b, vec_c])
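    # x0, x1, x2 contain the same three pairwise mean squared distances in
    # the three cyclic orders, so the shared scorer `unit` sees every
    # rotation of (d_bc, d_ca, d_ab) and its outputs can later be averaged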

    # (a variant scoring only the mean of the top nb_filters/2 squared
    #  differences per pair, via tf.nn.top_k, was also tried here)

    x0 = unit(x0)
    x1 = unit(x1)
    x2 = unit(x2)

    # rotate the x1 / x2 predictions back into the x0 ordering so the three
    # estimates line up component-wise before averaging
    x1 = Lambda(lambda v: K.stack([v[:, 2], v[:, 0], v[:, 1]], axis=-1))(x1)
    x2 = Lambda(lambda v: K.stack([v[:, 1], v[:, 2], v[:, 0]], axis=-1))(x2)

    probs = Average()([x0, x1, x2])

    model = Model(inputs=[input_a, input_b, input_c], outputs=probs)
    # clip gradients both by norm and by value for stability
    optimizer = Adam(lr=0.0003, clipnorm=0.1, clipvalue=0.1)
    model.compile(loss=pick_best_loss, optimizer=optimizer)

    #overlaps = np.ones((X_train.shape[0], X_train.shape[0]))
    train_gen = create_triplets(X_train,
                                None,
                                same_frac,
                                batch_size,
                                mode='train')

    es_gen = create_triplets(X_test, None, same_frac, batch_size, mode='val')
    es_steps = 100
    callbacks = [
        # stop on stalled validation loss; checkpoint the best training loss
        EarlyStopping(monitor='val_loss', patience=4, verbose=0),
        ModelCheckpoint('autoenc' + str(f) + '.h5',
                        monitor='loss',
                        save_best_only=True,
                        verbose=0)
    ]
    model.fit_generator(train_gen,
                        steps_per_epoch=train_steps_per_epoch,
                        epochs=epochs,
                        validation_data=es_gen,
                        validation_steps=es_steps,
                        callbacks=callbacks)  #, max_q_size=10, workers=1

    model.load_weights('autoenc' + str(f) + '.h5')

    #train_gen.close()
    #es_gen.close()
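
The create_triplets generator itself is not part of this listing. A minimal sketch of the interface the fit_generator call above appears to expect might look like the following; every name and behavioural detail here is an assumption:

import numpy as np

def create_triplets_sketch(X, overlaps, same_frac, batch_size, mode='train'):
    # hypothetical stand-in for the unshown create_triplets: yields three
    # image batches plus a 3-way one-hot target; same_frac and overlaps are
    # ignored in this sketch
    n = X.shape[0]
    while True:
        ia = np.random.randint(0, n, batch_size)
        ib = np.random.randint(0, n, batch_size)
        ic = np.random.randint(0, n, batch_size)
        y = np.eye(3)[np.random.randint(0, 3, batch_size)]
        yield [X[ia], X[ib], X[ic]], y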
Code example #29
0
history.close()
equa += "{}\t{:.3f}".format(args.save1, acc)
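# reload the best checkpoint of the first run and suffix its layer names so
# they do not collide with the second model's layers in the merged graph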
model1 = keras.models.load_model(savename + "/check_" + str(epoch))
for i in range(len(model1.layers)):
    model1.layers[i].name += "_1"
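# repeat for the second run: read its history file for the best epoch and
# validation accuracy, then load the corresponding checkpoint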
savename = "save/" + str(args.save2)
history = open(savename + '/history')
epoch = eval(history.readline()) + 1
acc = eval(history.readline())['val_acc'][epoch - 1]
history.close()
equa += "{}\t{:.3f}".format(args.save2, acc)
model2 = keras.models.load_model(savename + "/check_" + str(epoch))
for i in range(len(model2.layers)):
    model2.layers[i].name += "_2"

# ensemble: average the two models' softmax outputs
out = Average()([model1.outputs[0], model2.outputs[0]])
imodel = Model(inputs=[model1.input, model2.input],
               outputs=out,
               name='ensemble')
#imodel=Model(inputs=model2.input,outputs=model2.outputs[0],name='ensemble')

# record the input kinds: "c" for a 4-D (image / convolutional) input, "r"
# for a 3-D (sequence / recurrent) input; _feed_inputs and _keras_shape are
# private Keras 2 attributes
rc = ""
for sha in imodel._feed_inputs:
    if len(sha._keras_shape) == 4:
        rc += "c"
    if len(sha._keras_shape) == 3:
        rc += "r"
train = wkiter([args.left + ".root", args.right + ".root"],
               batch_size=batch_size,
               end=args.end * 1.,
               istrain=1,
Code example #30
0
# conditional merges: if the predicted similarity is >= 0.5, combine the two
# Xception embeddings, otherwise pass the current embedding through unchanged
diffrentiable_conditional_add = Lambda(lambda x: K.switch(
    K.greater_equal(x, 0.5),
    Add(name="add_inner")(
        [xception_model_embeddings_current, xception_model_embeddings_previous]),
    xception_model_embeddings_current),
    name="add_conditional")(similarity_prediction)
#diffrentiable_conditional_dot = Lambda( lambda x:K.where(x>=0.5, Dot()([xception_model_embeddings_current, xception_model_embeddings_previous]), xception_model_embeddings_current))(similarity_prediction)
diffrentiable_conditional_multiply = Lambda(lambda x: K.switch(
    x >= 0.5,
    Multiply()
    ([xception_model_embeddings_current, xception_model_embeddings_previous]
     ), xception_model_embeddings_current))(similarity_prediction)
diffrentiable_conditional_average = Lambda(lambda x: K.switch(
    x >= 0.5,
    Average()
    ([xception_model_embeddings_current, xception_model_embeddings_previous]
     ), xception_model_embeddings_current))(similarity_prediction)
#diffrentiable_conidtional_concatenate = Lambda( lambda x:K.where(x>=0.5,Concatenate()([xception_model_embeddings_current, xception_model_embeddings_previous])  , xception_model_embeddings_current))(similarity_prediction)

#Here we try convolution

#Be sure to initialize a different predictor with variable input sizes for
#the concat version
#predictor_convolution = xception_conv_final_predictor(diffrentiable_conditional_add)
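# MLP head on the conditionally merged embedding: a shared dense trunk
# feeding a 2-unit regression output and a 3-way softmax classification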
final_predictor_1 = Flatten()(diffrentiable_conditional_add)
final_predictor_2 = Dense(1024, activation='relu')(final_predictor_1)
final_predictor_3 = Dense(512, activation='relu')(final_predictor_2)
final_predictor_4 = Dense(256, activation='relu')(final_predictor_3)
final_prediction_reg = Dense(2, name="final_pred_reg")(final_predictor_4)
final_prediction_class = Dense(3,
                               activation='softmax',
                               name="final_pred_class")(final_predictor_4)