def dynamic_embeddings(num_layers,
                       filter_lengths,
                       nb_filters,
                       n_classes,
                       hidden_size=250):
    # some constants
    max_len_char = 140
    dropout_rate = 0.5

    # dynamic embeddings and more n-grams
    input_layer = Input(name='input', shape=(max_len_char,))

    # Dynamic embeddings: output shape (None, 140, 140) = (batch, max_len_char, output_dim)
    embed = Embedding(input_dim=246, output_dim=140)(input_layer)

    convs = []
    for i in range(num_layers):
        for ksize in filter_lengths:
            conv = Convolution1D(filters=nb_filters, kernel_size=ksize,
                                 padding="valid", activation="relu", strides=1,
                                 name='conv%d_%d' % (i, ksize))(embed)
            pool = MaxPooling1D(pool_size=max_len_char - ksize + 1,
                                name='pool%d_%d' % (i, ksize))(conv)
            convs.append(pool)

    concat = Concatenate()(convs)
    flatten = Flatten()(concat)

    hidden = Dense(hidden_size, activation="relu")(flatten)
    dropout = Dropout(rate=dropout_rate)(hidden)

    output = Dense(n_classes, activation='softmax')(dropout)

    model = Model(inputs=input_layer, outputs=output)
    return model
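
A quick smoke test of this builder; every argument value below is an illustrative assumption, not from the source.

model = dynamic_embeddings(num_layers=1,
                           filter_lengths=[3, 4, 5],
                           nb_filters=100,
                           n_classes=5)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
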
def static_embeddings(num_layers,
                      filter_lengths,
                      nb_filters,
                      n_classes,
                      hidden_size=250):
    # some constants
    max_len_char = 140
    dropout_rate = 0.5

    input_layer = Input(name='input',
                        shape=(max_len_char, 246))  # 246 == len(small_chars_set)

    convs = []
    for i in range(num_layers):
        for j in filter_lengths:
            conv = Convolution1D(filters=nb_filters, kernel_size=j,
                                 padding="valid", activation="relu", strides=1,
                                 name='conv%d_%d' % (i, j))(input_layer)
            pool = MaxPooling1D(pool_size=max_len_char - j + 1,
                                name='pool%d_%d' % (i, j))(conv)
            convs.append(pool)

    concat = Concatenate()(convs)
    flatten = Flatten()(concat)

    hidden = Dense(hidden_size, activation="relu")(flatten)
    dropout = Dropout(rate=dropout_rate)(hidden)

    output = Dense(n_classes, activation='softmax')(dropout)

    model = Model(inputs=input_layer, outputs=output)
    return model
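
Unlike the dynamic variant, this builder consumes pre-encoded one-hot characters, so inputs have shape (140, 246). A minimal sketch with assumed arguments:

model = static_embeddings(num_layers=1, filter_lengths=[3, 4, 5],
                          nb_filters=100, n_classes=5)
model.compile(optimizer='adam', loss='categorical_crossentropy')
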
Example #3
def build_discriminator(img_shape,
                        df,
                        num_classes,
                        num_layers=4,
                        act_multi_label='softmax'):
    def d_layer(layer_input, filters, f_size=4, normalization=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2,
                   padding='valid')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if normalization:
            d = InstanceNormalization()(d)
        return d

    img = Input(shape=img_shape)

    #label = Input(shape=(1,), dtype='int32')
    #label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
    #flat_img = Flatten()(img)
    #model_input = multiply([flat_img, label_embedding])
    #d0 = Reshape(self.img_shape)(model_input)

    d = img
    for i in range(num_layers):
        _norm = i != 0  # skip normalization on the first layer
        d = d_layer(d, df * 2**i, normalization=_norm)

    flat_repr = Flatten()(d)

    #validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    print("flat_repr.get_shape().as_list():", flat_repr.get_shape().as_list())
    print("flat_repr.get_shape().as_list()[1:]:",
          flat_repr.get_shape().as_list()[1:])

    gan_logit = Dense(df * 2**(num_layers - 1))(flat_repr)
    gan_logit = LeakyReLU(alpha=0.2)(gan_logit)
    gan_prob = Dense(1, activation='sigmoid')(gan_logit)

    class_logit = Dense(df * 2**(num_layers - 1))(flat_repr)
    class_logit = LeakyReLU(alpha=0.2)(class_logit)
    class_prob = Dense(num_classes, activation=act_multi_label)(class_logit)

    ####
    #label = Input(shape=(1,), dtype='int32')
    #label_embedding = Flatten()(Embedding(self.num_classes, 9)(label))
    #flat_img = Flatten()(validity)
    #d44 = multiply([flat_img, label_embedding])
    #d444 = Reshape(validity.get_shape().as_list()[1:])(d44)
    ####

    return Model(img, [gan_prob, class_prob])
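
A two-headed discriminator like this is typically compiled with one loss per output. A hedged sketch; the image shape, optimizer, and loss weights are assumptions:

disc = build_discriminator(img_shape=(64, 64, 3), df=32, num_classes=10)
disc.compile(optimizer='adam',
             loss=['binary_crossentropy', 'categorical_crossentropy'],
             loss_weights=[1.0, 1.0],
             metrics=['accuracy'])
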
Example #4
def cnn_ascad(classes=256):
    # From VGG16 design
    input_shape = (700, 1)
    img_input = Input(shape=input_shape)
    # Block 1
    x = Conv1D(64, 11, activation='relu', padding='same',
               name='block1_conv1')(img_input)
    print(x.get_shape())
    x = AveragePooling1D(2, strides=2, name='block1_pool')(x)
    print(x.get_shape())
    # Block 2
    x = Conv1D(128, 11, activation='relu', padding='same',
               name='block2_conv1')(x)
    print(x.get_shape())
    x = AveragePooling1D(2, strides=2, name='block2_pool')(x)
    print(x.get_shape())
    # Block 3
    x = Conv1D(256, 11, activation='relu', padding='same',
               name='block3_conv1')(x)
    print(x.get_shape())
    x = AveragePooling1D(2, strides=2, name='block3_pool')(x)
    print(x.get_shape())
    # Block 4
    x = Conv1D(512, 11, activation='relu', padding='same',
               name='block4_conv1')(x)
    print(x.get_shape())
    x = AveragePooling1D(2, strides=2, name='block4_pool')(x)
    print(x.get_shape())
    # Block 5
    x = Conv1D(512, 11, activation='relu', padding='same',
               name='block5_conv1')(x)
    print(x.get_shape())
    x = AveragePooling1D(2, strides=2, name='block5_pool')(x)
    print(x.get_shape())
    # Classification block
    x = Flatten(name='flatten')(x)
    print(x.get_shape())
    x = Dense(4096, activation='relu', name='fc1')(x)
    print(x.get_shape())
    x = Dense(4096, activation='relu', name='fc2')(x)
    print(x.get_shape())
    x = Dense(classes, activation='softmax', name='predictions')(x)
    print(x.get_shape())

    inputs = img_input
    # Create model.
    model = Model(inputs, x, name='cnn_best')
    optimizer = RMSprop(learning_rate=0.00001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
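
The model expects side-channel traces of shape (700, 1) and one-hot labels over 256 classes. A minimal fit sketch with stand-in random data; the arrays below are assumptions for illustration only:

import numpy as np
from keras.utils import to_categorical

model = cnn_ascad(classes=256)
X = np.random.rand(32, 700, 1).astype('float32')   # stand-in traces
y = to_categorical(np.random.randint(0, 256, 32), num_classes=256)
model.fit(X, y, epochs=1, batch_size=16)
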
Example #5
File: node.py  Project: eoinoconn/PySrch
class ConvOutputNode(Node):
    """Input node of feature extraction graph."""

    __vertex_type = "ConvOutputNode"

    def __init__(self):
        super().__init__()

    def build(self, model):
        """ Builds and stoers Keras model. """
        self._model = Flatten()(model)

    @property
    def output_dimension(self):
        return self._model.get_shape()[1]
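
A minimal sketch of attaching the node to a Keras tensor, assuming the base Node class needs no constructor arguments; the (8, 8, 32) feature-map shape is an assumption:

feat = Input(shape=(8, 8, 32))
node = ConvOutputNode()
node.build(feat)
print(node.output_dimension)  # 8 * 8 * 32 = 2048
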
Example #6
    def build_VGG16(self):
        data_input = Input(batch_shape=self.data_size)  # optionally: dtype=np.float32

        net = Conv2D(64, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding='same', activation="relu",
                     name="conv1_1")(data_input)  # 可以
        net = Conv2D(64, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding='same', activation="relu",
                     name="conv1_2")(net)
        net = MaxPool2D((2, 2), strides=(2, 2), name="maxpool_1")(net)

        net = Conv2D(128, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv2_1")(net)
        net = Conv2D(128, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv2_2")(net)
        net = MaxPool2D((2, 2), strides=(2, 2), name="maxpool_2")(net)

        net = Conv2D(256, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv3_1")(net)
        net = Conv2D(256, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv3_2")(net)
        net = Conv2D(256, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv3_3")(net)
        net = MaxPool2D((2, 2), strides=(2, 2), name="maxpool3")(net)

        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv4_1")(net)
        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv4_2")(net)
        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv4_3")(net)
        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv4_4")(net)
        net = MaxPool2D((2, 2), strides=(2, 2), name="maxpool4")(net)

        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv5_1")(net)
        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv5_2")(net)
        net = Conv2D(512, (3, 3), strides=(1, 1),
                     kernel_regularizer=regularizers.l2(decay_weight), padding="same", activation="relu",
                     name="conv5_3")(net)
        net = MaxPool2D((2, 2), strides=(2, 2), name="maxpool5")(net)

        # net = Flatten()(net)  # global pooling alone won't do here

        if self.flags == 1:
            # plain fully connected head
            net = Flatten()(net)
            net = Dense(4096, name="fc1")(net)
            net = Activation("relu")(net)
            net = Dense(4096, name="fc2")(net)
            net = Activation("relu")(net)
            net = Dense(self.n_classes, name="fc3")(net)
            net = Activation("softmax")(net)

            model = Model(inputs=data_input, outputs=net)
            from keras.utils import plot_model
            import os
            plot_model(model, to_file=os.path.join('./imgs', "003_vgg.png"), show_shapes=True)
            model.summary()
            print("output data shape : ", net.get_shape())
            return model
        elif self.flags == 2:
            # global average pooling head
            net = AvgPool2D((7, 7), (1, 1), padding="valid", name="pool_fc")(net)
            net = GlobalAveragePooling2D(name="GAP")(net)
            net = Dense(self.n_classes, name="fc1")(net)
            net = Activation("softmax")(net)
            model = Model(inputs=data_input, outputs=net)
            model.summary()
            print("output data shape : ", net.get_shape())
            return model
        elif self.flags == 3:
            # convolutions in place of fully connected layers
            fks = net.get_shape().as_list()[1:3]
            net = Conv2D(4096, fks, strides=(1, 1), padding="valid",
                         activation="relu", name="cnn_fc1")(net)
            net = Conv2D(self.n_classes, (1, 1), strides=(1, 1), padding="valid",
                         activation="relu", name="cnn_fc3")(net)
            net = Activation("softmax")(net)
            model = Model(inputs=data_input, outputs=net)
            print("output data shape : ", net.get_shape())

            model.summary()
            return model
        else:
            raise ValueError("flags must be 1, 2, or 3")
Example #7
def build_nets(flags=1):
    data_input = Input(batch_shape=(32, 224, 224, 3))
    # -----1
    net = Conv2D(64, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv1_1")(data_input)
    net = Conv2D(64, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv1_2")(net)
    # -----2
    net = MaxPool2D((2, 2), strides=(2, 2), padding="Valid",
                    name="maxpool1")(net)
    net = Conv2D(128, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv2_1")(net)
    net = Conv2D(128, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv2_2")(net)
    # -----3
    net = MaxPool2D((2, 2), strides=(2, 2), padding="Valid",
                    name="maxpool2")(net)
    net = Conv2D(256, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv3_1")(net)
    net = Conv2D(256, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv3_2")(net)
    net = Conv2D(256, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv3_3")(net)
    # -----4
    net = MaxPool2D((2, 2), strides=(2, 2), padding="Valid",
                    name="maxpool3")(net)
    net = Conv2D(512, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv4_1")(net)
    net = Conv2D(512, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv4_2")(net)
    net = Conv2D(512, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv4_3")(net)
    # -----5
    net = MaxPool2D((2, 2), strides=(2, 2), padding="Valid",
                    name="maxpool4")(net)
    net = Conv2D(512, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv5_1")(net)
    net = Conv2D(512, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv5_2")(net)
    net = Conv2D(512, (3, 3),
                 strides=(1, 1),
                 padding="same",
                 activation="relu",
                 name="conv5_3")(net)
    # ------6
    # conv2d in place of fc
    net = MaxPool2D((2, 2), strides=(2, 2), padding="valid",
                    name="maxpool5")(net)

    if flags == 1:
        # plain fully connected head
        net = Flatten()(net)
        net = Dense(4096, name="fc1")(net)
        net = Activation("relu")(net)
        net = Dense(4096, name="fc2")(net)
        net = Activation("relu")(net)
        net = Dense(1000, name="fc3")(net)
        net = Activation("softmax")(net)
        a = net.get_layer("fc2").get_weights()
        print('a    --->   ', a)
        model = Model(inputs=data_input, outputs=net)
        print("output data shape : ", net.get_shape())
        return model
    elif flags == 2:
        # global average pooling head
        net = AvgPool2D((7, 7), (1, 1), padding="valid", name="pool_fc")(net)
        net = GlobalAveragePooling2D(name="GAP")(net)
        net = Dense(1000, name="fc1")(net)
        net = Activation("softmax")(net)
        model = Model(inputs=data_input, outputs=net)
        print("output data shape : ", net.get_shape())
        return model
    elif flags == 3:
        # convolutions in place of fc
        net = Conv2D(4096, (7, 7),
                     strides=(1, 1),
                     padding="valid",
                     activation="relu",
                     name="cnn_fc1")(net)
        net = Conv2D(4096, (1, 1),
                     strides=(1, 1),
                     padding="valid",
                     activation="relu",
                     name="cnn_fc2")(net)
        net = Conv2D(1000, (1, 1),
                     strides=(1, 1),
                     padding="valid",
                     activation="relu",
                     name="cnn_fc3")(net)
        net = Flatten()(net)
        net = Activation("softmax")(net)
        model = Model(inputs=data_input, outputs=net)
        print("output data shape : ", net.get_shape())
        return model
    else:
        raise ValueError("flags must be 1, 2, or 3")
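
For reference, a minimal invocation of the three head variants (1: fully connected, 2: global average pooling, 3: convolutional FC); the batch size is fixed at 32 by the Input batch_shape:

for flag in (1, 2, 3):
    m = build_nets(flags=flag)
    m.summary()
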
Example #8
def draw_cnn_model(hyper_param, embedding_matrix=None, verbose=True):
    """
    Input: hyper_parameters dictionary
    
    Construct:
        input layers : x , x_pos(o), x_captialization(o)
        embedding matrix : use_glove or randomly initialize
        conv1 : first convolution layer
        conv2 : second convolution layer
        conv3 : third convolution layer
        max pooling
        flatten : concant maxpooled univariate vectors into one long vector
        ff1, ff2: two feed forward layers
        out_pred: softmax over all ner classes
    
    Returns: keras.models.Model object
    """

    # input layer(s)
    x = Input(shape=(hyper_param['maxlen'], ), name='x')
    if hyper_param['use_pos_tags']:
        x_pos = Input(shape=(hyper_param['maxlen'], hyper_param['poslen']),
                      name='x_pos')
    if hyper_param['use_capitalization_info']:
        x_capital = Input(shape=(hyper_param['maxlen'],
                                 hyper_param['capitallen']),
                          name='x_capital')

    # embedding matrix
    if hyper_param['use_glove']:
        embed = Embedding(hyper_param['max_features'],
                          hyper_param['embed_dim'],
                          weights=[embedding_matrix],
                          input_length=hyper_param['maxlen'],
                          trainable=hyper_param['allow_glove_retrain'])(x)
    else:
        embed = Embedding(hyper_param['max_features'],
                          hyper_param['embed_dim'],
                          input_length=hyper_param['maxlen'],
                          embeddings_initializer="random_uniform")(x)

    # concat embeddings with additional features
    if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info']:
        embed = Concatenate(axis=-1)([embed, x_pos, x_capital])
    elif hyper_param['use_pos_tags'] and (
            not hyper_param['use_capitalization_info']):
        embed = Concatenate(axis=-1)([embed, x_pos])
    elif (not hyper_param['use_pos_tags']
          ) and hyper_param['use_capitalization_info']:
        embed = Concatenate(axis=-1)([embed, x_capital])
    # otherwise the embeddings are used as-is

    # feed embeddings into conv1
    conv1 = Conv1D(filters=hyper_param['conv1_filters'],
                   kernel_size=hyper_param['conv1_kernel_size'],
                   strides=hyper_param['conv1_strides'],
                   padding=hyper_param['conv1_padding'],
                   activation='relu', name='conv1')(embed)

    conv2 = Conv1D(filters=hyper_param['conv2_filters'],
                   kernel_size=hyper_param['conv2_kernel_size'],
                   strides=hyper_param['conv2_strides'],
                   padding=hyper_param['conv2_padding'],
                   activation='relu', name='conv2')(conv1)

    conv3 = Conv1D(filters=hyper_param['conv3_filters'],
                   kernel_size=hyper_param['conv3_kernel_size'],
                   strides=hyper_param['conv3_strides'],
                   padding=hyper_param['conv3_padding'],
                   activation='relu', name='conv3')(conv2)

    # max pooling layer
    max_pooled = MaxPooling1D(pool_size=hyper_param['max_pooling_size'],
                              strides=hyper_param['max_pooling_strides'],
                              padding=hyper_param['max_pooling_padding'])(conv3)
    # dropout
    maxpooled_dropout = Dropout(hyper_param['maxpool_dropout'])(max_pooled)

    # flatten many univariate vectors into one long vector
    flattened = Flatten()(maxpooled_dropout)

    # to feed-forward layers
    ff1 = Dense(hyper_param['feed_forward_1'], activation='relu')(flattened)
    ff1_dropout = Dropout(hyper_param['ff1_dropout'])(ff1)

    ff2 = Dense(hyper_param['feed_forward_2'], activation='relu')(ff1_dropout)
    ff2_dropout = Dropout(hyper_param['ff2_dropout'])(ff2)

    out_pred = Dense(hyper_param['ner_classes'],
                     activation='softmax',
                     name='out_pred')(ff2_dropout)

    if verbose:
        print("x", x.get_shape())
        if hyper_param['use_pos_tags']: print("x_pos", x_pos.get_shape())
        if hyper_param['use_capitalization_info']:
            print("x_capital", x_capital.get_shape())
        print("embed", embed.get_shape())
        print("embed", embed.get_shape())

        print("conv1", conv1.get_shape())
        print("conv2", conv2.get_shape())
        print("conv3", conv3.get_shape())
        print("max_pooled", max_pooled.get_shape())
        print("flattened", flattened.get_shape())
        print("ff1", ff1.get_shape())
        print("ff2", ff2.get_shape())
        print("out_pred", out_pred.get_shape())

    # return final model
    if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info']:
        cnnmodel = Model(inputs=[x, x_pos, x_capital], outputs=[out_pred])
    elif hyper_param['use_pos_tags'] and (
            not hyper_param['use_capitalization_info']):
        cnnmodel = Model(inputs=[x, x_pos], outputs=[out_pred])
    elif (not hyper_param['use_pos_tags']
          ) and hyper_param['use_capitalization_info']:
        cnnmodel = Model(inputs=[x, x_capital], outputs=[out_pred])
    else:
        cnnmodel = Model(inputs=[x], outputs=[out_pred])

    return cnnmodel
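
The function is driven entirely by the hyper_param dictionary. A minimal sketch of the keys it reads; every value below is an illustrative assumption:

hyper_param = {
    'maxlen': 100, 'max_features': 20000, 'embed_dim': 100,
    'use_glove': False, 'allow_glove_retrain': False,
    'use_pos_tags': False, 'use_capitalization_info': False,
    'poslen': 45, 'capitallen': 4,
    'conv1_filters': 128, 'conv1_kernel_size': 3, 'conv1_strides': 1, 'conv1_padding': 'same',
    'conv2_filters': 128, 'conv2_kernel_size': 3, 'conv2_strides': 1, 'conv2_padding': 'same',
    'conv3_filters': 128, 'conv3_kernel_size': 3, 'conv3_strides': 1, 'conv3_padding': 'same',
    'max_pooling_size': 2, 'max_pooling_strides': 2, 'max_pooling_padding': 'valid',
    'maxpool_dropout': 0.5,
    'feed_forward_1': 256, 'ff1_dropout': 0.5,
    'feed_forward_2': 128, 'ff2_dropout': 0.5,
    'ner_classes': 9,
}
model = draw_cnn_model(hyper_param, verbose=False)
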
Example #9
    def build(self, img_shape):
        if self.model is not None:
            print("PosNet has been constructed")
        else:
            img_input = Input(shape=(img_shape[0], img_shape[1], img_shape[2]),
                              name='inputImg')
            x_conv = Conv2D(24, (8, 8),
                            padding="valid",
                            strides=(2, 2),
                            name="conv1")(img_input)
            x_conv = BatchNormalization()(x_conv)
            x_conv = Activation('elu')(x_conv)
            print(x_conv.get_shape())

            x_conv = Conv2D(36, (5, 5),
                            padding="valid",
                            strides=(2, 2),
                            name="conv2")(x_conv)
            x_conv = BatchNormalization()(x_conv)
            x_conv = Activation('elu')(x_conv)
            print(x_conv.get_shape())

            x_conv = Conv2D(48, (5, 5),
                            padding="valid",
                            strides=(2, 2),
                            name="conv3")(x_conv)
            x_conv = BatchNormalization()(x_conv)
            x_conv = Activation('elu')(x_conv)
            print(x_conv.get_shape())

            x_conv = Conv2D(64, (5, 5), padding="valid", name="conv4")(x_conv)
            x_conv = BatchNormalization()(x_conv)
            x_conv = Activation('elu')(x_conv)
            print(x_conv.get_shape())

            x_conv = Conv2D(64, (5, 5), padding="valid", name="conv5")(x_conv)
            x_conv = BatchNormalization()(x_conv)
            x_conv = Activation('elu')(x_conv)
            print(x_conv.get_shape())

            x_out = Flatten()(x_conv)
            print(x_out.get_shape())

            # Cut for transfer learning is here:
            speed_input = Input(shape=(1, ), name='inputSpeed')

            x_out = Lambda(lambda x: K.concatenate(x, axis=1))(
                [x_out, speed_input])
            x_out = Dense(200)(x_out)
            x_out = BatchNormalization()(x_out)
            x_out = Activation('elu')(x_out)
            x_out = Dense(200)(x_out)
            x_out = BatchNormalization()(x_out)
            x_end = Activation('elu')(x_out)

            # Branching from X_END to three branches (steer, throttle, position)
            steer = Dense(100)(x_end)
            steer = BatchNormalization()(steer)
            steer = Activation('elu')(steer)
            steer = Dropout(.2)(steer)
            steer = Dense(30)(steer)
            steer = BatchNormalization()(steer)
            steer = Activation('elu')(steer)
            steer = Dense(1, activation='sigmoid')(steer)
            steer = Lambda(lambda x: x * 10 - 5, name='outputSteer')(steer)

            throttle = Dense(100, name='thr1')(x_end)
            throttle = BatchNormalization(name='thr2')(throttle)
            throttle = Activation('elu')(throttle)
            throttle = Dropout(.2)(throttle)
            throttle = Dense(30, name='thr3')(throttle)
            throttle = BatchNormalization(name='thr4')(throttle)
            throttle = Activation('elu')(throttle)
            throttle = Dense(1, activation='sigmoid', name='thr5')(throttle)
            throttle = Lambda(lambda x: x * 2 - 1, name='outputThr')(throttle)

            position = Dropout(.3)(x_end)
            position = Dense(1, activation='sigmoid', name='pos5')(position)
            position = Lambda(lambda x: x * 2 - 1, name='outputPos')(position)
            self.model = Model((img_input, speed_input),
                               (steer, throttle, position))
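
Since the network ends in three heads (steer, throttle, position), compilation takes one loss per output. A hedged sketch, assuming an instance posnet of this class; the input shape, optimizer, and loss weights are illustrative assumptions:

posnet.build(img_shape=(120, 320, 3))
posnet.model.compile(optimizer='adam',
                     loss=['mse', 'mse', 'mse'],      # one loss per head
                     loss_weights=[1.0, 0.5, 0.5])
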
Example #10
def gan_1(
    input_img,
    hidden_num=128,
    no_of_pairs=5,
    min_fea_map_H=8,
    activation_fn=tf.nn.elu,
    noise_dim=0,
    z_num=64,
    input_channel=3
):

    # Encoder
    encoder_layer_list = []
    x = Conv2D(hidden_num,
               kernel_size=3,
               strides=1,
               activation=activation_fn,
               padding='same')(input_img)

    for idx in range(no_of_pairs):
        # grow the filter count to hidden_num * (idx + 1): 128, 256, 384, ... for hidden_num=128
        channel_num = hidden_num * (idx + 1)

        res = x
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)

        x = add([x, res])

        encoder_layer_list.append(x)
        if idx < no_of_pairs - 1:
            x = Conv2D(hidden_num * (idx + 2),
                       kernel_size=3,
                       strides=2,
                       activation=activation_fn,
                       padding='same')(x)

    # for flattening the layer
    x = Flatten()(x)
    # 20480
    reshape_dim = int(np.prod([min_fea_map_H, min_fea_map_H / 2, channel_num]))
    x = Reshape((1, reshape_dim))(x)

    x = Dense(z_num, activation=None)(x)

    # Decoder
    reshape_dim = int(np.prod([min_fea_map_H, min_fea_map_H / 2, hidden_num]))
    x = Dense(reshape_dim, activation=None)(x)
    x = Reshape((min_fea_map_H, min_fea_map_H // 2, hidden_num))(x)

    for idx in range(no_of_pairs):
        x = Concatenate(axis=-1)(
            [x, encoder_layer_list[no_of_pairs - 1 - idx]])
        res = x

        channel_num = x.get_shape().as_list()[-1]
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = add([x, res])

        if idx < no_of_pairs - 1:
            x = UpSampling2D(2)(x)
            x = Conv2D(hidden_num * (no_of_pairs - idx - 1),
                       kernel_size=1,
                       strides=1,
                       activation=activation_fn,
                       padding='same')(x)

    out = Conv2D(input_channel,
                 name='output_g1',
                 kernel_size=3,
                 strides=1,
                 activation=None,
                 padding='same')(x)
    return out
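
A minimal way to wrap the generator into a trainable Model. The (128, 64, 3) input shape is an assumption chosen so the decoder's 8x4 bottleneck lines up with the encoder skip connections (four stride-2 stages: 128/16 = 8, 64/16 = 4):

inp = Input(shape=(128, 64, 3))
out = gan_1(inp)
g1 = Model(inp, out)
g1.summary()
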
Example #11
class My_Model:
    def __init__(self, input_shape, dimention):
        self.ip1 = Input(shape=(input_shape, 1))
        self.ip2 = Input(shape=(input_shape, 1))
        self.sim = Input(shape=(1, ))
        print(self.ip1.shape)
        ## --- projection layer --- ##
        ## assume each argument is of size 4 and 5 arguments per event; hence ip1 is 20x1, and so is ip2.
        ## after conv, each argument will have 5 channels (dimension)
        self.projection1 = Conv1D(
            5, 4, strides=4, padding='valid',
            activation='tanh')  #(Dropout(0.5)(Dense(1500, activation='relu')))
        self.sh_projection1_op1 = self.projection1(self.ip1)
        self.sh_projection1_op2 = self.projection1(self.ip2)

        print(self.sh_projection1_op1.shape)
        self.f_v1 = Flatten()(self.sh_projection1_op1)
        self.f_v2 = Flatten()(self.sh_projection1_op2)
        print('after flatten {}'.format(self.f_v1.get_shape().as_list()))
        self.projection2 = Conv1D(1,
                                  5,
                                  strides=5,
                                  padding='valid',
                                  activation='tanh')
        self.allignment = list()

        x0 = self.f_v2
        x1 = Lambda(myfunc,
                    output_shape=(self.f_v2.get_shape().as_list()[1],))(x0)
        x2 = Lambda(myfunc,
                    output_shape=(self.f_v2.get_shape().as_list()[1],))(x1)
        x3 = Lambda(myfunc,
                    output_shape=(self.f_v2.get_shape().as_list()[1],))(x2)
        x4 = Lambda(myfunc,
                    output_shape=(self.f_v2.get_shape().as_list()[1],))(x3)

        print('shifted copies: {} {} {} {} {}'.format(
            x0.shape, x1.shape, x2.shape, x3.shape, x4.shape))

        # Keras 2: Concatenate() replaces the removed Keras 1 merge(..., mode='concat')
        for shifted in (x0, x1, x2, x3, x4):
            self.merged_layer = Concatenate()([self.f_v1, shifted])
            self.merged_layer = Reshape((-1, 2))(self.merged_layer)
            allgn = self.projection2(self.merged_layer)
            self.allignment.append(allgn)
        '''
        for i in range(5):
            self.merged_layer = merge([self.f_v1, self.f_v2], mode = 'concat')
            self.merged_layer = Reshape((-1,2))(self.merged_layer)
            allgn = self.projection2(self.merged_layer)
            #print('{} dim merged layer{} dim of conv allgn{}'.format(i,self.merged_layer.shape,allgn.shape ))
            self.allignment.append(allgn)
            temp1 = self.f_v2[0:4]
            temp2 = self.f_v2[5:]
            self.f_v2 = merge([temp2, temp1], mode = 'concat')
            #print(self.f_v2.type)
        '''
        #print(self.allignment[0].shape)
        for i in range(len(self.allignment)):
            self.allignment[i] = Reshape((5, 1))(self.allignment[i])
            #print('{} dim allgnlayer{}'.format(i,self.allignment[i] ))

        self.allignment_all = Concatenate()(self.allignment)

        self.prediction = Dense(1, activation='sigmoid')(Flatten()(
            self.allignment_all))

        self.model = Model(inputs=[self.ip1, self.ip2, self.sim],
                           outputs=self.prediction)
        sgd = SGD(lr=0.1, momentum=0.9, decay=0, nesterov=False)
        self.model.compile(loss='mean_squared_error',
                           optimizer=sgd,
                           metrics=['accuracy'])
        #seed(2017)
        #self.model.fit([X1, X2], Y.values, batch_size = 2000, nb_epoch = 100, verbose = 1)
    def train_model(self,
                    train_X1,
                    train_X2,
                    train_S,
                    train_y,
                    batch_size_=50,
                    epch=15):
        self.model.fit([train_X1, train_X2, train_S],
                       train_y,
                       batch_size=batch_size_,
                       epochs=epch,
                       verbose=1,
                       shuffle=True)
        self.epch = epch

    def predict(self, test_x1, test_x2, train_S):
        return self.model.predict([test_x1, test_x2, train_S])

    def evaluate(self, X1, X2, S, y):
        return self.model.evaluate([X1, X2, S], y)

    def save_Model_separately(self, path):
        weights_file = os.path.join(path, 'weight.h5')
        model_file = os.path.join(path, 'model_archi.json')
        # Save the weights
        self.model.save_weights(weights_file)
        with open(model_file, 'w') as f:
            f.write(self.model.to_json())

    def save_model(self, path):
        model_file = os.path.join(path, 'model_FM' + str(self.epch) + '.h5')
        self.model.save(model_file)
        return str(model_file)

    def load_model(self, path):
        self.model = load_model(path)
Example #12
print(x.shape)
x = MaxPooling2D((2,2))(x)
print(x.shape)
x = Conv2D(35, (5, 5), padding='same', activation='relu')(x)
print(x.shape)
x = MaxPooling2D((2,2))(x)
print(x.shape)
x = Conv2D(40, (5, 5), padding='same', activation='relu')(x)
print(x.shape)
x = MaxPooling2D((2,2))(x)
print(x.shape)
print('pre flatten shape')
shape = K.int_shape(x)
print(shape)
x = Flatten()(x)
print(x.get_shape())
# time.sleep(20)
# z_mean = Dense(latentDim, name='z_mean')(x)
# z_log_var = Dense(latentDim, name='z_log_var')(x)

# print(z_mean.shape)
# print(z_log_var.shape)

# z = Lambda(sampling, output_shape=(latentDim,), name='z')([z_mean, z_log_var])

encoder_model = Model(encoder_in, x)

# This ensures the model will be shared, including weights
encoded1 = encoder_model(image1)
encoded2 = encoder_model(image2)
encoded3 = encoder_model(image3)
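
A hedged continuation sketch showing how the shared encodings are often combined; the L1-distance merge and the binary head below are assumptions, not the source's code:

from keras.layers import Lambda, Dense
import keras.backend as K

# distance between two encodings from the shared (weight-tied) encoder
dist12 = Lambda(lambda t: K.abs(t[0] - t[1]))([encoded1, encoded2])
same_prob = Dense(1, activation='sigmoid')(dist12)
siamese = Model([image1, image2], same_prob)
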