Example No. 1
def cnn_encode(embed_input):
    ca1 = SeparableConv1D(filters=64,
                          kernel_size=1,
                          padding='same',
                          activation='relu',
                          name='conv1')
    ca2 = SeparableConv1D(filters=64,
                          kernel_size=2,
                          padding='same',
                          activation='relu',
                          name='conv2')
    ca3 = SeparableConv1D(filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu',
                          name='conv3')
    mp = GlobalMaxPooling1D()
    da = Dense(200, activation='relu', name='encode')
    x1 = ca1(embed_input)
    x1 = mp(x1)
    x2 = ca2(embed_input)
    x2 = mp(x2)
    x3 = ca3(embed_input)
    x3 = mp(x3)
    x = Concatenate()([x1, x2, x3])
    return da(x)
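A minimal usage sketch for cnn_encode; the sequence length, vocabulary size, and embedding width below are illustrative assumptions, not values from the source:

from keras.layers import Embedding, Input
from keras.models import Model

seq_input = Input(shape=(100,))                                  # assumed sequence length
embedded = Embedding(input_dim=5000, output_dim=300)(seq_input)  # assumed vocab/embedding sizes
encoding = cnn_encode(embedded)                                  # -> (batch, 200) sentence encoding
encoder = Model(inputs=seq_input, outputs=encoding)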
Example No. 2
def rcnn(embed_input, class_num):
    ra = LSTM(200, activation='tanh', return_sequences=True)
    ba = Bidirectional(ra, merge_mode='concat')
    ca1 = SeparableConv1D(filters=64,
                          kernel_size=1,
                          padding='same',
                          activation='relu')
    ca2 = SeparableConv1D(filters=64,
                          kernel_size=2,
                          padding='same',
                          activation='relu')
    ca3 = SeparableConv1D(filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
    mp = GlobalMaxPooling1D()
    da1 = Dense(200, activation='relu')
    da2 = Dense(class_num, activation='softmax')
    x = ba(embed_input)
    x1 = ca1(x)
    x1 = mp(x1)
    x2 = ca2(x)
    x2 = mp(x2)
    x3 = ca3(x)
    x3 = mp(x3)
    x = Concatenate()([x1, x2, x3])
    x = da1(x)
    x = Dropout(0.2)(x)
    return da2(x)
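A hedged end-to-end sketch for rcnn; all shapes and hyperparameters here are assumptions:

from keras.layers import Embedding, Input
from keras.models import Model

seq_input = Input(shape=(100,))              # assumed sequence length
embedded = Embedding(5000, 300)(seq_input)   # assumed vocab / embedding sizes
probs = rcnn(embedded, class_num=10)         # assumed 10-way classification
model = Model(seq_input, probs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])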
Example No. 3
def cnn_1d(embed_input1, embed_input2):
    ca1 = SeparableConv1D(filters=64, kernel_size=1, padding='same', activation='relu')
    ca2 = SeparableConv1D(filters=64, kernel_size=2, padding='same', activation='relu')
    ca3 = SeparableConv1D(filters=64, kernel_size=3, padding='same', activation='relu')
    mp = GlobalMaxPooling1D()
    da1 = Dense(200, activation='relu')
    da2 = Dense(200, activation='relu')
    da3 = Dense(1, activation='sigmoid')
    x1 = ca1(embed_input1)
    x1 = mp(x1)
    x2 = ca2(embed_input1)
    x2 = mp(x2)
    x3 = ca3(embed_input1)
    x3 = mp(x3)
    x = Concatenate()([x1, x2, x3])
    x = da1(x)
    y1 = ca1(embed_input2)
    y1 = mp(y1)
    y2 = ca2(embed_input2)
    y2 = mp(y2)
    y3 = ca3(embed_input2)
    y3 = mp(y3)
    y = Concatenate()([y1, y2, y3])
    y = da1(y)
    diff = Lambda(lambda a: K.abs(a))(Subtract()([x, y]))
    prod = Multiply()([x, y])
    z = Concatenate()([x, y, diff, prod])
    z = da2(z)
    z = Dropout(0.2)(z)
    return da3(z)
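Because cnn_1d applies the same conv and dense layers to both inputs, it behaves as a Siamese matcher over abs-difference and product features; a hedged wiring sketch (shapes assumed):

from keras.layers import Embedding, Input
from keras.models import Model

left = Input(shape=(30,))                 # assumed sequence length
right = Input(shape=(30,))
shared_embedding = Embedding(5000, 300)   # one embedding shared by both sentences
match_prob = cnn_1d(shared_embedding(left), shared_embedding(right))
model = Model([left, right], match_prob)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])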
Example No. 4
def sepcnn_model(blocks,
                 filters,
                 kernel_size,
                 embedding_dim,
                 dropout_rate,
                 pool_size,
                 input_shape,
                 num_features,
                 use_pretrained_embedding=False,
                 is_embedding_trainable=False,
                 embedding_matrix=None):
    model = Sequential()
    if use_pretrained_embedding:
        model.add(
            Embedding(input_dim=embedding_matrix.shape[0],
                      output_dim=embedding_matrix.shape[1],
                      input_length=input_shape[0],
                      weights=[embedding_matrix],
                      trainable=is_embedding_trainable))
    else:
        model.add(
            Embedding(input_dim=num_features,
                      output_dim=embedding_dim,
                      input_length=input_shape[0]))

    for _ in range(blocks - 1):
        model.add(Dropout(rate=dropout_rate))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    return model
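With num_features and embedding_dim added to the signature above, a hypothetical call looks like this (all values are placeholders):

model = sepcnn_model(blocks=2, filters=64, kernel_size=3, embedding_dim=200,
                     dropout_rate=0.2, pool_size=3, input_shape=(500,),
                     num_features=20000)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])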
Example No. 5
    def build_model(self):
        input_layer = Input((self.max_sentence_length, ))
        embedding_layer = Embedding(
            self.input_dim,
            self.embedding_dim,
            input_length=self.max_sentence_length)(input_layer)
        embedding_layer = Dropout(0.5)(embedding_layer)
        # conv1
        conv_layer1 = SeparableConv1D(self.feature_maps[0],
                                      self.kernel_sizes[0],
                                      activation='relu',
                                      strides=1,
                                      padding='same',
                                      depth_multiplier=4)(embedding_layer)
        max_pool_layer1 = GlobalMaxPooling1D()(conv_layer1)
        dense_layer1 = Dense(self.hidden_dim)(max_pool_layer1)
        dense_layer1 = Dropout(0.5)(dense_layer1)
        # conv2
        conv_layer2 = SeparableConv1D(self.feature_maps[1],
                                      self.kernel_sizes[1],
                                      activation='relu',
                                      strides=1,
                                      padding='same',
                                      depth_multiplier=4)(embedding_layer)
        max_pool_layer2 = GlobalMaxPooling1D()(conv_layer2)
        dense_layer2 = Dense(self.hidden_dim)(max_pool_layer2)
        dense_layer2 = Dropout(0.5)(dense_layer2)
        # conv3
        conv_layer3 = SeparableConv1D(self.feature_maps[2],
                                      self.kernel_sizes[2],
                                      activation='relu',
                                      strides=1,
                                      padding='same',
                                      depth_multiplier=4)(embedding_layer)
        max_pool_layer3 = GlobalMaxPooling1D()(conv_layer3)
        dense_layer3 = Dense(self.hidden_dim)(max_pool_layer3)
        dense_layer3 = Dropout(0.5)(dense_layer3)
        # concatenate conv channels
        concat = concatenate([dense_layer1, dense_layer2, dense_layer3])
        #concat = Dropout(0.5)(concat)
        concat = Activation('relu')(concat)
        output_layer = Dense(self.output_dim, activation='sigmoid')(concat)

        self.model = Model(inputs=input_layer, outputs=output_layer)
        if self.loss == 'custom_recall_spec':
            self.loss_name = 'custom_recall_spec'
            custom_loss = binary_recall_specificity_loss(self.recall_weight)
            self.loss = custom_loss

        elif self.loss == 'combined_loss':
            self.loss_name = 'combined_loss'
            custom_loss = combined_loss(self.bce_weight, self.recall_weight)
            self.loss = custom_loss

        self.model.compile(loss=self.loss,
                           optimizer=self.optimizer,
                           metrics=self.metrics)
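binary_recall_specificity_loss and combined_loss are not defined in this excerpt; below is one plausible shape for the recall/specificity factory, offered purely as an assumption about the missing helper:

from keras import backend as K

def binary_recall_specificity_loss(recall_weight):
    # Assumed form: minimize 1 - (weighted mix of soft recall and soft specificity).
    def loss(y_true, y_pred):
        tp = K.sum(y_true * y_pred)
        fn = K.sum(y_true * (1.0 - y_pred))
        tn = K.sum((1.0 - y_true) * (1.0 - y_pred))
        fp = K.sum((1.0 - y_true) * y_pred)
        recall = tp / (tp + fn + K.epsilon())
        specificity = tn / (tn + fp + K.epsilon())
        return 1.0 - (recall_weight * recall + (1.0 - recall_weight) * specificity)
    return loss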
Example No. 6
def build_sepconv(trainable_embedding, embedding_matrix, max_len, kmer_size,
                  metrics, classes_1, classes_2, classes_3, classes_4,
                  classes_5, classes_6):

    inp = Input(shape=(max_len, ), dtype='uint16')
    max_features = 4**kmer_size + 1
    if trainable_embedding:
        main = Embedding(max_features, 128)(inp)
    else:
        # embedding_matrix is expected to be a pre-built, frozen Embedding layer here
        # (alternatively: Embedding(max_features, vector_size, weights=[embedding_matrix], trainable=False))
        main = embedding_matrix(inp)

    main = SeparableConv1D(84, (5))(main)
    main = Dropout(0.5)(main)
    main = LeakyReLU()(main)

    main = SeparableConv1D(58, (9))(main)
    main = Dropout(0.5)(main)
    main = LeakyReLU()(main)

    main = SeparableConv1D(180, (13))(main)
    main = Dropout(0.5)(main)
    main = LeakyReLU()(main)

    main = Dense(2800)(main)
    main = Dropout(0.5)(main)
    main = LeakyReLU()(main)
    #main = Dense(2800)(main)

    main = GlobalAveragePooling1D()(main)
    out1 = Dense(classes_1, activation='softmax')(main)
    out2 = Dense(classes_2, activation='softmax')(main)
    out3 = Dense(classes_3, activation='softmax')(main)
    out4 = Dense(classes_4, activation='softmax')(main)
    out5 = Dense(classes_5, activation='softmax')(main)
    # The sixth head is only attached when classes_6 is nonzero.
    outputs = [out1, out2, out3, out4, out5]
    if classes_6 != 0:
        outputs.append(Dense(classes_6, activation='softmax')(main))
    if metrics == 'f1':
        metrics = f1
    elif metrics == 'precision-recall':
        metrics = [keras_metrics.precision(), keras_metrics.recall()]
    model = Model(inputs=[inp], outputs=outputs)
    optimizer = Adam(lr=0.001)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=[metrics])
    return model
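A hypothetical call with the renamed trainable_embedding flag; every value below is illustrative (a plain 'accuracy' metric is used so the undefined f1 helper is not required):

model = build_sepconv(trainable_embedding=True, embedding_matrix=None,
                      max_len=150, kmer_size=6, metrics='accuracy',
                      classes_1=2, classes_2=10, classes_3=30,
                      classes_4=80, classes_5=200, classes_6=0)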
Example No. 7
def nochannel_nodil(classes):
    InputSignal = Input(shape=(250, 22))

    def conv_branch(inp):
        # Two stride-1 separable convs followed by four stride-2 downsampling convs,
        # each with batch norm; the three parallel branches are identical in structure.
        branch = inp
        for _ in range(2):
            branch = SeparableConv1D(filters=32, kernel_size=9, strides=1, padding='same',
                                     activation='relu', dilation_rate=1,
                                     kernel_regularizer='l2')(branch)
            branch = BatchNormalization()(branch)
        for _ in range(4):
            branch = SeparableConv1D(filters=32, kernel_size=9, strides=2, padding='same',
                                     activation='relu', dilation_rate=1,
                                     kernel_regularizer='l2')(branch)
            branch = BatchNormalization()(branch)
        return branch

    ConvBlock1 = conv_branch(InputSignal)
    ConvBlock2 = conv_branch(InputSignal)
    ConvBlock3 = conv_branch(InputSignal)

    Concat_Layer = keras.layers.concatenate([ConvBlock1, ConvBlock2, ConvBlock3])
    Concat_Layer = Flatten()(Concat_Layer)
    FinalOutput = Dense(1024, activation='relu', kernel_regularizer='l2')(Concat_Layer)
    FinalOutput = BatchNormalization()(FinalOutput)
    FinalOutput = Dropout(.5)(FinalOutput)
    FinalOutput = Dense(512, activation='relu', kernel_regularizer='l2')(FinalOutput)
    FinalOutput = BatchNormalization()(FinalOutput)
    FinalOutput = Dropout(.5)(FinalOutput)
    FinalOutput = Dense(256, activation='relu', kernel_regularizer='l2')(FinalOutput)
    FinalOutput = BatchNormalization()(FinalOutput)
    FinalOutput = Dropout(.5)(FinalOutput)

    EventPrediction = Dense(64, activation='relu', kernel_regularizer='l2')(FinalOutput)
    EventPrediction = BatchNormalization()(EventPrediction)
    EventPrediction = Dropout(.5)(EventPrediction)
    EventPrediction = Dense(classes, activation='softmax', kernel_regularizer='l2', name='event_prediction')(
        EventPrediction)

    CompleteModel = Model(inputs=(InputSignal), outputs=[EventPrediction])
    opt = keras.optimizers.SGD(lr=1e-3, momentum=.9, nesterov=True)
    CompleteModel.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])

    return CompleteModel
Example No. 8
def build_model(input_length,
                emb_input_dim,
                emb_out_dim,
                lstm_hidden_units,
                num_cls,
                embedding_matrix=None):

    l_input = Input(shape=(input_length, ))

    if embedding_matrix is None:
        l_emb = Embedding(emb_input_dim, emb_out_dim)(l_input)
        #l_emb = Embedding(emb_input_dim, emb_out_dim, mask_zero=True)(l_input)
    else:
        l_emb = Embedding(emb_input_dim,
                          emb_out_dim,
                          weights=[embedding_matrix],
                          trainable=True)(l_input)

    # add position embedding layer (note: lstm_hidden_units is unused below)
    l_posemb = Position_Embedding()(l_emb)
    l_posemb = Dropout(0.1)(l_posemb)

    l_cnn = SeparableConv1D(128,
                            3,
                            use_bias=False,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(l_posemb)
    l_cnn = SeparableConv1D(128,
                            3,
                            use_bias=False,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(l_cnn)
    l_cnn = Conv1D(128,
                   3,
                   use_bias=False,
                   padding='same',
                   kernel_initializer='he_normal')(l_cnn)

    # add attention layer
    l_att = Attention(nb_head=64, size_per_head=16)([l_cnn] * 3)
    print('l_att.shape:', l_att.shape)

    # add dense layer
    l_dense = Dense(num_cls, activation='softmax')(l_att)

    model = Model(l_input, l_dense)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
    model.summary()

    return model
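Position_Embedding and Attention are external custom layers not shown in this excerpt; a hypothetical call, assuming they are importable:

model = build_model(input_length=100, emb_input_dim=20000, emb_out_dim=128,
                    lstm_hidden_units=64,  # accepted but unused by the current body
                    num_cls=10)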
Example No. 9
def sepa_conv_block(x, dim):
    x1 = SeparableConv1D(dim,
                         kernel_size=1,
                         strides=1,
                         padding='same',
                         activation='relu')(x)
    x1 = BatchNormalization()(x1)
    x1 = SeparableConv1D(dim,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         activation='relu')(x1)
    x1 = BatchNormalization()(x1)
    return x1
Example No. 10
def BuildModel(lr=0.001, decay=0.0):

    brand_name = Input(shape=[1], name='brand_name')
    category_name = Input(shape=[1], name='category_name')
    item_condition_id = Input(shape=[1], name='item_condition_id')
    item_description = Input(shape=(50, ), name='item_description')
    name = Input(shape=(10, ), name='name')
    shipping = Input(shape=[1], name='shipping')

    em_ctg = Embedding(ctg_num + 1, 10)(category_name)
    em_brand = Embedding(brand_num + 1, 10)(brand_name)
    em_desc = Embedding(20000, 100)(item_description)
    em_name = Embedding(20000, 20)(name)

    x = SeparableConv1D(64, 3, activation='relu')(em_name)
    x = GlobalAveragePooling1D()(x)

    y = SeparableConv1D(64, 6, activation='relu')(em_desc)
    y = SeparableConv1D(128, 6, activation='relu')(y)
    y = layers.Dropout(0.3)(y)
    y = BatchNormalization()(y)
    y = GlobalAveragePooling1D()(y)

    main = layers.concatenate([
        layers.Flatten()(em_brand),
        layers.Flatten()(em_ctg),
        item_condition_id,
        y,
        x,
        shipping,
    ])
    dense = Dense(64, activation='relu')(main)
    dense = Dense(32, activation='relu')(dense)
    dense = Dense(1, activation='linear')(dense)

    model = Model([
        brand_name,
        category_name,
        item_condition_id,
        item_description,
        name,
        shipping,
    ], dense)

    optimizer = Adam(lr=lr, decay=decay)
    model.compile(loss="mse", optimizer=optimizer)

    return model
Example No. 11
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    channel_axis = -1
    in_channels = K.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = Conv1D(expansion * in_channels, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'expand')(x)
        x = BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999, name=prefix + 'expand_BN')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise step: SeparableConv1D(in_channels, ...) approximates a depthwise
    # convolution, since Keras' 1D API has no DepthwiseConv1D layer
#    if stride == 2:
#        x = ZeroPadding1D(padding=1, name=prefix + 'pad')(x)
    x = SeparableConv1D(in_channels, kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same', name=prefix + 'depthwise')(x)
    x = BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999, name=prefix + 'depthwise_BN')(x)
    x = ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv1D(pointwise_filters, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'project')(x)
    x = BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return add([inputs, x])
    return x
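_make_divisible is not shown in this excerpt; the sketch below is the standard MobileNet helper of the same name and is assumed to match what this block expects:

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never going below min_value
    # and never dropping more than 10% of the original value.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v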
Example No. 12
def quartznet_12x1_15_39(input_dim, output_dim=29):
    input_data = Input(name='the_input', shape=(None, input_dim))
    x = input_data
    x = sep1d_bn_relu(x, 128, 15, strides=2, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 128, 15, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 128, 15, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 128, 15, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 128, 15, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 128, 21, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 128, 21, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 128, 21, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 256, 27, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 256, 27, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 256, 27, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 256, 33, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 256, 33, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 256, 33, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 256, 39, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 512, 1, strides=1, dilation_rate=1, padding="same")

    conv_final = SeparableConv1D(output_dim,
                                 1,
                                 padding="same",
                                 dilation_rate=2)(x)
    y_pred = Activation('softmax', name='softmax')(conv_final)
    model = Model(inputs=input_data, outputs=y_pred)
    model.output_length = lambda x: x / 2
    model.summary()
    return model
Example No. 13
        def test_separable_convolution(self):
            N, C, H, W = 2, 3, 5, 5
            x = np.random.rand(N, H, W, C).astype(np.float32, copy=False)
            model = Sequential()
            model.add(
                SeparableConv2D(filters=10,
                                kernel_size=(1, 2),
                                strides=(1, 1),
                                padding='valid',
                                input_shape=(H, W, C),
                                data_format='channels_last',
                                depth_multiplier=4))
            model.add(
                MaxPooling2D((2, 2),
                             strides=(2, 2),
                             data_format='channels_last'))
            model.compile(optimizer='sgd', loss='mse')
            self._test_one_to_one_operator_keras(model, x)

            x = np.random.rand(N, H, C).astype(np.float32, copy=False)
            model = Sequential()
            model.add(
                SeparableConv1D(filters=10,
                                kernel_size=2,
                                strides=1,
                                padding='valid',
                                input_shape=(H, C),
                                data_format='channels_last'))
            model.compile(optimizer='sgd', loss='mse')
            self._test_one_to_one_operator_keras(model, x)
Example No. 14
def quartznet_12x1_shallow(input_dim, output_dim=29):
    # Shallower variant with fewer repeats per block; each sep1d_bn_relu unit
    # (see Example No. 29) is a separable conv followed by BN and a Swish activation
    input_data = Input(name='the_input', shape=(None, input_dim))
    x = input_data
    x = sep1d_bn_relu(x, 192, 15, strides=2, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 192, 15, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 192, 15, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 192, 21, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 192, 21, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 384, 27, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 384, 27, strides=1, dilation_rate=1, padding="same")

    x = sep1d_bn_relu(x, 384, 33, strides=1, dilation_rate=1, padding="same")
    x = sep1d_bn_relu(x, 512, 1, strides=1, dilation_rate=1, padding="same")

    conv_final = SeparableConv1D(output_dim,
                                 1,
                                 padding="same",
                                 dilation_rate=2)(x)
    y_pred = Activation('softmax', name='softmax')(conv_final)
    model = Model(inputs=input_data, outputs=y_pred)
    model.output_length = lambda x: x / 2
    model.summary()
    return model
Example No. 15
def cnn(embed_input1, embed_input2, embed_input3):
    ca1 = SeparableConv1D(filters=64,
                          kernel_size=1,
                          padding='same',
                          activation='relu',
                          name='conv1')
    ca2 = SeparableConv1D(filters=64,
                          kernel_size=2,
                          padding='same',
                          activation='relu',
                          name='conv2')
    ca3 = SeparableConv1D(filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu',
                          name='conv3')
    mp = GlobalMaxPooling1D()
    da = Dense(200, activation='relu', name='encode')
    norm = Lambda(lambda a: K.sum(K.square(a), axis=-1, keepdims=True))
    x1 = ca1(embed_input1)
    x1 = mp(x1)
    x2 = ca2(embed_input1)
    x2 = mp(x2)
    x3 = ca3(embed_input1)
    x3 = mp(x3)
    x = Concatenate()([x1, x2, x3])
    x = da(x)
    y1 = ca1(embed_input2)
    y1 = mp(y1)
    y2 = ca2(embed_input2)
    y2 = mp(y2)
    y3 = ca3(embed_input2)
    y3 = mp(y3)
    y = Concatenate()([y1, y2, y3])
    y = da(y)
    z1 = ca1(embed_input3)
    z1 = mp(z1)
    z2 = ca2(embed_input3)
    z2 = mp(z2)
    z3 = ca3(embed_input3)
    z3 = mp(z3)
    z = Concatenate()([z1, z2, z3])
    z = da(z)
    pos = norm(Subtract()([x, y]))
    neg = norm(Subtract()([x, z]))
    return Subtract()([pos, neg])
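cnn returns pos - neg, the squared anchor-positive distance minus the squared anchor-negative distance, so a margin hinge is a natural training objective; a hedged sketch (shapes and margin are assumptions):

from keras import backend as K
from keras.layers import Embedding, Input
from keras.models import Model

anchor = Input(shape=(50,))               # assumed sequence length
positive = Input(shape=(50,))
negative = Input(shape=(50,))
shared_embedding = Embedding(5000, 300)   # shared across the triplet
diff = cnn(shared_embedding(anchor), shared_embedding(positive),
           shared_embedding(negative))
model = Model([anchor, positive, negative], diff)
# Triplet hinge: push pos - neg below -margin (margin assumed to be 1.0).
model.compile(optimizer='adam',
              loss=lambda y_true, y_pred: K.maximum(y_pred + 1.0, 0.0))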
Example No. 16
    def residual_layer(self, in_layer):
        # Layer names are left to Keras' auto-naming so that stacked residual
        # layers (and the policy head's "Policy_Head_Conv") cannot collide.
        rl = SeparableConv1D(RESIDUAL_LAYER_PARAMETERS["filters"],
                             RESIDUAL_LAYER_PARAMETERS["strides"],
                             padding='same',
                             data_format='channels_first')(in_layer)
        rl = BatchNormalization()(rl)
        rl = Activation("relu")(rl)
        rl = SeparableConv1D(RESIDUAL_LAYER_PARAMETERS["filters"],
                             RESIDUAL_LAYER_PARAMETERS["strides"],
                             padding='same',
                             data_format='channels_first')(rl)
        rl = BatchNormalization()(rl)
        rl = Add()([rl, in_layer])  # skip connection (Keras 2 replacement for merge(mode='sum'))
        rl = Activation("relu")(rl)
        return rl
Example No. 17
def residual_block(inputs, factor, filters):
    dilation = 2 ** factor
    # Residual block
    n1 = conv_block(inputs, filters, dilation)
    residual = TimeDistributed(SeparableConv1D(1, kernel_size=1, padding='same'))(n1)
    # print residual.shape
    outputs = keras.layers.add([inputs, residual])
    return outputs
Example No. 18
def sepcnn_model(blocks, filters, kernel_size, embedding_dim, dropout_rate,
                 pool_size, input_shape, vocab_size):
    model = Sequential()
    model.add(
        Embedding(input_dim=vocab_size + 1,
                  output_dim=embedding_dim,
                  input_length=input_shape[0]))

    for _ in range(blocks - 1):
        model.add(Dropout(rate=dropout_rate))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    return model
Example No. 19
def main(args):
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_valid = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_valid = np.load(args.y_validate)

    model_save_path = args.model_save_path

    def lr_schedule(epoch, lr):
        if epoch > 50:
            if epoch % 5 == 0:
                return lr * 0.95
        return lr

    lr_callback = LearningRateScheduler(lr_schedule)
    callbacks = [lr_callback, EarlyStopping(monitor='val_loss', patience=3)]

    input_shape = X_train.shape[1:]
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=input_shape)
    conv_1 = Conv1D(filters=40,
                    kernel_size=3,
                    padding='same',
                    activation='relu',
                    kernel_regularizer=l2(0.01))(input_layer)
    pool_1 = MaxPooling1D(pool_size=(2))(conv_1)
    conv_2 = SeparableConv1D(filters=40,
                             kernel_size=3,
                             padding='same',
                             activation='relu',
                             kernel_regularizer=l2(0.01))(pool_1)
    bn_1 = BatchNormalization()(conv_2)

    flatten = Flatten()(bn_1)
    predictions = Dense(num_output_classes, activation='softmax')(flatten)

    model = Model(input_layer, predictions)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.summary()

    batch_size = 10000
    epochs = 150

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_valid, y_valid),
              callbacks=callbacks)
    model.save(model_save_path)
Example No. 20
    def decoder(x, n_output_channels=80):
        x = conv_block(x, ShapeChange=UpSampling1D)
        x = conv_block(x, ShapeChange=UpSampling1D)
        x = conv_block(x, ShapeChange=UpSampling1D)

        # Project back to n_output_channels channels with a separable convolution
        x = SeparableConv1D(filters=n_output_channels, kernel_size=16, padding='same')(x)

        return x
Example No. 21
def big_XCEPTION(input_shape, num_classes):
    img_input = Input(input_shape)
    x = Conv1D(32, (1), strides=(2), use_bias=False)(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv1D(64, (1), use_bias=False)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv1D(128, (1), strides=(2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv1D(128, (1), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv1_act')(x)
    x = SeparableConv1D(128, (1), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling1D((3), strides=(2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv1D(256, (1), strides=(2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv1D(256, (1), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = Dropout(0.5)(x)
    x = SeparableConv1D(256, (1), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling1D((3), strides=(2), padding='same')(x)
    x = layers.add([x, residual])
    x = Conv1D(
        num_classes,
        (1),
        # kernel_regularizer=regularization,
        padding='same')(x)
    x = GlobalAveragePooling1D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model
Example No. 22
    def _pool(self, embed, insz, **kwargs):
        """Override the base method from the parent to provide a text pooling facility.
        Here that is a stack of depthwise separable convolutions with interleaved
        max pooling, followed by a global average-over-time pooling.

        :param embed: Word embeddings tensor
        :param insz: Word embeddings dim size
        :param kwargs: filtsz, cmotsz (required); layers, dropout, poolsz (optional)
        :return: the pooled, dropout-regularized feature tensor
        """
        filtsz = kwargs['filtsz']
        blocks = kwargs.get('layers', 2)
        pdrop = kwargs.get('dropout', 0.5)
        cmotsz = kwargs['cmotsz']
        poolsz = kwargs.get('poolsz', 2)

        input_ = embed
        for _ in range(blocks - 1):
            drop1 = Dropout(rate=pdrop)(input_)
            sep1 = SeparableConv1D(filters=cmotsz,
                                   kernel_size=filtsz,
                                   activation='relu',
                                   bias_initializer='random_uniform',
                                   depthwise_initializer='random_uniform',
                                   padding='same')(drop1)
            sep2 = SeparableConv1D(filters=cmotsz,
                                   kernel_size=filtsz,
                                   activation='relu',
                                   bias_initializer='random_uniform',
                                   depthwise_initializer='random_uniform',
                                   padding='same')(sep1)
            input_ = MaxPooling1D(pool_size=poolsz)(sep2)

        sep3 = SeparableConv1D(filters=cmotsz * 2,
                               kernel_size=filtsz,
                               activation='relu',
                               bias_initializer='random_uniform',
                               depthwise_initializer='random_uniform',
                               padding='same')(input_)
        global_average_pooling = GlobalAveragePooling1D()(sep3)
        drop2 = Dropout(rate=pdrop)(global_average_pooling)

        return drop2
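A hedged example of the kwargs this method consumes (the key names come from the body above; the values are illustrative):

# Hypothetical call from a subclass, assuming `embed` is a (batch, time, dsz) tensor.
pooled = self._pool(embed, insz=300,
                    filtsz=3,      # kernel size of every separable conv
                    layers=2,      # one conv/conv/maxpool block per extra layer
                    dropout=0.5,
                    cmotsz=100,    # base filter count; the final block uses 2 * cmotsz
                    poolsz=2)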
Example No. 23
    def create_neural_network(self):

        """
        Draft network architecture for testing purposes.
        Input: 16x10 size

        Try with 3 hidden layers for now. Each layer has 120 units with a dropout
        rate of 0.25 and relu as the activation. Use Flatten to map to a
        one-dimensional output of size 19 (the first 10 entries map to the cards
        to pick, and the last 9 map to the board position to drop the card).

        There should be two output layers: one for the card and one for the move.
        """

        state_input = Input(shape=(fe.get_feature_dim(self.features), 2 * gm.START_HANDS))

        # Testing the Conv1D stem
        x = SeparableConv1D(self.params["filters"], 5, padding='same', data_format='channels_first', name="Conv_kernel_5")(state_input)
        x = BatchNormalization()(x)
        x = Activation(self.params["activation"])(x)
        x = MaxPooling1D(padding='same')(x)

        for i in range(self.params["conv_layers"]):
            x = SeparableConv1D(self.params["filters"], 3, padding='same', data_format='channels_first', name="Conv1D_{}".format(i + 1))(x)
            x = BatchNormalization()(x)
            x = Activation(self.params["activation"])(x)
            x = MaxPooling1D(padding='same')(x)

        x = Flatten()(x)

        for i in range(self.params["dense_layers"]):
            x = Dense(self.params["units"], activation=self.params["activation"], name="Dense_{}".format(i + 1))(x)
            if self.params["dropout"] > 0:
                x = Dropout(self.params["dropout"])(x)

        x1 = Bias()(x)
        x2 = Bias()(x)
        card_output = Dense(2 * gm.START_HANDS, activation=self.params["output_activation"], name='card_output')(x1)
        move_output = Dense(gm.BOARD_SIZE ** 2, activation=self.params["output_activation"], name='move_output')(x2)
        network = Model(inputs=state_input, outputs=[card_output, move_output])
        return network
Example No. 24
    def policy_head(self, in_layer):
        ph = SeparableConv1D(POLICY_HEAD_PARAMETERS["filters"],
                             POLICY_HEAD_PARAMETERS["strides"],
                             padding='same',
                             data_format='channels_first',
                             name="Policy_Head_Conv")(in_layer)
        ph = BatchNormalization()(ph)
        ph = Activation("relu")(ph)
        ph = Dense(POLICY_HEAD_PARAMETERS["plane_number"],
                   name="Policy_head_dense")(ph)
        return ph
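POLICY_HEAD_PARAMETERS is not defined in this excerpt; a hypothetical dict consistent with how its keys are used above:

# Hypothetical values; only the key names are taken from the code above.
POLICY_HEAD_PARAMETERS = {
    "filters": 2,        # SeparableConv1D filters
    "strides": 1,        # caution: passed positionally as kernel_size above
    "plane_number": 64,  # units of the final Dense layer
}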
Example No. 25
    def build(self, input, neighbour=None):
        input = super(ConvolutionInput, self).build(input)
        input = input.content if hasattr(input, "content") else input

        if input.shape.ndims == 4:

            if self._type == "normal":
                if (self._padding == "valid"
                        and (self._kernel[0] > input.shape.dims[1].value
                             or self._kernel[1] > input.shape.dims[2].value)):
                    self._padding = "same"
                return Conv2D(self._features,
                              self._kernel,
                              strides=self._stride,
                              padding=self._padding,
                              activation=self._activation,
                              name=Node.get_name(self))(input)
            elif self._type == "separable":
                return SeparableConv2D(self._features,
                                       self._kernel,
                                       strides=self._stride,
                                       padding=self._padding,
                                       activation=self._activation,
                                       name=Node.get_name(self))(input)
            elif self._type == "depthwise":
                return DepthwiseConv2D(self._kernel,
                                       strides=self._stride,
                                       padding=self._padding,
                                       activation=self._activation,
                                       name=Node.get_name(self))(input)

        elif input.shape.ndims == 3:
            if self._type == "normal":
                if (self._padding == "valid"
                        and (self._kernel[0] > input.shape.dims[1].value
                             or self._kernel[1] > input.shape.dims[2].value)):
                    self._padding = "same"
                return Conv1D(self._features,
                              self._kernel[0],
                              strides=self._stride[0],
                              padding=self._padding,
                              activation=self._activation,
                              name=Node.get_name(self))(input)
            elif self._type == "separable":
                return SeparableConv1D(self._features,
                                       self._kernel[0],
                                       strides=self._stride[0],
                                       padding=self._padding,
                                       activation=self._activation,
                                       name=Node.get_name(self))(input)

        return input
Example No. 26
    def value_head(self, in_layer):
        vh = SeparableConv1D(VALUE_HEAD_PARAMETERS["filters"],
                             VALUE_HEAD_PARAMETERS["strides"],
                             padding='same',
                             data_format='channels_first',
                             name="Value_Head_Conv")(in_layer)
        vh = BatchNormalization()(vh)
        vh = Activation("relu")(vh)
        vh = Dense(VALUE_HEAD_PARAMETERS["hidden_size"])(vh)
        vh = Activation("relu")(vh)
        vh = Dense(1)(vh)
        vh = Activation("tanh")(vh)
        return vh
Example No. 27
def crnn(embed_input, class_num):
    ca1 = SeparableConv1D(filters=64,
                          kernel_size=1,
                          padding='same',
                          activation='relu')
    ca2 = SeparableConv1D(filters=64,
                          kernel_size=2,
                          padding='same',
                          activation='relu')
    ca3 = SeparableConv1D(filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
    ra = LSTM(200, activation='tanh')
    da = Dense(class_num, activation='softmax')
    x1 = ca1(embed_input)
    x2 = ca2(embed_input)
    x3 = ca3(embed_input)
    x = Concatenate()([x1, x2, x3])
    x = ra(x)
    x = Dropout(0.2)(x)
    return da(x)
Example No. 28
def separableconv_block(x, filters, kernel_size, strides, se, ratio, act,
                        name):
    y = SeparableConv1D(filters=filters,
                        kernel_size=kernel_size,
                        padding='same',
                        strides=strides,
                        kernel_initializer='VarianceScaling',
                        name='{}_separableconv'.format(name))(x)
    if se:
        y = squeezeExcite(y, ratio, name='{}_se'.format(name))
    y = BatchNormalization(name='{}_bn'.format(name))(y)
    y = Activation(act, name='{}_act'.format(name))(y)
    return y
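squeezeExcite is not defined in this excerpt; below is a common 1D squeeze-and-excitation block matching the call signature, offered as an assumption rather than the author's version:

from keras import backend as K
from keras.layers import Dense, GlobalAveragePooling1D, Multiply, Reshape

def squeezeExcite(x, ratio, name):
    # Squeeze: per-channel global descriptor; excite: learned per-channel gate.
    filters = K.int_shape(x)[-1]
    se = GlobalAveragePooling1D(name='{}_squeeze'.format(name))(x)
    se = Dense(filters // ratio, activation='relu', name='{}_reduce'.format(name))(se)
    se = Dense(filters, activation='sigmoid', name='{}_gate'.format(name))(se)
    se = Reshape((1, filters), name='{}_reshape'.format(name))(se)
    return Multiply(name='{}_scale'.format(name))([x, se])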
Example No. 29
def sep1d_bn_relu(x,
                  filters,
                  kernel_size,
                  strides=1,
                  dilation_rate=1,
                  padding="same"):
    x = SeparableConv1D(filters,
                        kernel_size,
                        strides=strides,
                        dilation_rate=dilation_rate,
                        padding=padding)(x)
    x = BatchNormalization()(x)
    x = Swish()(x)
    return x
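Swish is not a built-in layer in older Keras; a minimal sketch of the activation layer sep1d_bn_relu assumes:

from keras import backend as K
from keras.layers import Layer

class Swish(Layer):
    # swish(x) = x * sigmoid(x); assumed to match the source's Swish layer.
    def call(self, inputs):
        return inputs * K.sigmoid(inputs)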
Example No. 30
    def __init__(self,
                 filters,
                 kernel_size,
                 num_blocks,
                 num_convs,
                 num_heads,
                 initializer=None,
                 regularizer=None,
                 dropout=.1):
        conv_layers = []
        attention_layers = []
        feedforward_layers = []
        for i in range(num_blocks):
            conv_layers.append([])
            for j in range(num_convs):
                conv_layers[i].append(
                    SeparableConv1D(filters,
                                    kernel_size,
                                    padding='same',
                                    depthwise_initializer=initializer,
                                    pointwise_initializer=initializer,
                                    depthwise_regularizer=regularizer,
                                    pointwise_regularizer=regularizer,
                                    activation='relu',
                                    bias_regularizer=regularizer,
                                    activity_regularizer=regularizer))
            attention_layers.append(
                MultiHeadAttention(filters, num_heads, initializer,
                                   regularizer, dropout))
            feedforward_layers.append([
                Conv1D(filters,
                       1,
                       activation='relu',
                       kernel_initializer=initializer,
                       kernel_regularizer=regularizer,
                       bias_regularizer=regularizer),
                Conv1D(filters,
                       1,
                       activation='linear',
                       kernel_initializer=initializer,
                       kernel_regularizer=regularizer,
                       bias_regularizer=regularizer)
            ])

        self.conv_layers = conv_layers
        self.attention_layers = attention_layers
        self.feedforward_layers = feedforward_layers
        self.num_blocks = num_blocks
        self.num_convs = num_convs
        self.dropout = dropout