def build_model(self):
    # Alice's network
    # FC layer -> Conv layer (4 1-D convolutions)
    input_layer = Input(shape=(no_input_neurons, ))
    l1 = Dense(no_input_neurons, activation='sigmoid', kernel_initializer='glorot_normal')(input_layer)
    l2 = Reshape((no_input_neurons, 1))(l1)
    l3 = ZeroPadding1D(padding=2)(l2)
    # Xavier Glorot initialization of weights
    l4 = Conv1D(2, kernel_size=4, strides=1, activation='sigmoid', kernel_initializer='glorot_normal')(l3)
    l5 = Conv1D(4, kernel_size=2, strides=2, activation='sigmoid', kernel_initializer='glorot_normal')(l4)
    l6 = Conv1D(4, kernel_size=1, strides=1, activation='sigmoid', kernel_initializer='glorot_normal')(l5)
    l7 = Conv1D(1, kernel_size=1, strides=1, activation='tanh', kernel_initializer='glorot_normal')(l6)
    l8 = Flatten(name='cipher_output')(l7)

    # Bob's network
    # FC layer -> Conv layer (4 1-D convolutions)
    auxillary_input = Input(shape=(key_size, ), name='aux_input')
    new_input = concatenate([l8, auxillary_input], axis=1)
    x = Dense(no_input_neurons, activation='sigmoid', kernel_initializer='glorot_normal')(new_input)
    x1 = Reshape((no_input_neurons, 1))(x)
    x2 = ZeroPadding1D(padding=2)(x1)
    x3 = Conv1D(2, kernel_size=4, strides=1, activation='sigmoid', kernel_initializer='glorot_normal')(x2)
    x4 = Conv1D(4, kernel_size=2, strides=2, activation='sigmoid', kernel_initializer='glorot_normal')(x3)
    x5 = Conv1D(4, kernel_size=1, strides=1, activation='sigmoid', kernel_initializer='glorot_normal')(x4)
    x6 = Conv1D(1, kernel_size=1, strides=1, activation='tanh', kernel_initializer='glorot_normal')(x5)
    Bob_output = Flatten()(x6)
    Alice_Bob_model = Model(inputs=[input_layer, auxillary_input], outputs=Bob_output)
    intermediate_layer_model = Model(inputs=Alice_Bob_model.input,
                                     outputs=Alice_Bob_model.get_layer('cipher_output').output)
    return (Alice_Bob_model, intermediate_layer_model)
def regression_model(input_dim,
                     emb_mat,
                     seq_len,
                     conv_layers=1,
                     conv_filters=32,
                     filter_size=3,
                     lstm_dim=32,
                     fc_layers=1,
                     fc_units=64,
                     dropout=0.0,
                     metrics=[]):
    """
    Compiles a model that learns representations from convolutional and
    recurrent layers. These representations are combined with an auxiliary
    input that provides information about the tweet context.
    """
    seqs = Input(shape=(seq_len, ), dtype='int32', name='text_input')
    emb = Embedding(emb_mat.shape[0],
                    emb_mat.shape[1],
                    weights=[emb_mat],
                    input_length=seq_len,
                    trainable=True,
                    name='word_embedding')(seqs)
    lstm = LSTM(lstm_dim, name='lstm_1')(emb)
    x = ZeroPadding1D(name='pad_1')(emb)
    x = Conv1D(conv_filters, filter_size, activation='relu', name='conv_1')(x)
    x = MaxPooling1D(name='pool_1')(x)
    for i in range(2, conv_layers + 1):
        pad_name = 'pad_' + str(i)
        conv_name = 'conv_' + str(i)
        pool_name = 'pool_' + str(i)
        x = ZeroPadding1D(name=pad_name)(x)
        x = Conv1D(conv_filters,
                   filter_size,
                   activation='relu',
                   name=conv_name)(x)
        x = MaxPooling1D(name=pool_name)(x)
    flatten = Flatten(name='flatten')(x)
    aux_input = Input(shape=(input_dim, ), name='aux_input')
    norm_inputs = BatchNormalization(name='bn_aux')(aux_input)
    x = concatenate([flatten, lstm, norm_inputs], name='comb_input')
    x = Dropout(dropout, name='dropout_1')(x)
    for i in range(1, fc_layers + 1):
        fc_name = 'fc_' + str(i)
        bn_name = 'bn_' + str(i)
        x = Dense(fc_units, activation='relu', name=fc_name)(x)
        x = BatchNormalization(name=bn_name)(x)
    output = Dense(1, activation='relu', name='output')(x)
    model = Model(inputs=[seqs, aux_input], outputs=[output])
    model.compile(optimizer='Adam', loss='mean_squared_error', metrics=metrics)
    return model
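# A minimal usage sketch (not from the original source; all sizes below are
# made-up placeholders): build the two-input model and inspect it.
import numpy as np

dummy_emb = np.random.rand(5000, 100)  # assumed 5000-word vocab, 100-dim vectors
reg = regression_model(input_dim=10, emb_mat=dummy_emb, seq_len=50)
reg.summary()
# reg.fit([text_ids, aux_features], targets) with text_ids shaped (n, 50)
# and aux_features shaped (n, 10).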
Example #3
    def BuildCritic(self):

        print(self.outputShape)

        kernel_size = 8

        model = Sequential()

        model.add(Conv1D(32, kernel_size=kernel_size, strides=2,
                         input_shape=self.outputShape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv1D(64, kernel_size=kernel_size, strides=2, padding="same"))
        model.add(ZeroPadding1D(padding=(0, 1)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv1D(128, kernel_size=kernel_size, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv1D(256, kernel_size=kernel_size, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        inputs = Input(shape=self.outputShape)
        validity = model(inputs)

        return Model(inputs, validity)
Example #4
def ufcnn_model_seq(sequence_length=5000,
                    features=1,
                    nb_filter=150,
                    filter_length=5,
                    output_dim=1,
                    optimizer='adagrad',
                    loss='mse',
                    regression=True,
                    class_mode=None,
                    init="lecun_uniform"):

    model = Sequential()
    model.add(ZeroPadding1D(2, input_shape=(None, features)))
    #########################################################
    model.add(
        Convolution1D(nb_filter=nb_filter,
                      filter_length=filter_length,
                      border_mode='valid',
                      init=init))
    model.add(Activation('relu'))
    model.add(
        Convolution1D(nb_filter=output_dim,
                      filter_length=sequence_length,
                      border_mode='same',
                      init=init))
    model.add(Activation('sigmoid'))

    model.compile(optimizer=optimizer, loss=loss)

    return model
Example #5
def alexnet(images_train, labels_train, images_test, labels_test):
    # AlexNet-style 1-D convolutional network in Keras

    model = Sequential()
    model.add(ZeroPadding1D((1), input_shape=(1, 3722)))
    model.add(Convolution1D(64, 3, activation='relu'))
    model.add(MaxPooling1D((2)))

    model.add(ZeroPadding1D((1)))
    model.add(Convolution1D(128, 3, activation='relu'))
    model.add(MaxPooling1D((2)))

    model.add(ZeroPadding1D((1)))
    model.add(Convolution1D(192, 3, activation='relu'))
    model.add(MaxPooling1D((2)))

    model.add(ZeroPadding1D((1)))
    model.add(Convolution1D(256, 3, activation='relu'))
    model.add(MaxPooling1D((2)))

    model.add(ZeroPadding1D((1)))
    model.add(Convolution1D(512, 3, activation='relu'))
    model.add(MaxPooling1D((2)))

    model.add(Flatten())  # flatten conv features before the dense layers
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(2, activation='softmax'))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    model.fit(images_train,
              labels_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=2,
              validation_data=(images_test, labels_test))

    score = model.evaluate(images_test,
                           labels_test,
                           show_accuracy=True,
                           verbose=0)

    return score, model
Example #6
    def model(self):
        x_input = Input(self.get_input_shape())

        # Zero-Padding
        x = ZeroPadding1D(3)(x_input)

        # Stage 1
        x = Conv1D(4,
                   7,
                   strides=1,
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        # x = MaxPooling1D(3, strides=2)(x)

        # Stage 2
        x = conv_block(x,
                       kernel_size=3,
                       filters=[4, 4, 16],
                       stage=2,
                       block='a',
                       s=1)
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='b')
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='c')

        # Stage 3
        x = conv_block(x,
                       kernel_size=3,
                       filters=[8, 8, 32],
                       stage=3,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='b')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='c')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='d')

        # Stage 4
        x = conv_block(x,
                       kernel_size=3,
                       filters=[16, 16, 64],
                       stage=4,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='b')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='c')

        # AVGPOOL
        x = AveragePooling1D(2, name="avg_pool")(x)

        # Output layer
        x = Flatten()(x)
        mu, sigma = GaussianLayer(len(self.targetname), name='main_output')(x)

        # Create model
        model = Model(inputs=x_input, outputs=mu, name='ResNetDeepEnsemble')

        return model, sigma
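# The conv_block and identity_block helpers used above are not shown in this
# listing. A minimal sketch of identity_block, assuming the standard ResNet
# bottleneck pattern adapted to 1-D convolutions (all names are hypothetical):
from keras.layers import Conv1D, BatchNormalization, Activation, Add
from keras.initializers import glorot_uniform

def identity_block(x, kernel_size, filters, stage, block):
    f1, f2, f3 = filters
    base = 'res' + str(stage) + block
    shortcut = x
    x = Conv1D(f1, 1, kernel_initializer=glorot_uniform(seed=0),
               name=base + '_a')(x)
    x = BatchNormalization(name='bn_' + base + '_a')(x)
    x = Activation('relu')(x)
    x = Conv1D(f2, kernel_size, padding='same',
               kernel_initializer=glorot_uniform(seed=0), name=base + '_b')(x)
    x = BatchNormalization(name='bn_' + base + '_b')(x)
    x = Activation('relu')(x)
    x = Conv1D(f3, 1, kernel_initializer=glorot_uniform(seed=0),
               name=base + '_c')(x)
    x = BatchNormalization(name='bn_' + base + '_c')(x)
    x = Add()([x, shortcut])  # residual connection; shapes match by design
    x = Activation('relu')(x)
    return x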
    def gen_model4(self):
        # keypartition, keylabels, keylistIDs = self.gen_referenceKeyDicts()
        # keytraining_generator = self.generate(keylabels, keypartition['train'])
        # keyvalidation_generator = self.generate(keylabels, keypartition['validation'])
        x_train, y_train, x_test, y_test = self._gen_testing_data()

        model = Sequential()
        model.add(ZeroPadding1D((1), input_shape=(100, 7)))
        model.add(Convolution1D(64, 3, activation='relu'))
        model.add(MaxPooling1D((2)))

        model.add(ZeroPadding1D((1)))
        model.add(Convolution1D(128, 3, activation='relu'))
        model.add(MaxPooling1D((2)))

        model.add(ZeroPadding1D((1)))
        model.add(Convolution1D(192, 3, activation='relu'))
        model.add(MaxPooling1D((2)))

        model.add(ZeroPadding1D((1)))
        model.add(Convolution1D(256, 3, activation='relu'))
        model.add(MaxPooling1D((2)))

        model.add(ZeroPadding1D((1)))
        model.add(Convolution1D(512, 3, activation='relu'))
        model.add(MaxPooling1D((2)))

        model.add(Flatten())  # flatten conv features before the dense layers
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(2, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)

        model.fit(x_train, y_train, batch_size=100, nb_epoch=10)
        model.evaluate(x_test, y_test)
        # model.fit_generator(generator=keytraining_generator,
        #                     steps_per_epoch=len(keypartition['train']) // 64,
        #                     epochs=32,
        #                     validation_data=keyvalidation_generator,
        #                     validation_steps=len(keypartition['validation']) // 64)

        # model.evaluate_generator(generator=keytraining_generator,
Example #8
def cit_nocit_rnn_try(max_sentence_len, max_words):
    inp = Input(shape=(max_sentence_len, ))
    emb = Embedding(max_words, 128, input_length=max_sentence_len)(inp)

    branch1 = Conv1D(128,
                     3,
                     padding='valid',
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.01))(emb)
    branch2 = Conv1D(128,
                     4,
                     padding='valid',
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.01))(emb)
    branch3 = Conv1D(128,
                     5,
                     padding='valid',
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.01))(emb)
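    # Kernel sizes 3, 4 and 5 shrink the time axis to L-2, L-3 and L-4; the
    # asymmetric right-padding below restores all three branches to L-2 so
    # they can be concatenated along the feature axis.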

    comb = Concatenate()([
        branch1,
        ZeroPadding1D((0, 1))(branch2),
        ZeroPadding1D((0, 2))(branch3)
    ])

    pooled = MaxPooling1D(pool_size=2)(comb)
    rnn = GRU(128, dropout=0.2, recurrent_dropout=0.2)(pooled)

    dense = Dense(2, activation='sigmoid')(rnn)

    model = Model(inputs=inp, outputs=dense)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
Example #9
    def Resnet(self, data_base):
        inp = Input(shape=(self.input_dim, 1))
        if data_base == 'mimic':
            n_zeropadding = 0
            out = Conv1D(16,
                         self.filter_size_1,
                         strides=self.strides_1,
                         activation='relu')(inp)

        elif data_base == 'eicu':
            n_zeropadding = 1
            out = ZeroPadding1D(padding=n_zeropadding)(inp)
            out = Conv1D(16, self.filter_size_1, strides=self.strides_1)(out)

        out = BatchNormalization()(out)
        out = Activation('relu')(out)
        # print(out.shape)
        out = Reshape((int(
            (self.input_dim +
             (2 * n_zeropadding) - self.filter_size_1) / self.strides_1) + 1,
                       16, 1))(out)

        for i in range(int(self.nb_layer / 2)):
            if (i + 1) % 2 == 1:
                if i != 0:
                    self.nb_filter = self.nb_filter * 2
                out = self.conv_block(out, [self.nb_filter, self.nb_filter])

            else:
                out = self.identify_block(out,
                                          [self.nb_filter, self.nb_filter])

        #out = AveragePooling2D((3, 3))(out)

        out = Flatten()(out)
        out = Dense(64, activation='relu', name='Dense_1')(out)
        out = Dropout(0.5)(out)
        out = Dense(16)(out)
        out = Activation('relu')(out)
        out = Dense(1, activation='sigmoid', name='Dense_2')(out)

        model = Model(inp, out)
        model.summary()
        # plot_model(model, to_file='model.png', show_shapes=True)

        return model
def classification_model(input_dim,
                         output_dim,
                         emb_mat,
                         seq_len,
                         conv_layers=1,
                         filters=32,
                         filter_size=3,
                         dropout=0.5,
                         fc_layers=1,
                         fc_units=64,
                         metrics=['acc']):
    """
    Compiles a classification model which processes textual inputs with
    convolutional layers and then merges them with (batch-normalized)
    auxiliary inputs.
    """
    seqs = Input(shape=(seq_len, ), dtype='int32', name='text_input')
    x = Embedding(emb_mat.shape[0],
                  emb_mat.shape[1],
                  weights=[emb_mat],
                  input_length=seq_len,
                  trainable=True,
                  name='word_embedding')(seqs)
    for i in range(1, conv_layers + 1):
        pad_name = 'pad_' + str(i)
        conv_name = 'conv_' + str(i)
        pool_name = 'pool_' + str(i)
        x = ZeroPadding1D(name=pad_name)(x)
        x = Conv1D(filters, filter_size, activation='relu', name=conv_name)(x)
        x = MaxPooling1D(name=pool_name)(x)
    x = Dropout(dropout, name='dropout')(x)
    flatten = Flatten(name='flatten')(x)
    aux_input = Input(shape=(input_dim, ), name='aux_input')
    norm_inputs = BatchNormalization(name='bn_aux')(aux_input)
    x = concatenate([flatten, norm_inputs], name='comb_input')
    for i in range(1, fc_layers + 1):
        fc_name = 'fc_' + str(i)
        bn_name = 'bn_' + str(i)
        x = Dense(fc_units, activation='relu', name=fc_name)(x)
        x = BatchNormalization(name=bn_name)(x)
    output = Dense(output_dim, activation='softmax', name='output')(x)
    model = Model(inputs=[seqs, aux_input], outputs=[output])
    model.compile(optimizer='Adam',
                  loss='categorical_crossentropy',
                  metrics=metrics)
    return model
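# A minimal usage sketch (placeholders, not from the source): the same
# two-input pattern as above, with a softmax over output_dim classes.
import numpy as np

dummy_emb = np.random.rand(5000, 100)  # assumed vocabulary and embedding size
clf = classification_model(input_dim=10, output_dim=3,
                           emb_mat=dummy_emb, seq_len=50)
clf.summary()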
Example #11
    def VGG(self, data_base):
        inp = Input(shape=(self.input_dim, 1))
        if data_base == 'mimic':
            n_zeropadding = 0
            out = Conv1D(16, self.filter_size_1, strides=self.strides_1)(inp)

        elif data_base == 'eicu':
            n_zeropadding = 1
            out = ZeroPadding1D(padding=n_zeropadding)(inp)
            out = Conv1D(16, self.filter_size_1, strides=self.strides_1)(out)

        # out = BatchNormalization()(out)
        out = Activation('relu')(out)
        out = Reshape((int(
            (self.input_dim +
             (2 * n_zeropadding) - self.filter_size_1) / self.strides_1) + 1,
                       16, 1))(out)

        for i in range(self.nb_layer):
            if (i + 1) % 4 == 1:
                if i != 0:
                    self.nb_filter = self.nb_filter * 2
                out = Convolution2D(self.nb_filter, (3, 3),
                                    padding='same')(out)
                out = Activation('relu')(out)
            else:
                out = Convolution2D(self.nb_filter, (3, 3),
                                    padding='same')(out)
                out = Activation('relu')(out)

        out = Flatten()(out)
        out = Dense(64)(out)
        out = Activation('relu')(out)
        out = Dropout(0.5)(out)
        out = Dense(16)(out)
        out = Activation('relu')(out)

        out = Dense(1, activation='sigmoid')(out)

        model = Model(inp, out)
        model.summary()
        return model
Example #12
    def add_embedding(self,
                      model,
                      vocabulary_size,
                      embedding_dim,
                      maxlen,
                      use_my_embedding,
                      embedding_weights=None):

        if embedding_weights is not None:
            if use_my_embedding:
                model.add(
                    MyEmbedding(vocabulary_size,
                                embedding_dim,
                                input_length=maxlen,
                                weights=[embedding_weights]))
            else:
                model.add(
                    Embedding(vocabulary_size,
                              embedding_dim,
                              input_length=maxlen,
                              weights=[embedding_weights]))
        else:
            if use_my_embedding:
                model.add(
                    MyEmbedding(vocabulary_size,
                                embedding_dim,
                                input_length=maxlen))
            else:
                model.add(
                    Embedding(vocabulary_size,
                              embedding_dim,
                              input_length=maxlen))

        padding_len = self.calc_padding_len(maxlen)
        if padding_len > 0:
            model.add(ZeroPadding1D(padding_len))

        return maxlen + 2 * padding_len
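    # calc_padding_len is not shown in this listing. A plausible sketch (an
    # assumption, not the original implementation): pad maxlen up to the next
    # multiple of a pooling factor so downstream pooling divides the length
    # evenly; ZeroPadding1D adds the returned amount on both sides.
    # def calc_padding_len(self, maxlen, factor=4):
    #     deficit = (-maxlen) % factor
    #     return (deficit + 1) // 2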
def train_model():
    # Define the Conv1D network architecture
    input_layer = Input(shape=(time_steps, 1), dtype='float32')
    zeropadding_layer = ZeroPadding1D(padding=1)(input_layer)

    conv1D_layer1 = Conv1D(64, 3, strides=1, use_bias=True)(zeropadding_layer)
    avgpooling_layer = AveragePooling1D(pool_size=3, strides=1)(conv1D_layer1)

    flatten_layer = Flatten()(avgpooling_layer)
    dropout_layer = Dropout(.45)(flatten_layer)

    output_layer = Dense(1, activation='tanh')(dropout_layer)

    ts_model = Model(inputs=input_layer, outputs=output_layer)
    # Compile the model
    ts_model.compile(loss='mean_absolute_error', optimizer='adam')

    # ts_model.summary()  # print the model architecture

    # Checkpoint the model with the lowest validation loss to an HDF5 file
    save_best = ModelCheckpoint(
        '%s_conv1D_weights.{epoch:02d}-{val_loss:.4f}.h5' % stock_code,
        monitor='val_loss',
        verbose=2,
        save_best_only=True,
        save_weights_only=False,
        mode='min',
        period=1)
    # Train (fit) the model
    ts_model.fit(x=X_train,
                 y=y_train,
                 batch_size=16,
                 epochs=45,
                 verbose=2,
                 callbacks=[save_best],
                 validation_data=(X_val, y_val),
                 shuffle=True)
Example #14
def discriminator_model():

    ## check joint

    input_song = Input(shape=(generating_size, 156))

    joint = Reshape((generating_size, 156, 1))(input_song)
    joint = TimeDistributed(
        Convolution1D(filters=20, kernel_size=8, padding='valid', strides=2)
    )(joint)  # thirds (3 semitones = watching 4 notes) * ties (hence strides=2)
    #39
    joint = Activation(LeakyReLU(0.3))(joint)
    joint = TimeDistributed(
        Convolution1D(filters=40, kernel_size=3, padding='valid',
                      strides=1))(joint)  # major third = 4 semitones, perfect fourth = 5 semitones
    #38
    joint = Activation(LeakyReLU(0.3))(joint)
    joint = TimeDistributed(
        Convolution1D(filters=200, kernel_size=3, padding='valid',
                      strides=1))(joint)  # fifths = 7 semitones = watching 8 notes
    #17
    joint = Activation(LeakyReLU(0.3))(joint)
    joint = TimeDistributed(MaxPooling1D(2))(joint)  # looking for the dominant chord

    joint = TimeDistributed(
        Convolution1D(filters=300, kernel_size=3, padding='valid',
                      strides=1))(joint)
    #5
    print(joint.shape)
    joint = Activation(LeakyReLU(0.3))(joint)
    joint = TimeDistributed(MaxPooling1D(2))(joint)
    print(joint.shape)
    joint = TimeDistributed(
        Convolution1D(filters=400, kernel_size=3, padding='valid',
                      strides=2))(joint)
    #5
    print(joint.shape)
    joint = Activation(LeakyReLU(0.3))(joint)
    # (gen_size, 66, 20)
    cross_joint = Reshape((generating_size, 7 * 400))(joint)
    joint = TimeDistributed(Dense(50))(cross_joint)
    joint = Flatten()(joint)
    joint = Dropout(0.5)(joint)
    joint = Dense(1, kernel_regularizer=keras.regularizers.l2(0.1))(joint)
    joint = Activation(LeakyReLU(0.3))(joint)

    ## check rhythm

    rhythm = ZeroPadding1D(4)(
        input_song
    )  # 4 on both sides, so the locally connected kernel will be 9 (because LocallyConnected1D doesn't support 'same' padding yet)
    rhythm = Convolution1D(filters=20 * 20,
                           kernel_size=24,
                           strides=16,
                           padding='valid')(rhythm)
    rhythm = Activation(LeakyReLU(0.3))(rhythm)
    rhythm = Reshape((generating_size // 16, 20, 20))(rhythm)  # integer division so the shape is an int
    rhythm = TimeDistributed(
        keras.layers.local.LocallyConnected1D(filters=100,
                                              kernel_size=9,
                                              padding='valid'))(rhythm)
    rhythm = Activation(LeakyReLU(0.3))(rhythm)
    rhythm = TimeDistributed(Dense(50))(rhythm)
    rhythm = Flatten()(rhythm)
    rhythm = Dropout(0.5)(rhythm)
    rhythm = Dense(1, kernel_regularizer=keras.regularizers.l2(0.1))(rhythm)
    rhythm = Activation(LeakyReLU(0.3))(rhythm)

    ## check structure

    structure = Reshape((generating_size, 156, 1))(input_song)
    structure = TimeDistributed(
        Convolution1D(filters=16, kernel_size=8, padding='same',
                      strides=4))(structure)  # thirds * ties
    # 78
    structure = Activation(LeakyReLU(0.3))(structure)
    structure = TimeDistributed(
        Convolution1D(filters=32, kernel_size=2, padding='valid',
                      strides=2))(structure)  # fifths
    structure = TimeDistributed(MaxPooling1D(2))(structure)
    structure = Reshape((generating_size, 9 * 32))(structure)
    structure = Convolution1D(80, 2)(structure)
    structure = Activation(LeakyReLU(0.3))(structure)
    structure = Convolution1D(120, 2, dilation_rate=2)(structure)
    structure = Activation(LeakyReLU(0.3))(structure)
    structure = Convolution1D(160, 2, dilation_rate=4)(structure)
    structure = Activation(LeakyReLU(0.3))(structure)
    structure = Convolution1D(200, 2, dilation_rate=8)(structure)
    structure = Activation(LeakyReLU(0.3))(structure)
    structure = TimeDistributed(Dense(50))(structure)
    structure = Dropout(0.5)(structure)
    structure = Flatten()(structure)
    structure = Dense(1,
                      kernel_regularizer=keras.regularizers.l2(0.1))(structure)
    structure = Activation(LeakyReLU(0.3))(structure)

    ## check consistency

    differences = Reshape((generating_size, 156, 1))(input_song)
    differences = TimeDistributed(
        Convolution1D(filters=1, kernel_size=2, padding='same',
                      strides=2))(differences)  # thirds * ties
    # 78
    differences = Activation(LeakyReLU(0.3))(differences)
    differences = Reshape((generating_size, 78))(differences)
    differences = Convolution1D(150, 2)(differences)
    differences = SimpleRNN(200, return_sequences=True)(differences)
    differences = TimeDistributed(
        Dense(1, kernel_regularizer=keras.regularizers.l2(0.1)))(differences)
    differences = Activation(LeakyReLU(0.3))(differences)
    differences = Flatten()(differences)
    differences = Dropout(0.5)(differences)
    differences = Dense(
        1, kernel_regularizer=keras.regularizers.l2(0.1))(differences)
    differences = Activation(LeakyReLU(0.3))(differences)

    continuity = GRU(150, return_sequences=True)(cross_joint)
    continuity = Activation(LeakyReLU(0.3))(continuity)
    continuity = TimeDistributed(
        Dense(1, kernel_regularizer=keras.regularizers.l2(0.1)))(continuity)
    continuity = Flatten()(continuity)
    continuity = Dropout(0.5)(continuity)
    continuity = Dense(
        1, kernel_regularizer=keras.regularizers.l2(0.1))(continuity)
    continuity = Activation(LeakyReLU(0.3))(continuity)

    final = keras.layers.concatenate(
        [joint, rhythm, structure, continuity, differences])
    final = Dropout(0.35)(final)
    final = Dense(1)(final)
    #final = Activation('sigmoid')(final) # Do not use in Wasserstein GAN (also use mean_squared_error)

    model = Model(inputs=input_song, outputs=final)
    return model
Example #15
theano.config.floatX = "float32"
X_train = X_train.astype(theano.config.floatX)
Y_train = Y_train.astype(theano.config.floatX)
X_test = X_test.astype(theano.config.floatX)
Y_test = Y_test.astype(theano.config.floatX)

from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Dropout
from keras.layers.convolutional import Convolution1D, ZeroPadding1D, MaxPooling1D
from keras.constraints import maxnorm
from keras.optimizers import SGD

np.random.seed(1)

model = Sequential()
model.add(ZeroPadding1D(12, input_shape=(200, 4)))
model.add(Dropout(0.1))

model.add(Convolution1D(64, 3,
                        border_mode="same",
                        W_constraint=maxnorm(2),
                        input_shape=(224, 4),
                        activation="relu"))
model.add(MaxPooling1D(pool_length=2, stride=2, border_mode="same"))
model.add(Dropout(0.5))

model.add(Convolution1D(128, 3,
                        border_mode="same",
                        W_constraint=maxnorm(2),
                        activation="relu"))
model.add(MaxPooling1D(pool_length=2, stride=2, border_mode="same"))
class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))


# Alice's network
# FC layer -> Conv Layer (4 1-D convolutions)
input_layer = Input(shape=(no_input_neurons, ))
l1 = Dense(no_input_neurons,
           activation='sigmoid',
           kernel_initializer='glorot_normal')(input_layer)
l2 = Reshape((no_input_neurons, 1))(l1)
l3 = ZeroPadding1D(padding=2)(l2)
# Xavier Glotrot Initialization of weights
l4 = Conv1D(2,
            kernel_size=4,
            strides=1,
            activation='sigmoid',
            kernel_initializer='glorot_normal')(l3)
l5 = Conv1D(4,
            kernel_size=2,
            strides=2,
            activation='sigmoid',
            kernel_initializer='glorot_normal')(l4)
l6 = Conv1D(4,
            kernel_size=1,
            strides=1,
            activation='sigmoid',
            kernel_initializer='glorot_normal')(l5)
def custom_objective2(y_true, y_pred):
    return K.mean(tf.scalar_mul(1.0 / m, tf.reduce_sum(tf.abs(y_true - y_pred), 1)))

class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))

# For Eve
# Important: when training Eve, only Eve's own parameters should be modified,
# not Alice's, since the "optimal Eve" is argmin over theta_eve of Eve's loss;
# that is why l9 is applied to a separate ciphertext input rather than to l8.
input_eve = Input(shape=(ciphertext_size, ))
l9 = Dense(no_input_neurons, activation='sigmoid', kernel_initializer='glorot_normal')(input_eve)
l10 = Dense(no_input_neurons, activation='sigmoid', kernel_initializer='glorot_normal')(l9)
l11 = Reshape((no_input_neurons, 1))(l10)
l12 = ZeroPadding1D(padding=2)(l11)
l13 = Conv1D(2, kernel_size=4, strides=1, activation='sigmoid', kernel_initializer='glorot_normal')(l12)
l14 = Conv1D(4, kernel_size=2, strides=2, activation='sigmoid', kernel_initializer='glorot_normal')(l13)
l15 = Conv1D(4, kernel_size=1, strides=1, activation='sigmoid', kernel_initializer='glorot_normal')(l14)
l16 = Conv1D(1, kernel_size=1, strides=1, activation='tanh', kernel_initializer='glorot_normal')(l15)
Eve_output = Flatten()(l16)
Eve_model = Model(inputs=input_eve, outputs=Eve_output)
# Suppose train_data is a matrix of shape m x no_input_neurons where each training
# example has its first bits as the plaintext and the remaining bits as the key.
adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
#Alice_Bob_model.compile(optimizer = adam, loss = loss_function(l8) , metrics =['accuracy'])
#Alice_Bob_model.compile(optimizer = adam, loss = loss_function(intermediate_layer_model.predict(Alice_Bob_model.input)) , metrics =['accuracy'])
#Eve_model.compile(optimizer = adam, loss = 'mean_absolute_error',metrics = ['accuracy'])
Eve_model.compile(optimizer=adam, loss=custom_objective2, metrics=['accuracy'])
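# A hedged usage sketch (not in the source): generate ciphertexts with Alice's
# intermediate cipher model and fit Eve on them. train_data, train_keys and
# train_plaintexts are hypothetical arrays shaped as the comment above assumes.
# ciphertexts = intermediate_layer_model.predict([train_data, train_keys])
# Eve_model.fit(ciphertexts, train_plaintexts, batch_size=512, epochs=1)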


class Crypto_Network(object):
Example #18
import numpy as np
from keras import backend as K

K.set_image_dim_ordering('th')
import tensorflow as tf
from keras.models import Sequential, Model, load_model
from keras.layers import Conv1D, ZeroPadding1D, BatchNormalization, Activation, MaxPooling1D

#%%
weight_path = '~/p-Snet_weights.hdf5'
#%%

model = Sequential()

model.add(Conv1D(16, 64, strides=2, input_shape=(None, 1)))  #layer1
model.add(ZeroPadding1D(padding=16))
model.add(BatchNormalization())  #layer2
convout1 = Activation('relu')
model.add(convout1)  #layer3

model.add(MaxPooling1D(pool_size=8, padding='valid'))  #layer4
#
#
model.add(Conv1D(32, 32, strides=2))  #layer5
model.add(ZeroPadding1D(padding=8))
model.add(BatchNormalization())  #layer6
convout2 = Activation('relu')
model.add(convout2)  #layer7

model.add(MaxPooling1D(pool_size=8, padding='valid'))  #layer8
Example #19
    batch_size = 64

    word_input = Input(shape=(maxlen, ), dtype='float32', name='word_input')
    word_emb = Embedding(nb_word,
                         word_embedding_dim,
                         input_length=maxlen,
                         dropout=0.2,
                         name='word_emb')(word_input)
    bilstm = Bidirectional(
        LSTM(lstm_dim, dropout_W=0.1, dropout_U=0.1,
             return_sequences=True))(word_emb)
    bilstm_d = Dropout(0.1)(bilstm)

    half_window_size = 5
    print("===========1...%s" % time.time())
    paddinglayer = ZeroPadding1D(padding=half_window_size)(word_emb)
    conv = Conv1D(nb_filter=50,
                  filter_length=(2 * half_window_size + 1),
                  border_mode='valid')(paddinglayer)
    conv_d = Dropout(0.1)(conv)
    dense_conv = TimeDistributed(Dense(50))(conv_d)
    rnn_cnn_merge = merge([bilstm_d, dense_conv], mode='concat', concat_axis=2)
    print("===========2...%s" % time.time())
    dense = TimeDistributed(Dense(nb_tag))(rnn_cnn_merge)
    crf = ChainCRF()
    crf_output = crf(dense)

    model = Model(input=[word_input], output=[crf_output])

    model.compile(loss=crf.sparse_loss,
                  optimizer=RMSprop(0.001),
                  metrics=['sparse_categorical_accuracy'])
Example #20
def create_model8():
    board_input = Input(shape=[8, 8])
    action_input = Input(shape=[8, 8])
    color_input = Input(shape=[1])

    model_v = Sequential([
        InputLayer([8, 8]),
        Reshape([8, 8, 1]),
        Conv2D(64, 8, 1),
        ELU(),
        Conv2D(64, 1, 1),
        ELU(),
        Flatten()
    ])

    model_h = Sequential([
        InputLayer([8, 8]),
        Reshape([8, 8, 1]),
        Conv2D(64, 1, 8),
        ELU(),
        Conv2D(64, 1, 1),
        ELU(),
        Flatten()
    ])

    model_dr = Sequential([
        InputLayer([8, 8]),
        Reshape([8*8, 1]),
        ZeroPadding1D(4),
        Reshape([8, 9, 1]),
        LocallyConnected2D(64, 8, 1),
        ELU(),
        LocallyConnected2D(64, 1, 1),
        ELU(),
        Flatten()
    ])

    model_dl = Sequential([
        InputLayer([8, 8]),
        Reshape([8*8, 1]),
        ZeroPadding1D(3),  # 64 + 2*3 = 70 = 10*7; padding 2 gave 68 and broke the Reshape below
        Reshape([10, 7, 1]),
        LocallyConnected2D(64, 10, 1),
        ELU(),
        LocallyConnected2D(64, 1, 1),
        ELU(),
        Flatten()
    ])
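    # The ZeroPadding1D + Reshape pairs above re-grid the flattened 8x8 board:
    # stepping down-right on the board adds 9 to the flat index, so a width-9
    # grid (pad 4 -> 72 = 8*9) stacks the down-right diagonals into columns;
    # stepping down-left adds 7, so a width-7 grid (pad 3 -> 70 = 10*7) stacks
    # the down-left diagonals, letting the column-shaped LocallyConnected2D
    # kernels scan diagonal lines.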

    color_model = Sequential([
        InputLayer([1]),
        Dense(256),
        ELU(),
        Dense(1024),
        ELU()
    ])

    merge_layer = merge([
        model_v(board_input),
        model_h(board_input),
        model_dl(board_input),
        model_dr(board_input),
        color_model(color_input),
        model_v(action_input),
        model_h(action_input),
        model_dl(action_input),
        model_dr(action_input),
    ], mode="concat", concat_axis=-1) 

    x = Dense(2048)(merge_layer)
    x = BatchNormalization()(x)
    x = ELU()(x)
    x = Dense(512)(x)
    x = BatchNormalization()(x)
    x = ELU()(x)
    x = Dense(128)(x)
    x = BatchNormalization()(x)
    x = ELU()(x)
    output = Dense(1, activation="tanh")(x)

    model = Model(input=[board_input, color_input, action_input], output=[output])

    adam = Adam(lr=1e-4)
    model.compile(optimizer=adam, loss="mse")

    return model
Example #21
def create_model():
    board_input = Input(shape=[8, 8])
    action_input = Input(shape=[8, 8])
    color_input = Input(shape=[1])

    model_c = Sequential(
        [
            InputLayer([8, 8]),
            Reshape([8, 8, 1]),
            Conv2D(128, 8, 1, name="model_c_conv1"),  # 1x8
            ELU(),
            Conv2D(128, 1, 1, name="model_c_conv2"),  # 1x8
            ELU(),
            Flatten()
        ],
        name="model_c")

    model_r = Sequential(
        [
            InputLayer([8, 8]),
            Reshape([8, 8, 1]),
            Conv2D(128, 1, 8, name="model_r_conv1"),  # 8x1
            ELU(),
            Conv2D(128, 1, 1, name="model_r_conv2"),  # 8x1
            ELU(),
            Flatten()
        ],
        name="model_r")

    model_dr = Sequential(
        [
            InputLayer([8, 8]),
            Reshape([8 * 8, 1]),
            ZeroPadding1D(4),
            Reshape([8, 9, 1]),
            LocallyConnected2D(96, 8, 1, name="model_dr_lc1"),  # 1x9
            ELU(),
            LocallyConnected2D(64, 1, 1, name="model_dr_lc2"),  # 1x9
            ELU(),
            Flatten()
        ],
        name="model_dr")

    model_dl = Sequential(
        [
            InputLayer([8, 8]),
            Reshape([8 * 8, 1]),
            ZeroPadding1D(3),
            Reshape([10, 7, 1]),
            LocallyConnected2D(96, 10, 1, name="model_dl_lc1"),  # 1x7
            ELU(),
            LocallyConnected2D(64, 1, 1, name="model_dl_lc2"),  # 1x7
            ELU(),
            Flatten()
        ],
        name="model_dl")

    merge_layer = Concatenate()([
        model_c(board_input),
        model_r(board_input),
        model_dl(board_input),
        model_dr(board_input),
        color_input,
        model_c(action_input),
        model_r(action_input),
        model_dl(action_input),
        model_dr(action_input),
    ])

    x = Dense(2048, name="fc_1")(merge_layer)
    x = ELU()(x)
    x = Dense(512, name="fc_2")(x)
    x = ELU()(x)
    output = Dense(1, activation="tanh", name="fc_3")(x)

    model = Model(input=[board_input, color_input, action_input],
                  output=[output])

    adam = Adam(lr=1e-5)
    model.compile(optimizer=adam, loss="mse")

    print(model.summary())

    return model
Example #22
    def model(self):
        """
        Implementation of the popular ResNet with the following architecture:
        CONV1D -> BATCHNORM -> RELU -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
        -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
        modified version retrieved from two sources:
            https://github.com/priya-dwivedi/Deep-Learning/blob/master/resnet_keras/
            https://github.com/viig99/mkscancer/blob/master/medcan_evaluate_next.py

        Returns:
        model -- a Model() instance in Keras
        """

        x_input = Input(self.get_input_shape())

        # Zero-Padding
        x = ZeroPadding1D(3)(x_input)

        # Stage 1
        x = Conv1D(4,
                   7,
                   strides=1,
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        # x = MaxPooling1D(3, strides=2)(x)

        # Stage 2
        x = conv_block(x,
                       kernel_size=3,
                       filters=[4, 4, 16],
                       stage=2,
                       block='a',
                       s=1)
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='b')
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='c')

        # Stage 3
        x = conv_block(x,
                       kernel_size=3,
                       filters=[8, 8, 32],
                       stage=3,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='b')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='c')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='d')

        # Stage 4
        x = conv_block(x,
                       kernel_size=3,
                       filters=[16, 16, 64],
                       stage=4,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='b')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='c')

        # AVGPOOL
        x = AveragePooling1D(2, name="avg_pool")(x)

        # Output layer
        x = Flatten()(x)
        x = Dense(len(self.targetname),
                  activation=self.last_layer_activation,
                  name='fc' + str(len(self.targetname)),
                  kernel_initializer=glorot_uniform(seed=0))(x)

        # Create model
        model = Model(inputs=x_input, outputs=x, name='ResNet')

        return model
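# A matching sketch of the conv_block helper (also not shown in this listing),
# assuming the standard ResNet projection-shortcut variant with stride s
# (names are hypothetical):
from keras.layers import Conv1D, BatchNormalization, Activation, Add
from keras.initializers import glorot_uniform

def conv_block(x, kernel_size, filters, stage, block, s=2):
    f1, f2, f3 = filters
    base = 'res' + str(stage) + block
    shortcut = x
    x = Conv1D(f1, 1, strides=s, kernel_initializer=glorot_uniform(seed=0),
               name=base + '_a')(x)
    x = BatchNormalization(name='bn_' + base + '_a')(x)
    x = Activation('relu')(x)
    x = Conv1D(f2, kernel_size, padding='same',
               kernel_initializer=glorot_uniform(seed=0), name=base + '_b')(x)
    x = BatchNormalization(name='bn_' + base + '_b')(x)
    x = Activation('relu')(x)
    x = Conv1D(f3, 1, kernel_initializer=glorot_uniform(seed=0),
               name=base + '_c')(x)
    x = BatchNormalization(name='bn_' + base + '_c')(x)
    # projection shortcut matches the main path's stride and channel count
    shortcut = Conv1D(f3, 1, strides=s,
                      kernel_initializer=glorot_uniform(seed=0),
                      name=base + '_sc')(shortcut)
    shortcut = BatchNormalization(name='bn_' + base + '_sc')(shortcut)
    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x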
Example #23
def main():
    data_dir = '../data/taobao/'
#    data_dir = '../data/amazon/'
    item_path = data_dir + 'dim_items.txt'
    sub_item_path = data_dir + 'match_item.txt'
    train_pair_path = data_dir + 'train_set_1to1.txt'
    if data_dir == '../data/amazon/':
        item_path = data_dir + 'dim_items_all.txt'
        sub_item_path = data_dir + 'match_item_all.txt'
        train_pair_path = data_dir + 'train_set_1to1_all.txt'
    pg = PairGenerator(item_path, sub_item_path, train_pair_path)

    graph = Graph()
    graph.add_input(name='left_in', input_shape=(pg.max_len,), dtype='int')
    graph.add_input(name='right_in', input_shape=(pg.max_len,), dtype='int')

    activation = 'relu'
    filter1_length = 3
    pool1_length = 4
    filter2_length = 2
    pool2_length = 2
    nb_filter = 100
    nb_epoch = 5
    optimizer = 'adagrad'

    graph.add_shared_node(
        Embedding(input_dim=len(pg.alphabet), 
            output_dim=config.w2vSize, 
            input_length= pg.max_len, 
            mask_zero=True,
        weights=[pg.w2v_weight]),
        name='embedding',
        inputs=['left_in', 'right_in'])
    graph.add_shared_node(
        ZeroPadding1D(
            padding=filter1_length-1
        ),
        name='padding1',
        inputs=['embedding'])
    graph.add_shared_node(
        Convolution1D(nb_filter=nb_filter, 
            filter_length=filter1_length,
            border_mode='valid', activation=activation, 
            subsample_length=1
        ), 
        name='conv1', 
        inputs=['padding1'])
    graph.add_shared_node(
        MaxPooling1D(pool_length=pool1_length
        ),
        name='max1', 
        inputs=['conv1'])
    graph.add_shared_node(
        ZeroPadding1D(
            padding=filter2_length-1
        ),
        name='padding2',
        inputs=['max1'])
    graph.add_shared_node(
        Convolution1D(nb_filter=nb_filter, 
            filter_length=filter2_length, 
            border_mode='valid', activation=activation, 
            subsample_length=1
        ), 
        name='conv2',
        inputs=['padding2'])
    graph.add_shared_node(
        MaxPooling1D(pool_length=pool2_length
        ),
        name='max2', merge_mode=None,
        inputs=['conv2'])
    graph.add_shared_node(
        Dropout(0.2),
        name='dropout', merge_mode=None,
        inputs=['max2'])
    graph.add_shared_node(
        Flatten(), 
        name='flatten', merge_mode=None,
        inputs=['dropout'])
    graph.add_shared_node(
        Dense(output_dim=config.feature_dim,
            activation=activation
        ), 
        name='dense1', merge_mode=None,
        inputs=['flatten'], 
        outputs=['dense1_left', 'dense1_right'])
    
    #graph.add_shared_node(
    #    Dense(output_dim=len(pg.cat2idx),
    #        activation='softmax'),
    #    name='dense2', 
    #    inputs=['dense1'], 
    #    outputs=['dense2_output1', 'dense2_output2'])
    graph.add_node(
        Dense(output_dim=config.feature_dim,
            b_constraint=maxnorm(m=0),
            activation='linear'
        ), 
        name='dense3',  
        input='dense1_left')
    graph.add_node(
        Dense(output_dim=1,
            activation='sigmoid'
        ), 
        name='dense4',  
        inputs=['dense3', 'dense1_right'],
        merge_mode='dot')
    #graph.add_output(name='left_out', input='dense2_output1')
    #graph.add_output(name='right_out', input='dense2_output2')
    graph.add_output(name='pair_out', input='dense4')
    graph.compile(optimizer=optimizer, 
        loss={#'left_out': 'categorical_crossentropy',
            #'right_out': 'categorical_crossentropy',
            'pair_out': 'binary_crossentropy'},
        #loss_weight={'left_out': 0.0,
        #    'right_out': 0.0,
        #    'pair_out': 1}
        )
    print(graph.summary())
    print('Preparing data...', time.ctime())
    (train_data, test_data) = pg.fetch_all(0.1)
    print('Start training...', time.ctime())
    callbacks=[ValEvaluate(test_data, 'result.txt'), 
            EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')]
    graph.fit(data=train_data,
        validation_split=0.1111111111, 
        callbacks=callbacks, 
        batch_size=256, nb_epoch=nb_epoch,verbose=1)
Example #24
def ufcnn_model_concat(sequence_length=5000,
                       features=1,
                       nb_filter=150,
                       filter_length=5,
                       output_dim=1,
                       optimizer='adagrad',
                       loss='mse',
                       regression=True,
                       class_mode=None,
                       activation="softplus",
                       init="lecun_uniform"):
    model = Graph()

    model.add_input(name='input', input_shape=(None, features))
    #########################################################
    model.add_node(ZeroPadding1D(2), name='input_padding',
                   input='input')  # to avoid lookahead bias
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='valid',
                                 init=init,
                                 input_shape=(sequence_length, features)),
                   name='conv1',
                   input='input_padding')
    model.add_node(Activation(activation), name='relu1', input='conv1')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv2',
                   input='relu1')
    model.add_node(Activation(activation), name='relu2', input='conv2')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv3',
                   input='relu2')
    model.add_node(Activation(activation), name='relu3', input='conv3')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv4',
                   input='relu3')
    model.add_node(Activation(activation), name='relu4', input='conv4')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv5',
                   input='relu4')
    model.add_node(Activation(activation), name='relu5', input='conv5')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv6',
                   inputs=['relu3', 'relu5'],
                   merge_mode='concat',
                   concat_axis=-1)
    model.add_node(Activation(activation), name='relu6', input='conv6')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv7',
                   inputs=['relu2', 'relu6'],
                   merge_mode='concat',
                   concat_axis=-1)
    model.add_node(Activation(activation), name='relu7', input='conv7')
    #########################################################
    model.add_node(Convolution1D(nb_filter=nb_filter,
                                 filter_length=filter_length,
                                 border_mode='same',
                                 init=init),
                   name='conv8',
                   inputs=['relu1', 'relu7'],
                   merge_mode='concat',
                   concat_axis=-1)
    model.add_node(Activation(activation), name='relu8', input='conv8')
    #########################################################
    if regression:
        #########################################################
        model.add_node(Convolution1D(nb_filter=output_dim,
                                     filter_length=sequence_length,
                                     border_mode='same',
                                     init=init),
                       name='conv9',
                       input='relu8')
        model.add_output(name='output', input='conv9')
    else:
        model.add_node(Convolution1D(nb_filter=output_dim,
                                     filter_length=sequence_length,
                                     border_mode='same',
                                     init=init),
                       name='conv9',
                       input='relu8')
        model.add_node(Activation('softmax'), name='activation', input='conv9')
        model.add_output(name='output', input='activation')

    model.compile(optimizer=optimizer, loss={'output': loss})

    return model
Example #25
def get_X(o_content):

    nb_word = len(index_word)  # 1008
    nb_tag = len(index_tag)  # 16/14
    maxlen = 100
    word_embedding_dim = 100
    lstm_dim = 100
    batch_size = 64

    word_input = Input(shape=(maxlen,), dtype='float32', name='word_input')
    word_emb = Embedding(nb_word, word_embedding_dim, input_length=maxlen, dropout=0.2, name='word_emb')(word_input)
    bilstm = Bidirectional(LSTM(lstm_dim, dropout_W=0.1, dropout_U=0.1, return_sequences=True))(word_emb)
    bilstm_d = Dropout(0.1)(bilstm)

    half_window_size = 5

    paddinglayer = ZeroPadding1D(padding=half_window_size)(word_emb)
    conv = Conv1D(nb_filter=50, filter_length=(2 * half_window_size + 1), border_mode='valid')(paddinglayer)
    conv_d = Dropout(0.1)(conv)
    dense_conv = TimeDistributed(Dense(50))(conv_d)
    rnn_cnn_merge = merge([bilstm_d, dense_conv], mode='concat', concat_axis=2)

    dense = TimeDistributed(Dense(nb_tag))(rnn_cnn_merge)
    crf = ChainCRF()
    crf_output = crf(dense)

    model = Model(input=[word_input], output=[crf_output])

    model.compile(loss=crf.sparse_loss,
                  optimizer=RMSprop(0.001),
                  metrics=['sparse_categorical_accuracy'])
    # model.load_weights('/home/weiwc/pkl/model.weights')
    model.load_weights('model.weights')

    x_sen = []
    word_sen = []
    content_re = o_content.replace(" ", "")
    for line in content_re:
        word_sen.append(line)
        if line in dict_word:
            x_sen.append(dict_word[line])
        else:
            x_sen.append(1)
    X_test_cut = []
    X_test_len = []
    max_sen_len = 100
    if len(x_sen) <= max_sen_len:
        X_test_cut.append(x_sen)
        X_test_len.append(len(x_sen))

    X_test_cut = pad_sequences(X_test_cut, maxlen=max_sen_len, padding='post')
    Y_pred = model.predict(X_test_cut)

    j2 = 0
    i2 = 0
    t = []
    for j1 in range(len(word_sen)):
        w = word_sen[j1]
        tags = Y_pred[i2][j2]
        t_tmp = []
        for i in range(14):
            if (tags[i] == 1):
                # t_tmp.append(index_tag[i])
                # t_tmp.append(w)
                # t.append(t_tmp)
                t.append(index_tag[i])
                break
        j2 += 1
        # if j2 == X_test_len[i2]:       #X_test_len = [89, 37, 95, 86, 90, 100, 90, 94, 80, 79, 44, 59]
        #     j2 = 0
        #     i2 += 1
    wl = re.split("[ ]{1,100}", o_content)
    tt = []
    start = 0
    end = 0
    for i in wl:
        end += len(i)
        tt.append(t[start:end])
        start += len(i)
    tt2 = []
    for i in range(len(tt)):
        flag = False
        for j in tt[i]:
            if j.startswith('B'):
                flag = True
                tt2.append("".join(wl[i]) + "|" + j.split("-")[1])
                break
        if not flag:
            for j in tt[i]:
                if j.startswith('I'):
                    flag = True
                    tt2.append("".join(wl[i]) + "|" + j.split("-")[1])
                    break
        if not flag:
            for j in tt[i]:
                tt2.append("".join(wl[i]) + "|" + j)
                break

    return "   ".join(tt2)
Example #26
    def model(self):
        x_input = Input(self.get_input_shape())

        # Zero-Padding
        x = ZeroPadding1D(3)(x_input)

        # Stage 1
        x = Conv1D(4,
                   7,
                   strides=1,
                   name='conv1',
                   kernel_initializer=glorot_normal(seed=0))(x)
        x = BatchNormalization(name='bn_conv1')(x)
        x = Activation('relu')(x)
        # x = MaxPooling1D(3, strides=2)(x)

        # Stage 2
        x = conv_block(x,
                       kernel_size=3,
                       filters=[4, 4, 16],
                       stage=2,
                       block='a',
                       s=1)
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='b')
        x = identity_block(x, 3, [4, 4, 16], stage=2, block='c')

        # Stage 3
        x = conv_block(x,
                       kernel_size=3,
                       filters=[8, 8, 32],
                       stage=3,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='b')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='c')
        x = identity_block(x, 3, [8, 8, 32], stage=3, block='d')

        # Stage 4
        x = conv_block(x,
                       kernel_size=3,
                       filters=[16, 16, 64],
                       stage=4,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='b')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='c')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='d')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='e')
        x = identity_block(x, 3, [16, 16, 64], stage=4, block='f')

        # Stage 5
        x = conv_block(x,
                       kernel_size=3,
                       filters=[32, 32, 128],
                       stage=5,
                       block='a',
                       s=2)
        x = identity_block(x, 3, [32, 32, 128], stage=5, block='b')
        x = identity_block(x, 3, [32, 32, 128], stage=5, block='c')

        # AVGPOOL
        x = AveragePooling1D(2, name="avg_pool")(x)

        # Output layer
        x = Flatten()(x)
        mu, sigma = GaussianLayer(len(self.targetname), name='main_output')(x)

        # Additional 'input' for the labels
        label_layer = Input((len(self.targetname), ))

        # Create model
        model = Model(inputs=[x_input, label_layer],
                      outputs=[mu, sigma],
                      name='ResNetDeepEnsemble')

        # Define the loss function (needs to be defined here because it uses an intermediate layer)
        # NOTE: do not include loss function when compiling model because of this
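        # The expression below is the Gaussian negative log-likelihood with
        # sigma as the predicted variance, 0.5*log(sigma) + (y - mu)^2/(2*sigma),
        # up to constants; the +5 offset only shifts the loss value.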
        div_result = Lambda(lambda y: y[0] / y[1])(
            [K.square(label_layer - mu), sigma])
        loss = K.mean(0.5 * tf.log(sigma) + 0.5 * div_result) + 5

        # Add loss to model
        model.add_loss(loss)

        return model
Example #27
def save_result_to_hbase(x):
    nb_word = len(index_word)  # 1008
    nb_tag = len(index_tag)  # 16/14
    maxlen = 100
    word_embedding_dim = 100
    lstm_dim = 100
    batch_size = 64

    word_input = Input(shape=(maxlen, ), dtype='float32', name='word_input')
    word_emb = Embedding(nb_word,
                         word_embedding_dim,
                         input_length=maxlen,
                         dropout=0.2,
                         name='word_emb')(word_input)
    bilstm = Bidirectional(
        LSTM(lstm_dim, dropout_W=0.1, dropout_U=0.1,
             return_sequences=True))(word_emb)
    bilstm_d = Dropout(0.1)(bilstm)

    half_window_size = 5

    paddinglayer = ZeroPadding1D(padding=half_window_size)(word_emb)
    conv = Conv1D(nb_filter=50,
                  filter_length=(2 * half_window_size + 1),
                  border_mode='valid')(paddinglayer)
    conv_d = Dropout(0.1)(conv)
    dense_conv = TimeDistributed(Dense(50))(conv_d)
    rnn_cnn_merge = merge([bilstm_d, dense_conv], mode='concat', concat_axis=2)

    dense = TimeDistributed(Dense(nb_tag))(rnn_cnn_merge)
    crf = ChainCRF()
    crf_output = crf(dense)

    model = Model(input=[word_input], output=[crf_output])

    model.compile(loss=crf.sparse_loss,
                  optimizer=RMSprop(0.001),
                  metrics=['sparse_categorical_accuracy'])

    # model.load_weights('/home/weiwc/pkl/model.weights')
    model.load_weights('model.weights')
    X_test_cut = x[0]
    X_test_len = x[1]
    X_word = x[2]
    rowkey = str(x[3])

    # print type(X_test_cut)
    # print len(X_test_cut)
    # print X_test_cut

    # print (X_test_len)
    # print len(X_test_len)
    # print X_test_cut[0],X_test_cut[1],X_test_cut[2]

    Y_pred = model.predict(X_test_cut)
    # print "Y_pred",len(Y_pred),len(Y_pred[0]),len(Y_pred[1]),Y_pred
    # print "X_word",len(X_word),X_word

    j2 = 0
    i2 = 0
    t = []
    # tt = []
    # for i in range(12):
    #     tt.append([])
    for j1 in range(len(X_word)):
        # index_tag {0: 'PAD', 1: 'O', 2: 'B-ROLE', 3: 'I-ROLE', 4: 'B-PER', 5: 'I-PER', 6: 'B-CRIME', 7: 'I-CRIME', 8: 'B-TIME',
        #  9: 'I-TIME', 10: 'B-ORG', 11: 'I-ORG', 12: 'B-LOC', 13: 'I-LOC'}
        w = X_word[j1]
        tags = Y_pred[i2][j2]
        tag_flag = False
        t_tmp = []
        for i in range(14):
            if (tags[i] == 1) and i > 0:
                t_tmp.append(index_tag[i])
                t_tmp.append(w)
                t.append(t_tmp)
                break
        j2 += 1
        if j2 == X_test_len[
                i2]:  #X_test_len = [89, 37, 95, 86, 90, 100, 90, 94, 80, 79, 44, 59]
            j2 = 0
            i2 += 1
    for i in t:
        print(i[0], i[1])
    # l2 = []
    # l3 = []
    # l22 = []
    # l23 = []
    # c = 0
    # ttl = ""
    # for i in t:
    #     if  i[0].startswith('B') and c == 0:
    #         l2.append(i[0])
    #         l3.append(i[1].decode("utf-8"))
    #         ttl = i[0].replace('B','I')
    #         c = c + 1
    #
    #     elif i[0] == ttl:
    #         l2.append(i[0])
    #         l3.append(i[1].decode("utf-8"))
    #     elif i[0].startswith('B') and c != 0:
    #         l22.append(l2)
    #         l23.append("".join(l3))
    #         l2 = []
    #         l3 = []
    #         l2.append(i[0])
    #         l3.append(i[1].decode("utf-8"))
    #         ttl = i[0].replace('B', 'I')
    # l22.append(l2)
    # l23.append("".join(l3))
    # taglist = ['B_ROLE','I_ROLE','B_PER','I_PER','B_CRIME','I_CRIME','B_TIME','I_TIME','B_ORG','I_ORG','B_LOC','I_LOC']
    # ret_t = {'PER': [], 'LOC': [], 'ORG': [], 'TIME': [], 'ROLE': [], 'CRIME': []}
    # index_tag {0: 'PAD', 1: 'O', 2: 'B-ROLE', 3: 'I-ROLE', 4: 'B-PER', 5: 'I-PER', 6: 'B-CRIME', 7: 'I-CRIME', 8: 'B-TIME',
    #  9: 'I-TIME', 10: 'B-ORG', 11: 'I-ORG', 12: 'B-LOC', 13: 'I-LOC'}
    # id = 0
    # for i in l22:
    #     ret_t[i[0].split("-")[1]].append(l23[id])
    #     id += 1
    #
    # t2 = []
    # for i in ret_t.keys():
    #     tmp = (rowkey, [rowkey, "d", i, ",".join(ret_t[i])])
    #     t2.append(tmp)
    # for i in t2:
    #     print i[1][2],i[1][3]

    # return t2
    return "-"
Example #28
from keras.optimizers import SGD
from keras.models import Model
from keras.models import load_model
from keras.callbacks import ModelCheckpoint

# In[21]:

# Define the input layer, which has shape (None, 7, 1) and dtype float32; None is the batch dimension (number of instances).
input_layer = Input(shape=(7, 1), dtype='float32')

# A ZeroPadding1D layer is added next to add zeros at the beginning and end of each series. Zero-padding ensures that the downstream convolution layer does not reduce the dimension of the output sequences. A pooling layer, added after the convolution layer, is used to downsample the input.

# In[22]:

#Add zero padding
zeropadding_layer = ZeroPadding1D(padding=1)(input_layer)

# The first argument of Conv1D is the number of filters, which determines the number of features in the output. The second argument is the length of the 1D convolution window. The third argument is strides, the number of places to shift the convolution window. Lastly, setting use_bias to True adds a bias value during the computation of each output feature. Here, the 1D convolution can be thought of as generating local AR models over a rolling window of three time units.

# In[23]:

#Add 1D convolution layer
conv1D_layer = Conv1D(64, 3, strides=1, use_bias=True)(zeropadding_layer)

# AveragePooling1D is added next to downsample the input by taking the average over a pool size of three with a stride of one timestep. The average pooling in this case can be thought of as taking moving averages over a rolling window of three time units; a quick numeric check follows the layer below. We use average pooling instead of max pooling to generate the moving averages.

# In[24]:

#Add AveragePooling1D layer
avgpooling_layer = AveragePooling1D(pool_size=3, strides=1)(conv1D_layer)
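# A quick numeric check (an added sketch, not part of the original notebook):
# AveragePooling1D(pool_size=3, strides=1) reproduces a 3-step moving average.
import numpy as np
from keras.models import Sequential
from keras.layers import AveragePooling1D

x = np.arange(1.0, 6.0).reshape(1, 5, 1)  # the series 1..5
m = Sequential([AveragePooling1D(pool_size=3, strides=1, input_shape=(5, 1))])
print(m.predict(x).ravel())  # [2. 3. 4.] = means of (1,2,3), (2,3,4), (3,4,5)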
Example #29
fs = 2000
# fs = 160000
model = Sequential()
model.add(Conv1D(16, 64, strides=2, input_shape=(fs, 1), name='layer1'))  # layer1
model.add(BatchNormalization(name='layer2'))
convout1 = Activation('relu', name='layer3')
model.add(convout1)
model.add(AveragePooling1D(pool_size=2, padding='valid', name='layer4'))
model.add(Conv1D(32, 32, strides=2, name='layer5'))
model.add(BatchNormalization(name='layer6'))
convout1 = Activation('relu', name='layer7')
model.add(convout1)
model.add(AveragePooling1D(pool_size=2, padding='valid', name='layer8'))
#model.add(GlobalAveragePooling1D())
model.add(Conv1D(64, 16, strides=2, name='layer9'))
model.add(ZeroPadding1D(padding=16, name='layer10'))
model.add(BatchNormalization(name='layer11'))
convout1 = Activation('relu', name='layer12')
model.add(convout1)
#model.add(AveragePooling1D(pool_size=2, padding='valid'))
model.add(GlobalAveragePooling1D(name='layer13'))
#model.add(Flatten())
#model.add((Dense(64)))
#model.add((Activation('relu')))
model.add(Dense(32, name='layer14'))
model.add(Activation('relu', name='layer15'))
model.add(Dropout(0.3, name='layer16'))
model.add(Dense(3, name='layer17'))
model.add(Activation('softmax', name='layer18'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])