def multichannel_cnn():
    """
    Build the Multichannel CNN model
    """
    feature_length = 600
    input_layer = Input(shape=(feature_length, ))

    # Load the embedding weights and set the embedding layer
    weights = np.load('weights/100d.npy')
    embedding_layer = Embedding(input_dim=weights.shape[0],
                                output_dim=weights.shape[1],
                                mask_zero=False,
                                weights=[weights],
                                trainable=False)

    embedding = embedding_layer(input_layer)
    embedding = Dropout(0.6)(embedding)

    # channel 1
    channel_1 = Conv1D(filters=1024,
                       kernel_size=2,
                       padding='valid',
                       activation='relu')(embedding)
    channel_1 = GlobalMaxPooling1D()(channel_1)

    # channel 2
    channel_2 = Conv1D(filters=1024,
                       kernel_size=4,
                       padding='valid',
                       activation='relu')(embedding)
    channel_2 = GlobalMaxPooling1D()(channel_2)

    # Fully connected network
    fully_connected = Concatenate()([channel_1, channel_2])
    fully_connected = Dropout(0.4)(fully_connected)
    fully_connected = Dense(128,
                            activation='relu',
                            kernel_constraint=unit_norm(),
                            bias_constraint=unit_norm())(fully_connected)
    fully_connected = Dropout(0.4)(fully_connected)
    output = Dense(1,
                   activation='sigmoid',
                   kernel_constraint=unit_norm(),
                   bias_constraint=unit_norm())(fully_connected)

    model = Model(inputs=input_layer, outputs=output)

    # Model settings
    metrics = [
        WorkSavedOverSamplingAtRecall(recall=1, name='wss'),
        WorkSavedOverSamplingAtRecall(recall=0.95, name='wss_95')
    ]
    opt = optimizers.Adam(1e-4)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=metrics)
    return model
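The head above constrains both the kernel and the bias to unit norm; unit_norm() renormalizes the constrained weights after every gradient update. A self-contained sanity check (synthetic data, arbitrary sizes) that the column norms of a constrained Dense kernel stay at 1 after training:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.constraints import unit_norm

demo = Sequential([Dense(4, input_dim=8, kernel_constraint=unit_norm())])
demo.compile(optimizer='sgd', loss='mse')
demo.fit(np.random.randn(32, 8), np.random.randn(32, 4), epochs=1, verbose=0)
kernel = demo.get_weights()[0]
print(np.linalg.norm(kernel, axis=0))  # approximately [1. 1. 1. 1.]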
Example #2
    def cnn(self):
        x_in = Input(shape=(self.config.seq_len,))
        if not self.config.use_pretrained_embedding:
            x_embedded = Embedding(self.config.vocab_size,
                                   self.config.embedding_dim)(x_in)  # size of the embedding
        else:
            word2vec_model = word2vec.load(self.config.word2vec_model_path)
            word2vec_dict = {}
            for idx, word in enumerate(word2vec_model.wv.vocab):
                word2vec_dict[word] = word2vec_model.wv[word]
            word_embedding_2dlist = [[]] * (self.config.vocab_size) 
            word_embedding_2dlist[0] = np.zeros(self.config.embedding_dim)
            bound = np.sqrt(6.0) / np.sqrt(self.config.vocab_size)  # bound for random variables.
            count_exist = 0
            count_not_exist = 0
            for i in range(1, self.config.vocab_size):  # loop over every word id
                word = self.config.id2char[i]
                vector = word2vec_dict.get(word)
                if vector is not None:  # the word has a pretrained embedding
                    word_embedding_2dlist[i] = vector[:self.config.embedding_dim]
                    count_exist += 1
                else:  # no pretrained vector: fall back to a random one
                    word_embedding_2dlist[i] = np.random.uniform(-bound, bound, self.config.embedding_dim)
                    count_not_exist += 1
            word_embedding_final = np.array(word_embedding_2dlist)  # convert to a 2-D array
            print("words with a pretrained embedding:", count_exist,
                  "; words without one:", count_not_exist)

            x_embedded = Embedding(self.config.vocab_size, self.config.embedding_dim, weights=[word_embedding_final])(x_in)

        reshape = Reshape((self.config.seq_len, self.config.embedding_dim, 1))(x_embedded)

        maxpool_list = []
        for filter_size in range(self.config.low_kernel_size, self.config.high_kernel_size + 1):
            conv = Conv2D(self.config.num_filters,
                          kernel_size=(filter_size, self.config.embedding_dim),
                          padding='valid',
                          kernel_initializer='normal',
                          activation=self.config.activation_func)(reshape)
            maxpool = MaxPool2D(pool_size=(self.config.seq_len - filter_size + 1, 1),
                                strides=(1, 1),
                                padding='valid')(conv)
            maxpool_list.append(maxpool)

        if len(maxpool_list) == 1:
            concatenated_tensor = maxpool_list[0]
        else:
            concatenated_tensor = Concatenate(axis=1)(maxpool_list)
        flatten = Flatten()(concatenated_tensor)
        x = Lambda(lambda x: K.l2_normalize(x, 1))(flatten)

        pred = Dense(self.config.num_classes,
                     use_bias=self.config.use_bias,
                     kernel_constraint=unit_norm())(x)

        self.encoder = Model(x_in, x)  # the real goal is to obtain this encoder
        self.model = Model(x_in, pred)  # trained via the classification task

        self.model.compile(loss=sparse_amsoftmax_loss, optimizer='adam', metrics=['sparse_categorical_accuracy'])
Example #3
def create_manTraNet_model( Featex, pool_size_list=[7,15,31], is_dynamic_shape=True, apply_normalization=True ) :
    """
    Create ManTra-Net from a pretrained IMC-Featex model
    """
    img_in = Input(shape=(None,None,3), name='img_in' )
    rf = Featex( img_in )
    rf = Conv2D( 64, (1,1),
                 activation=None,  # no need for tanh if sf is L2-normalized
                 use_bias=False,
                 kernel_constraint = unit_norm( axis=-2 ),
                 name='outlierTrans',
                 padding = 'same' )(rf)
    bf = BatchNormalization( axis=-1, name='bnorm', center=False, scale=False )(rf)
    devf5d = NestedWindowAverageFeatExtrator(window_size_list=pool_size_list,
                                             output_mode='5d',
                                             minus_original=True,
                                             name='nestedAvgFeatex' )( bf )
    if ( apply_normalization ) :
        sigma = GlobalStd2D( name='glbStd' )( bf )
        sigma5d = Lambda( lambda t : K.expand_dims( t, axis=1 ), name='expTime')( sigma )
        devf5d = Lambda( lambda vs : K.abs(vs[0]/vs[1]), name='divStd' )([devf5d, sigma5d])
    # convert back to 4d
    devf = ConvLSTM2D( 8, (7,7),
                       activation='tanh',
                       recurrent_activation='hard_sigmoid',
                       padding='same',
                       name='cLSTM',
                       return_sequences=False )(devf5d)
    pred_out = Conv2D(1, (7,7), padding='same', activation='sigmoid', name='pred')( devf )
    return Model( inputs=img_in, outputs=pred_out, name='sigNet' )
Example #4
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=unit_norm(),
                 bias_constraint=None,
                 **kwargs):
        """
        Initialize like Dense.

        """
        # explicit call to parent constructor
        Dense.__init__(self,
                       units,
                       activation=activation,
                       use_bias=use_bias,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer,
                       kernel_regularizer=kernel_regularizer,
                       bias_regularizer=bias_regularizer,
                       activity_regularizer=activity_regularizer,
                       kernel_constraint=kernel_constraint,
                       bias_constraint=bias_constraint,
                       **kwargs)
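The class name is cropped out of this excerpt; assuming it were called, say, UnitNormDense, it would be used exactly like a plain Dense layer (the name is hypothetical):

# hypothetical name for the subclass whose __init__ is shown above
layer = UnitNormDense(64, activation='relu')  # kernel_constraint defaults to unit_norm()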
Example #5
def metric_space(x, accent_input, acc_classes, metric_loss, margin, name):

    if metric_loss == "softmax":
        y = DS(acc_classes, activation='softmax', name=name)(x)

    elif metric_loss == "sphereface":
        y = ls.SphereFace(n_classes=acc_classes, m=margin,
                          name=name)([x, accent_input])

    elif metric_loss == "cosface":
        y = ls.CosFace(n_classes=acc_classes, m=margin,
                       name=name)([x, accent_input])

    elif metric_loss == "arcface":
        y = ls.ArcFace(n_classes=acc_classes, m=margin,
                       name=name)([x, accent_input])

    elif metric_loss == "circleloss":
        y = Lambda(lambda x: K.l2_normalize(x, 1))(x)
        y = Dense(acc_classes,
                  activation=None,
                  use_bias=False,
                  kernel_constraint=unit_norm(),
                  name=name)(y)
    else:
        raise ValueError("unknown metric_loss: {}".format(metric_loss))

    return y
Example #6
    def build(self):
        # load data
        train = self.loadData("train.csv")
        test = self.loadData("test.csv")

        # Define the model
        model = Sequential()
        model.add(Dense(50, input_dim=len(train['df'].columns)-1, activation='relu', name='layer_1'))
        model.add(Dropout(0.4))
        # model.add(Dense(100, activation='relu', name='layer_2'))
        model.add(Dense(100, activation='relu', name='layer_2', kernel_constraint=unit_norm()))
        model.add(Dense(50, activation='relu', name='layer_3'))
        model.add(Dense(50, activation='relu', name='layer_4'))
        model.add(Dense(50, activation='relu', name='layer_5'))
        # model.add(Dense(50, activation='relu', name='layer_6'))
        # model.add(Dense(50, activation='relu', name='layer_7'))
        # model.add(Dense(50, activation='relu', name='layer_8'))
        # model.add(Dense(50, activation='relu', name='layer_9'))
        model.add(Dense(1, activation='sigmoid', name='output_layer'))
        model.compile(loss='logcosh', optimizer='adam', metrics=['accuracy'])

        # early stop
        early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, mode='auto')

        # Train the model
        model.fit(train['X'], train['Y'],
                  epochs=250, shuffle=True, verbose=2,
                  validation_split=0.2, batch_size=5,
                  # validation_data=(test['X'], test['Y']), callbacks=[self._logger]
                  callbacks=[early_stop])

        # save model
        # model.save(config.playerName+".h5")

        test_metrics = model.evaluate(test['X'], test['Y'], verbose=0)
        print("accuracy is: {}".format(test_metrics[1]))
Example #7
def test_unit_norm():
    unit_norm_instance = constraints.unit_norm()
    normalized = unit_norm_instance(K.variable(get_example_array()))
    norm_of_normalized = np.sqrt(np.sum(K.eval(normalized) ** 2, axis=0))
    # under the unit-norm constraint, each column norm should equal 1
    difference = norm_of_normalized - 1.
    largest_difference = np.max(np.abs(difference))
    assert largest_difference < 1e-4
Example #8
def test_unit_norm():
    unit_norm_instance = constraints.unit_norm()
    normalized = unit_norm_instance(K.variable(get_example_array()))
    norm_of_normalized = np.sqrt(np.sum(K.eval(normalized)**2, axis=0))
    # under the unit-norm constraint, each column norm should equal 1
    difference = norm_of_normalized - 1.
    largest_difference = np.max(np.abs(difference))
    assert largest_difference < 1e-4
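Both tests call a get_example_array() helper that is not shown here; a minimal stand-in (the values are arbitrary) that makes them runnable could be:

import numpy as np

def get_example_array():
    # any fixed 2-D array works; the constraint normalizes along axis 0
    np.random.seed(3537)
    example_array = np.random.random((100, 100)) * 100. - 50.
    example_array[0, 0] = 0.  # include a zero entry as an edge case
    return example_array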
Example #9
def dense(input_shape):
    """build a model made of dense layers"""
    from keras.layers import Input, Reshape, Dense
    from keras.regularizers import l2
    from keras.constraints import unit_norm

    dense_params = {"activation": "elu"}

    bottleneck_params = {"name": "bottleneck", "activation": "linear"}

    input = x = Input(shape=input_shape)
    x = Dense(512, kernel_constraint=unit_norm(), **dense_params)(x)
    x = Dense(128, kernel_constraint=unit_norm(), **dense_params)(x)
    x = Dense(2, kernel_constraint=unit_norm(), **bottleneck_params)(x)
    x = Dense(128, **dense_params)(x)
    x = Dense(512, **dense_params)(x)
    output = x = Dense(784, activation='sigmoid')(x)
    return [input], [output]
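A sketch of wiring the returned tensors into a trainable autoencoder; the 784-unit output suggests flattened 28x28 images, so MNIST-style inputs are assumed here:

from keras.models import Model

inputs, outputs = dense(input_shape=(784,))
autoencoder = Model(inputs, outputs)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')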
Example #10
def amplitude_embedding_layer(embedding_matrix,
                              max_sequence_length,
                              trainable=False,
                              random_init=True):
    embedding_dim = embedding_matrix.shape[0]
    vocabulary_size = embedding_matrix.shape[1]
    if (random_init):
        return (Embedding(vocabulary_size,
                          embedding_dim,
                          embeddings_constraint=unit_norm(axis=1),
                          input_length=max_sequence_length,
                          trainable=trainable))
    else:
        return (Embedding(vocabulary_size,
                          embedding_dim,
                          weights=[np.transpose(embedding_matrix)],
                          embeddings_constraint=unit_norm(axis=1),
                          input_length=max_sequence_length,
                          trainable=trainable))
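Note the orientation: the function reads embedding_dim from shape[0] and the vocabulary size from shape[1], then transposes the matrix before loading it. A usage sketch with hypothetical sizes:

import numpy as np

# (embedding_dim, vocabulary_size) orientation, as expected above
embedding_matrix = np.random.randn(50, 1000)
layer = amplitude_embedding_layer(embedding_matrix,
                                  max_sequence_length=60,
                                  random_init=False)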
Example #11
    def initialize(self):
        self.doc = Input(shape=(self.opt.max_sequence_length, ), dtype='int32')

        if (self.opt.random_init):
            self.embedding = Embedding(
                trainable=self.opt.embedding_trainable,
                input_dim=self.opt.lookup_table.shape[0],
                output_dim=self.opt.lookup_table.shape[1],
                weights=[self.opt.lookup_table],
                embeddings_constraint=unit_norm(axis=1))
        else:
            self.embedding = Embedding(
                trainable=self.opt.embedding_trainable,
                input_dim=self.opt.lookup_table.shape[0],
                output_dim=self.opt.lookup_table.shape[1],
                embeddings_constraint=unit_norm(axis=1))

        self.dense = Dense(self.opt.nb_classes, activation="sigmoid")
        self.dropout = Dropout(self.opt.dropout_rate_probs)
Example #12
    def make_model(self):
        x = Input(shape=(self.look_back, ))

        ar_output = Dense(units=1,
                          kernel_initializer='uniform',
                          kernel_constraint=unit_norm(),
                          name='ar-weights')(x)

        model = Model(inputs=x, outputs=ar_output)
        model.compile('Adam', 'mae')
        return model
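A sketch of fitting this autoregressive head on sliding windows of a univariate series; the windowing helper and data are illustrative, and ar stands in for an instance whose look_back is 12:

import numpy as np

def make_windows(series, look_back):
    X = np.stack([series[i:i + look_back] for i in range(len(series) - look_back)])
    y = series[look_back:]
    return X, y

series = np.sin(np.linspace(0, 50, 1000))
X, y = make_windows(series, look_back=12)
model = ar.make_model()  # ar: hypothetical instance with look_back = 12
model.fit(X, y, epochs=10, batch_size=32, verbose=0)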
Example #13
def dense_factor(inputs, input_signal, num_nodes, dropout):

    h_1 = BatchNormalization()(inputs)
    h_1 = DFNetsATT(num_nodes,
                    A_hat,
                    arma_conv_AR,
                    arma_conv_MA,
                    input_signal,
                    num_attention_heads=8,
                    attention_combine='concat',
                    attention_dropout=0.5,
                    kernel_initializer=initializers.glorot_normal(seed=1),
                    kernel_regularizer=l2(9e-4),
                    kernel_constraint=unit_norm(),
                    use_bias=True,
                    bias_initializer=initializers.glorot_normal(seed=1),
                    bias_constraint=unit_norm())(h_1)
    h_1 = ReLU()(h_1)
    output = Dropout(dropout)(h_1)
    return output
Example #14
    def make_model(self):
        x = Input(shape=(self.look_back, self.n_var))

        # make self-evolution
        se_models = self.make_se_model(self.pretrain, self.trainable)
        se_outputs = [
            se_models[idx](
                inputs=Lambda(
                    lambda k: k[:, -self.p_list[idx]:, idx]
                )(x)
            )
            for idx in range(self.n_var)
        ]
        se_pred = concatenate(se_outputs)

        # make res
        c1 = Conv1D(
            filters=1, kernel_size=1, name='Conv1D-1'
        )(x)
        c3 = Conv1D(
            filters=1, kernel_size=3, name='Conv1D-3'
        )(x)
        c5 = Conv1D(
            filters=1, kernel_size=5, name='Conv1D-5'
        )(x)

        r1 = LSTM(
            units=16, name='LSTM-1'
        )(c1)
        r3 = LSTM(
            units=16, name='LSTM-3'
        )(c3)
        r5 = LSTM(
            units=16, name='LSTM-5'
        )(c5)
        r135 = add([r1, r3, r5])
        res_pred = Dense(
            units=self.n_var,
            kernel_initializer='uniform', kernel_constraint=unit_norm()
        )(r135)

        # make final
        se = Multiply(
            unit=self.n_var, name='se-weights'
        )(se_pred)
        res = Multiply(
            unit=self.n_var, name='res-weights'
        )(res_pred)
        y_pred = Add()([se, res])

        model = Model(inputs=x, outputs=y_pred)
        model.compile('Adam', 'mae')
        return model
Example #15
def amplitude_embedding_layer(embedding_matrix,
                              input_shape,
                              trainable=False,
                              random_init=True,
                              l2_reg=0.0000005):
    embedding_dim = embedding_matrix.shape[0]
    vocabulary_size = embedding_matrix.shape[1]
    if (random_init):
        return (Embedding(vocabulary_size,
                          embedding_dim,
                          embeddings_constraint=unit_norm(axis=1),
                          input_length=input_shape,
                          embeddings_regularizer=regularizers.l2(l2_reg),
                          trainable=trainable))
    else:
        return (Embedding(vocabulary_size,
                          embedding_dim,
                          weights=[np.transpose(embedding_matrix)],
                          embeddings_constraint=unit_norm(axis=1),
                          input_length=input_shape,
                          embeddings_regularizer=regularizers.l2(l2_reg),
                          trainable=trainable))
Example #16
def build(numofInput):
    model = Sequential()
    model.add(
        Dense(numofInput,
              kernel_constraint=unit_norm(),
              input_shape=(numofInput, )))
    # model.add(Activation("relu"))
    # model.add(Dense(numofInput))
    # model.add(BatchNormalization())
    model.add(Activation("linear"))
    adam = Adam(lr=0.1)
    model.compile(optimizer=adam, loss="mean_squared_error")
    return model
Example #17
    def __net(self, modelpath):
            x_in = Input(shape=(self.__maxlen,))
            x_embedded = Embedding(self.__vocabsize + 2, self.__wordsize)(x_in)
            x_embedded = BatchNormalization()(x_embedded)
            x_embedded = Activation('relu')(x_embedded)
            reshape = Reshape((self.__maxlen, self.__wordsize, 1))(x_embedded)
            self.num_filters = 512
            self.filter_sizes = [3, 4, 5]
            conv_0 = Conv2D(self.num_filters, kernel_size=(self.filter_sizes[0], self.__wordsize), padding='valid',
                            kernel_initializer='normal', activation='relu')(reshape)
            conv_1 = Conv2D(self.num_filters, kernel_size=(self.filter_sizes[1], self.__wordsize), padding='valid',
                            kernel_initializer='normal', activation='relu')(reshape)
            conv_2 = Conv2D(self.num_filters, kernel_size=(self.filter_sizes[2], self.__wordsize), padding='valid',
                            kernel_initializer='normal', activation='relu')(reshape)

            maxpool_0 = MaxPool2D(pool_size=(self.__maxlen - self.filter_sizes[0] + 1, 1),
                                  strides=(1, 1), padding='valid')(conv_0)
            maxpool_1 = MaxPool2D(pool_size=(self.__maxlen - self.filter_sizes[1] + 1, 1),
                                  strides=(1, 1), padding='valid')(conv_1)
            maxpool_2 = MaxPool2D(pool_size=(self.__maxlen - self.filter_sizes[2] + 1, 1),
                                  strides=(1, 1), padding='valid')(conv_2)
            concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
            flatten = Flatten()(concatenated_tensor)
            reshape1 = Reshape((3, self.num_filters))(flatten)
            reshape1 = BatchNormalization()(reshape1)
            #reshape1 = Activation('relu', )(reshape1)
            if self.__cuda:
                # the attention block below expects a sequence of 2 * wordsize features,
                # so the CUDA branch must also be bidirectional and return sequences
                x = Bidirectional(CuDNNGRU(self.__wordsize, return_sequences=True))(reshape1)
            else:
                x = Bidirectional(GRU(self.__wordsize, return_sequences=True))(reshape1)
            #x = BatchNormalization()(x)
            timestep = TimeDistributed(Dense(1))(x)
            flatterns = Flatten()(timestep)
            flatterns = BatchNormalization()(flatterns)
            attention_weight = Activation('softmax')(flatterns)
            attention_weight = RepeatVector(2 * self.__wordsize)(attention_weight)
            attention_weight = Permute([2, 1])(attention_weight)
            sent_representation = multiply([x, attention_weight])
            sent_representation = Lambda(lambda xin: K.sum(xin, axis=1))(sent_representation)
            x = sent_representation
            x = Lambda(lambda x: K.l2_normalize(x, 1))(x)
            pred = Dense(self.__nclass, use_bias=False, kernel_constraint=unit_norm())(x)
            self.__encoder = Model(x_in, x)
            self.__model = Model(x_in, pred)
            self.__model.compile(loss=sparse_amsoftmax_loss, optimizer='adam',
                                 metrics=['sparse_categorical_accuracy'])
            self.__model.load_weights(modelpath)
Example #18
def efficientnet_B4(input_channel_num = 1):
    input_layer = Input(shape = (380, 380, input_channel_num))
    dense = EfficientNetB4(include_top = False, weights = None, input_tensor = input_layer, pooling = 'max')
    x = dense.output
    x = Dense(2048, activation = 'tanh')(x)

    # normalize weights and features, then classify
    encoder = Lambda(lambda m: K.l2_normalize(m, axis=1))(x)
    x = Dense(1500, use_bias = False, kernel_constraint = unit_norm())(encoder)

    # build a model and return
    model = Model(inputs = input_layer, outputs = x)

    return model
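The l2-normalized 2048-d embedding can be recovered from the compiled graph with a sub-model; indexing the penultimate layer is an assumption about this particular layer ordering:

import numpy as np
from keras.models import Model

full = efficientnet_B4()
encoder = Model(inputs=full.input, outputs=full.layers[-2].output)  # the l2-normalize Lambda
dummy = np.zeros((1, 380, 380, 1), dtype='float32')
features = encoder.predict(dummy)  # shape (1, 2048), each row has unit l2 norm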
Example #19
def run_real_embedding_network(lookup_table,
                               max_sequence_length,
                               nb_classes=2,
                               random_init=True,
                               embedding_trainable=True):
    embedding_dimension = lookup_table.shape[1]
    sequence_input = Input(shape=(max_sequence_length, ), dtype='int32')
    if (random_init):
        embedding = Embedding(
            trainable=embedding_trainable,
            input_dim=lookup_table.shape[0],
            output_dim=lookup_table.shape[1],
            weights=[lookup_table],
            embeddings_constraint=unit_norm(axis=1))(sequence_input)
    else:
        embedding = Embedding(
            trainable=embedding_trainable,
            input_dim=lookup_table.shape[0],
            output_dim=lookup_table.shape[1],
            embeddings_constraint=unit_norm(axis=1))(sequence_input)
    representation = GlobalAveragePooling1D()(embedding)
    output = Dense(nb_classes, activation='sigmoid')(representation)
    model = Model(sequence_input, output)
    return model
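A usage sketch with a random lookup table; the sizes are hypothetical:

import numpy as np

lookup_table = np.random.randn(5000, 100)  # 5000-word vocabulary, 100-d vectors
model = run_real_embedding_network(lookup_table, max_sequence_length=60)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()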
Example #20
def dense_block_model(x_train):

    inputs = Input((x_train.shape[1], ))

    x = dense_block(inputs)

    predictions = Dense(7,
                        kernel_initializer=initializers.glorot_normal(seed=1),
                        kernel_regularizer=regularizers.l2(1e-10),
                        kernel_constraint=unit_norm(),
                        activity_regularizer=regularizers.l2(1e-10),
                        use_bias=True,
                        bias_initializer=initializers.glorot_normal(seed=1),
                        bias_constraint=unit_norm(),
                        activation='softmax',
                        name='fc_' + str(1))(x)

    model = Model(inputs=inputs, outputs=predictions)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.002),
                  metrics=['acc'])

    return model
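dense_block is defined elsewhere in that repository; a minimal stand-in (hypothetical widths) that makes the snippet runnable could be:

from keras.layers import Dense, Dropout

def dense_block(inputs, units=64, rate=0.3):
    # hypothetical stand-in for the repository's dense_block
    h = Dense(units, activation='relu')(inputs)
    return Dropout(rate)(h)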
Example #21
    def _get_train_model(self):
        # the actual model: a GRU-based classifier
        x_in = Input(shape=(self.config.max_len, ))
        x_embedded = Embedding(self.data_loader.max_feature + 2,
                               self.config.word_size)(x_in)
        x = Bidirectional(LSTM(64))(x_embedded)
        # x = CuDNNGRU(self.config.word_size)(x_embedded)
        x = Lambda(lambda x: K.l2_normalize(x, 1))(x)

        pred = Dense(self.config.num_train_groups,
                     use_bias=False,
                     kernel_constraint=unit_norm())(x)

        self.encoder = Model(x_in, x)  # the real goal is to obtain this encoder
        self.model = Model(x_in, pred)  # trained via the classification task
Example #22
    def initialize(self):
        if (self.opt.random_init):
            self.embedding = Embedding(
                trainable=self.opt.embedding_trainable,
                input_dim=self.opt.lookup_table.shape[0],
                output_dim=self.opt.lookup_table.shape[1],
                weights=[self.opt.lookup_table],
                embeddings_constraint=unit_norm(axis=1))
        else:
            self.embedding = Embedding(
                trainable=self.opt.embedding_trainable,
                input_dim=self.opt.lookup_table.shape[0],
                output_dim=self.opt.lookup_table.shape[1],
                embeddings_constraint=unit_norm(axis=1))
        self.dropout_embedding = Dropout(self.opt.dropout_rate_embedding)
        if self.opt.bert_enabled:
            checkpoint_path = os.path.join(self.opt.bert_dir,
                                           'bert_model.ckpt')
            config_path = os.path.join(self.opt.bert_dir, 'bert_config.json')
            self.bertmodel = load_trained_model_from_checkpoint(
                config_path, checkpoint_path, training=False)
            self.bertmodel.trainable = False
            self.remove_mask = RemoveMask()
Example #23
def model_extractor(activation_func, weight_decay=5e-4):
    model = Sequential()

    # Instantiating first set of layers
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation=activation_func,
               padding='same',
               kernel_constraint=unit_norm(),
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1), name='pool1'))
    model.add(Dropout(0.2))

    # Instantiating second set of Layers
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation=activation_func,
               padding='same',
               kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(BatchNormalization())
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1), name='pool2'))
    model.add(Dropout(0.2))

    # Instantiating set of FCs
    model.add(Flatten())
    model.add(Dense(512, activation=activation_func))
    model.add(BatchNormalization())
    # Output layer
    model.add(Dense(10, activation='softmax'))
    return model
Example #24
def run_real_network(lookup_table, max_sequence_length):
    embedding_dimension = lookup_table.shape[1]
    sequence_input = Input(shape=(max_sequence_length, ), dtype='int32')
    embedding = Embedding(trainable=True,
                          input_dim=lookup_table.shape[0],
                          output_dim=lookup_table.shape[1],
                          weights=[lookup_table],
                          embeddings_constraint=unit_norm(axis=1),
                          mask_zero=True)(sequence_input)
    representation = GlobalAveragePooling1D()(embedding)
    output = Dense(1, activation='sigmoid')(representation)

    model = Model(sequence_input, output)
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
Example #25
    def get_model():
        logging.info("Creating model..")
        model = Sequential()
        model.add(
            LSTM(500,
                 input_shape=(1, 100),
                 return_sequences=True,
                 kernel_constraint=unit_norm()))
        model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(1, activation="sigmoid")))

        model.compile(optimizer="adam",
                      loss="mean_squared_error",
                      metrics=['accuracy'])
        return model
Example #26
    def buildDNN(self, hidden_layers=5):
        '''Build the network architecture (a perceptron).
        Input: number of hidden layers
        '''
        self.classifier = Sequential()

        # First Layer
        self.classifier.add(
            Dense(54,
                  kernel_regularizer=l2(0.0007),
                  bias_regularizer=l2(0.0007),
                  activation='tanh',
                  kernel_constraint=unit_norm(),
                  kernel_initializer='random_normal',
                  input_dim=54))
        self.classifier.add(Dropout(0.08))

        # Hidden Layer(s)
        for nbLayer in range(hidden_layers - 1):
            self.classifier.add(
                Dense(54,
                      kernel_regularizer=l2(0.0007),
                      bias_regularizer=l2(0.0007),
                      activation='tanh',
                      kernel_initializer='random_normal'))
            self.classifier.add(Dropout(0.08))

        # Output Layer
        self.classifier.add(
            Dense(1, activation='sigmoid', kernel_initializer='random_normal'))

        try:
            # load previously saved weights, if available
            self.classifier.load_weights("weights.best.hdf5")
        except Exception:
            pass

        # Optimizer
        adam = optimizers.Adam(lr=0.001)

        # Compiling the neural network
        self.classifier.compile(optimizer=adam,
                                loss='binary_crossentropy',
                                metrics=['accuracy'])
Example #27
    def build(self, input_shape):

        self.kernel = self.add_weight(name='kernel',
                                      shape=(2, self.dimension, 1),
                                      constraint=unit_norm(axis=(0, 1)),
                                      initializer='uniform',
                                      trainable=True)
        # Create a trainable weight variable for this layer.

        if not isinstance(input_shape, list):
            raise ValueError('This layer should be called '
                             'on a list of 2 inputs.')

        if len(input_shape) != 2:
            raise ValueError('This layer should be called '
                             'on a list of 2 inputs. '
                             'Got ' + str(len(input_shape)) + ' inputs.')

        super(Complex1DProjection, self).build(input_shape)  # Be sure to call this somewhere!
Example #28
    def build(self, input_shape):
        if not isinstance(input_shape, list):
            raise ValueError('This layer should be called '
                             'on a list of 2 inputs.')

        if len(input_shape) != 2:
            raise ValueError('This layer should be called '
                             'on a list of 2 inputs. '
                             'Got ' + str(len(input_shape)) + ' inputs.')

        self.dim = input_shape[0][-1]
        self.kernel = self.add_weight(name='kernel',
                                      shape=(self.units, self.dim, 2),
                                      constraint=unit_norm(axis=(1, 2)),
                                      initializer=Orthogonal(gain=1.0,
                                                             seed=None),
                                      trainable=self.trainable)

        super(ComplexMeasurement,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #29
    def make_model(self):
        x = Input(shape=(self.look_back, ))

        ar_output = Dense(units=1,
                          kernel_initializer='uniform',
                          kernel_constraint=unit_norm(),
                          name='ar-weights')(x)

        pre_point = Lambda(lambda k: k[:, -1:])(x)

        merged_output = concatenate([ar_output, pre_point])

        outputs = Dense(units=1,
                        kernel_initializer=RND_UNI,
                        use_bias=False,
                        kernel_constraint=non_neg(),
                        name='contrib-weights')(merged_output)

        model = Model(inputs=x, outputs=outputs)
        model.compile('Adam', 'mae')
        return model
Example #30
def BiLSTM_LMCL(max_seq_len,
                max_features,
                embedding_dim,
                output_dim,
                model_img_path=None,
                embedding_matrix=None):
    model = Sequential()
    if embedding_matrix is None:
        model.add(
            Embedding(max_features,
                      embedding_dim,
                      input_length=max_seq_len,
                      mask_zero=True))
    else:
        model.add(
            Embedding(max_features,
                      embedding_dim,
                      input_length=max_seq_len,
                      mask_zero=True,
                      weights=[embedding_matrix],
                      trainable=True))

    model.add(Bidirectional(LSTM(128, dropout=0.5)))
    model.add(Dropout(0.5))
    model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    adam = Adam(lr=0.003, clipnorm=5.)

    model.add(Dense(output_dim, use_bias=False, kernel_constraint=unit_norm()))
    model.add(Activation('softmax'))
    model.compile(loss=large_margin_cosine_loss,
                  optimizer=adam,
                  metrics=['accuracy'])

    if model_img_path:
        plot_model(model,
                   to_file=model_img_path,
                   show_shapes=True,
                   show_layer_names=False)

    return model
Example #31
    def build_model(self, rating_input_dim=None, user_input_dim=None, item_input_dim=None):
        if rating_input_dim is None:
            rating_input_dim = self.config['rating_input_dim']
        if user_input_dim is None:
            user_input_dim = len(self.config['user_id'])
        if item_input_dim is None:
            item_input_dim = len(self.config['item_id'])

        # Rating Embedding
        rating_input = Input(shape=(1,), dtype='int32')
        rating_embedding = Embedding(input_dim=rating_input_dim+1, output_dim=self.config['dim'], embeddings_constraint=unit_norm(), name='Rating_Init_Embedding')(rating_input)
        rating_embedding = Flatten()(rating_embedding)
        # rating_embedding = Activation('relu')(rating_embedding)
        rating_embedding = UnitNorm(name='Rating_Embedding')(rating_embedding)
        self.rating_embedding_model = Model(inputs=rating_input, outputs=rating_embedding, name='rating_embedding_model')

        # User Embedding
        user_input = Input(shape=(1,), dtype='int32')
        user_embedding = Embedding(input_dim=user_input_dim+1, output_dim=self.config['dim'], embeddings_constraint=unit_norm(), name='User_Init_Embedding')(user_input)
        user_embedding = Flatten()(user_embedding)
        # user_embedding = Activation('relu')(user_embedding)
        user_embedding = UnitNorm(name='User_Embedding')(user_embedding)
        self.user_embedding_model = Model(inputs=user_input, outputs=user_embedding, name='user_embedding_model')

        # Item Embedding
        item_input = Input(shape=(1,), dtype='int32')
        item_embedding = Embedding(input_dim=item_input_dim+1, output_dim=self.config['dim'], embeddings_constraint=unit_norm(), name='Item_Init_Embedding')(item_input)
        item_embedding = Flatten()(item_embedding)
        # item_embedding = Activation('relu')(item_embedding)
        item_embedding = UnitNorm(name='Item_Embedding')(item_embedding)
        self.item_embedding_model = Model(inputs=item_input, outputs=item_embedding, name='item_embedding_model')

        # Review Embedding
        word_index = self.config['review_tokenizer'].word_index
        # # Load pre-trained word embedding
        embeddings_index = {}
        with open(self.config['pre_word_embedding_file']) as f:
            for line in f:
                values = line.split()
                word = values[0]
                embed_value = np.asarray(values[1:], dtype='float32')
                embeddings_index[word] = embed_value
        print('Total %s word vectors.' % len(embeddings_index))
        embedding_matrix = np.random.random((len(word_index) + 1, self.config['pre_word_embedding_dim']))
        for word, i in word_index.items():
            embedding_vector = embeddings_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        # # Build review embedding layers
        review_input = Input(shape=(self.config['max_len'],), dtype='int32')
        word_embedding = Embedding(len(word_index)+1,
                                   self.config['pre_word_embedding_dim'],
                                   weights=[embedding_matrix],
                                   input_length=self.config['max_len'],
                                   name='Word_Embedding')(review_input)
        if self.config['review_embedder'] == 'CNN':
            review_embedding = Conv1D(self.config['num_filters'],
                                      kernel_size=self.config['filter_size'],
                                      strides=1,
                                      activation='tanh',
                                      name='Conv_Layer')(word_embedding)
            review_embedding = AveragePooling1D(pool_size=int(review_embedding.shape[1]),
                                                strides=None,
                                                padding='valid')(review_embedding)
            review_embedding = Activation('tanh')(review_embedding)
            review_embedding = Flatten()(review_embedding)
            # review_embedding = Dense(self.config['dim'],activation='relu')(review_embedding)
        else:
            review_embedding = Bidirectional(GRU(self.config['dim']))(word_embedding)
            review_embedding = Dense(self.config['dim'], activation='tanh')(review_embedding)
        review_embedding = UnitNorm(name='Review_Embedding')(review_embedding)
        self.review_embedding_model = Model(inputs=review_input, outputs=review_embedding, name='review_embedding_model')

        # Embedding Calculation:
        # <user_1, item_1, review_1, rating_1> and <user_2, item_2, review_2, rating_2>
        user_input_1 = Input(shape=(1,), dtype='int32', name='user_input_1')
        user_input_2 = Input(shape=(1,), dtype='int32', name='user_input_2')
        item_input_1 = Input(shape=(1,), dtype='int32', name='item_input_1')
        item_input_2 = Input(shape=(1,), dtype='int32', name='item_input_2')
        review_input_1 = Input(shape=(self.config['max_len'],), dtype='int32', name='review_input_1')
        review_input_2 = Input(shape=(self.config['max_len'],), dtype='int32', name='review_input_2')
        rating_input_1 = Input(shape=(1,), dtype='int32', name='rating_input_1')
        rating_input_2 = Input(shape=(1,), dtype='int32', name='rating_input_2')
        user_1 = self.user_embedding_model(user_input_1)
        user_2 = self.user_embedding_model(user_input_2)
        item_1 = self.item_embedding_model(item_input_1)
        item_2 = self.item_embedding_model(item_input_2)
        review_1 = self.review_embedding_model(review_input_1)
        review_2 = self.review_embedding_model(review_input_2)
        rating_1 = self.rating_embedding_model(rating_input_1)
        rating_2 = self.rating_embedding_model(rating_input_2)

        # Fraud detector
        fraud_input = Input(shape=(4*self.config['dim'],), name='fraud_detector_input')
        fraud_hidden_output = Dense(self.config['fraud_detector_nodes'][0], activation='relu')(fraud_input)
        for i in range(len(self.config['fraud_detector_nodes'])-1):
            fraud_hidden_output = Dense(self.config['fraud_detector_nodes'][i+1], activation='relu')(fraud_hidden_output)
        fraud_output = Dense(1, activation='sigmoid', name='fraud_detector_output')(fraud_hidden_output)
        self.fraud_detector = Model(inputs=fraud_input,outputs=fraud_output, name='fraud_detector')

        # Define Label Inputs
        user_context_input = Input(shape=(1,), name='user_context_flag')
        item_context_input = Input(shape=(1,), name='item_context_flag')
        fraud_label_input_1 = Input(shape=(1,), name='fraud_label_input_1')
        fraud_label_input_2 = Input(shape=(1,), name='fraud_label_input_2')
        behavior_success_input_1 = Input(shape=(1,), name='behavior_success_flag_1')
        behavior_success_input_2 = Input(shape=(1,), name='behavior_success_flag_2')

        # Calculate Loss Value
        joint_features_1 = concatenate([user_1, item_1, review_1, rating_1])  # concatenate embedding features as fraud detector's input
        joint_features_2 = concatenate([user_2, item_2, review_2, rating_2])
        fraud_prediction_1 = self.fraud_detector(joint_features_1)
        fraud_prediction_2 = self.fraud_detector(joint_features_2)
        behavior_success_loss_1 = BehaviorSuccessLoss()([user_1, item_1, review_1, rating_1, behavior_success_input_1])
        behavior_success_loss_2 = BehaviorSuccessLoss()([user_2, item_2, review_2, rating_2, behavior_success_input_2])
        user_social_relation_loss = SocialRelationLoss()([user_1, user_2, user_context_input])
        item_social_relation_loss = SocialRelationLoss()([item_1, item_2, item_context_input])
        fraud_detection_loss_1 = FraudDetectionLoss()([fraud_prediction_1, fraud_label_input_1, behavior_success_input_1])
        fraud_detection_loss_2 = FraudDetectionLoss()([fraud_prediction_2, fraud_label_input_2, behavior_success_input_2])
        loss = JointLoss()([fraud_detection_loss_1, fraud_detection_loss_2, behavior_success_loss_1, behavior_success_loss_2, user_social_relation_loss, item_social_relation_loss], alpha=self.config['alpha'])
        self.joint_model = Model(inputs=[user_input_1, item_input_1,
                                         review_input_1, rating_input_1,
                                         fraud_label_input_1,
                                         user_context_input,
                                         behavior_success_input_1,
                                         user_input_2, item_input_2,
                                         review_input_2, rating_input_2,
                                         fraud_label_input_2,
                                         item_context_input,
                                         behavior_success_input_2],
                                 outputs=loss)
        adam = optimizers.Adam(lr=self.config['lr'])
        self.joint_model.compile(optimizer=adam, loss=None)
Example #32

from keras.models import Model
from keras.layers import *
from keras.constraints import unit_norm
from margin_softmax import *

x_in = Input(shape=(maxlen,))
x_embedded = Embedding(len(chars)+2,
                       word_size)(x_in)
x = CuDNNGRU(word_size)(x_embedded)
x = Lambda(lambda x: K.l2_normalize(x, 1))(x)

pred = Dense(num_train,
             use_bias=False,
             kernel_constraint=unit_norm())(x)

encoder = Model(x_in, x)  # the real goal is to obtain this encoder
model = Model(x_in, pred)  # trained via the classification task

model.compile(loss=sparse_amsoftmax_loss,
              optimizer='adam',
              metrics=['sparse_categorical_accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs)

model.save_weights('sent_sim_amsoftmax.weights')
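sparse_amsoftmax_loss comes from the margin_softmax module, which is not shown. One common AM-Softmax formulation, sketched here for reference (the module's actual definition may differ, e.g. in its scale and margin values):

import keras.backend as K

def sparse_amsoftmax_loss(y_true, y_pred, scale=30.0, margin=0.35):
    # y_pred holds cosine similarities: unit-norm features times a unit-norm kernel
    labels = K.cast(K.flatten(y_true), 'int32')
    one_hot = K.one_hot(labels, K.int_shape(y_pred)[-1])
    logits = scale * (y_pred - margin * one_hot)  # margin on the target class only
    return K.categorical_crossentropy(one_hot, logits, from_logits=True)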