Example #1
def cnn_features_model(n_timesteps, n_features, n_outputs, nb_features):
    input = Input(shape=(n_timesteps, n_features))
    conv_1 = Convolution1D(12,
                           5,
                           input_shape=(n_timesteps, n_features),
                           padding='same',
                           activation='relu')(input)
    bat_1 = BatchNormalization()(conv_1)
    # drop_1 = Dropout(0.3)(bat_1)
    maxp_1 = MaxPooling1D(pool_size=2, padding='same', strides=2)(bat_1)

    conv_2 = Convolution1D(12,
                           5,
                           padding='same',
                           activation='relu')(maxp_1)
    bat_2 = BatchNormalization()(conv_2)
    # drop_2 = Dropout(0.3)(conv_2)
    maxp_2 = MaxPooling1D(pool_size=2, padding='same', strides=2)(bat_2)

    conv_3 = Convolution1D(20,
                           5,
                           padding='same',
                           activation='relu')(maxp_2)
    bat_3 = BatchNormalization()(conv_3)
    drop_3 = Dropout(0.3)(bat_3)
    maxp_3 = MaxPooling1D(pool_size=2, padding='same', strides=2)(drop_3)

    conv_4 = Convolution1D(16,
                           5,
                           padding='same',
                           activation='relu')(maxp_3)
    bat_4 = BatchNormalization()(conv_4)
    # drop_4 = Dropout(0.3)(conv_4)
    maxp_4 = MaxPooling1D(pool_size=2, padding='same', strides=2)(bat_4)

    # drop = Dropout(0.3)(maxp_4)
    seq_features = GlobalAveragePooling1D()(maxp_4)

    other_features = Input(shape=(nb_features, ))
    model = Concatenate()([seq_features, other_features])
    # model.add(Flatten())
    model = Dense(n_outputs,
                  activation='softmax',
                  kernel_regularizer=regularizers.l2(0.2))(model)
    model = Model([input, other_features], model)
    model.compile(
        optimizer=Adam(learning_rate=0.0003),
        # loss='categorical_crossentropy',
        # loss=focal_loss(gamma=2,alpha=1),
        loss=ghm.ghm_class_loss,
        metrics=['categorical_accuracy'])
    return model
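The builder above assumes the usual tensorflow.keras imports (Input, Convolution1D, BatchNormalization, MaxPooling1D, Dropout, GlobalAveragePooling1D, Concatenate, Dense, Model, Adam, regularizers) plus a project-specific `ghm` module providing `ghm_class_loss`; none of those appear in the snippet. A minimal, hedged usage sketch on random data (shapes and batch sizes below are illustrative assumptions, not part of the original):

# Hedged usage sketch for cnn_features_model; assumes the tensorflow.keras
# imports listed above are in scope and that a `ghm` module with
# ghm_class_loss is importable, as the snippet itself requires.
import numpy as np

n_timesteps, n_features, n_outputs, nb_features = 128, 6, 4, 10

model = cnn_features_model(n_timesteps, n_features, n_outputs, nb_features)

# Two inputs: the raw sequence branch and the hand-crafted feature branch.
x_seq = np.random.rand(32, n_timesteps, n_features).astype("float32")
x_other = np.random.rand(32, nb_features).astype("float32")
y = np.eye(n_outputs)[np.random.randint(0, n_outputs, size=32)]  # one-hot labels

model.fit([x_seq, x_other], y, epochs=2, batch_size=8)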
Example #2
def build_cos_model(input_size,
                    cos_dist_lvl,
                    n_neurons,
                    n_layers,
                    batch_norm=True,
                    loss=MeanSquaredError(),
                    optimizer=SGD(learning_rate=0.05, momentum=0.025)):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    if cos_dist_lvl == 0:
        model = Concatenate(name="concatenate")([in_1, in_2])
    else:
        model = Multiply(name="pointwise_multiply")([in_1, in_2])
        if cos_dist_lvl >= 2:
            norm_1 = Lambda(
                lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                name="norm_input_1")(in_1)
            norm_2 = Lambda(
                lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                name="norm_input_2")(in_2)
            norm_mul = Multiply(name="multiply_norms")([norm_1, norm_2])
            model = Lambda(lambda tensors: tf.divide(tensors[0], tensors[1]),
                           name="divide")([model, norm_mul])
        if cos_dist_lvl >= 3:
            model = Lambda(
                lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                name="sum")(model)
        if cos_dist_lvl >= 4:
            model = ValueMinusInput(1, name="one_minus_input")(model)

    if batch_norm:
        model = BatchNormalization(name="input_normalization")(model)

    for i in range(n_layers):
        model = Dense(n_neurons,
                      activation='sigmoid',
                      name="dense_{}".format(i))(model)
    model_out = Dense(1, activation='sigmoid', name="classify")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
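build_cos_model wires the cosine computation into the graph itself: level 0 just concatenates the two inputs, level 1 keeps the element-wise product, level 2 divides by the product of the input norms, level 3 sums over the feature axis (cosine similarity), and level 4 applies the custom ValueMinusInput layer to turn it into a distance (1 − cos). A hedged usage sketch, assuming the tensorflow.keras imports the snippet relies on and using level 3 so the undefined ValueMinusInput layer is not needed:

# Hedged usage sketch for build_cos_model (cos_dist_lvl=3 avoids the custom
# ValueMinusInput layer, which is not defined in the snippet).
import numpy as np

model = build_cos_model(input_size=64, cos_dist_lvl=3,
                        n_neurons=16, n_layers=2)

a = np.random.rand(8, 64).astype("float32")
b = np.random.rand(8, 64).astype("float32")
y = np.random.randint(0, 2, size=(8, 1)).astype("float32")

model.fit([a, b], y, epochs=1, batch_size=4)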
Example #3
def build_eucl_model(input_size,
                     eucl_dist_lvl,
                     n_neurons,
                     n_layers,
                     batch_norm=True,
                     loss=MeanSquaredError(),
                     optimizer=SGD(learning_rate=0.05, momentum=0.025)):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    if eucl_dist_lvl == 0:
        model = Concatenate(name="concatenate")([in_1, in_2])
    else:
        model = Subtract(name="subtract")([in_1, in_2])
        if eucl_dist_lvl >= 2:
            model = Lambda(lambda tensor: tf.square(tensor),
                           name="square")(model)
        if eucl_dist_lvl >= 3:
            model = Lambda(
                lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                name="sum")(model)
        if eucl_dist_lvl >= 4:
            model = Lambda(lambda tensor: tf.sqrt(tensor), name="root")(model)

    if batch_norm:
        model = BatchNormalization(name="input_normalization")(model)

    for i in range(n_layers):
        model = Dense(n_neurons,
                      activation='sigmoid',
                      name="dense_{}".format(i))(model)
    model_out = Dense(1, activation='sigmoid', name="classify")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
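build_eucl_model mirrors the cosine variant for Euclidean distance: level 1 keeps the element-wise difference, level 2 squares it, level 3 sums over the feature axis (squared distance), and level 4 takes the square root (true Euclidean distance). A hedged sketch, assuming the same tensorflow.keras imports, that extracts the intermediate "root" layer of the level-4 graph and checks it against numpy:

# Hedged sketch: confirm the level-4 branch really computes ||a - b||.
import numpy as np
from tensorflow.keras import Model

model = build_eucl_model(input_size=32, eucl_dist_lvl=4,
                         n_neurons=8, n_layers=1, batch_norm=False)

dist_model = Model(model.inputs, model.get_layer("root").output)

a = np.random.rand(4, 32).astype("float32")
b = np.random.rand(4, 32).astype("float32")

keras_dist = dist_model.predict([a, b]).ravel()
numpy_dist = np.linalg.norm(a - b, axis=1)
print(np.allclose(keras_dist, numpy_dist, atol=1e-5))  # expected: True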
Example #4
def discriminator(height, width, channels, label_dim_1, label_dim_2, learning_rate):

    input_image = Input((height, width, channels))
    input_label = Input((label_dim_1,))
    input_gen = Input((label_dim_2,))

    # Project the concatenated condition vectors to a single-channel map the
    # same size as the image, so it can be stacked onto the image channels.
    label = Concatenate()([input_label, input_gen])
    label = Dense(height * width)(label)
    label = LeakyReLU()(label)
    label = Reshape((height, width, 1))(label)

    disc = Concatenate()([input_image, label])

    disc = Conv2D(128, 4, strides=2, padding="same")(disc)
    disc = LeakyReLU()(disc)

    disc = Conv2D(256, 4, strides=2, padding="same")(disc)
    disc = LeakyReLU()(disc)

    disc = Conv2D(512, 4, strides=2, padding="same")(disc)
    #disc = BatchNormalization()(disc)
    disc = LeakyReLU()(disc)

    disc = Conv2D(1024, 4, strides=2, padding="same")(disc)
    disc = LeakyReLU()(disc)

    disc = Flatten()(disc)

    disc = Dropout(0.4)(disc)

    disc = Dense(1, activation="sigmoid")(disc)

    disc = Model([input_image, input_label, input_gen], disc)

    discriminator_optimizer = Adam(learning_rate=learning_rate, beta_1=0.5)

    disc.compile(discriminator_optimizer,
                 loss="binary_crossentropy",
                 metrics=['binary_accuracy'])

    return disc
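The discriminator conditions on two label vectors by projecting their concatenation to a single-channel height×width map, stacking it onto the image, and downsampling with strided convolutions to a single real/fake probability. A hedged construction sketch on small dummy shapes (the sizes below are illustrative, not from the original), assuming the tensorflow.keras layers and Adam used above are imported:

# Hedged usage sketch for discriminator() on random inputs.
import numpy as np

disc = discriminator(height=64, width=64, channels=3,
                     label_dim_1=10, label_dim_2=100,
                     learning_rate=2e-4)

images = np.random.rand(4, 64, 64, 3).astype("float32")
labels = np.random.rand(4, 10).astype("float32")
gen_labels = np.random.rand(4, 100).astype("float32")

print(disc.predict([images, labels, gen_labels]).shape)  # (4, 1)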
Example #5
train_images_mnist = train_images_mnist.reshape(
    (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = test_images_mnist.reshape(
    (test_images_mnist.shape[0], 28, 28, 1))

# print(train_images_mnist.shape)
# print(test_images_mnist)
print('test_images_mnist:', test_images_mnist.shape)
print('test_labels_mnist:', test_labels_mnist.shape)

sgd = optimizers.SGD(learning_rate=0.01,
                     decay=1e-6,
                     momentum=0.9,
                     nesterov=True)

model.compile(
    optimizer=sgd,
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

history = model.fit(train_images_mnist,
                    train_labels_mnist,
                    batch_size=256,
                    epochs=200,
                    validation_data=(test_images_mnist, test_labels_mnist))

plt.plot(history.history["accuracy"])
plt.plot(history.history['val_accuracy'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
Example #6
def LSTM_model_veracity(x_train_embeddings,
                        x_train_metafeatures,
                        y_train,
                        x_test_embeddings,
                        x_test_metafeatures,
                        params,
                        eval=False,
                        use_embeddings=True,
                        use_metafeatures=True,
                        Early_Stopping=True,
                        log_path=""):
    # Parameter search
    log_dir = log_path + datetime.datetime.now().strftime("%d%m%Y-%H%M%S")

    num_lstm_units = int(params['num_lstm_units'])
    num_lstm_layers = int(params['num_lstm_layers'])
    num_dense_layers = int(params['num_dense_layers'])
    num_dense_units = int(params['num_dense_units'])
    num_epochs = params['num_epochs']
    learn_rate = params['learn_rate']
    mb_size = params['mb_size']
    l2reg = params['l2reg']
    dropout = params['dropout']
    attention = params['attention']

    # Defining input shapes
    if use_embeddings:
        emb_shape = x_train_embeddings[0].shape

    if use_metafeatures:
        metafeatures_shape = x_train_metafeatures[0].shape

    # Creating the two inputs
    if use_embeddings:
        emb_input = Input(shape=emb_shape, name='Embeddings')

    if use_metafeatures:
        metafeatures_input = Input(shape=metafeatures_shape,
                                   name='Metafeatures')

    # Adding masks to account for zero paddings
    if use_embeddings:
        emb_mask = Masking(mask_value=0)(emb_input)
    if use_metafeatures:
        metafeatures_mask = Masking(mask_value=0)(metafeatures_input)

    # Adding attention and LSTM layers with varying layers and units using parameter search
    if attention == 1:
        for nl in range(num_lstm_layers):
            if use_embeddings:
                emb_LSTM_query = Bidirectional(
                    LSTM(num_lstm_units,
                         dropout=dropout,
                         recurrent_dropout=0.2,
                         return_sequences=True))(emb_mask)

                emb_LSTM_value = Bidirectional(
                    LSTM(num_lstm_units,
                         dropout=dropout,
                         recurrent_dropout=0.2,
                         return_sequences=True))(emb_mask)
            if use_metafeatures:
                metafeatures_LSTM_query = Bidirectional(
                    LSTM(num_lstm_units,
                         dropout=dropout,
                         recurrent_dropout=0.2,
                         return_sequences=True))(metafeatures_mask)

                metafeatures_LSTM_value = Bidirectional(
                    LSTM(num_lstm_units,
                         dropout=dropout,
                         recurrent_dropout=0.2,
                         return_sequences=True))(metafeatures_mask)
        if use_embeddings:
            emb_LSTM = AdditiveAttention(name='Attention_Embeddings')(
                [emb_LSTM_query, emb_LSTM_value])
        if use_metafeatures:
            metafeatures_LSTM = AdditiveAttention(
                name='Attention_Metafeatures')(
                    [metafeatures_LSTM_query, metafeatures_LSTM_value])
    else:
        if use_embeddings:
            emb_LSTM = Bidirectional(
                LSTM(num_lstm_units,
                     dropout=dropout,
                     recurrent_dropout=dropout,
                     return_sequences=True))(emb_mask)
        if use_metafeatures:
            metafeatures_LSTM = Bidirectional(
                LSTM(num_lstm_units,
                     dropout=dropout,
                     recurrent_dropout=dropout,
                     return_sequences=True))(metafeatures_mask)

    if use_embeddings and use_metafeatures:
        # Concatenating the two inputs
        model = Concatenate()([emb_LSTM, metafeatures_LSTM])
    elif use_metafeatures:
        model = metafeatures_LSTM

    # Adding attention and another LSTM to the concatenated layers
    if attention == 1:
        model_query = Bidirectional(
            LSTM(num_lstm_units,
                 dropout=dropout,
                 recurrent_dropout=0.2,
                 return_sequences=False))(model)
        model_value = Bidirectional(
            LSTM(num_lstm_units,
                 dropout=dropout,
                 recurrent_dropout=0.2,
                 return_sequences=False))(model)
        model = AdditiveAttention(name='Attention_Model')(
            [model_query, model_value])

    else:
        model = Bidirectional(
            LSTM(num_lstm_units,
                 dropout=dropout,
                 recurrent_dropout=dropout,
                 return_sequences=False))(model)

    # Adding dense layer with varying layers and units using parameter search
    for nl in range(num_dense_layers):
        model = Dense(num_dense_units)(model)
        model = LeakyReLU()(model)

    # Adding dropout to the model
    model = Dropout(dropout)(model)

    # Adding softmax dense layer with varying l2 regularizers using parameter search
    output = Dense(3,
                   activation='softmax',
                   activity_regularizer=regularizers.l2(l2reg),
                   name='labels')(model)

    # Model output
    if use_embeddings and use_metafeatures:
        model = Model(inputs=[emb_input, metafeatures_input], outputs=output)
    elif use_metafeatures:
        model = Model(inputs=metafeatures_input, outputs=output)
    #model = Model(inputs=emb_input, outputs=output)
    # Plotting the model
    #plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)

    # Adding Adam optimizer with varying learning rate using parameter search
    adam = optimizers.Adam(learning_rate=learn_rate,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=1e-08)

    # Compiling model
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    callback_list = []
    #TensorBoard
    tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
    callback_list.append(tensorboard_callback)

    #Early_Stopping
    if Early_Stopping:
        earlystop_callback = EarlyStopping(monitor='val_accuracy',
                                           min_delta=0.0001,
                                           patience=5)
        callback_list.append(earlystop_callback)

    #plot_model(model, "model.png")
    if Early_Stopping:
        # Fitting the model with varying batch sizes and epochs using parameter search
        if use_embeddings and use_metafeatures:
            model.fit(
                {
                    'Embeddings': x_train_embeddings,
                    'Metafeatures': x_train_metafeatures
                },
                y_train,
                batch_size=mb_size,
                epochs=num_epochs,
                shuffle=True,
                class_weight=None,
                verbose=1,
                callbacks=callback_list,
                validation_split=.1)
        elif use_metafeatures:
            model.fit(x_train_metafeatures,
                      y_train,
                      batch_size=mb_size,
                      epochs=num_epochs,
                      shuffle=True,
                      class_weight=None,
                      verbose=1,
                      callbacks=callback_list,
                      validation_split=.1)
    else:
        # Fitting the model with varying batch sizes and epochs using parameter search
        if use_embeddings and use_metafeatures:
            model.fit(
                {
                    'Embeddings': x_train_embeddings,
                    'Metafeatures': x_train_metafeatures
                },
                y_train,
                batch_size=mb_size,
                epochs=num_epochs,
                shuffle=True,
                class_weight=None,
                verbose=1,
                callbacks=callback_list)
        elif use_metafeatures:
            model.fit(x_train_metafeatures,
                      y_train,
                      batch_size=mb_size,
                      epochs=num_epochs,
                      shuffle=True,
                      class_weight=None,
                      verbose=1,
                      callbacks=callback_list)

    # Evaluation time
    if eval:

        model.save('output\\model_veracity.h5')
        json_string = model.to_json()
        with open('output\\model_architecture_veracity.json', 'w') as fout:
            json.dump(json_string, fout)
        model.save_weights('output\\model_veracity_weights.h5')

    # Getting confidence of the model
    if use_embeddings and use_metafeatures:
        pred_probabilities = model.predict(
            [x_test_embeddings, x_test_metafeatures],
            batch_size=mb_size,
            verbose=0)
        confidence = np.max(pred_probabilities, axis=1)

        # Getting predictions of the model
        y_prob = model.predict([x_test_embeddings, x_test_metafeatures],
                               batch_size=mb_size)
        Y_pred = y_prob.argmax(axis=-1)
    elif use_metafeatures:
        pred_probabilities = model.predict(x_test_metafeatures,
                                           batch_size=mb_size,
                                           verbose=0)
        confidence = np.max(pred_probabilities, axis=1)

        # Getting predictions of the model
        y_prob = model.predict(x_test_metafeatures, batch_size=mb_size)
        Y_pred = y_prob.argmax(axis=-1)

    return Y_pred, confidence
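LSTM_model_veracity relies on imports the snippet omits (numpy, json, datetime, tensorflow.keras layers, optimizers, callbacks TensorBoard/EarlyStopping, regularizers) and expects padded 3-D sequence inputs. A hedged call sketch on random data with an illustrative params dict (all values below are placeholders, not the original search space):

# Hedged usage sketch for LSTM_model_veracity on random padded sequences.
import numpy as np

params = {
    'num_lstm_units': 32, 'num_lstm_layers': 1,
    'num_dense_layers': 1, 'num_dense_units': 16,
    'num_epochs': 2, 'learn_rate': 1e-3, 'mb_size': 8,
    'l2reg': 1e-4, 'dropout': 0.2, 'attention': 0,
}

n_train, n_test, timesteps, emb_dim, meta_dim = 64, 16, 20, 50, 12
x_tr_emb = np.random.rand(n_train, timesteps, emb_dim).astype("float32")
x_tr_meta = np.random.rand(n_train, timesteps, meta_dim).astype("float32")
y_tr = np.eye(3)[np.random.randint(0, 3, size=n_train)]  # 3-class one-hot
x_te_emb = np.random.rand(n_test, timesteps, emb_dim).astype("float32")
x_te_meta = np.random.rand(n_test, timesteps, meta_dim).astype("float32")

y_pred, confidence = LSTM_model_veracity(
    x_tr_emb, x_tr_meta, y_tr, x_te_emb, x_te_meta, params,
    eval=False, Early_Stopping=False, log_path="logs/")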
Example #7
    def compileComponent(self,verbose=False,component_type=None):
        # Pick the layout file for the requested component and close it
        # once the YAML has been parsed.
        if(component_type=='discriminator'):
            layout_path = self.layout_dir+"/discriminator_layout.yaml"
        elif(component_type=='generator'):
            layout_path = self.layout_dir+"/generator_layout.yaml"

        with open(layout_path, "r") as stream:
            data = yaml.load(stream, Loader=yaml.FullLoader)
        g1_instructions= data['join'][0]
        g2_instructions= data['join'][1]
        gc_instructions = data['layers']
        if(component_type=='discriminator'):
            g_in_1 = Input((1,))
        elif(component_type=='generator'):
            g_in_1 = Input((self.noise,))

        g_in_2=[]
        for i in range(self.dimensionality):
            g_in_2.append(Input((1,)))

        g1 = (g_in_1)
        for layer_info in g1_instructions['layers']:
            if(self.overrides.get('activation')):
                activation = self.overrides.get('activation')
            else:
                activation = layer_info.get('activation')
            if(component_type=='discriminator' and self.overrides.get('d_nodes')):
                units = self.overrides.get('d_nodes')
            elif(component_type=='generator' and self.overrides.get('g_nodes')):
                units = self.overrides.get('g_nodes')
            else:
                units =  layer_info.get('nodes')
            if(self.overrides.get('dropout_amount')):
                rate = self.overrides.get('dropout_amount')
            else:
                rate = layer_info.get('dropout_amount')
            if(self.overrides.get('leaky_amount')):
                alpha = self.overrides.get('leaky_amount')
            else:
                alpha = layer_info.get('leaky_amount')
            if(layer_info['layer_type']=='dense'):
                g1=Dense(units=units,activation=activation)(g1)
            elif(layer_info['layer_type']=='dropout'):
                g1=Dropout(rate=rate)(g1)
            elif(layer_info['layer_type']=='selu'):
                g1=LeakyReLU(alpha=alpha)(g1)
            elif(layer_info['layer_type']=='batchnorm'):
                g1=BatchNormalization()(g1)
        g2=[]
        for i in range(self.dimensionality):
            g2_current = (g_in_2[i])
            for layer_info in g2_instructions['layers']:
                if(self.overrides.get('activation')):
                    activation = self.overrides.get('activation')
                else:
                    activation = layer_info.get('activation')
                if(component_type=='discriminator' and self.overrides.get('d_nodes')):
                    units = self.overrides.get('d_nodes')
                elif(component_type=='generator' and self.overrides.get('g_nodes')):
                    units = self.overrides.get('g_nodes')
                else:
                    units =  layer_info.get('nodes')
                if(self.overrides.get('dropout_amount')):
                    rate = self.overrides.get('dropout_amount')
                else:
                    rate = layer_info.get('dropout_amount')
                if(self.overrides.get('leaky_amount')):
                    alpha = self.overrides.get('leaky_amount')
                else:
                    alpha = layer_info.get('leaky_amount')
                if(layer_info['layer_type']=='dense'):
                    g2_current=Dense(units=units,activation=activation)(g2_current)
                elif(layer_info['layer_type']=='dropout'):
                    g2_current=Dropout(rate=rate)(g2_current)
                elif(layer_info['layer_type']=='selu'):
                    g2_current=LeakyReLU(alpha=alpha)(g2_current)
                elif(layer_info['layer_type']=='batchnorm'):
                    g2_current=BatchNormalization()(g2_current)

            g2.append(g2_current)
        gc = Concatenate()([g1]+g2)

        for layer_info in gc_instructions:
            if(self.overrides.get('activation')):
                activation = self.overrides.get('activation')
            else:
                activation = layer_info.get('activation')
            if(component_type=='discriminator' and self.overrides.get('d_nodes')):
                units = self.overrides.get('d_nodes')
            elif(component_type=='generator' and self.overrides.get('g_nodes')):
                units = self.overrides.get('g_nodes')
            else:
                units =  layer_info.get('nodes')
            if(self.overrides.get('dropout_amount')):
                rate = self.overrides.get('dropout_amount')
            else:
                rate = layer_info.get('dropout_amount')
            if(self.overrides.get('leaky_amount')):
                alpha = self.overrides.get('leaky_amount')
            else:
                alpha = layer_info.get('leaky_amount')
            if(layer_info['layer_type']=='dense'):
                gc=Dense(units=units,activation=activation)(gc)
            elif(layer_info['layer_type']=='dropout'):
                gc=Dropout(rate=rate)(gc)
            elif(layer_info['layer_type']=='selu'):
                gc=LeakyReLU(alpha=alpha)(gc)
            elif(layer_info['layer_type']=='batchnorm'):
                gc=BatchNormalization()(gc)

        if(component_type=='generator'):
            gc = Dense(1, activation="sigmoid")(gc)
            gc = Model(name="Generator", inputs=[g_in_1]+g_in_2, outputs=[gc])
        elif(component_type=='discriminator'):
            gc = Dense(2, activation="softmax")(gc)
            gc = Model(name="Discriminator", inputs=[g_in_1]+g_in_2, outputs=[gc])
            gc.compile(loss="categorical_crossentropy", optimizer=Adam(self.d_training_rate, beta_1=self.d_beta1), metrics=["accuracy"])

        if(verbose):
            gc.summary()
        return gc
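compileComponent expects a YAML layout with a `join` list of two per-input branch definitions (each holding its own `layers` list: the first branch feeds g_in_1, the second is repeated for every g_in_2[i]) and a top-level `layers` list for the trunk after Concatenate; every layer entry carries `layer_type` plus optional `nodes`, `activation`, `dropout_amount`, and `leaky_amount`. A hedged sketch of a layout file matching what the parser reads, generated from Python so the document stays in one language; the concrete values are illustrative placeholders only:

# Hedged sketch: write a discriminator_layout.yaml with the schema that
# compileComponent() reads. Values are placeholders, not the original layout.
import yaml

layout = {
    "join": [
        {"layers": [                                   # branch for g_in_1
            {"layer_type": "dense", "nodes": 32, "activation": "relu"},
            {"layer_type": "selu", "leaky_amount": 0.2},  # mapped to LeakyReLU above
        ]},
        {"layers": [                                   # branch applied to each g_in_2[i]
            {"layer_type": "dense", "nodes": 16, "activation": "relu"},
            {"layer_type": "dropout", "dropout_amount": 0.3},
        ]},
    ],
    "layers": [                                        # trunk after Concatenate
        {"layer_type": "dense", "nodes": 64, "activation": "relu"},
        {"layer_type": "batchnorm"},
    ],
}

with open("discriminator_layout.yaml", "w") as fout:
    yaml.safe_dump(layout, fout, sort_keys=False)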