Example #1
 def fit(self, X, y, val_X, val_y):
     checkpoint = ModelCheckpoint(filepath='m4.hdf5',
                                  verbose=1,
                                  save_best_only=True,
                                  save_weights_only=True,
                                  monitor='val_acc',
                                  mode='max')
     callbacks = [checkpoint]
     self.clf = Sequential()
     self.clf.add(Dense(256, input_dim=X.shape[1],
                        kernel_initializer='uniform'))  # Keras 2 name for init=
     self.clf.add(advanced_activations.ELU(alpha=1.0))
     self.clf.add(Dropout(0.6))
     self.clf.add(Dense(128, kernel_initializer='uniform'))
     self.clf.add(advanced_activations.ELU(alpha=1.0))
     self.clf.add(Dropout(0.6))
     self.clf.add(Dense(64, use_bias=True, kernel_initializer='uniform'))
     self.clf.add(advanced_activations.ELU(alpha=1.0))
     self.clf.add(Dropout(0.6))
     #model.add(Dense(4, activation='sigmoid', use_bias=True))
     #model.add(Dropout(0.5))
     self.clf.add(Dense(1, activation='sigmoid', kernel_initializer='uniform'))
     adam = Adam(lr=0.001, decay=1e-6, clipvalue=0.5)
     self.clf.compile(loss='binary_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])
     return self.clf.fit(X,
                         y,
                         epochs=50,
                         batch_size=2800,
                         validation_data=(val_X, val_y),
                         verbose=1,
                         callbacks=callbacks)
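A minimal usage sketch, assuming fit is a method of some classifier wrapper; the enclosing class (here called EluClf) and the random data are hypothetical, not part of the excerpt:

# Hypothetical driver with synthetic data; EluClf stands in for the real wrapper.
import numpy as np

np.random.seed(0)
X, y = np.random.rand(5000, 30), np.random.randint(0, 2, 5000)
val_X, val_y = np.random.rand(1000, 30), np.random.randint(0, 2, 1000)
history = EluClf().fit(X, y, val_X, val_y)  # fit() returns the Keras History
print(max(history.history['val_acc']))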
Example #2
from keras.layers import Activation, ReLU, LeakyReLU
from keras.layers import advanced_activations


def ResolvedSyn(x=None, act_type='sigmoid'):
    # Wrap tensor x in the requested activation layer. Note that ReLU and
    # LeakyReLU are layer classes: they must be instantiated and then called
    # on the tensor; passing x to the constructor would set a hyperparameter
    # instead. (The parameter was renamed from `type`, which shadows the
    # builtin.)
    if act_type == 'sigmoid':
        resolved = Activation('sigmoid')(x)
    elif act_type == 'tanh':
        resolved = Activation('tanh')(x)
    elif act_type == 'relu':
        resolved = ReLU()(x)
    elif act_type == 'leakyrelu':
        resolved = LeakyReLU()(x)
    elif act_type == 'elu':
        resolved = advanced_activations.ELU()(x)
    else:
        raise ValueError('unknown activation type: {0}'.format(act_type))
    print("activated:", resolved)
    return resolved
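A minimal sketch (not from the source) of how ResolvedSyn slots into a functional-API model:

# Assumed usage inside a tiny functional model.
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(16,))
hidden = Dense(32)(inputs)
hidden = ResolvedSyn(hidden, act_type='elu')     # attach the chosen activation
outputs = Dense(1, activation='sigmoid')(hidden)
model = Model(inputs, outputs)
model.summary()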
Example #3
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_test_np = np.array(y_test)
y_test_np = y_test_np.flatten()

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

adam = keras.optimizers.Adam(lr=0.0001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             decay=0.00001)
ELU_advanced = aact.ELU(alpha=1.1)
regularizer = keras.regularizers.l1(0.001)

tensorboard_caller = keras.callbacks.TensorBoard(log_dir='./logs/Result',
                                                 histogram_freq=0,
                                                 write_graph=True,
                                                 write_images=False)

check_point = keras.callbacks.ModelCheckpoint(filepath='./weights_Result.hdf5',
                                              monitor='val_loss',
                                              verbose=1,
                                              save_best_only=True,
                                              save_weights_only=False,
                                              mode='auto',
                                              period=1)
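The excerpt defines the optimizer, regularizer, and callbacks but stops before attaching them to a model. A plausible wiring, assuming the surrounding script builds some `model` (batch size and epoch count are illustrative):

# Assumed continuation: hook the optimizer and callbacks into training.
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=128, epochs=20,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard_caller, check_point])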
Example #4
def mask_padding(x):
    # Reconstructed function header: the original def line (and this example's
    # header) were lost in the excerpt. Assumed contract: x = [output, input];
    # zero out positions where the zero-padded input is 0.0.
    padding = K.cast(K.not_equal(x[1], 0.0), dtype=K.floatx())
    return x[0] * padding
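The decoder below wraps a `repeat` helper in a Lambda, but the helper itself is not in the excerpt. A plausible reconstruction, assuming it broadcasts the fixed-size embedding across the input's (dynamic) number of timesteps:

# Assumed reconstruction of the missing helper used by Lambda(repeat) below.
def repeat(x):
    original, embedding = x                    # [masked_input, encoded_final]
    ones = K.ones_like(original[:, :, :1])     # (batch, T, 1), T is dynamic
    return ones * K.expand_dims(embedding, 1)  # broadcast to (batch, T, embed_dim)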

if __name__ == "__main__":
    X_train, X_val, y_train, y_val = open_data('./UCR_TS_Archive_2015')
    input_dim = X_train.shape[-1] # 13
    timesteps = X_train.shape[1] # 3
    batch_size = 32

    # Create graph structure.
    input_placeholder = Input(shape=(None, input_dim))

    # Encoder.
    masked_input = Masking(mask_value=0.0, input_shape=(None, input_dim),
                           name='masking_layer')(input_placeholder)
    encoded = LSTM(60, return_sequences=True, dropout=0.2, unit_forget_bias=True)(masked_input)
    encoded = advanced_activations.ELU(alpha=0.5)(encoded)
    encoded = LSTM(60, return_sequences=True, dropout=0.2, unit_forget_bias=True)(encoded)
    encoded = advanced_activations.ELU(alpha=0.5)(encoded)
    encoded = LSTM(60, dropout=0.2, unit_forget_bias=True)(encoded)
    encoded = advanced_activations.ELU(alpha=0.5)(encoded)
    encoded = Dense(5)(encoded)
    encoded = advanced_activations.ELU(alpha=0.5)(encoded)
    encoded_final = BatchNormalization(name='embedding')(encoded)

    # Decoder.
    decoded = Lambda(repeat)([masked_input, encoded_final])
    decoded = LSTM(60, return_sequences=True, dropout=0.2, unit_forget_bias=True)(decoded)
    decoded = advanced_activations.ELU(alpha=0.5)(decoded)
    decoded = LSTM(60, return_sequences=True, dropout=0.2, unit_forget_bias=True)(decoded)
    decoded = advanced_activations.ELU(alpha=0.5)(decoded)
    decoded = LSTM(input_dim, return_sequences=True, dropout=0.2, unit_forget_bias=True)(decoded)
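The example breaks off here. A typical continuation (an assumption, consistent with the mask_padding fragment above) would mask the padded timesteps and train the autoencoder to reconstruct its input:

    # Assumed continuation: mask padded steps, assemble, and train.
    decoded = Lambda(mask_padding)([decoded, masked_input])
    autoencoder = Model(input_placeholder, decoded)
    autoencoder.compile(optimizer='adam', loss='mse')
    autoencoder.fit(X_train, X_train, batch_size=batch_size, epochs=50,
                    validation_data=(X_val, X_val))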
Example #5
def main(config):
    # read and preprocess of data
    df = pd.read_csv(config.get('GENERAL', 'inputfile'))
    df['Amount_max_fraud'] = 1
    df.loc[df.Amount <= 2125.87, 'Amount_max_fraud'] = 0
    df = df.drop(['V28', 'V27', 'V26', 'V25', 'V24', 'V23', 'V22', 'V20',
                  'V15', 'V13', 'V8'], axis=1)
    df['V1_'] = df.V1.map(lambda x: 1 if x < -3 else 0)
    df['V2_'] = df.V2.map(lambda x: 1 if x > 2.5 else 0)
    df['V3_'] = df.V3.map(lambda x: 1 if x < -4 else 0)
    df['V4_'] = df.V4.map(lambda x: 1 if x > 2.5 else 0)
    df['V5_'] = df.V5.map(lambda x: 1 if x < -4.5 else 0)
    df['V6_'] = df.V6.map(lambda x: 1 if x < -2.5 else 0)
    df['V7_'] = df.V7.map(lambda x: 1 if x < -3 else 0)
    df['V9_'] = df.V9.map(lambda x: 1 if x < -2 else 0)
    df['V10_'] = df.V10.map(lambda x: 1 if x < -2.5 else 0)
    df['V11_'] = df.V11.map(lambda x: 1 if x > 2 else 0)
    df['V12_'] = df.V12.map(lambda x: 1 if x < -2 else 0)
    df['V14_'] = df.V14.map(lambda x: 1 if x < -2.5 else 0)
    df['V16_'] = df.V16.map(lambda x: 1 if x < -2 else 0)
    df['V17_'] = df.V17.map(lambda x: 1 if x < -2 else 0)
    df['V18_'] = df.V18.map(lambda x: 1 if x < -2 else 0)
    df['V19_'] = df.V19.map(lambda x: 1 if x > 1.5 else 0)
    df['V21_'] = df.V21.map(lambda x: 1 if x > 0.6 else 0)
    df = df.rename(columns={'Class': 'Fraud'})
    y = list(df.Fraud.values)
    print('df', df.shape)
    df = df.drop(['Fraud'], axis=1)
    X = list(df.values)
    # split data
    train_X, train_y, val_X, val_y\
            = Utils.valicut(X, y, float(config.get('VALIDATE', 'ratio')))
    Utils.verbose_print('Standardizing.')
    train_X = np.array(train_X)
    val_X = np.array(val_X)
    index = list(range(19))  # indices of the columns to standardize
    a = train_X[:, index]
    scaler = StandardScaler().fit(a)
    a = scaler.transform(a)
    b = val_X[:, index]
    b = scaler.transform(b)
    train_X[:, index] = a
    val_X[:, index] = b

    # sample
    Utils.verbose_print('Sampling, method = {0}.'.format(
        config.get('SAMPLER', 'method')))
    smp = sampler.Smp(config)
    train_X, train_y = smp.fit_sample(train_X, train_y)
    Utils.verbose_print('data size: {0}.'.format(len(train_y)))
    print(train_X.shape)
    print(val_X.shape)
    #building model
    train_y = np.array(train_y)
    val_y = np.array(val_y)
    early = EarlyStopping('val_acc', patience=0)  # defined but never added to callbacks below
    checkpoint = ModelCheckpoint(filepath='m3.hdf5',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 monitor='val_acc',
                                 mode='max')
    callbacks = [checkpoint]
    model = Sequential()
    model.add(Dense(256, input_dim=train_X.shape[1],
                    kernel_initializer='uniform'))  # Keras 2 name for init=
    model.add(advanced_activations.ELU(alpha=1.0))
    model.add(Dropout(0.6))
    model.add(Dense(128, kernel_initializer='uniform'))
    model.add(advanced_activations.ELU(alpha=1.0))
    model.add(Dropout(0.6))
    model.add(Dense(64, use_bias=True, kernel_initializer='uniform'))
    model.add(advanced_activations.ELU(alpha=1.0))
    model.add(Dropout(0.6))
    #model.add(Dense(4, activation='sigmoid', use_bias=True))
    #model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid', kernel_initializer='uniform'))
    adam = Adam(lr=0.001, decay=1e-6, clipvalue=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    model.fit(train_X,
              train_y,
              epochs=50,
              batch_size=2800,
              validation_data=(val_X, val_y),
              verbose=1,
              callbacks=callbacks)
    model.load_weights("m3.hdf5")
    # validate
    Utils.verbose_print('Validating.')
    #result_y = clf.predict(val_X)
    result_y = model.predict(val_X)
    print(result_y.shape)
    result_y = [int(round(p[0])) for p in result_y]
    print(result_y)
    correction = Utils.correction(val_y, result_y)
    truth_table = Utils.truth_table(val_y, result_y)
    print('Correction:{0}'.format(correction))
    Utils.verbose_print(Utils.print_truth_table(truth_table))
    f1_score, Precision, Recall = Utils.f1_score(truth_table)
    Utils.verbose_print('F1 Score:{0}'.format(f1_score))
    Utils.verbose_print('Precision:{0}'.format(Precision))
    Utils.verbose_print('Recall:{0}'.format(Recall))
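The validation step above thresholds the sigmoid output at 0.5 via round(); on imbalanced fraud data it can pay to sweep the threshold. A minimal sketch (an addition, not part of the original):

    # Assumed add-on: trade precision against recall by moving the cutoff.
    probs = model.predict(val_X).ravel()
    for t in (0.3, 0.5, 0.7):
        preds = (probs >= t).astype(int)
        tp = int(np.sum((preds == 1) & (val_y == 1)))
        fp = int(np.sum((preds == 1) & (val_y == 0)))
        fn = int(np.sum((preds == 0) & (val_y == 1)))
        print(t, tp / max(tp + fp, 1), tp / max(tp + fn, 1))  # threshold, precision, recall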
Example #6
 * Optimizer function is 'Adam'
'''
'''
 * Network size constants
'''
FEATURE_NUM = 36
CLASSES = 1
HIDDEN1_SIZE = 100
HIDDEN2_SIZE = 50
MAX_RANGE = 1000

model = Sequential()

model.add(Dense(HIDDEN1_SIZE, input_dim=FEATURE_NUM, kernel_initializer='uniform'))

model.add(advanced_activations.ELU(alpha=1.0))
model.add(Dropout(0.6))
model.add(Dense(HIDDEN2_SIZE, kernel_initializer='uniform'))

model.add(advanced_activations.ELU(alpha=1.0))
model.add(Dropout(0.6))
model.add(Dense(CLASSES, kernel_initializer='uniform', activation='relu'))
'''
 * tensorboard and checkpoints saver callbacks
 * The Keras TensorBoard graph is less polished than the native TensorFlow graph, but much easier to use.
'''
checkpointer = ModelCheckpoint(filepath="/tmp/weights.hdf5",
                               verbose=1,
                               save_best_only=True)
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
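The excerpt stops inside the TensorBoard constructor. Given the docstring's note that the optimizer is Adam, a plausible continuation (assumed; the data variables and epoch count are illustrative) is:

# Assumed continuation once the callbacks are defined.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=32,
          validation_split=0.2, callbacks=[checkpointer, tensorboard])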
Example #7
def EERACN(shape, NOL):
    """
    Network proposed in the paper by Bing Xu et al.
    https://arxiv.org/pdf/1505.00853.pdf
    """

    model = Sequential()

    model.add(
        Conv2D(192, (5, 5),
               input_shape=shape,
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())
    model.add(
        Conv2D(160, (1, 1),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())
    model.add(
        Conv2D(96, (1, 1),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    #model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Dropout(0.5))

    model.add(
        Conv2D(192, (5, 5),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())
    model.add(
        Conv2D(192, (1, 1),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())
    model.add(
        Conv2D(192, (1, 1),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Dropout(0.5))

    model.add(
        Conv2D(192, (3, 3),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())
    model.add(
        Conv2D(192, (1, 1),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())
    model.add(
        Conv2D(10, (1, 1),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    # model.add(advanced_activations.LeakyReLU(alpha=0.31))
    model.add(advanced_activations.ELU())

    model.add(
        AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding='same'))

    model.add(Flatten())

    model.add(Dense(NOL, activation='softmax'))

    return model
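A usage sketch; the input shape and class count are assumed (e.g. CIFAR-10), not taken from the source:

# Assumed usage: 32x32 RGB inputs, 10 classes.
model = EERACN(shape=(32, 32, 3), NOL=10)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()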
Example #8
def Neural_net(x_train_res, y_train_res, method, feature_count):
    global test
    global test_labels
    global x_val
    global y_val
    model = Sequential()
    model.add(Dense(50, kernel_initializer='uniform', input_dim=feature_count))
    model.add(advanced_activations.ELU(alpha=1.0))
    model.add(Dropout(0.8))
    model.add(Dense(30, kernel_initializer='uniform'))
    model.add(advanced_activations.ELU(alpha=1.0))
    model.add(Dropout(0.9))
    model.add(Dense(15, kernel_initializer='uniform', activation="relu"))
    # model.add(Dense(15, kernel_initializer='uniform'))
    # model.add(advanced_activations.ELU(alpha=1.0))
    # model.add(Dense(10, init='uniform', activation=''))
    model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

    # Compile the model. With a single sigmoid output, plain 'accuracy' is the
    # right metric; 'sparse_categorical_accuracy' expects class-index targets.
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train_res,
              y_train_res,
              epochs=10,
              batch_size=1000,
              validation_data=(x_val, y_val))
    # output = model.predict_classes(x_val)
    # output = [i[0] for i in output]
    # output = numpy.array(output)
    # print(output[y_val == 1])
    # print(sum(output[y_val == 1]))
    # print(sum(output))
    # print(sum(y_val))
    # print(recall_score(y_val, output))
    # print(precision_score(y_val, output))

    # scores = model.evaluate(train_data, train_labels)
    # print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    print("complete")

    ##Computing false and true positive rates
    output = model.predict_classes(test)
    output = [i[0] for i in output]
    output = numpy.array(output)
    print(output[test_labels == 1])
    print(sum(output[test_labels == 1]))
    print(sum(output))
    print(sum(test_labels))
    print(recall_score(test_labels, output))
    print(precision_score(test_labels, output))
    # roc_curve / roc_auc_score take (y_true, y_score), in that order.
    fpr, tpr, _ = roc_curve(test_labels, output, drop_intermediate=False)
    if sum(numpy.isnan(tpr)) == 0:
        roc_acc = roc_auc_score(test_labels, output)
        roc_acc = "AUC: " + str(round(roc_acc, 3))
    else:
        roc_acc = "AUC: n/a"

    plt.figure()
    ##Adding the ROC
    plt.plot(fpr, tpr, color='red', lw=2, label='ROC curve')
    ##Random FPR and TPR
    plt.plot([0, 1], [0, 1], color='blue', lw=2, linestyle='--')
    ##Title and label
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('Neural Network - (' + method + ")" + str(roc_acc))
    # plt.show()
    plt.savefig(fname=method + ".png", pad_inches=0.2)
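Neural_net reads test, test_labels, x_val, and y_val as globals, so any driver has to define them at module scope. A random-data smoke test (assumed, for illustration only):

# Hypothetical driver with synthetic data; real features replace these arrays.
import numpy
numpy.random.seed(0)
x_train_res = numpy.random.rand(2000, 25)
y_train_res = numpy.random.randint(0, 2, 2000)
x_val, y_val = numpy.random.rand(400, 25), numpy.random.randint(0, 2, 400)
test, test_labels = numpy.random.rand(400, 25), numpy.random.randint(0, 2, 400)
Neural_net(x_train_res, y_train_res, 'smoke-test', feature_count=25)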