# Esempio n. 1 (Example 1) — score: 0
# --- TFLearn text-classification example (fragment, Python 3) ---
# Assumes `net` (a TFLearn layer graph), `trainX`/`trainY`, `embeddings`,
# `regression`, `DNN`, `get_layer_variables_by_name` and `prob2Onehot`
# are defined earlier in the original script — TODO confirm.

# Debug prints showing the tensor shape as the graph is built.
print("After RNN : ", net.get_shape().as_list())
print("After Dropout : ", net.get_shape().as_list())
# Attach the training op: Adam optimizer with binary cross-entropy loss.
net = regression(net,
                 optimizer='adam',
                 loss='binary_crossentropy',
                 learning_rate=0.005)
print("After regression : ", net.get_shape().as_list())

# NOTE(review): the "test" split is the last 70% of the TRAINING data, so
# train and test overlap (data leakage) — verify this is intentional.
testX = trainX[int(0.3 * len(trainY)):]
testY = trainY[int(0.3 * len(trainY)):]

# Training
model = DNN(net, clip_gradients=0., tensorboard_verbose=2)
# Look up the embedding matrix variable so it can be seeded with
# pre-trained vectors before training.
embeddingWeights = get_layer_variables_by_name('EmbeddingLayer')[0]
# Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)
model.fit(trainX,
          trainY,
          n_epoch=3,
          validation_set=0.1,  # hold out 10% of training data for validation
          show_metric=True,
          batch_size=32,
          shuffle=True)
#print( model.evaluate(testX, testY) )
# Convert per-class probabilities to one-hot predictions.
predictions = model.predict(testX)
predictions = prob2Onehot(predictions)
#print("Predictions : ", list(predictions[10]))

##Calculate F1 Score
# Confusion-matrix counters (the rest of the F1 computation is outside
# this fragment).
tp = 0
tn = 0
# Esempio n. 2 (Example 2) — score: 0
# --- TFLearn trigger-classification example (fragment, Python 3) ---
# Assumes `net`, `trainX`/`trainY`, `embeddings`, `POS_vectors`,
# `fully_connected`, `regression`, `DNN`, `get_layer_variables_by_name`
# and `prob2Onehot` are defined earlier in the original script — TODO confirm.

# Final 2-class softmax layer plus the Adam / categorical cross-entropy
# training op.
net = fully_connected(net, 2, activation='softmax')
net = regression(net,
                 optimizer='adam',
                 loss='categorical_crossentropy',
                 learning_rate=0.005)
print("Done neural network")

# NOTE(review): the "test" split is the last 70% of the TRAINING data, so
# train and test overlap (data leakage) — verify this is intentional.
testX = trainX[int(0.3 * len(trainY)):]
testY = trainY[int(0.3 * len(trainY)):]

# Training
model = DNN(net, clip_gradients=0., tensorboard_verbose=2)
# Fetch the embedding matrices so they can be seeded with pre-trained
# word vectors and POS vectors before training.
embeddingWeights = get_layer_variables_by_name('EmbeddingLayer')[0]
POSWeights = get_layer_variables_by_name('POSLayer')[0]
#! Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)
model.set_weights(POSWeights, POS_vectors)
model.fit(trainX,
          trainY,
          n_epoch=3,
          validation_set=0.1,  # hold out 10% of training data for validation
          show_metric=True,
          batch_size=50,
          shuffle=True)
#print( model.evaluate(testX, testY) )
# Convert per-class probabilities to one-hot predictions.
predictions = model.predict(testX)
predictions = prob2Onehot(predictions)
#print("Predictions : ", list(predictions[10]))
# Count gold-standard positives ("triggers", one-hot [1, 0]) in the test set.
print("Number of triggers : ",
      len([pred for pred in testY if list(pred) == [1, 0]]))
def tflearn_OneClass_NN_linear(data_train, data_test, labels_train):
    """Train a One-Class Neural Network (linear variant) with TFLearn.

    Builds a small feed-forward net (input -> 4-unit hidden -> 1-unit
    output, both linear, no bias), trains it with a custom
    'OneClassNN_Loss' objective while alternately updating the decision
    boundary `rho`, then scores the train (assumed normal) and test
    (assumed anomalous) sets against the final `rho`.

    Args:
        data_train: 2-D array of training samples, shape (n, D).
        data_test: 2-D array of test samples with the same feature count.
        labels_train: labels forwarded unchanged to model.fit.

    Returns:
        [pos_decisionScore, neg_decisionScore]: per-sample decision scores
        (score - rho) for the train and test sets respectively.

    NOTE(review): relies on module-level globals `K` (parameter-vector
    hidden width), `v` (quantile / nu parameter), `va` (presumably
    tflearn.variables — confirm), `oneClassNN`, `DNN`, `input_data`,
    `fully_connected`, `tf`, `np`, `tflearn`. Written for Python 2 /
    TensorFlow 1.x (print statements, tf.reset_default_graph,
    tf.initialize_all_variables).
    """

    X = data_train
    Y = labels_train

    # Feature dimensionality of the input data.
    D = X.shape[1]

    No_of_inputNodes = X.shape[1]  # same value as D; kept as in original

    # Clear all the graph variables created in previous run and start fresh
    tf.reset_default_graph()

    # Define the network
    input_layer = input_data(shape=[None,
                                    No_of_inputNodes])  # input layer of size

    # Deterministic initial parameter vector theta0 of length K + K*D + 1:
    # [output weights (K), hidden weights (K*D), rho seed (1)], scaled small.
    np.random.seed(42)
    theta0 = np.random.normal(0, 1, K + K * D + 1) * 0.0001
    #theta0 = np.random.normal(0, 1, K + K*D + 1) # For linear
    # NOTE(review): hidden width is hard-coded to 4 here, but the theta0
    # slicing/reshaping below assumes width K — confirm K == 4 in the full
    # file, otherwise set_weights will fail or mis-shape.
    hidden_layer = fully_connected(
        input_layer,
        4,
        bias=False,
        activation='linear',
        name="hiddenLayer_Weights",
        weights_init="normal")  # hidden layer (width 4; original comment said "size 2")

    output_layer = fully_connected(
        hidden_layer,
        1,
        bias=False,
        activation='linear',
        name="outputLayer_Weights",
        weights_init="normal")  # output layer of size 1

    # Initialize rho
    # rho is the scalar decision-boundary offset, updated each iteration.
    value = 0.01
    init = tf.constant_initializer(value)
    rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)

    rcomputed = []
    auc = []  # NOTE(review): never used in this function

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # print sess.run(tflearn.get_training_mode()) #False
    tflearn.is_training(True, session=sess)
    print sess.run(tflearn.get_training_mode())  #now True

    # Initial rho value from theta0; overwritten inside the loop below.
    temp = theta0[-1]

    # Wrap the net with the custom one-class objective.
    oneClassNN_Net = oneClassNN(output_layer,
                                v,
                                rho,
                                hidden_layer,
                                output_layer,
                                optimizer='sgd',
                                loss='OneClassNN_Loss',
                                learning_rate=1)

    model = DNN(oneClassNN_Net, tensorboard_verbose=3)

    # Seed the network from theta0: first K entries -> output layer weights,
    # next K*D entries (reshaped to (D, K)) -> hidden layer weights.
    model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
    model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

    # Alternating optimization: fix rho at the v-quantile of the current
    # training scores, then fit the weights for 2 epochs; repeat 100 times.
    iterStep = 0
    while (iterStep < 100):
        print "Running Iteration :", iterStep
        # Call the cost function
        y_pred = model.predict(data_train)  # Apply some ops
        tflearn.is_training(False, session=sess)
        y_pred_test = model.predict(data_test)  # Apply some ops
        tflearn.is_training(True, session=sess)
        value = np.percentile(y_pred, v * 100)
        tflearn.variables.set_value(rho, value, session=sess)
        rStar = rho  # NOTE(review): unused alias of the rho Variable
        model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
        iterStep = iterStep + 1
        # NOTE(review): appends the tf Variable object itself, not its
        # numeric value (`temp` below holds the value) — verify intent.
        rcomputed.append(rho)
        temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho",temp
    # print "y_pred",y_pred
    # print "y_predTest", y_pred_test

    # g = lambda x: x
    # Sigmoid activation used when re-scoring with the learnt weights.
    g = lambda x: 1 / (1 + tf.exp(-x))

    def nnScore(X, w, V, g):
        # Score of X under the learnt net: g(X . w) . V
        return tf.matmul(g((tf.matmul(X, w))), V)

    # Format the datatype to suite the computation of nnscore
    X = X.astype(np.float32)
    X_test = data_test
    X_test = X_test.astype(np.float32)
    # assign the learnt weights
    # wStar = hidden_layer.W
    # VStar = output_layer.W
    # Get weights values of fc2
    wStar = model.get_weights(hidden_layer.W)
    VStar = model.get_weights(output_layer.W)

    # print "Hideen",wStar
    # print VStar

    train = nnScore(X, wStar, VStar, g)
    test = nnScore(X_test, wStar, VStar, g)

    # Access the value inside the train and test for plotting
    # Create a new session and run the example
    # sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    arrayTrain = train.eval(session=sess)
    arrayTest = test.eval(session=sess)

    # print "Train Array:",arrayTrain
    # print "Test Array:",arrayTest

    # plt.hist(arrayTrain-temp,  bins = 25,label='Normal');
    # plt.hist(arrayTest-temp, bins = 25, label='Anomalies');
    # plt.legend(loc='upper right')
    # plt.title('r = %1.6f- Sigmoid Activation ' % temp)
    # plt.show()

    # Decision scores relative to the final rho value (held in `temp`).
    pos_decisionScore = arrayTrain - temp
    neg_decisionScore = arrayTest - temp

    return [pos_decisionScore, neg_decisionScore]
# Esempio n. 4 (Example 4) — score: 0
# --- One-class NN training fragment (Python 2 / TensorFlow 1.x) ---
# Assumes `theta0`, `output_layer`, `hidden_layer`, `rho`, `v`, `K`, `D`,
# `sess`, `X`, `Y`, `data_train`, `data_test`, `oneClassNN`, `DNN`,
# `tflearn` and `np` are defined earlier in the original script — TODO confirm.

# Scaled rho seed from the last theta0 entry; the 1e6 factor's purpose is
# not evident from this fragment — presumably tuned empirically, verify.
temp = theta0[-1] * 1000000

# temp = tflearn.variables.get_value(rho, session=sess)

# NOTE(review): this rebinds the name `oneClassNN` from the builder
# function to the network it returns, so the builder cannot be called
# again afterwards in this module.
oneClassNN = oneClassNN(output_layer,
                        v,
                        rho,
                        hidden_layer,
                        output_layer,
                        optimizer='sgd',
                        loss='OneClassNN_Loss',
                        learning_rate=1)

model = DNN(oneClassNN, tensorboard_verbose=3)

# Seed the network from theta0: first K entries -> output layer weights,
# next K*D entries (reshaped to (D, K)) -> hidden layer weights.
model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

# Alternating optimization: fix rho at the v-quantile of the current
# training scores, then fit the weights for 2 epochs; repeat 100 times.
iterStep = 0
while (iterStep < 100):
    print "Running Iteration :", iterStep
    # Call the cost function
    y_pred = model.predict(data_train)  # Apply some ops
    tflearn.is_training(False, session=sess)
    y_pred_test = model.predict(data_test)  # Apply some ops
    tflearn.is_training(True, session=sess)
    value = np.percentile(y_pred, v * 100)
    tflearn.variables.set_value(rho, value, session=sess)
    rStar = rho  # NOTE(review): unused alias of the rho Variable
    model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
    iterStep = iterStep + 1