def main_menu():
    dp.clear()
    #get information from the user
    print("Hello,\n")
    print(
        "Please insert known information on the attack:\n         [property1=numVal1 property2=... e.g. nkill=2 nwound=3] "
    )
    info = input()
    infoarr = info.split(" ")
    cols = []
    infoVec = []
    for inf in infoarr:
        prop, val = inf.split("=")
        cols.append(prop)
        infoVec.append(float(val))  #numeric value, fed into the model later

    print("Please choose the property predict :[e.g. weaptype1, attacktype1]")
    pred = input("Property: ")
    print("Please insert number of categories of this property:")
    n_pred = input("num of categories: ")
    mdl.SoftMax(infoVec, cols, pred, int(n_pred))

    return
def SoftMax(infoVec, cols, pred, n_pred):
    #get the data
    train_x, train_y, test_x, test_y, SoftMaxSet = dp.getTrainAndTestSoftMax(
        cols, pred, n_pred, 'globalterrorismdb_dist.csv')
    SoftMaxSet.sort()

    print(SoftMaxSet)

    #init num of features and num of categories
    numOfFeatures = len(train_x[0])
    numOfCategories = len(train_y[0])

    print("Num Of Features ", numOfFeatures, " Num Of Categories ",
          numOfCategories)
    #input layer
    x = tf.placeholder(tf.float32, [None, numOfFeatures])
    y_ = tf.placeholder(tf.float32, [None, numOfCategories])

    #Weights and bias
    W = tf.Variable(tf.zeros([numOfFeatures, numOfCategories]))
    b = tf.Variable(tf.zeros([numOfCategories]))

    #output layer
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    #loss and update functions; the small epsilon guards against log(0) producing NaNs
    loss = -tf.reduce_mean(y_ * tf.log(y + 1e-10))
    update = tf.train.GradientDescentOptimizer(0.000001).minimize(loss)

    #TRAINING
    print(">>>START TRAINING<<<<")
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    axis_y = []
    axis_x = []
    #correct = tf.equal(findMaxIndex(y),findMaxIndex(y_))
    #accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    for i in range(0, 1000):
        __, currentLoss = sess.run([update, loss],
                                   feed_dict={
                                       x: train_x,
                                       y_: train_y
                                   })
        if (i % 10 == 0):
            #print("current loss at epoch ",i,": ",currentLoss)
            axis_y.append(currentLoss)
            axis_x.append(i)

    plt.ylabel('LOSS')
    plt.xlabel('EPOCH')
    plt.title('Train Loss Session')
    plt.plot(axis_x, axis_y, 'b--')

    print(">>>DONE TRAINING<<<<")

    #TEST
    print("@@@START TEST@@@")

    predictionList = y.eval(session=sess, feed_dict={x: test_x})
    total = len(predictionList)
    success = 0
    #print("len(predictionList) ",len(predictionList)," len(test_y) ",len(test_y))
    for k, vec in enumerate(predictionList):
        index = findMaxIndex(test_y[k])
        predIndex = findMaxIndex(vec)

        if (predIndex == index):
            success += 1
            #print("----SUCCESS-----\nPrediction ", predictionList[k])
            #print("Original ", test_y[k])
    print("@@@DONE TEST@@@")

    print("With Test Accuracy of ", int(success / total * 100.0), "%")

    feed = [infoVec]
    predict = y.eval(session=sess, feed_dict={x: feed})
    for res in predict:
        for i, val in enumerate(res):
            print("     Probabilty for ", findValuForIndex(i + 1, SoftMaxSet),
                  pred, " is ", val)

    plt.show()
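
The snippet relies on two helpers, findMaxIndex and findValuForIndex, that are defined elsewhere in the project. A minimal sketch of what they plausibly look like, assuming findMaxIndex returns the argmax of a probability or one-hot vector and findValuForIndex maps a 1-based category index back into the sorted SoftMaxSet:

import numpy as np

def findMaxIndex(vec):
    #index of the largest entry in a probability / one-hot vector
    return int(np.argmax(vec))

def findValuForIndex(index, SoftMaxSet):
    #map a 1-based category index back to its original value
    #(SoftMaxSet is sorted before training, so the order is stable)
    return SoftMaxSet[index - 1]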
Example #3
import tensorflow as tf
import numpy as np
import DataPreper as dp


def f(z):
    #sigmoid activation
    return 1 / (1.0 + tf.exp(-z))


train_data = dp.getTrainNN()
train_x = train_data[0]
train_y = train_data[1]

test_data = dp.getTestNN()
test_x = test_data[0]
test_y = test_data[1]

best = 0
bestS = 0
features = 6
i = 100
while i < 10000:

    (layer1, layer2, layer3) = (i, 1, 25)
    x = tf.placeholder(tf.float32, [None, features])
    y_ = tf.placeholder(tf.float32, [None, 1])
    #Hidden layer 1: size layer1 (set by the sweep variable i); takes the input layer
    W1 = tf.Variable(tf.truncated_normal([features, layer1], stddev=0.1))
    b1 = tf.Variable(tf.constant(0.1, shape=[layer1]))
    z1 = tf.add(tf.matmul(x, W1), b1)
    a1 = tf.nn.relu(z1)
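
Example #3 breaks off after the first hidden layer. A hedged sketch of how the remaining layers declared in (layer1, layer2, layer3) might continue inside the while loop, reusing the sigmoid f defined above; the wiring and the choice of a sigmoid output for the single-unit y_ are assumptions, not the author's confirmed code:

    #Hidden layer 2 (assumed): layer3 = 25 units
    W2 = tf.Variable(tf.truncated_normal([layer1, layer3], stddev=0.1))
    b2 = tf.Variable(tf.constant(0.1, shape=[layer3]))
    a2 = tf.nn.relu(tf.add(tf.matmul(a1, W2), b2))

    #Output layer (assumed): layer2 = 1 unit, matching y_ = [None, 1]
    W3 = tf.Variable(tf.truncated_normal([layer3, layer2], stddev=0.1))
    b3 = tf.Variable(tf.constant(0.1, shape=[layer2]))
    y = f(tf.add(tf.matmul(a2, W3), b3))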
Example #4
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
import DataPreper as dp


def h(x, w, b):
    #logistic hypothesis: sigmoid of the linear score np.dot(x, w) + b
    return 1 / (1 + np.exp(-(np.dot(x, w) + b)))


train_data = dp.getTrain()
train_x = train_data[0]
train_y = train_data[1]

test_data = dp.getTest()
test_x = test_data[0]
test_y = test_data[1]

w = np.array([0., 0, 0, 0, 0, 0])
b = 0
alpha = 0.001

#batch gradient ascent on the log-likelihood (equivalently, descent on the
#logistic cross-entropy)
for iteration in range(1000):
    gradient_b = np.mean(train_y - h(train_x, w, b))
    gradient_w = np.dot(train_y - h(train_x, w, b), train_x) / len(train_y)
    b += alpha * gradient_b
    w += alpha * gradient_w

total = 0
fneg = 0
fpos = 0
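
The evaluation that fills these counters is cut off. A hypothetical continuation, assuming predictions are thresholded at 0.5 and test_y holds 0/1 labels:

for xi, yi in zip(test_x, test_y):
    prediction = 1 if h(xi, w, b) >= 0.5 else 0
    total += 1
    if prediction == 0 and yi == 1:
        fneg += 1  #false negative
    elif prediction == 1 and yi == 0:
        fpos += 1  #false positive

print("false negatives:", fneg, "false positives:", fpos, "out of", total)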
Example #5
import numpy as np
import GetData
import DataPreper
import SplitData
import TrainData
import TestData

p = getparams()

np.random.seed(0)  # Seed the random number generator
# p["Data"]["LoadFromCache"] = True
# p["Kmean"]["LoadFromCache"] = True
# p["DataProcess"]["LoadFromCache"] = True

folderList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
images, labels = GetData.get_data(p['Data'], folderList)
image_features = DataPreper.data_prepare(p['DataProcess'], images)
train_x_array, train_y_array, test_x_array, test_y_array = SplitData.split_data_train_test(
    p['Split'], image_features)

linear_svms = []
poly_svms = []
decisions_array = []
decisions_poly_array = []

for i in range(-5, 15):
    p["Train"]["C_Value"] = np.float_power(10, i)
    linear_svm = TrainData.train_data_linear(p['Train'], train_x_array,
                                             train_y_array)
    poly_svm = TrainData.train_data_non_linear(p['Train'], train_x_array,
                                               train_y_array)
    decisions = TestData.test_linear_svm(p['Test'], linear_svm, test_x_array,
Example #6
import os
import numpy as np
import GetData
import DataPreper
import TestData
import Report
from sklearn.metrics import accuracy_score, confusion_matrix

p = getparams()
data_path = os.path.join("D:\\", "Studies", "Learn", "101_ObjectCategories")
np.random.seed(0)  # Seed the random number generator
p["Data"]["BaseDataPath"] = data_path
class_indices = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]

##### train hyper params
if p['TrainHyper']:
    class_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    images, labels = GetData.get_data(p['Data'], class_indices)

    data_prepare_options = DataPreper.get_data_grid(p['DataProcess'])
    number_of_trains = len(data_prepare_options) * (
        len(p['Train']['C_Values']) +
        len(p['Train']['C_Values']) * len(p['Train']['Poly_Values']))

    linear_svms = [None] * number_of_trains
    poly_svms = [None] * number_of_trains
    data = []

    tests_array = [None] * number_of_trains
    tests_dec = [None] * number_of_trains
    acc_array = [None] * number_of_trains
    conf_array = [None] * number_of_trains
    decisions_array = [None] * number_of_trains
    decisions_poly_array = [None] * number_of_trains
    index = 0
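
The sweep that fills these arrays is not shown. A hedged sketch of one plausible loop structure, reusing the SplitData and TrainData calls from Example #5; the nesting, the meaning of each preprocessing option, and the index bookkeeping are assumptions:

    import SplitData
    import TrainData

    #hypothetical sweep over preprocessing options and C values
    for options in data_prepare_options:
        image_features = DataPreper.data_prepare(options, images)
        train_x, train_y, test_x, test_y = SplitData.split_data_train_test(
            p['Split'], image_features)
        for c_value in p['Train']['C_Values']:
            p['Train']['C_Value'] = c_value
            linear_svms[index] = TrainData.train_data_linear(
                p['Train'], train_x, train_y)
            index += 1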
Example #7
#!/usr/bin/python
import numpy as np
import DataPreper as dp
import tensorflow as tf
from math import gcd
from tensorflow.contrib import rnn


train_x, train_y, test_x, test_y = dp.TrainAndTestRNN()
# number of features
n_features = train_x[0].size
# data size
total_size = train_y.size

# use the greatest common divisor so the batch size divides both sets evenly
batch_size = gcd(train_y.size, test_y.size)



# gets the data after the split
# the X data contains the stock prices of 19 consecutive days
# the y data is the stock price of the 20th day
# reshape to a 3-dimensional tensor

#train_x = train_x.reshape(train_x.shape + tuple([1]))
#test_x = test_x.reshape(test_x.shape + tuple([1]))
#train_y = train_y.reshape(train_y.shape + tuple([1]))
#test_y = test_y.reshape(test_y.shape + tuple([1]))


# rnn configuration
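
The snippet ends at the configuration comment. A minimal TF1-style sketch of an LSTM over the 19-day windows described above, using the rnn module imported earlier; the hidden size and the regression head are assumptions:

n_steps = 19    #19 consecutive days per sample (see the comments above)
n_hidden = 32   #assumed hidden state size

x = tf.placeholder(tf.float32, [None, n_steps, 1])
y_ = tf.placeholder(tf.float32, [None, 1])

#static_rnn expects a list of per-step inputs
inputs = tf.unstack(x, n_steps, axis=1)
cell = rnn.BasicLSTMCell(n_hidden)
outputs, state = rnn.static_rnn(cell, inputs, dtype=tf.float32)

#regress the 20th-day price from the final output
W = tf.Variable(tf.truncated_normal([n_hidden, 1], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[1]))
prediction = tf.matmul(outputs[-1], W) + b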