Example #1
def findThresh():
    pickles_path = "Pickles/New/"
    encoding = DataE.AUTOENCODER_PREPROCESS

    print("Testing...")
    test_data = pd.read_pickle(pickles_path +
                               "Friday-WorkingHours-Afternoon-DDos.pkl")
    model = tf.keras.models.load_model("new_encoding.h5")
    IDS.find_best_thresh(model, encoding, test_data, 50)
Example #2
def grid_sparse(data_encoding, train_data, test_data, results_file_name="grid_sparse_results.csv"):
    """Run a grid search over sparse network hyperparameters.

    Trains sparse networks, finds the best threshold for each, and records the
    best score for that threshold in a CSV file.
    """

    # Look at this option for using tensorflow tuning
    # https://medium.com/ml-book/neural-networks-hyperparameter-tuning-in-tensorflow-2-0-a7b4e2b574a1

    #TODO: These shouldn't be needed
    seed(1234)
    random.set_seed(2345)

    column = ['Input Layer', 'Bottleneck Layer', 'Input Sparse', 'Hidden Sparse', 'Thresh', 'Score']
    results = pd.DataFrame(columns=column)

    max_layer_outer = 80
    min_layer_outer = 60
    inc_layer_outer = 5

    min_layer_inner = 50
    inc_layer_inner = 4

    max_sparse_outer = 0.3
    min_sparse_outer = 0.1
    inc_sparse_outer = 0.1

    max_sparse_inner = 0.5
    min_sparse_inner = 0.1
    inc_sparse_inner = 0.1
  
    for i in range(min_layer_outer, max_layer_outer, inc_layer_outer):
        for j in range(min_layer_inner, i, inc_layer_inner):
            for k in np.arange(min_sparse_outer, max_sparse_outer, inc_sparse_outer):
                for l in np.arange(min_sparse_inner, max_sparse_inner, inc_sparse_inner):

                    param_string = str(i) + "_" + str(j) + "_" + str(k) + "_" + str(l)
                    print("Running " + param_string)
                    file_name = "result_" + param_string + ".txt"
                    model_name = param_string + ".h5"

                    params = HyperP.cAutoHyper(model_name, [i, j], [0.005, 0], [k, l, 0])

                    model = IDS.train(train_data, data_encoding, params)
                    thresh, score = IDS.find_best_thresh(model, test_data)

                    row = {'Input Layer': i, 'Bottleneck Layer': j, 'Input Sparse': k,
                           'Hidden Sparse': l, 'Thresh': thresh, 'Score': score}
                    # DataFrame.append was removed in pandas 2.x, so build the row and concat
                    results = pd.concat([results, pd.DataFrame([row])], ignore_index=True)
                    results.to_csv(results_file_name)

    print(results)
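
The grid search above leans on IDS.find_best_thresh, whose implementation is not shown in these examples. A minimal sketch of what such a threshold sweep might look like, assuming a Keras autoencoder and labeled test features (the names below are illustrative, not the project's API):

import numpy as np
from sklearn.metrics import f1_score

def sweep_thresholds(model, features, labels, n_steps=50):
    # Reconstruction error per sample: high error suggests an anomaly/attack.
    reconstructed = model.predict(features)
    errors = np.mean(np.square(features - reconstructed), axis=1)

    best_thresh, best_score = None, -1.0
    for thresh in np.linspace(errors.min(), errors.max(), n_steps):
        preds = (errors > thresh).astype(int)   # 1 = flagged as attack
        score = f1_score(labels, preds)
        if score > best_score:
            best_thresh, best_score = thresh, score
    return best_thresh, best_score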
Example #3
    def webscan_feature(self,input_data=None,ws_ipall=None):
        svm = IDS.SVM()
        if input_data is not None:
            df = input_data
        else:
            df = self.http_log_content
        feature_columns = ['P_malicious_urls', 'C_similar_url', 'num_404', 'H_status']
        os_ipset, ws_ipset = set(), set()
        src_ip_set = set(df['src_ip'])
        if ws_ipall is None:
            ws_ipall = []
        else:
            os_ipset = src_ip_set - set(ws_ipall)
            ws_ipset = src_ip_set - os_ipset
        ws_tb = pd.DataFrame(index=list(ws_ipset), columns=feature_columns)
        os_tb = pd.DataFrame(index=list(os_ipset), columns=feature_columns)
        all_tb = pd.DataFrame(index=list(os_ipset), columns=feature_columns)
        for item in src_ip_set:
            src_ip_table = df[df['src_ip'] == item]
            status_list = list(src_ip_table['status'])
            dest_ip_list = list(src_ip_table['dest_ip'])
            url_df = src_ip_table[['params','dest_ip']]
            if len(url_df['dest_ip']) == 1:
                url_list = list(url_df['params'])
                url_dt = [Levenshtein.distance(url_list[i].lower(), url_list[i + 1].lower())
                          for i in range(len(url_list) - 1)]
                if len(url_dt) == 0:
                    P_malicious_urls = -1
                else:
                    P_malicious_urls = [item for item in url_dt if item < 5]
Example #4
def main():
    pickles_path = "Pickles/New/"
    params = HyperP.cAutoHyper("test.h5", [65, 64], [0.005, 0], [0.1, 0.3, 0])
    encoding = DataE.AUTOENCODER_PREPROCESS

    print("Loading training data...")
    train_file = "Monday-WorkingHours.pkl"
    train_data = pd.read_pickle(pickles_path + train_file)

    print("Training...")
    model = IDS.train(train_data, encoding, params)
    model.save("trained.h5")

    print("Loading test data...")
    test_data = pd.read_pickle(pickles_path + "encode_test.pkl")

    print("Finding best thresh...")
    thresh, score = IDS.find_best_thresh(model, encoding, test_data, 50)
Example #5
def trainNetwork():
    print("Training...")
    pickles_path = "Pickles/New/"
    train_file = "Monday-WorkingHours.pkl"

    train_data = pd.read_pickle(pickles_path + train_file)
    params = HyperP.cAutoHyper("test.h5", [65, 64], [0.005, 0], [0.1, 0.3, 0])
    encoding = DataE.AUTOENCODER_PREPROCESS

    model = IDS.train(train_data, encoding, params)
    model.save("new_encoding.h5")
Example #6
 def run(self):  # this function runs the specified algorithm
     result = False
     if self.algotype == AlgorithmType.ASTAR:
         result = ASTAR.astar(self)
     elif self.algotype == AlgorithmType.IDASTAR:
         result = IDASTAR.idAstar(self)
     elif self.algotype == AlgorithmType.UCS:
         result = UCS.ucs(self)
     elif self.algotype == AlgorithmType.IDS:
         result = IDS.ids(self)
     elif self.algotype == AlgorithmType.BIASTAR:
         result = BIASTAR.biAstar(self)
     if result:  # if the path was found print it
         self.found = True
         self.print()
     else:  # else print not found
         self.print_not_found()
Example #7
def eden(i):
    bfs = BFS.main("./labyrinths/" + i)
    print("---")
    ids = IDS.main("./labyrinths/" + i)
    print("---")
    dfs = DFS.main("./labyrinths/" + i)
    print("---")
    tss = TSS.main("./labyrinths/" + i)
    print("---")
    ast = AStar.main("./labyrinths/" + i)
    print("---")
    dijkstra = Dijkstra.main("./labyrinths/" + i)
    print("---")
    greedy = Greedy.main("./labyrinths/" + i)
    print("---")
    greedyHeuristics = GreedyHeuristics.main("./labyrinths/" + i)
    print("###########################################################################")
Example #8
File: test.py Project: 33-ai/23
import IDS
# testfile = 'data/good_fromE2.txt'
testfile = 'data/bad_fromE.txt'
# testfile = "data/badqueries.txt"
# a = IDS.LG()

a = IDS.SVM()

# preicdtlist = ['www.foo.com/id=1<script>alert(1)</script>','www.foo.com/name=admin\' or 1=1','abc.com/admin.php','"><svg onload=confirm(1)>','test/q=<a href="javascript:confirm(1)>','q=../etc/passwd']
# result =a.predict(preicdtlist)
# print('Normal results, first 10: ' + str(result[0][:10]))

with open(testfile, 'r') as f:
    print('Prediction dataset: ' + testfile)
    preicdtlist = [i.strip('\n') for i in f.readlines()]
    result = a.predict(preicdtlist)
    print('Malicious results, first 10: ' + str(result[1][:10]))
    print('Normal results, first 10: ' + str(result[0][:10]))
Example #9
import IDS

import time

IP = "192.168.1.1"

# Setup connection to IDS
ids = IDS.Device(IP)
ids.connect()

# Measure on axis 1
# Internally, axes are numbered 0 to 2
axis = 0 # Axis 1

# Start alignment
ids.system.startOpticsAlignment()

# Wait until alignment is running
while not ids.adjustment.getAdjustmentEnabled():
    time.sleep(1)

# Get contrast
warningNo, contrast, baseline, mixcontent = ids.adjustment.getContrastInPermille(axis)
print("Contrast:", contrast, "Baseline:", baseline, "Mixcontent:", mixcontent)

# Stop alignment and start measurement
ids.system.stopOpticsAlignment()
while ids.system.getCurrentMode() != "system idle":
    time.sleep(1)
ids.system.setInitMode(0) # enable high accuracy mode
ids.system.startMeasurement()
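
The same alignment-then-measurement sequence can be wrapped in a helper. This sketch reuses only the calls shown above and assumes they behave as in the example; connection cleanup is omitted because the example does not show it:

import time
import IDS

def align_and_measure(ip, axis=0, poll_interval=1.0):
    ids = IDS.Device(ip)
    ids.connect()

    # Start optics alignment and wait until it is actually running
    ids.system.startOpticsAlignment()
    while not ids.adjustment.getAdjustmentEnabled():
        time.sleep(poll_interval)

    # Read the contrast for the requested axis (axes are numbered 0 to 2 internally)
    warningNo, contrast, baseline, mixcontent = ids.adjustment.getContrastInPermille(axis)

    # Stop alignment, wait for idle, then start a high accuracy measurement
    ids.system.stopOpticsAlignment()
    while ids.system.getCurrentMode() != "system idle":
        time.sleep(poll_interval)
    ids.system.setInitMode(0)
    ids.system.startMeasurement()

    return contrast, baseline, mixcontent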
Example #10
import IDS
# testfile = 'data/good_fromE2.txt'
#testfile1= 'data/gambling.txt'
#testfile2= 'data/malicious.txt'
#testfile3= 'data/p**n.txt'
testfile = 'data/2.txt'
# testfile = "data/badqueries.txt"
a = IDS.LG()

#a = IDS.SVM()

# preicdtlist = ['www.foo.com/id=1<script>alert(1)</script>','www.foo.com/name=admin\' or 1=1','abc.com/admin.php','"><svg onload=confirm(1)>','test/q=<a href="javascript:confirm(1)>','q=../etc/passwd']
# result =a.predict(preicdtlist)
# print('Normal results, first 10: ' + str(result[0][:10]))

with open(testfile, 'r') as f:
    print('Prediction dataset: ' + testfile)
    preicdtlist = [i.strip('\n') for i in f.readlines()]
    result = a.predict(preicdtlist)
    # print('Malicious results, first 10: ' + str(result[1][:10]))
    # print('Normal results, first 10: ' + str(result[0][:10]))
Example #11
# search using DFS Search
myDFSSearch = DFS(eight_puzzle)
result_node = myDFSSearch.search()

if (result_node is None):
    print("No path found using DFS search!")
else:
    print("Path:", result_node.path())
    print("Path Cost:", result_node.path_cost)
    print("Solution:", result_node.solution())
print("Nodes searched with DFS:", myDFSSearch.nodesSearched)
print("Time Spent with DFS:", myDFSSearch.timeSpent)

print("==============")
print("==============")
print("ITERATIVE DEEPENING  SEARCH")

#search using IDS Search
myIDSSearch = IDS(eight_puzzle)
result_node = myIDSSearch.search()

if (result_node is None):
    print("No path found using DFS search!")
else:
    print("Path:", result_node.path())
    print("Path Cost:", result_node.path_cost)
    print("Solution:", result_node.solution())
print("Nodes searched with DFS:", myIDSSearch.nodesSearched)
print("Time Spent with DFS:", myIDSSearch.timeSpent)
Example #12
        euclideanHeuristicMatrix = calcEuclideanHeuristic(
            goalPoint, dim, matrix)
        runningTimeAllowed = (dim / 60) * 15
        path, expandedNodes, trackingpath, cost, scannedNodes, PenetrationRatio, minimumDepth, averageDepth, maximumDepth, averageHeuristicValues, ebf, PenetrationY = ASTAR.astar_search(
            dim, startPoint, goalPoint, matrix, euclideanHeuristicMatrix,
            runningTimeAllowed, start)

    elif algorithm == 'UCS':
        runningTimeAllowed = (dim / 60) * 20
        path, expandedNodes, trackingpath, cost, scannedNodes, PenetrationRatio, minimumDepth, averageDepth, maximumDepth, averageHeuristicValues, ebf, PenetrationY = UCS.ucs_search(
            dim, startPoint, goalPoint, matrix, runningTimeAllowed, start)

    elif algorithm == 'IDS':
        runningTimeAllowed = (dim / 60) * 30
        path, expandedNodes, trackingpath, cost, scannedNodes, PenetrationRatio, minimumDepth, averageDepth, maximumDepth, averageHeuristicValues, ebf, PenetrationY = IDS.ids_search(
            sys.maxsize, dim, startPoint, goalPoint, matrix,
            runningTimeAllowed, start)

    elif algorithm == 'BIASTAR':
        ForwardEuclideanHeuristicMatrix = calcEuclideanHeuristic(
            goalPoint, dim, matrix)
        BackwardEuclideanHeuristicMatrix = calcEuclideanHeuristic(
            startPoint, dim, matrix)
        runningTimeAllowed = (dim / 60) * 10
        path, expandedNodes, trackingpath, cost, scannedNodes, PenetrationRatio, minimumDepth, averageDepth, maximumDepth, averageHeuristicValues, ebf, PenetrationY = BIASTAR.biastar_search(
            dim, startPoint, goalPoint, matrix,
            ForwardEuclideanHeuristicMatrix, BackwardEuclideanHeuristicMatrix,
            runningTimeAllowed, start)

    elif algorithm == 'IDASTAR':
        euclideanHeuristicMatrix = calcEuclideanHeuristic(
Example #13
import sys

import puzzle
import IDS
import AStar1
import GS2
import AStar2

t = puzzle.TilePuzzle(int(sys.argv[1]))
t.permute(int(sys.argv[2]))
#t.printPuzzle()

#BFS
#search = BFS.Bfs(t)

#DFS
#search = DFS.Depthfs(t)

#IDS
search = IDS.IDepthfs(t, 3)

#Greedy1
#search = GS1.Greedy(t)

#Greedy2
#search = GS2.GreedyDepth(t)

#AStar1
#search = AStar1.Astar1(t)

#AStar2
#search = AStar2.Astar2(t)

#UCS1
#search = UCS1.UCS1(t)
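
Instead of commenting and uncommenting a search line, a third command-line argument could select the algorithm. A sketch restricted to the modules imported above, with constructor names taken from the snippet and its comments (treat them as assumptions):

searches = {
    "ids": lambda p: IDS.IDepthfs(p, 3),       # iterative deepening with depth step 3
    "gs2": lambda p: GS2.GreedyDepth(p),
    "astar1": lambda p: AStar1.Astar1(p),
    "astar2": lambda p: AStar2.Astar2(p),
}
algo = sys.argv[3].lower() if len(sys.argv) > 3 else "ids"
search = searches[algo](t)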