def Trapping():
    # This function starts the third-party tools needed to trace the malware
    # sample and performs the required configuration.
    global caphProcess
    global capPID
    global nethProcess
    global netPID

    # Initialize the process module
    p = process()

    # Start the trapping modules
    Trap = trap()
    cap = Trap.capturebat(capture_bat_log)
    net = Trap.windump(windump_log)

    if cap == 1:
        event("[Trapping] Trapping cannot create process CaptureBat")
        event("success")
        exit(1)
    else:
        caphProcess, capPID = cap
        event("[Trapping] CaptureBat started successfully. (ProcessID=%d)" % int(capPID))

    if net == 1:
        event("[Trapping] Trapping cannot create process windump")
        if p.kill(caphProcess) == 0:
            event("[Process] CaptureBat process terminated")
        else:
            event("[Process] Cannot terminate CaptureBat process")
        event("success")
        exit(1)
    else:
        nethProcess, netPID = net
        event("[Trapping] windump started successfully. (ProcessID=%d)" % int(netPID))
def process_results():
    file_controller = fileController.fileController()
    data_processor = process.process(file_controller)

    kmeans_processor = kmeans.kmeans()
    kmeans_processor.kmeans()

    print("This may take a few minutes...")

    # Basic statistics over the collected data
    data_processor.basics()
    data_processor.retweets()
    data_processor.quotes()
    data_processor.hashtags()

    # Network structure: triads and tries
    triad_controller = triads.triads()
    triad_controller.calculate_triads()

    tries_controller = tries.tries()
    tries_controller.calculate_tries()
from pyspark.sql.types import FloatType
from lib.process import process
from lib.spark import spark, sc

# Load one month of NYC yellow-taxi trips and run the cleaning step
df = df_base = spark.read.csv('data/yellow_tripdata_2016-01.csv',
                              header=True, inferSchema=True)
process(df)
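# The body of lib.process.process is not shown in this excerpt. As a rough
# illustration only, a cleaning step for the 2016 yellow-taxi schema might
# drop rows with zeroed coordinates or non-positive fares; the hypothetical
# helper below (clean_trips, not the real process()) sketches that idea.
def clean_trips(df):
    # Keep trips whose coordinates are set and whose fare and passenger
    # count are plausible.
    return (df
            .filter((df.pickup_longitude != 0.0) & (df.pickup_latitude != 0.0))
            .filter((df.dropoff_longitude != 0.0) & (df.dropoff_latitude != 0.0))
            .filter(df.fare_amount > 0)
            .filter(df.passenger_count > 0))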
from lib.plotly import py
from lib.timer import Timer
from lib.process import process
from lib.spark import spark
import plotly.graph_objs as go
from pyspark.ml.feature import VectorAssembler
from scipy.spatial import ConvexHull
import pickle
import numpy as np

with Timer('read', 'Reading data'):
    df = df_base = spark.read.csv('data/yellow_tripdata_2016-01.csv',
                                  header=True, inferSchema=True)

with Timer('process', 'Cleaning invalid data'):
    df = process(df)

from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
from pyspark import StorageLevel
from pyspark.ml.clustering import GaussianMixture

gmm = GaussianMixture(k=1000)
result = []

with Timer('clustering', 'Computing clusters'):
    for weekday in range(7):
        for hour in range(24):
            with Timer('clustering', 'Computing clusters for {}x{}'.format(weekday, hour)):
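                # Hypothetical body (not part of the original snippet): a
                # sketch of one way to fit the per-(weekday, hour) mixture
                # over pickup coordinates, using the col/udf/IntegerType
                # imports above. It assumes tpep_pickup_datetime was inferred
                # as a timestamp and that the 2016 schema's pickup_longitude
                # and pickup_latitude columns are present.
                weekday_of = udf(lambda d: d.weekday(), IntegerType())
                hour_of = udf(lambda d: d.hour, IntegerType())
                df_slot = df.where(
                    (weekday_of(col('tpep_pickup_datetime')) == weekday) &
                    (hour_of(col('tpep_pickup_datetime')) == hour))
                features = VectorAssembler(
                    inputCols=['pickup_longitude', 'pickup_latitude'],
                    outputCol='features').transform(df_slot)
                result.append(((weekday, hour), gmm.fit(features)))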
    help='define a maximum number of proteins to process'
)
parser.add_argument(
    '--output-file', '-o',
    metavar='O',
    type=str,
    nargs='?',
    help='optional output file for csv formatted cluster information'
)
args = parser.parse_args()

extract_output = extract(
    args.input_file,
    extract_lambda=lambda s: s.name,
    features=args.features,
    n_proteins=args.maximum
)
process_output = process(args.features, extract_output.n_proteins)

# Report how much information each feature retained after reduction
for (i, d, f) in zip(
    process_output.informations,
    process_output.original_dimensions,
    args.features
):
    print(
        'kept {:7.2%} of information '.format(i) +
        'in {:2}-dimensional feature "{}"'.format(d, f)
    )

precluster_output = precluster(
    process_output.matrix,
    min_clusters=10,
    max_clusters=29,
    n_replicates=6
)
clusters = cluster(process_output.matrix, precluster_output.best_k)
def Ex():
    p = process()

    # Execute the malware sample
    Ex = Exec("c:\\malware.exe").execute()
    if Ex == 1:
        event("[Executer] Executer cannot execute sample program")
        if p.kill(capPID) == 0:
            event("[Process] CaptureBat process terminated")
        else:
            event("[Process] Cannot terminate CaptureBat process")
        if p.kill(netPID) == 0:
            event("[Process] windump process terminated")
        else:
            event("[Process] Cannot terminate windump process")
        event("success")
        exit(1)
    else:
        malhProcess, malPID = Ex
        event("[Executer] CreateProcess executed the sample program (PID=%d)" % int(malPID))
        sleep(1)

        # Extract the module list of the malware process
        Modules = p.mod(malPID)
        if Modules == 1 or Modules == 2:
            event("[Process] Cannot extract malware process modules, error code=%d" % Modules)
            # if p.kill(capPID) == 0:
            #     event("[Process] CaptureBat process terminated")
            # else:
            #     event("[Process] Cannot terminate CaptureBat process")
            # if p.kill(netPID) == 0:
            #     event("[Process] windump process terminated")
            # else:
            #     event("[Process] Cannot terminate windump process")
        else:
            mf = open("c:\\logs\\mods.log", "w")
            for m in Modules:
                mf.write(repr(m) + '\n')
            mf.close()
            event("[Process] Malware process module extraction completed")

        # Wait up to 150 seconds for the sample to terminate on its own
        event("[agent] WaitForSingleObject is called")
        wait = ctypes.windll.kernel32.WaitForSingleObject(malhProcess, 150000)
        if wait == 0x00000000:  # WAIT_OBJECT_0
            event("[Agent] Process self terminated")
            if p.kill(capPID) == 0 and p.kill(netPID) == 0:
                event("[Process] CaptureBat and windump process terminated")
                Z()
            else:
                event("[Process] Cannot terminate windump and CaptureBat process")
                event("success")
                exit(1)
        elif wait == 0x00000102:  # WAIT_TIMEOUT
            event("[Agent] The time-out interval elapsed")
            if p.kill(capPID) == 0 and p.kill(netPID) == 0:
                event("[Process] CaptureBat and windump process terminated")
                Z()
            else:
                event("[Process] Cannot terminate windump and CaptureBat process")
                event("success")
                exit(1)
        else:  # WAIT_FAILED / WAIT_ABANDONED
            event("[Agent] WaitFailed")
            if p.kill(capPID) == 0 and p.kill(netPID) == 0:
                event("[Process] CaptureBat and windump process terminated")
                Z()
            else:
                event("[Process] Cannot terminate windump and CaptureBat process")
                event("success")
                exit(1)
from lib.plotly import py
from lib.timer import Timer
from lib.process import process
from lib.spark import spark
import plotly.graph_objs as go
from pyspark.ml.feature import VectorAssembler
from scipy.spatial import ConvexHull
import pickle
import numpy as np

with Timer('read', 'Reading data'):
    df = df_base = spark.read.csv('data/yellow_tripdata_2016-01.csv',
                                  header=True, inferSchema=True)

with Timer('process', 'Cleaning invalid data'):
    df = process(df_base)

from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
from pyspark import StorageLevel
from pyspark.ml.clustering import GaussianMixture

# Benchmark clustering over a grid of sample sizes and cluster counts
for i in range(6):
    n = 10**i
    for k in [5, 25, 50, 100, 500, 1000]:
        with Timer('limit', 'Limiting data, n={}, k={}'.format(n, k)):
            df_ik = df.limit(n)
        with Timer('clustering', 'n={}, k={}'.format(n, k)):
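            # Hypothetical body (not part of the original snippet): a sketch
            # of the timed step, assuming each (n, k) pair fits a single
            # GaussianMixture over the pickup coordinates of the limited
            # frame (standard 2016 yellow-taxi column names).
            features = VectorAssembler(
                inputCols=['pickup_longitude', 'pickup_latitude'],
                outputCol='features').transform(df_ik)
            GaussianMixture(k=k).fit(features)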