def main():
    """Learn a probabilistic automaton from an IEC 104 CSV capture via Alergia.

    Usage: <prog> <csv-file>

    Parses the capture into conversations, splits them into a training and
    a testing part (TRAINING ratio), learns an automaton with Alergia,
    stores it in <name>a<alpha>t<t0>.fa / .dot files, and prints
    classification statistics on stdout.
    """
    argc = len(sys.argv)
    if argc < 2:
        sys.stderr.write("Error: bad parameters\n")
        print_help()
        sys.exit(1)

    csv_file = sys.argv[1]
    csv_fd = open(csv_file, "r")
    try:
        csv_file = os.path.basename(csv_file)

        # Preparing the learning data: parse messages into conversations.
        normal_msgs = con_par.get_messages(csv_fd)
        parser = con_par.IEC104Parser(normal_msgs)
        parser.parse_conversations()
        lines = parser.get_all_conversations(abstraction)

        index = int(len(lines) * TRAINING)
        training, testing = lines[:index], lines[index:]

        # Build the frequency prefix tree from the training conversations.
        tree = fpt.FPT()
        for line in training:
            tree.add_string(line)

        alpha = 0.05
        # BUG FIX: math.log raises ValueError for index == 0 (empty training
        # set); fall back to t0 = 0 for degenerate inputs.
        t0 = int(math.log(index, 2)) if index > 1 else 0
        aut = alergia.alergia(tree, alpha, t0)
        aut.rename_states()
        fa = aut.normalize()

        store_filename = os.path.splitext(os.path.basename(csv_file))[0]
        store_filename = "{0}a{1}t{2}".format(store_filename, alpha, t0)

        # Store the learned automaton in textual (.fa) and Graphviz (.dot) form.
        with open("{0}.fa".format(store_filename), "w") as fa_fd:
            fa_fd.write(fa.to_fa_format(True))

        legend = "File: {0}, alpha: {1}, t0: {2}".format(csv_file, alpha, t0)
        with open("{0}.dot".format(store_filename), "w") as dot_fd:
            dot_fd.write(fa.to_dot(aggregate=False, legend=legend))

        # Evaluate: a testing conversation with no deterministic probability
        # in the automaton counts as misclassified.
        miss = 0
        for line in testing:
            prob = fa.string_prob_deterministic(line)
            if prob is None:
                miss += 1

        print("File: {0}".format(csv_file))
        print("alpha: {0}, t0: {1}".format(alpha, t0))
        print("States {0}".format(len(aut.get_states())))
        print("Testing: {0}/{1} (missclassified/all)".format(miss, len(testing)))
        if len(testing) > 0:
            print("Accuracy: {0}".format((len(testing)-miss)/float(len(testing))))
    finally:
        # Close the input even if parsing/learning fails.
        csv_fd.close()
def main():
    """Learn a prefix tree acceptor (PTA) from an IEC 104 CSV capture.

    Usage: <prog> <csv-file>

    Parses the capture into conversations, builds a renamed frequency prefix
    tree from the training part, stores the normalized PTA in
    <name>-pta.fa / .dot files, and prints classification statistics.
    """
    argc = len(sys.argv)
    if argc < 2:
        sys.stderr.write("Error: bad parameters\n")
        print_help()
        sys.exit(1)

    csv_file = sys.argv[1]
    csv_fd = open(csv_file, "r")
    try:
        csv_file = os.path.basename(csv_file)

        ########################################################################
        # Preparing the learning data
        ########################################################################
        normal_msgs = con_par.get_messages(csv_fd)
        parser = con_par.IEC104Parser(normal_msgs)
        parser.parse_conversations()
        lines = parser.get_all_conversations(abstraction)
        ren_dict = con_par.values_bidict(lines)

        index = int(len(lines) * TRAINING)
        training, testing = lines[:index], lines[index:]

        tree = create_fpt(training, ren_dict)
        tree.rename_states()
        fa = tree.normalize()

        store_filename = os.path.splitext(os.path.basename(csv_file))[0]

        # Store the PTA in textual (.fa) and Graphviz (.dot) form.
        with open("{0}-pta.fa".format(store_filename), "w") as fa_fd:
            fa_fd.write(fa.to_fa_format(True))

        legend = "File: {0} PTA".format(csv_file)
        with open("{0}-pta.dot".format(store_filename), "w") as dot_fd:
            dot_fd.write(fa.to_dot(aggregate=False, legend=legend))

        # Evaluate: a testing conversation with no deterministic probability
        # in the automaton counts as misclassified.
        miss = 0
        for line in testing:
            prob = fa.string_prob_deterministic(line)
            if prob is None:
                miss += 1

        print("File: {0}".format(csv_file))
        print("States {0}".format(len(tree.get_states())))
        print("Testing: {0}/{1} (missclassified/all)".format(miss, len(testing)))
        # BUG FIX: guard against an empty testing set; the original divided
        # by len(testing) unconditionally and crashed with ZeroDivisionError.
        if len(testing) > 0:
            print("Accuracy: {0}".format((len(testing) - miss) / float(len(testing))))
    finally:
        csv_fd.close()
def main():
    """Split an IEC 104 CSV capture into per-entity part files.

    Usage: <prog> <csv-file>

    Rows are keyed by the frozenset of field tuples selected by
    `rows_filters`; each distinct key gets its own
    <name>-part<N>.csv file with the header row repeated.
    """
    argc = len(sys.argv)
    if argc != 2:
        sys.stderr.write("Bad parameters\n")
        print_help()
        sys.exit(1)

    csv_file = sys.argv[1]
    csv_fd = open(csv_file, "r")
    bname = os.path.basename(csv_file)
    store_filename = os.path.splitext(os.path.basename(bname))[0]

    normal_msgs = con_par.get_messages(csv_fd)
    distict_dict = dict()
    fnames = None
    all_cnt = 0
    try:
        for row in normal_msgs:
            # Remember the field names of the first row for the CSV headers.
            if fnames is None:
                fnames = row.keys()

            items = []
            for flt in rows_filters:
                items.append(tuple([row[k] for k in flt]))
            key = frozenset(items)

            # Open a new part file the first time this entity key is seen.
            if key not in distict_dict:
                distict_dict[key] = open(
                    "{0}-part{1}.csv".format(store_filename, all_cnt), "w")
                distict_dict[key].write(DELIMITER.join(fnames) + "\n")
                all_cnt += 1
            fd = distict_dict[key]
            fd.write("{0}\n".format(format_row(row)))
    finally:
        # BUG FIX: the part files were never closed, leaking file descriptors
        # and risking loss of buffered output; close them all here.
        for part_fd in distict_dict.values():
            part_fd.close()
        csv_fd.close()
def main():
    """Print basic statistics about an IEC 104 CSV capture.

    Usage: <prog> <csv-file>

    Reports the total message count, the count of informational messages,
    and the set of distinct entities selected by `rows_filters`.
    """
    if len(sys.argv) != 2:
        sys.stderr.write("Bad parameters\n")
        print_help()
        sys.exit(1)

    source_path = sys.argv[1]
    source_fd = open(source_path, "r")
    messages = con_par.get_messages(source_fd)

    entities = set()
    inform_count = 0
    total_count = 0
    for msg in messages:
        # Collect one entity tuple per configured row filter.
        entities.update(tuple([msg[field] for field in flt])
                        for flt in rows_filters)
        if con_par.IEC104Parser.is_inform_message(msg):
            inform_count += 1
        total_count += 1

    print("#messages: {0}".format(total_count))
    print("#i-messages: {0}".format(inform_count))
    print("#entities: {0}".format(len(entities)))
    print("entities: {0}".format(entities))
    source_fd.close()
def main():
    """Run window-based anomaly detection against a learned golden model.

    Usage: <prog> (--pa|--pta) <normal-csv> <test-csv>

    Learns a golden model per communication pair from the normal capture,
    then compares each DURATION-sized window of the test capture against it
    and prints the per-window detection results grouped by pair.
    """
    argc = len(sys.argv)
    if argc < 4:
        sys.stderr.write("Error: bad parameters\n")
        print_help()
        sys.exit(1)

    # Select the learning procedure according to the first option.
    alg = Params.PA
    learn_proc = None
    if sys.argv[1] == "--pa":
        alg = Params.PA
        learn_proc = learn_proc_pa
    elif sys.argv[1] == "--pta":
        alg = Params.PTA
        learn_proc = learn_proc_pta
    else:
        # BUG FIX: an unrecognized option previously left learn_proc as None
        # and crashed later inside learn_golden; fail early instead.
        sys.stderr.write("Error: bad parameters\n")
        print_help()
        sys.exit(1)

    normal_file = sys.argv[2]
    normal_fd = open(normal_file, "r")
    normal_msgs = con_par.get_messages(normal_fd)
    test_file = sys.argv[3]
    test_fd = open(test_file, "r")
    test_msgs = con_par.get_messages(test_fd)

    normal_parser = con_par.IEC104Parser(normal_msgs)
    test_parser = con_par.IEC104Parser(test_msgs)

    # Learn the golden model and set up the distribution-based comparator.
    golden_map = learn_golden(normal_parser, learn_proc)
    anom = distr.AnomDistrComparison(golden_map, learn_proc)

    res = defaultdict(lambda: [])
    test_com = test_parser.split_communication_pairs()
    for item in test_com:
        for window in item.split_to_windows(DURATION):
            window.parse_conversations()
            r = anom.detect(window.get_all_conversations(abstraction),
                            item.compair)
            res[item.compair].append(r)

    # Printing results
    for k, v in res.items():
        print(k)
        for i, value in enumerate(v):
            print("{0}: {1}".format(i, value))

    normal_fd.close()
    test_fd.close()
def main():
    """Dump all parsed conversations from an IEC 104 CSV capture to stdout.

    Usage: <prog> <csv-file>

    Prints every row of every conversation via print_row, with a delimiter
    line separating conversations.
    """
    if len(sys.argv) != 2:
        sys.stderr.write("Bad parameters\n")
        print_help()
        sys.exit(1)

    input_path = sys.argv[1]
    input_fd = open(input_path, "r")
    messages = con_par.get_messages(input_fd)
    conv_parser = con_par.IEC104Parser(messages)
    conv_parser.parse_conversations()

    for conversation in conv_parser.get_all_conversations():
        for record in conversation:
            print_row(record)
        # Visual separator between consecutive conversations.
        print(DELIMITER * 14)

    input_fd.close()
def create_fpt(ln, ren_dict):
    """Build a frequency prefix tree (FPT) from the given conversations.

    Parameters:
        ln: list of conversations (symbol sequences) to insert.
        ren_dict: renaming dictionary (from con_par.values_bidict) applied
            to the symbol values before insertion.

    Returns:
        An fpt.FPT populated with the renamed conversations.
    """
    tree = fpt.FPT()
    # BUG FIX: the renamed conversations were computed but the original,
    # un-renamed list `ln` was inserted, silently ignoring ren_dict.
    lines = con_par.rename_values(ln, ren_dict)
    tree.add_string_list(lines)
    return tree