def main():
    """Interactively select an instance type and an instance file, then
    evaluate all methods against the chosen instance.

    Reads choices from stdin (Portuguese prompts), re-asking until the
    input is valid, then loads the instance data and its known optimum
    and delegates to evaluate_methods().
    """
    print("Selecione o tipo de instância:")
    print("1 - Large Scale")
    print("2 - Low Dimensional")
    instance_type_option = input()
    while instance_type_option not in VALID_INSTANCE_OPTIONS:
        print("Opção inválida, digite outra")
        instance_type_option = input()

    file_names = FileReader.get_file_names(
        path=INSTANCE_OPTIONS_FOLDER_NAMES.get(instance_type_option))
    # enumerate() avoids the O(n^2) file_names.index() lookup per iteration.
    for index, file_name in enumerate(file_names):
        print(f"{index} - {file_name}")

    print("Selecione uma instância:")
    instance_option = input()
    while not validate_instance_option(instance_option, file_names):
        print("Opção inválida, digite outra")
        instance_option = input()

    # Resolve the chosen file once instead of re-indexing four times.
    selected_file = file_names[int(instance_option)]
    instance_reader = FileReader(
        path=INSTANCE_OPTIONS_FOLDER_NAMES.get(instance_type_option),
        file_name=selected_file)
    solution_reader = FileReader(
        path=INSTANCE_OPTIONS_SOLUTION_FOLDER_NAMES.get(instance_type_option),
        file_name=selected_file)

    optimum_value = solution_reader.parse_solution_data()
    instance_dict = instance_reader.parse_instance_data()
    evaluate_methods(optimum_value, instance_dict, selected_file)
def main(argv):
    """Parse -t/--train and -e/--test options, train a logistic-regression
    model on the training file and evaluate it against the test file.

    Exits with status 2 on malformed arguments.
    """
    setpath()
    try:
        opts, args = getopt.getopt(argv, "ht:e:", ["train=", "test="])
        # Require both -t <file> and -e <file> (program name + 4 tokens).
        if len(sys.argv) < 5:
            raise getopt.GetoptError(None)
    except getopt.GetoptError:
        # print() with parentheses is valid on both Python 2 and 3; the
        # original Py2-only `print` statements would not compile on Py3.
        print('\nusage: run.py -t <trainfile> -e <testfile> \n')
        sys.exit(2)

    # Pre-initialize so a missing option fails loudly downstream rather
    # than as an unrelated NameError.
    trainfile = None
    testfile = None
    for opt, arg in opts:
        if opt == '-h':
            print('run.py -t <trainfile> -e <testfile>')
            sys.exit()
        elif opt in ("-t", "--train"):
            trainfile = arg
        elif opt in ("-e", "--test"):
            testfile = arg

    # Single import (the original imported FileReader twice).
    from file_reader import FileReader
    fr = FileReader(trainfile)
    training_Set = fr.getRows()

    # Read in the test file and create the matrix.
    test_File_Reader = FileReader(testfile)
    testing_Set = test_File_Reader.getRows()

    test_Result(logistic_Regression(training_Set), testing_Set)
def main(argv):
    """Parse -t/--train and -e/--test options, train a Naive Bayes model
    on the training file, and report the error count, accuracy, and a
    confusion matrix for the test file.

    Exits with status 2 on malformed arguments.
    """
    setpath()
    try:
        opts, args = getopt.getopt(argv, "ht:e:", ["train=", "test="])
        # Require both -t <file> and -e <file> (program name + 4 tokens).
        if len(sys.argv) < 5:
            raise getopt.GetoptError(None)
    except getopt.GetoptError:
        print('\nusage: run.py -t <trainfile> -e <testfile>\n')
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print('run.py -t <trainfile> -e <testfile>')
            sys.exit()
        elif opt in ("-t", "--train"):
            trainfile = arg
        elif opt in ("-e", "--test"):
            testfile = arg

    from file_reader import FileReader
    from naive_bayes import NaiveBayes
    # The original also built an unused FileReader(testfile) here and
    # threw it away; removed.
    nb = NaiveBayes(trainfile)
    test_file_reader = FileReader(testfile)
    testData = test_file_reader.getRows()

    num_errors = 0
    true_positive = 0
    false_positive = 0
    true_negative = 0
    false_negative = 0

    # Testing phase: classify each row; the last column holds the
    # ground-truth label ('0' or '1').
    for idx, row in enumerate(testData):
        prediction = nb.binary_classify(row)
        if row[-1] != prediction:
            # Float increment so the accuracy division below is non-integer
            # even on Python 2-style division.
            num_errors += 1.0
            print("Error on row: %s" % str(idx + 1))
            if row[-1] == '1':
                false_negative += 1
            else:
                false_positive += 1
        elif row[-1] == '0':
            true_negative += 1
        else:
            true_positive += 1

    print('\n\n--------------Error Count----------------')
    print(num_errors)
    print('\n\n--------------Accuracy----------------')
    print("\n\nThe Accuracy is "
          + str((len(testData) - num_errors) * 100 / len(testData)) + "%")
    print("\n===========The confusion matrix===========")
    print("\t No \t Yes")
    print("No \t", str(true_negative) + "\t", str(false_positive))
    print("Yes \t", str(false_negative) + "\t", str(true_positive))
def metadata_main() -> None:
    """Transform metadata from a source file and push it to the Nomis
    metadata API.

    Reads the JSON file named by ``args.filename``, converts it with the
    transformer selected by ``args.metadata_format`` ('c' = Cantabular,
    'o' = ONS), and submits the resulting variable-metadata requests.

    Raises:
        ValueError: if ``args.metadata_format`` is neither 'c' nor 'o'.
    """
    # Plain string: the original used an f-string with no placeholders.
    logger.info("Commencing metadata transformation service.")

    with FileReader(args.filename) as fr:
        file_data = fr.load_json()

    # Normalize once instead of per-branch.
    metadata_format = args.metadata_format.lower()
    if metadata_format == 'c':
        uuids_metadata = cantabular_metadata(file_data)
    elif metadata_format == 'o':
        uuids_metadata = ons_metadata(file_data)
    else:
        raise ValueError("Unrecognised metadata format.")

    if len(uuids_metadata) > 0:
        variable_metadata_requests = DatasetTransformations.variable_metadata_request(
            uuids_metadata)
        with NomisMetadataApiConnector(
                config.get_credentials('nomis_metadata'),
                config.get_client('nomis_metadata')) as metadata_connector:
            uuids = metadata_connector.add_new_metadata(
                variable_metadata_requests, return_uuids=True)
            # Lazy %-formatting defers the string build to the logger.
            logger.info(
                "METADATA TRANSFORMATION SUCCESS. "
                "Metadata was created for entities with the following UUIDS: %s",
                uuids)
    else:
        logger.info("No metadata appended.")
class Controller:
    """Routes file-loading requests through a shared FileReader."""

    # Class-level reader shared by every Controller instance.
    file = FileReader()

    def load_file(self, infile, temp_file=''):
        """Load a .txt or .csv file and print the parsed program.

        Errors are reported on stdout rather than raised to the caller.
        """
        try:
            suffix = infile[-4:]
            if ".txt" in suffix:
                self.file.add_file(infile)
            elif ".csv" in suffix:
                self.file.read_csv_file(infile, temp_file)
            else:
                raise NameError("incorrect file format, please see help load")
            # Both formats share the same parse/print pipeline.
            self.file.read_txt_file()
            self.file.find_classes()
            self.file.printProgram()
        except NameError as e:
            print(e)
        except FileNotFoundError:
            print("File not found")
        except Exception as e:
            print(e)
def _default(self):
    """Run FileReader.call_file with the configured separator.

    Uses the user-supplied separator when set, otherwise ",".
    """
    separator = self.user_string if self.user_string else ","
    reader = FileReader()
    # Bound-method call; the original spelled this as an unbound
    # FileReader.call_file(instance, ...) call.
    reader.call_file(self.detail_mode, separator)
def test_write_to_database_with_display_data(self):
    """write_to_database should echo the saved rows when the user answers
    'Y' to the display prompt; record 'A001' must appear in stdout.
    """
    # Arrange
    test_name = "Write to Database with displaying data Test #02"
    insert = "Y"  # see data saved to database
    # Expected textual rendering of record 'A001' as printed by
    # write_to_database.
    expected_result = "['A001', 'F,', '21,', '001,', 'Normal,', '12,', '01/01/1996,', '1']"
    data_to_test = {'A001': {'gender': 'F', 'age': '21', 'sales': '001',
                             'bmi': 'Normal', 'salary': '12',
                             'birthday': '01/01/1996', 'valid': '1'},
                    'Q001': {'gender': 'M', 'age': '45', 'sales': '999',
                             'bmi': 'Underweight', 'salary': '725',
                             'birthday': '31/12/1971', 'valid': '1'},
                    'A002': {'gender': 'F', 'age': '21', 'sales': '001',
                             'bmi': 'Normal', 'salary': '12',
                             'birthday': '01/01/1996', 'valid': '1'},
                    'A05': {'gender': 'F', 'age': '21', 'sales': '001',
                            'bmi': 'Normal', 'salary': '12',
                            'birthday': '01/01/1996', 'valid': '0'}}
    class_to_test = FileReader()
    # Action
    # Capture stdout so the echoed rows can be inspected.
    cmd_output = io.StringIO()
    sys.stdout = cmd_output
    with patch('builtins.input', side_effect=insert):
        result = class_to_test.write_to_database(data_to_test)
    sys.stdout = sys.__stdout__  # restore stdout before asserting/printing
    # Assert
    try:
        self.assertTrue(expected_result in cmd_output.getvalue())
    except AssertionError:
        print("{} Failed - Should be {}, but was {}.".format(
            test_name, expected_result, result))
    else:
        print("{} Passed".format(test_name))
def __init__(self, file_path):
    """Parse the header and sample data of the file at *file_path*.

    The field names and 16-byte fmt-subchunk assumption match the
    canonical RIFF/WAVE layout — presumably this expects a PCM WAV
    file (TODO confirm). Samples are de-interleaved into one list per
    channel in self.samples.
    """
    print("Reading the file...")
    file_reader = FileReader(file_path)
    self.file_name = file_reader.file_name
    # Read the header
    self.file_path = file_path
    self.chunk_id = file_reader.next_string(4)       # "RIFF" marker (assumed)
    self.chunk_size = file_reader.next_int(4)
    self.format = file_reader.next_string(4)         # "WAVE" (assumed)
    self.subchunk_1_id = file_reader.next_string(4)  # fmt subchunk id
    self.subchunk_1_size = file_reader.next_int(4)
    self.audio_format = file_reader.next_int(2)
    self.num_channels = file_reader.next_int(2)
    self.sample_rate = file_reader.next_int(4)
    self.byte_rate = file_reader.next_int(4)
    self.block_align = file_reader.next_int(2)
    self.bits_per_sample = file_reader.next_int(2)
    # A plain PCM fmt subchunk is 16 bytes; anything beyond that is
    # extension data this parser does not use.
    file_reader.skip_bytes(self.subchunk_1_size - 16)  # Skip unused bytes
    self.subchunk_2_id = file_reader.next_string(4)
    self.subchunk_2_size = file_reader.next_int(4)
    self.bytes_per_sample = self.bits_per_sample // 8
    # One sample list per channel.
    self.samples = []
    for channel in range(self.num_channels):
        self.samples.append([])
    # Sample data is stored interleaved frame by frame; split it into
    # the per-channel lists.
    while file_reader.has_next():
        channel_i = 0
        while channel_i < self.num_channels:
            self.samples[channel_i].append(
                file_reader.next_int(self.bytes_per_sample))
            channel_i += 1
def test_05(self):
    """find_classes should populate all_my_classes after reading a file."""
    x = FileReader()
    x.add_file("plant_uml")
    x.read_file()
    x.find_classes()
    # The original `if len(...) > 0: pass` asserted nothing, so the test
    # could never fail; make the expectation explicit.
    assert len(x.all_my_classes) > 0
def validate(file):
    """Parse *file*, run the error report over it, and collect the results.

    Returns a (file, fdict, errors, delimiter, headers_fix) tuple.
    """
    parsed = FileReader(file).fdict
    sep = parsed['delimiter']
    checker = ReportErrors(parsed, file, h1000lk)
    found_errors = checker.run()
    fixes = checker.set_to_fix()
    return file, parsed, found_errors, sep, fixes
def main():
    """Parse the CLI arguments (path, dialog, log), read the data file and
    run the calculation, with or without an interactive dialogue.

    Invalid arguments are re-prompted (French messages) via ArgsHandler.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("path")
    parser.add_argument("dialog")
    parser.add_argument("log")
    args = parser.parse_args()

    # str() is the idiomatic spelling of the original `.__str__()` calls.
    path_arg = str(args.path)
    dialog_arg = str(args.dialog)
    log_arg = str(args.log)

    args_handler = ArgsHandler()
    path_dialog = args_handler.get_path_from_args(
        path_arg, "Veuillez saisir le bon chemin du fichier : \n")
    dialog = args_handler.check_input_response(
        dialog_arg, "Souhaitez-vous un dialogue (oui/non) ? \n")
    log = args_handler.check_input_response(
        log_arg, 'Souhaitez-vous enregistrer les informations (oui/non) ? \n')

    file_reader = FileReader(path_dialog)
    calcul = Calcul(file_reader)
    display = Display()
    if dialog == 'non':
        display.no_dialog(calcul, log, file_reader)
    elif dialog == 'oui':
        display.yes_dialog(calcul, log, file_reader)
def test_04(self):
    """FileReader should record the added file name and read its content."""
    x = FileReader()
    x.add_file("plant_uml")
    x.read_file()
    # `is` compared string *identity*, which only passed by accident of
    # interning; use `==`, and actually assert instead of a no-op pass.
    assert x.my_file == "plant_uml"
    assert len(x.my_class_content) > 0
def __init__(self, filename):
    """Load the YAML document at *filename* and extract instruction,
    video and name data through a FileReader.

    Args:
        filename: path to a YAML file parseable by yaml.safe_load.
    """
    # `with` guarantees the handle is closed; the original leaked the
    # file object returned by open().
    with open(filename, 'r') as f:
        self._text = yaml.safe_load(f)
    data = FileReader(self._text)
    data.read()
    self._original_instructions = data.get_original_instructions()
    self._special_instructions = data.get_special_instructions()
    self._video_list = data.get_video_list()
    self._names = data.get_names()
def calculate(file_path, file_out_path):
    """Parse *file_path*, export the computed columns as an Excel document
    under *file_out_path*, and return the generated file name.
    """
    out_name = uuid.uuid4().hex
    with open(file_path) as source:
        lines = source.readlines()
    reader = FileReader(lines, os.path.join(BASE_DIR, 'columns.json'))
    ExcelParser(reader, file_out_path).export_document(out_name, reader.columns)
    return out_name
def main():
    """Main function for calling others"""
    argument_parser = ArgumentParser()
    rooms_path, students_path = argument_parser.args_info()
    reader = FileReader()
    # Read rooms first, then students, mirroring the argument order.
    distributed = Distribution(
        reader.file_reader(rooms_path),
        reader.file_reader(students_path),
    ).student_distribution()
    print(JsonExporter(distributed).unloading())
def main():
    """Build the simulation system, populate it from file, and start the
    Qt GUI event loop (blocks until the application exits).
    """
    sim = System(0.02)
    FileReader(sim).read_file()
    # The QApplication is kept in a module-level global so it outlives
    # this frame, as in the original.
    global app
    app = QApplication(sys.argv)
    window = GUI(sim)
    sys.exit(app.exec_())
def main():
    """Read room and student files, group each student into their room,
    and write the result in the requested output format.

    Prints the error and returns early if either input file is missing.
    """
    args = parse_args()
    try:
        rooms = FileReader(args.rooms_path).read()
        students = FileReader(args.students_path).read()
    except FileNotFoundError as e:
        print(e)
        return

    # One StudentRoom per room record, in file order (index == room id slot).
    student_rooms = [
        StudentRoom(Room(room["id"], room["name"])) for room in rooms
    ]
    for student in students:
        student_rooms[student["room"]].students.append(
            Student(student["id"], student["name"]))

    writers = {"json": JSONWriter(), "xml": XMLWriter()}
    writers[args.format].write(student_rooms)
def main2():
    """Batch-evaluate every instance of the selected type (non-interactive
    counterpart of main()).
    """
    # "1" or "2" selects the instance folder set.
    instance_type_option = "1"
    # Resolve both folders once instead of on every iteration.
    instance_folder = INSTANCE_OPTIONS_FOLDER_NAMES.get(instance_type_option)
    solution_folder = INSTANCE_OPTIONS_SOLUTION_FOLDER_NAMES.get(
        instance_type_option)
    file_names = FileReader.get_file_names(path=instance_folder)
    # Iterate over the names directly; the original called
    # file_names.index(file_name) each pass (O(n^2)) only to index the
    # same list again with the result.
    for file_name in file_names:
        instance_reader = FileReader(path=instance_folder,
                                     file_name=file_name)
        solution_reader = FileReader(path=solution_folder,
                                     file_name=file_name)
        optimum_value = solution_reader.parse_solution_data()
        instance_dict = instance_reader.parse_instance_data()
        evaluate_methods(optimum_value, instance_dict, file_name)
def unit_test_five(my_file):
    """Test 5: FileReader.find_classes should add at least one class
    after reading *my_file*; prints the outcome.
    """
    print("Run Test 5 - File Reader adds class")
    reader = FileReader()
    reader.add_file(my_file)
    reader.read_file()
    reader.find_classes()
    outcome = ("Class added from file"
               if len(reader.all_my_classes) > 0
               else "ERROR - class not added")
    print(outcome)
def main(argv):
    """Import NDX company data into the database.

    No extra arguments -> default DataBase() connection; exactly four ->
    they are forwarded as connection parameters. Anything else prints
    usage and exits with status 1.
    """
    argc = len(argv)
    if argc == 1:
        db = DataBase()
    elif argc == 5:
        db = DataBase(*argv[1:5])
    else:
        usage()
        sys.exit(1)
    db.import_ndx_companies(FileReader().read_ndx_companies())
    db.close()
def unit_test_four(my_file):
    """Test 4: FileReader opens the given file and reads its content;
    prints the outcome of each check.
    """
    print("Run Test 4 - File Reader opens file")
    x = FileReader()
    x.add_file(my_file)
    x.read_file()
    # `is` tested object identity and only passed when the interpreter
    # happened to reuse the same string object; `==` compares the values.
    if x.my_file == my_file:
        print("correct file added")
    else:
        print("ERROR - incorrect file added")
    if len(x.my_class_content) > 0:
        print("File read complete")
    else:
        print("ERROR - file not read")
def __init__(self, from_file, to_structure, **kwargs):
    """
    Initializing a Loader Object with a FileReader object and a structure

    @type from_file: string
    @param from_file: Filename of the file that you want to load
    @type to_structure: structure
    @param to_structure: Graph, tree, etc that you want to load data into
    """
    # NOTE(review): **kwargs is accepted but never used here — confirm
    # whether subclasses or callers depend on it.
    self.file_reader = FileReader(from_file)
    self.structure = to_structure
def retrieve_best_k_related_reviews(K, query):
    """
    Utilizing the Rocchio Classifier, finds and displays the k most
    relevant reviews to a given query, and their score.
    :param K: int, number of related documents desired
    :param query: string, the query to check the reviews against.
    :return:
    """
    dataset_path = "./dataset/amazon_cells_labelled_full.txt"
    reader = FileReader(dataset_path, True, True)
    tfidf_set, text_set = reader.build_set('tfidf', dataset_path)
    retrieve(K, reader.parse_query(query), tfidf_set, text_set)
def setUp(self):
    """Build a fresh Controller wired with all collaborator objects
    before each test runs.
    """
    self.parser = DataParser()
    self.cmd_view = CmdView()
    self.file_reader = FileReader()
    self.validator = Validator()
    self.db = Database("test.db")  # dedicated test database file
    self.vis = Visualiser()
    # NOTE(review): self.val duplicates self.validator — confirm both
    # instances are actually needed.
    self.val = Validator()
    self.serial = Serializer()
    self.controller = Controller(self.cmd_view,
                                 self.file_reader,
                                 self.parser,
                                 self.validator,
                                 self.db,
                                 self.vis,
                                 self.serial)
    self.init()
def main():
    """Load rooms and students into the database, build the supporting
    indexes, run the four report queries, and dump each result to
    query_N.<format>.

    Prints the error and returns early if either input file is missing.
    """
    args = parse_args()
    try:
        json_rooms = FileReader(args.rooms_path).read()
        json_students = FileReader(args.students_path).read()
    except FileNotFoundError as e:
        print(e)
        return

    worker = DBWorker()
    connection = worker.create_connection(args.host_name, args.user_name,
                                          args.user_password, args.database)
    worker.insert_into(connection, json_rooms, json_students)
    for index_statement in (create_index_room,
                            create_index_sex,
                            create_index_birthday):
        worker.create_index(connection, index_statement)

    # The four report queries, in output-file order.
    report_queries = (
        select_number_of_students_in_room,
        select_top_five_small_avg_age_rooms,
        select_top_five_big_diff_age_rooms,
        select_different_sexes_rooms,
    )
    writer = {"json": JSONWriter(), "xml": XMLWriter()}[args.format]
    for number, query in enumerate(report_queries, start=1):
        rows = worker.execute_read_query(connection, query)
        writer.write(rows, f"query_{number}.{args.format}")
def __init__(self, FILE_PATH=None):
    """Build a graph (with a start-vertex slot), optionally populated
    from the file at FILE_PATH.
    """
    self.f = FileReader(FILE_PATH)
    self.vertices = {}  # dictionary of verticies
    self.edges = []
    self.vertex_count = 0
    self.edge_count = 0
    self.digraph = None
    self.start = None
    if FILE_PATH is None:
        return
    # Copy what the reader parsed, then index it into this graph.
    self.edges = self.f.edges
    self.digraph = self.f.digraph
    self.add_verticies(self.f.verticies)
    self.add_egdes(self.edges)
def __init__(self, FILE_PATH=None):
    """Initialize a graph object after reading from a file if one is provided"""
    self.f = FileReader(FILE_PATH)
    self.vertices = {}  # dictionary of verticies
    self.edges = []
    self.vertex_count = 0
    self.edge_count = 0
    self.digraph = None
    if FILE_PATH is None:
        return
    # Index the parsed vertices first, then attach the parsed edges.
    self.add_verticies(self.f.verticies)
    self.edges = self.f.edges
    self.digraph = self.f.digraph
    self.add_egdes(self.edges)
def append_root(self, root):
    """Add a root directory and index every file found beneath it.

    The root is skipped when it already exists or is covered by an
    existing root; otherwise it is recorded and each file gains a
    metadata entry (identifier code, extension, name, path, readable
    size), with a progress bar updated along the way.
    """
    data = self.read()
    # If the directory already exists, or is a sub-path of an existing
    # root, bail out; otherwise add the directory.
    for _root in data['Root']:
        if _root == root or _root in root:
            return
    data['Root'].append(root)
    # Initialize a progress bar.
    progress_bar = ProgressBar('Code Files')
    # Add every file under the directory.
    file_reader = FileReader()
    file_list = file_reader.walk_folder(root)
    total_number = len(file_list)
    for index, file in enumerate(file_list):
        # Gather the file's basic information.
        # File identifier code — its last '-'-separated field is used
        # below as the byte size.
        file_code = file_reader.code_file(file)
        # File name and file extension.
        file_basename = os.path.basename(file)
        file_name, file_extension = os.path.splitext(file_basename)
        # Human-readable file size: MB, or GB at 1024 MB and above.
        file_size_num = (int)(file_code.split('-')[-1])
        file_size_num = round(file_size_num / 1024 / 1024)
        file_size = str(file_size_num) + ' MB' if file_size_num < 1024 \
            else str(round(file_size_num / 1024, 2)) + ' GB'
        # Add the file only if it is not already in the stored data.
        if file not in data['File']:
            data['File'][file] = {
                'code': file_code,
                'extension': file_extension,
                'name': file_name,
                'path': file,
                'size': file_size,
            }
        # Refresh the progress bar display immediately.
        progress_bar.set_value(index + 1, total_number)
        QApplication.processEvents()
    # Loop finished — close the progress bar.
    progress_bar.close()
    # Write the updated data back.
    self.write(data)
def test_hash_at_start_skips_line(self, mocked_open):
    """
    Check that a file is read and the data added to the list_lines
    attribute, ignoring lines that start with '#'
    """
    # The mock file mixes CR, CRLF and LF endings; two of its four
    # lines start with '#' and must be skipped.
    mocked_open.side_effect = [
        mock.mock_open(
            read_data="#CR\rCRLF\r\n# LF\nNo end of file").return_value
    ]
    __file_path = "Path to File"
    test_object = FileReader()
    test_object.read_file(__file_path)
    # The reader must open exactly this path in text-read mode.
    mocked_open.assert_called_with(__file_path, "r")
    self.assertEqual(2, len(test_object.list_lines),
                     "Should be 2 items in the list")
def test_five_hyphens_at_start_ends_read(self, mocked_open):
    """
    Check that a file is read and the data added to the list_lines
    attribute, stopping reading when we see a line that starts with '-----'
    """
    # The mock file mixes CR, CRLF and LF endings; reading must stop at
    # the '-----LF' line, leaving only the first two lines captured.
    mocked_open.side_effect = [
        mock.mock_open(
            read_data="CR\rCRLF\r\n-----LF\nNo end of file").return_value
    ]
    __file_path = "Path to File"
    test_object = FileReader()
    test_object.read_file(__file_path)
    # The reader must open exactly this path in text-read mode.
    mocked_open.assert_called_with(__file_path, "r")
    self.assertEqual(2, len(test_object.list_lines),
                     "Should be 2 items in the list")