Code Example #1
    def save(self, output_file_path):
        print("Saving...")
        # Write header
        file_writer = FileWriter(output_file_path)
        file_writer.start_writing()
        file_writer.write_string(self.chunk_id)
        file_writer.write_int(self.chunk_size, 4)
        file_writer.write_string(self.format)
        file_writer.write_string(self.subchunk_1_id)
        file_writer.write_int(self.subchunk_1_size, 4)
        file_writer.write_int(self.audio_format, 2)
        file_writer.write_int(self.num_channels, 2)
        file_writer.write_int(self.sample_rate, 4)
        file_writer.write_int(self.byte_rate, 4)
        file_writer.write_int(self.block_align, 2)
        file_writer.write_int(self.bits_per_sample, 2)
        file_writer.write_int(0, self.subchunk_1_size - 16)
        file_writer.write_string(self.subchunk_2_id)
        file_writer.write_int(self.subchunk_2_size, 4)

        # Write samples
        for k in range(len(self.samples[0])):
            for i in range(len(self.samples)):
                file_writer.write_int(self.samples[i][k],
                                      self.bytes_per_sample)
        file_writer.finish_writing()
        print("Done")
Code Example #2
def evaluate_method(**kwargs):
    start = time.time()
    solution: Solution = kwargs['method_function'](
        optimum_value=kwargs['optimum_value'],
        instance_dict=kwargs['instance_dict'],
        output_filename=kwargs['output_filename'])
    end = time.time()

    solution_dict = solution.to_dict()
    accuracy = (solution_dict.get('value') / kwargs['optimum_value']) * 100
    is_valid_str = "VALID" if solution.is_valid(item_list=kwargs.get(
        'instance_dict', {}).get('item_list')) else "INVALID"

    config = kwargs.get('config')
    del config['enable']

    result_file = FileWriter(file_name="evaluation_result", mode="append")
    result_file.write_line(
        f"----------------------------------------------------------------------------------------------------------"
    )
    result_file.write_line(f"| Method: {kwargs['output_filename']}")
    for parameter in config:
        result_file.write_line(f"| {parameter}: {config[parameter]}")
    result_file.write_line(f"| {json.dumps(solution_dict)}")
    result_file.write_line(
        f"| {is_valid_str} SOLUTION |  Execution time: {end - start}  |  Accuracy: {accuracy}"
    )
    result_file.write_line(
        f"----------------------------------------------------------------------------------------------------------"
    )
    result_file.close_file()
Code Example #3
    def __init__(self):
        super(Receiver, self).__init__()

        socket.setdefaulttimeout(10)  # Set the socket timeout to 10 seconds

        self.__error_occurred = False  # Boolean flag indicating whether an error occurred before packet reception started
        self.__is_socket_open = False  # Boolean flag indicating whether the socket is open

        self.__receiver_ip = None  # The receiver's IP
        self.__receiver_port = None  # The receiver's port
        self.__losing_packets_probability = 0  # Probability of losing packets

        self.__is_running = True  # Boolean flag indicating whether the receiver is running
        self.__SWR = {}  # The receiver-side window of the sliding-window protocol
        self.__SWR_size = -1  # Size of the window
        self.__last_packet_received = -1  # Last packet received
        self.__total_nr_of_packets_to_receive = -1  # Total number of packets to be received

        self.__file_writer = FileWriter("")  # FileWriter object that manages the file the received data is written to
        self.__ups = UnPackingSystem(self.DATA_PACKET_SIZE)  # UnPackingSystem object that splits the received data into bit fields

        self.__nr_of_packets_recv = 0  # Total number of packets received
        self.__nr_of_packets_lost = 0  # Total number of packets lost
Code Example #4
    def run_vns(self, solution: Solution, item_list: list, max_iterations: int,
                neighborhood_size: int, output_filename: str):
        print(f"ic| run_vns: Executing Variable Neighbourhood Search")
        counter = 0
        output_file = FileWriter(file_name=output_filename)

        output_file.write_line(output_filename.replace('TEMP-', ''))
        output_file.write_line(str(solution.optimum))
        output_file.write_line(f"{counter} {solution.value}")

        (value_list,
         weight_list) = General.parse_item_list_data(self.item_list)

        for i in range(max_iterations):
            initial_solution = solution
            k = 1
            while k <= neighborhood_size:
                mask_list = General.get_mask_list(solution.n, k, climb=False)
                initial_solution = self.random_neighbor(
                    solution, k, value_list, weight_list)
                if not initial_solution:
                    continue
                best_neighbor = self.evaluate_neighborhood(
                    initial_solution, mask_list, value_list, weight_list)

                if best_neighbor and best_neighbor.value > solution.value:
                    counter += 1
                    solution = deepcopy(best_neighbor)
                    #solution.print_solution()
                    #ic(f"{counter} {solution.value}")
                    output_file.write_line(f"{counter} {solution.value}")
                else:
                    k += 1
Code Example #5
    def run_local_search_improved(self):
        print(
            f"ic| run_local_search_improved: Executing Local Search with distance {self.distance}"
        )
        counter = 0
        output_file = FileWriter(file_name=self.output_filename)

        output_file.write_line(self.output_filename.replace('TEMP-', ''))
        output_file.write_line(str(self.solution.optimum))
        output_file.write_line(f"{counter} {self.solution.value}")

        mask_list = General.get_mask_list(self.solution.n,
                                          self.distance,
                                          climb=True)
        (value_list,
         weight_list) = General.parse_item_list_data(self.item_list)
        while self.evaluate_neighborhood_improved(self.solution, mask_list,
                                                  value_list, weight_list):
            counter += 1
            #self.solution.print_solution()
            ic(f"{counter} {self.solution.value}")
            output_file.write_line(f"{counter} {self.solution.value}")

            # prevent looping indefinitely
            if counter >= 100:
                return
Code Example #6
def _main(args):
    file = Path(args.name)
    line_generator = LineGenerator(size=args.line_size)
    file_writer = FileWriter(file=file,
                             records=args.records,
                             line_generator=line_generator)
    file_writer.write(Bar("Processing", max=args.records))
Code Example #7
File: doctest.py Project: sab0946/PR301
 def test_06(self):
     y = FileWriter()
     y.write_file("unit_test_file")
     try:
         with open(y.my_file) as f:
             pass
     except FileNotFoundError:
         print("ERROR - File not created")
Code Example #8
 def __init__(self, solution: Solution, item_list: list, distance: int,
              output_filename: str):
     self.solution = solution
     self.item_list = item_list
     self.distance = distance
     self.output_filename = output_filename
     # writing output variables
     self.output_file = FileWriter(file_name=self.output_filename)
Code Example #9
def simulated_annealing(optimum_value: float, instance_dict: dict, output_filename: str):
    config = settings.EVALUATE_METHODS_SETTINGS
    max_iterations = config.get('sa', {}).get('max_iterations')
    distance = config.get('sa', {}).get('distance')
    initial_temperature = config.get('sa', {}).get('initial_temperature')
    
    best_solution = Solution(
        n=instance_dict.get('n'),
        capacity=instance_dict.get('capacity'),
        optimum=optimum_value
    )
    best_solution.generate_starter_solution(
        item_list=instance_dict.get('item_list'),
        random_seed=RANDOM_SEED
    )
    best_solution.print_solution()

    sa = SimulatedAnnealing(
        solution=deepcopy(best_solution),
        item_list=instance_dict.get('item_list'),
        distance=2,
        output_filename=f"{output_filename}_temp"
    )

    current_solution = deepcopy(sa.solution)
    writer = FileWriter(file_name=f"{output_filename}_temp")

    for i in range(max_iterations):

        if i == 0:
            writer.write_line(output_filename.replace('TEMP-', ''))
            writer.write_line(str(current_solution.optimum))
            writer.write_line(f"{i} {current_solution.value}")

        sa.solution = deepcopy(current_solution)

        # random neighbor
        test = sa.random_neighbor(distance=distance)

        if not test:
            print(f"ERROR ! Could not find a random neighbor")

        # if better set as best_solution
        if sa.solution.value > best_solution.value:
            best_solution = deepcopy(sa.solution)
            writer.write_line(f"{i} {best_solution.value}")
        # calculate diff
        diff = current_solution.value - sa.solution.value
        # calculate temp
        t = initial_temperature / float(i + 1)
        if diff < 0 or random() < exp(-diff / t):
            current_solution = deepcopy(sa.solution)

    print(f"ic| SA Optimum Solution")
    best_solution.print_solution(item_list=instance_dict.get('item_list'))
    return best_solution
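The loop above accepts a worse neighbor with the usual Metropolis probability under a 1/(i+1) cooling schedule. The same rule the example applies to diff and t, restated on its own (independent of the Solution and SimulatedAnnealing classes used here):

from math import exp
from random import random

def accept(current_value, candidate_value, initial_temperature, iteration):
    """Metropolis acceptance rule for a maximization problem."""
    diff = current_value - candidate_value          # negative when the candidate is better
    t = initial_temperature / float(iteration + 1)  # cooling schedule used above
    return diff < 0 or random() < exp(-diff / t)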
Code Example #10
    def test_writer_writes_dates_to_file(self):
        writer = FileWriter(self.test_filename)
        expected_dates = self.article.get_dates()

        writer.write_to_file(self.article)

        with open(self.test_path, 'rt') as f:
            reader = csv.reader(f, delimiter=',')
            for row in reader:
                self.assertIn(expected_dates[0], row)
                self.assertIn(expected_dates[1], row)
Code Example #11
File: booking.py Project: mrvnmchm/BookingScraper
def save_data(data, out_format, country):
    """
    Saves hotels list in file
    :param data: hotels list
    :param out_format: json, csv or excel
    :return:
    """
    writer = FileWriter(data, out_format, country)
    file = writer.output_file()

    print("All accommodations are saved.")
    print("You can find them in", file, "file")
Code Example #12
 def __init__(self, subject_id, run_type, transfer_learning, model_name):
     self.subject_id = subject_id
     self.run_type = run_type
     self.transfer_learning = transfer_learning
     self.model_name = model_name
     self.left_hand_total = 0
     self.right_hand_total = 0
     self.left_hand_correct = 0
     self.right_hand_correct = 0
     self.total_time_taken = 0  # int in seconds
     self.predictions = []
     self.file_writer = FileWriter()
Code Example #13
def save_data(data, out_format, country):
    '''
    Saves hotels list in file
    :param data: hotels list
    :param out_format: json, csv or excel
    :return:
    '''
    writer = FileWriter(data, out_format, country)
    file = writer.output_file()

    print('All accommodations are saved.')
    print('You can find them in', file, 'file')
Code Example #14
def run(journal, num_articles):

    print "Running publication-dates version 1.1\n"

    # Setup output file, set parameters, and use brief run if testing
    writer = FileWriter(journal)

    num_volumes = 18  # 18 volumes per year
    issue = 1  # sample issue for each volume

    if len(sys.argv) > 1:
        print "Testing....."
        num_articles = 3
        num_volumes = 1

    # Sample papers accepted in previous year
    date = html.detect_start_volume()
    start_volume = date[0]
    acceptance_year = date[1]

    counter = 0

    volumes = range(start_volume - num_volumes + 1, start_volume + 1)

    for volume in reversed(volumes):

        # Go to volume/issue contents page, and extract URLs of articles
        articles = html.build_urls(journal, volume, issue)

        for num in range(1, num_articles + 1):

            # For first 'num_articles' in this volume/issue, try to extract date string from article webpage
            url = articles[num]

            try:
                date_string = html.get_date_div(url)
                counter += 1
            except:
                print "Some error occurred (URL '", url, "' not available?). Skipping."
                break

            article = Article(date_string)

            if article.get_year() == acceptance_year:
                writer.write_to_file(article)

    writer.close_file()

    return counter
Code Example #15
def scrapeUSA():
    USAMap = {
        "state": 0,
        "totalCases": 1,
        "newCases": 2,
        "totalDeaths": 3,
        "newDeaths": 4,
        "activeCases": 5,
    }
    scrapeRunner = ScrapeRunner(
        "https://www.worldometers.info/coronavirus/country/us",
        "usa_table_countries_today", USAMap)
    singleScrape = scrapeRunner.run()
    fileWriter = FileWriter(singleScrape, "usa")
    fileWriter.writeFile()
Code Example #16
def scrapeWorld():
    worldMap = {
        "state": 0,
        "totalCases": 1,
        "newCases": 2,
        "totalDeaths": 3,
        "newDeaths": 4,
        "totalRecovered": 5,
        "activeCases": 6,
    }
    scrapeRunner = ScrapeRunner("https://www.worldometers.info/coronavirus",
                                "main_table_countries_today", worldMap)
    singleScrape = scrapeRunner.run()
    fileWriter = FileWriter(singleScrape, "world")
    fileWriter.writeFile()
Code Example #17
def run_detect_cars(in_dir, out_dir, batch_size, limit, conf_thres, nms_thres, debug):
    assert os.path.isdir(out_dir), "directory {} does not exist".format(out_dir)

    now = datetime.datetime.now()
    now_str = now.strftime("%Y-%m-%dT%H-%M-%S")
    detected_dataset_dir = os.path.join(out_dir, now_str)
    os.mkdir(detected_dataset_dir)

    feed_file = os.path.join(detected_dataset_dir, "feed.json")
    detected_dataset_images_dir = os.path.join(detected_dataset_dir, "images")
    os.mkdir(detected_dataset_images_dir)

    cfg_file = os.path.join(HERE, '../cfg/yolov3.cfg')
    weight_file = os.path.join(HERE, '../cfg/yolov3.weights')
    namesfile = os.path.join(HERE, '../cfg/coco.names')
    class_names = load_class_names(namesfile)
    with torch.no_grad():
        model = Yolo(cfg_file=cfg_file, class_names=class_names, batch_size=batch_size)
        model.load_weights(weight_file)
        model.to(DEVICE)
        model.eval()

        image_and_target_transform = Compose([
            SquashResize(416),
            ToTensor()
        ])

        dataset = SimpleCarDataset(
            root_dir=in_dir,
            transforms=image_and_target_transform,
            batch_size=batch_size)

        with FileWriter(file_path=feed_file) as file_writer:
            car_dataset_writer = DetectedCarDatasetWriter(file_writer)

            detected_dataset_helper = DetectedCarDatasetHelper(car_dataset_writer=car_dataset_writer,
                                                               class_names=model.class_names,
                                                               conf_thres=conf_thres,
                                                               nms_thres=nms_thres,
                                                               batch_size=batch_size,
                                                               debug=debug)
            cnt = detect_and_process(model=model,
                                     dataset=dataset,
                                     processor=detected_dataset_helper.process_detections,
                                     limit=limit)

            logger.info("Ran detection of {} images".format(cnt))
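Here FileWriter is opened as a context manager and handed to DetectedCarDatasetWriter. A minimal sketch of a writer supporting that protocol (hypothetical names and behavior, assuming it simply opens file_path on entry and closes it on exit):

class ContextFileWriter:
    """Hypothetical file writer usable in a `with` block."""

    def __init__(self, file_path):
        self.file_path = file_path
        self._file = None

    def __enter__(self):
        self._file = open(self.file_path, "w", encoding="utf-8")
        return self

    def append(self, line):
        # hypothetical helper a consumer such as a dataset writer might call
        self._file.write(line + "\n")

    def __exit__(self, exc_type, exc_value, traceback):
        self._file.close()
        return False  # never suppress exceptions from the with block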
Code Example #18
    def __init__(self) -> None:
        super(App, self).__init__()

        self.timer: QTimer = QTimer(self)
        self.timer.timeout.connect(self.on_timeout)

        self.requests_queue: Queue[Tuple[Optional[Path], FileWritingMode,
                                         np.ndarray]] = Queue()
        self.results_queue: Queue[np.ndarray] = Queue()
        self.measurement: Optional[Measurement] = None
        self.file_writer: FileWriter = FileWriter(self.requests_queue)
        self.file_writer.start()

        self._data: List[np.ndarray] = []
        self._index_map: List[int] = []
        self._start_date: date = date.today()
        self._measurement_index: int = 1
Code Example #19
    def run_local_search(self):
        print(
            f"ic| run_local_search: Executing Local Search with distance {self.distance}"
        )
        counter = 0
        output_file = FileWriter(file_name=self.output_filename)

        output_file.write_line(self.output_filename.replace('TEMP-', ''))
        output_file.write_line(str(self.solution.optimum))
        output_file.write_line(f"{counter} {self.solution.value}")

        while self.evaluate_neighborhood(self.solution, self.item_list,
                                         self.distance):
            counter += 1
            #self.solution.print_solution()
            #ic(f"{counter} {self.solution.value}")
            output_file.write_line(f"{counter} {self.solution.value}")
Code Example #20
File: script.py Project: phufbv/journal-stats
def run(journal, num_articles):

	# Setup output file, get input parameters, and use brief run if testing
	writer = FileWriter(pars.filename)
	journal = journal  # journal name
	num_articles = num_articles  # number of articles to use from each issue

	num_volumes = 18  # 18 volumes per year
	issue = 1  # sample issue for each volume

	# if len(sys.argv) > 1:
	# 	print "Testing....."
	# 	num_articles = 10
	# 	num_volumes = 1


	# Sample papers accepted in previous year
	date = html.detect_start_volume()
	start_volume = date[0]
	acceptance_year = date[1]

	volumes = range(start_volume-num_volumes+1, start_volume+1)


	# for volume in reversed(volumes):

	# 	# Go to volume/issue contents page, and extract URLs of articles
	# 	articles = html.build_urls(journal, volume, issue)
		
	# 	for num in range(1, num_articles+1):

	# 		# For first 'num_articles' in this volume/issue, try to extract date string from article webpage
	# 		url = articles[num]
		    
	# 		try:
	# 			date_string = html.get_date_div(url)
	# 		except:
	# 			print "Some error occurred (URL '",url,"' not available?). Skipping."
	# 			break

	# 		article = Article(date_string)

	# 		if article.get_year() == acceptance_year:
	# 			writer.write_to_file(article)

	writer.close_file()
Code Example #21
    parser.add_argument('--save', action='store_true')
    args = parser.parse_args()

    # handler = logging.StreamHandler()
    # handler.setLevel(logging.DEBUG)
    # for logger_name in ("praw", "prawcore"):
    #     logger = logging.getLogger(logger_name)
    #     logger.setLevel(logging.DEBUG)
    #     logger.addHandler(handler)

    sub_reddit = args.reddit

    fw_queue = None
    if args.save:
        fw_queue = queue.Queue()
        fw = FileWriter(fw_queue, sub_reddit=sub_reddit, data_type=args.type)
        fw.start()

    queue = queue.Queue()

    dsc = DeepstreamClient(queue, file_writer_queue=fw_queue)
    dsc.start()

    # sub_reddit = 'stocks'
    if args.type == 'comments':
        get_comments(sub_reddit=sub_reddit, q=queue)
    elif args.type == 'submissions':
        get_submissions(sub_reddit=sub_reddit, q=queue)
    else:
        queue.put('quit')
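In this example FileWriter is started with fw.start() and fed through a queue.Queue, i.e. a producer/consumer setup. A minimal sketch of that pattern (hypothetical QueueFileWriter, not this project's class):

import queue
import threading

class QueueFileWriter(threading.Thread):
    """Hypothetical worker thread: drain a queue and append each item to a file."""

    def __init__(self, q, path):
        super().__init__(daemon=True)
        self.q = q
        self.path = path

    def run(self):
        with open(self.path, "a", encoding="utf-8") as f:
            while True:
                item = self.q.get()
                if item == "quit":  # sentinel value shuts the worker down
                    break
                f.write(f"{item}\n")
                f.flush()

A producer would then call q.put(record) for each record and finally q.put("quit") so the thread can exit.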
Code Example #22
 def __init__(self, setting, communicator):
     self.communicator = communicator
     self.communicator.debug("CodeGenerator loaded")
     self.communicator.debug("Object:" + str(setting))
     self.setting = setting
     self.file_writer = FileWriter(setting, communicator)
Code Example #23
 def __init__(self):
     self.file_writer = FileWriter()
Code Example #24
    def test_writer_creates_file(self):
        writer = FileWriter(self.test_filename)

        self.assertTrue(os.path.isfile(self.test_path))
Code Example #25
File: doctest.py Project: sab0946/PR301
 def test_07(self):
     x = FileWriter()
     x.add_data("plant_uml")
     if len(x.my_program) > 0:
         pass
Code Example #26
parser.add_argument("--stdout",
                    help="write to stdout instead of file",
                    action="store_true")
parser.add_argument("-nth",
                    type=int,
                    help="only print ever nth sample if --stdout is specified")
args = parser.parse_args()

sensor_reader = SensorReader()
if args.stdout:
    if args.nth is not None:
        writer = StdoutWriter(args.nth)
    else:
        writer = StdoutWriter()
else:
    writer = FileWriter('/home/pi/sensor_recordings/')
sensor_reader.set_sensor_listener(writer)

# Consumer/producer architecture: the SensorReader is the producer, reading data from sensors,
# and the FileWriter is the consumer.
# We use multiprocessing.Process instead of threading.Thread because the latter would also cause
# the other thread to slow down due to Global Interpreter Lock.

# reset this because sensor_reader.start_reading() might execute before writer.start_write_loop()
writer.stop.value = 0
process = Process(target=writer.start_write_loop)
process.start()
Code Example #27
from file_writer import FileWriter

x = FileWriter()
x.add_data("plant_uml_complex")
print(x.my_program)
Code Example #28
File: scr.py Project: ErShubhamB/scraping
def main():
    url = rq.args.get('url')
    checkin = rq.args.get('checkin')
    checkout = rq.args.get('checkout')
    part_url = url.split('&checkin_year', 1)[0]
    part_url = part_url + '&checkin_year=2019&checkin_month=4&checkin_monthday=12&checkout_year=2019&checkout_month=4&checkout_monthday=13'
    checkin = checkin.split('-', 2)
    checkout = checkout.split('-', 2)
    d1 = date(int(checkin[0]), int(checkin[1]), int(checkin[2]))  # start date
    d2 = date(int(checkout[0]), int(checkout[1]),
              int(checkout[2]))  # end date
    #d2 = date(2019, 1, 20)  # end date
    delta = d2 - d1
    hotels = []
    for i in range(delta.days):
        dt_from = d1 + timedelta(i)
        dt_to = d1 + timedelta(i + 1)
        #[0] => Year [1] => month [2] => day
        split_from = str(dt_from).split('-', 2)
        split_to = str(dt_to).split('-', 2)
        dt_to_year = split_to[0]
        dt_to_month = split_to[1]
        dt_to_day = split_to[2]

        dt_from_year = split_from[0]
        dt_from_month = split_from[1]
        dt_from_day = split_from[2]
        turl = part_url + '&checkout_month=' + dt_to_month + '&checkout_monthday=' + dt_to_day + '&no_rooms=1&group_adults=2&group_children=0&sb_travel_purpose=business&b_h4u_keep_filters=&from_sf=1'
        #print(url)
        r = requests.get(
            turl,
            headers={
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
            })
        html = r.content
        parsed_html = BeautifulSoup(html, 'lxml')
        hotel = parsed_html.find_all('div', {'class': 'sr_item'})
        tm.sleep(5)
        print(len(hotel))
        for ho in hotel:
            #print(ho)
            name = ho.find('span', {'class': 'sr-hotel__name'})
            price = ho.find('strong', {'class': 'availprice'})
            hurl = ho.find('a', {'class': 'hotel_name_link'})['href']
            rating = ho.find('div', {'class': 'bui-review-score__badge'})
            #print(price)
            sub_r = requests.get(
                'http://booking.com' + hurl.replace('\n', ''),
                headers={
                    'User-Agent':
                    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
                })
            sub_html = sub_r.content
            tm.sleep(5)
            parsed_sub_html = BeautifulSoup(sub_html, 'lxml')
            sub_hotels = parsed_sub_html.find('select',
                                              {'class': 'hprt-nos-select'})
            if (sub_hotels):
                opt = sub_hotels('option')[-1]
                if (opt):
                    #print(opt.text)
                    occupancy = opt.text
                    occupancy = occupancy.replace('\n', '')
                    occupancy = occupancy.split('(', 1)[0]
                    occupancy = occupancy.replace(' ', '')
                    occupancy = int(occupancy)
                    occupancy = occupancy * 10
                    occupancy = str(occupancy) + '%'
                else:
                    occupancy = 0
            else:
                occupancy = 0  # avoid a NameError below when no room-count selector is found
            if (price):
                pr = price.text
            else:
                pr = ''
            if (rating):
                rate = rating.text
            else:
                rate = ''
            data = {}
            data[
                'check_in_date'] = dt_from_year + '-' + dt_from_month + '-' + dt_from_day
            data[
                'check_out_date'] = dt_to_year + '-' + dt_to_month + '-' + dt_to_day
            data['name'] = name.text.replace('\n', '')
            data['price'] = pr.replace('\n', '')
            data['URL'] = 'http://booking.com' + hurl.replace('\n', '')
            rt = rate.replace('\n', '')
            rt = rt.replace(' ', '')
            data['rating'] = rt  # use the whitespace-stripped rating
            data['occupancy'] = occupancy
            hotels.append(data)
            tm.sleep(5)
    writer = FileWriter(hotels, out_format='JSON', country='JAPAN')
    file = writer.output_file()
    return jsonify(hotels)
Code Example #29
File: entry_point.py Project: praskovna/app_console
def print_trans_menu():
    print(trans_menu)
    choice = raw_input('Please select: ')
    if choice == '1':
        user_trans_sum = raw_input('Enter transaction sum: ')
        write_obj.set_trans('trans_sum', user_trans_sum)
    elif choice == '2':
        user_currency_code = raw_input('Enter currency code: ')
        write_obj.set_trans('trans_sum', user_currency_code)
    elif choice == '3':
        print(write_menu)
    else:
        process_unknown_option()


def create_parser():
    parser_ = argparse.ArgumentParser(
        description='Process command line file_path.')
    return parser_


if __name__ == '__main__':
    parser = create_parser()
    namespace = parser.parse_args(argv[1:])
    write_obj = FileWriter()
    read_obj = FileReader()

    loop = True
    while loop:
        print_main_menu()