Example #1
 def search_time_spent(self):
     '''This is the menu where the user enters the number of minutes a task
     took and is shown every entry that matches that duration
     '''
     print('SEARCH BY TIME SPENT')
     print("Time spent")
     time_spent = None
     while time_spent is None:
         input_text = input("Enter a whole number of minutes (rounded) ")
         try:
             time_spent = int(input_text)
         except ValueError:
             print("Invalid value")
             continue
     # load csv
     csvm = CsvManager()
     csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
     field_title = self.HEADERS['duration']
     matching_records = self.get_matching_records(csv_data, field_title,
                                                  str(time_spent))
     if len(matching_records) == 0:
         print("\nNo matches, returning to search menu")
         return self.search_entries
     self.records = matching_records
     self.current_record = 0
     return self.present_next_result
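
CsvManager itself does not appear in these menu excerpts; a minimal sketch consistent with the calls above (load_csv returning a list of dicts, save_csv appending unless truncate=True) might look like this. The header handling is an assumption:

import csv

class CsvManager:
    '''Minimal sketch of the CSV helper, inferred from its call sites.'''

    def load_csv(self, filename):
        # Each row comes back as a dict keyed by the header row.
        with open(filename, newline='') as f:
            return list(csv.DictReader(f))

    def save_csv(self, data, filename, truncate=False):
        # Append by default; truncate=True rewrites the whole file.
        mode = 'w' if truncate else 'a'
        with open(filename, mode, newline='') as f:
            writer = csv.DictWriter(f, fieldnames=list(data[0].keys()))
            if truncate or f.tell() == 0:
                writer.writeheader()
            writer.writerows(data)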
Example #2
 def search_regex_search(self):
     '''This menu is just like `search_text_search` except the user provides
     a regex pattern instead of a text string
     '''
     text_headers = [self.HEADERS['task_name'], self.HEADERS['notes']]
     print('SEARCH USING REGEX PATTERN')
     print("Enter the pattern to search on")
     input_text = input("> ")
     pattern = input_text
     # load csv
     csvm = CsvManager()
     csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
     # perform search
     matching_records = []
     for header in text_headers:
         matches_for_header = self.get_records_with_pattern(
             csv_data, header, pattern)
         matching_records.extend(matches_for_header)
     # de-duplicate records that matched in more than one column
     uniques = []
     for record in matching_records:
         if record not in uniques:
             uniques.append(record)
     if len(uniques) == 0:
         print("\nNo matches, returning to search menu")
         return self.search_entries
     self.records = uniques
     self.current_record = 0
     return self.present_next_result
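
get_records_with_pattern is a project helper not shown in this excerpt; assuming it keeps the rows whose field matches the regex anywhere in the value, it could be sketched as:

import re

def get_records_with_pattern(self, csv_data, field_title, pattern):
    # Keep rows whose field matches the pattern anywhere in the value.
    regex = re.compile(pattern)
    return [row for row in csv_data
            if regex.search(row.get(field_title) or '')]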
Example #3
 def search_text_search(self):
     '''This is the menu where the user enters a text string and is presented
     with all entries containing that string in the task name or notes
     '''
     text_headers = [self.HEADERS['task_name'], self.HEADERS['notes']]
     print('SEARCH USING TEXT STRING')
     print("Enter the text string to search on")
     input_text = input("> ")
     text_string = input_text
     # load csv
     csvm = CsvManager()
     csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
     # perform search
     matching_records = []
     for header in text_headers:
         matches_for_header = self.get_records_containing(
             csv_data, header, text_string)
         matching_records.extend(matches_for_header)
     # de-duplicate records that matched in both columns
     uniques = []
     for record in matching_records:
         if record not in uniques:
             uniques.append(record)
     if len(uniques) == 0:
         print("\nNo matches, returning to search menu")
         return self.search_entries
     self.records = uniques
     self.current_record = 0
     return self.present_next_result
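
The two lookup helpers these menus share are also absent from the excerpt. Sketches consistent with their usage (exact match for get_matching_records, substring match for get_records_containing) might be:

def get_matching_records(self, csv_data, field_title, value):
    # Exact string comparison against the stored field.
    return [row for row in csv_data if row.get(field_title) == value]

def get_records_containing(self, csv_data, field_title, text_string):
    # Case-sensitive substring test, mirroring Python's `in` operator.
    return [row for row in csv_data
            if text_string in (row.get(field_title) or '')]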
Example #4
def upload_csv(user):
    db = mydb.MyDatabase(dbtype=mydb.SQLITE, dbname='wortkarte.sqlite')
    # csvfile = 'static/files/words.csv'
    csvfile = 'static/files/verben_prap.csv'
    csv_man = CsvManager(csvfile)
    words = csv_man.get_all_words()
    for word in words:
        query = f"INSERT INTO words_{user}(source, target, zero, one, two, three, four) " \
                f"VALUES ('{word[0]}','{word[1]}', 1, 0, 0, 0, 0);"
        db.execute_query(query)
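
Note that the f-string interpolates the word values straight into the SQL text, which breaks on words containing quotes and invites SQL injection. Whether mydb.MyDatabase supports bound parameters is not shown; a safer sketch using the standard sqlite3 module instead would be:

import sqlite3

def upload_csv_safe(user, csvfile='static/files/verben_prap.csv'):
    words = CsvManager(csvfile).get_all_words()
    with sqlite3.connect('wortkarte.sqlite') as conn:
        # The table name cannot be bound, so `user` must still be trusted;
        # the word values, however, are passed as parameters.
        conn.executemany(
            f"INSERT INTO words_{user}(source, target, zero, one, two, three, four) "
            "VALUES (?, ?, 1, 0, 0, 0, 0);",
            ((word[0], word[1]) for word in words))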
Example #5
def main():
    # every argument except the last is an input file; the last is the output
    file_names = sys.argv[1:-1]
    out_name = sys.argv[-1]

    fieldnames = config.FIELDNAMES

    csv_manager = CsvManager(fieldnames.keys(), fieldnames)
    for file_name in file_names:
        with open(file_name, 'r') as in_file:
            csv_manager.read_csv(in_file, preprocess=config.PREPROCESS)
    with open(out_name, 'w') as out_file:
        csv_manager.write_csv(out_file)
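
Run from the shell, every argument but the last names an input file and the last names the merged output (the script and file names below are illustrative):

python merge_csvs.py january.csv february.csv combined.csv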
Example #6
    def search_date_range(self):
        '''This is the menu where the user can enter a from date and to date
        and get back every entry from within that range
        '''
        print('SEARCH DATE RANGE')
        start_date = None
        end_date = None
        # get start_date
        while start_date is None:
            print("Start Date:")
            user_entry = self.date_entry()
            if user_entry[0] is not None:  # error
                print(user_entry[0])
                continue
            else:
                start_date = user_entry[1]
        # get end_date
        while end_date is None:
            print("End Date:")
            user_entry = self.date_entry()
            if user_entry[0] is not None:  # error
                print(user_entry[0])
                continue
            else:
                end_date = user_entry[1]
        # load csv
        csvm = CsvManager()
        csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
        # loop through every date in the range (inclusive)
        if end_date < start_date:
            # the dates were entered in reverse order; swap them
            start_date, end_date = end_date, start_date
        current_date = start_date
        print("\nShowing entries:")
        matching_records = []
        while current_date <= end_date:
            #   show entries
            date_string = self.date_to_string(current_date, target='file')
            matching_records += self.get_matching_records(
                csv_data, self.HEADERS['date'], date_string)
            current_date = current_date + datetime.timedelta(days=1)

        if len(matching_records) == 0:
            print("\nNo matches, returning to search menu")
            return self.search_entries
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
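
date_entry, shared by several of these menus, evidently returns an (error_message, date) pair. A sketch matching that contract follows; the DD/MM/YYYY format is an assumption:

import datetime

def date_entry(self):
    # Returns (error_message, None) on bad input, (None, date) on success.
    user_input = input("Enter a date (DD/MM/YYYY) > ")
    try:
        date = datetime.datetime.strptime(user_input, '%d/%m/%Y').date()
    except ValueError:
        return ("Dates must look like DD/MM/YYYY", None)
    return (None, date)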
Example #7
 def delete_current_record(self):
     print("delete record")
     match_index = self.current_record
     record = self.records[match_index]
     # load the csv
     csvm = CsvManager()
     csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
     # find the row that matches record
     for row in csv_data:
         if row == record:
             # delete that row
             csv_data.remove(row)
             break
     # save the csv
     csvm.save_csv(csv_data, self.DATASTORE_FILENAME, truncate=True)
     return self.main_menu
Example #8
 def edit_record(self):
     print("edit record")
     print('enter the record number to edit')
     user_input = input("> ")
     match_index = int(user_input) - 1
     record = self.records[match_index]
     # get the new values for the record
     date = None
     while date is None:
         print("New date of the Task")
         user_entry = self.date_entry()
         if user_entry[0] is not None:  # error
             print(user_entry[0])
             continue
         else:
             date = user_entry[1]
             date_string = self.date_to_string(date, target='file')
     print("New name of the Task")
     input_text = input("Enter the name of the task > ")
     task_name = input_text
     time_spent = None
     while time_spent is None:
         print("New time spent")
         input_text = input("Enter a whole number of minutes (rounded) ")
         try:
             time_spent = int(input_text)
         except ValueError:
             print("Invalid value")
             continue
     print("New notes")
     input_text = input("(Optional, leave blank for none) ")
     notes = input_text
     # load the csv
     csvm = CsvManager()
     csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
     # find the row that matches record
     for row in csv_data:
         if row == record:
             row[self.HEADERS['date']] = date_string
             row[self.HEADERS['task_name']] = task_name
             row[self.HEADERS['duration']] = time_spent
             row[self.HEADERS['notes']] = notes
     # save the csv
     csvm.save_csv(csv_data, self.DATASTORE_FILENAME, truncate=True)
     return self.main_menu
Example #9
 def delete_record(self):
     print("delete record")
     print('enter the record number to delete')
     user_input = input("> ")
     match_index = int(user_input) - 1
     record = self.records[match_index]
     # load the csv
     csvm = CsvManager()
     csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
     # find the row that matches record
     for row in csv_data:
         if row == record:
             # delete that row
             csv_data.remove(row)
             break
     # save the csv
     csvm.save_csv(csv_data, self.DATASTORE_FILENAME, truncate=True)
     return self.main_menu
Example #10
 def add_entry(self):
     '''This is the menu where the user can add a task that was completed
     '''
     while True:
         print("\nADD ENTRY")
         date = None
         while date is None:
             print("Date of the Task")
             user_entry = self.date_entry()
             if user_entry[0] is not None:  # error
                 print(user_entry[0])
                 continue
             else:
                 date = user_entry[1]
                 date_string = self.date_to_string(date, target='file')
         print("Name of the Task")
         input_text = input("Enter the name of the task > ")
         task_name = input_text
         time_spent = None
         while time_spent is None:
             print("Time spent")
             print("Enter a whole number of minutes (rounded)")
             input_text = input("> ")
             try:
                 time_spent = int(input_text)
             except ValueError:
                 print("Invalid value, please try again")
                 continue
             if time_spent < 0:
                 print("Invalid value, please try again")
                 time_spent = None
                 continue
         print("Notes")
         input_text = input("(Optional, leave blank for none) ")
         notes = input_text
         # call method to write data to file
         csvm = CsvManager()
         file_data = [{
             self.HEADERS['date']: date_string,
             self.HEADERS['task_name']: task_name,
             self.HEADERS['duration']: time_spent,
             self.HEADERS['notes']: notes
         }]
         csvm.save_csv(file_data, self.DATASTORE_FILENAME)
         return self.main_menu
Example #11
    def search_exact_date(self):
        '''This is the menu where the user browses dates and entries and picks
        the date from a list
        '''
        print("\nSEARCH EXACT DATE")
        # load the csv
        csvm = CsvManager()
        csv_data = csvm.load_csv(self.DATASTORE_FILENAME)
        date_records = self.get_column(csv_data,
                                       self.HEADERS['date'],
                                       unique=True)
        for i, value in enumerate(date_records):
            print("{}) {}".format(i + 1, value))
        selected_date = None
        while selected_date is None:
            user_input = input("> ")
            # perform input validation
            try:
                user_input = int(user_input) - 1
            except ValueError:
                print("Invalid value, try again")
                continue
            if user_input < 0:
                print("Value out of range. Try again.")
                continue
            try:
                selected_date = date_records[user_input]
            except IndexError:
                print("Value out of range. Try again.")
                continue

            # when a date is selected, show all the entries with that date
            matching_records = self.get_matching_records(
                csv_data, self.HEADERS['date'], selected_date)
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
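
get_column, used above to list the distinct dates, is another helper not included in the excerpt; a sketch that preserves first-seen order when unique=True:

def get_column(self, csv_data, field_title, unique=False):
    # Pull one column from the loaded rows, optionally de-duplicated.
    values = [row.get(field_title) for row in csv_data]
    if unique:
        values = list(dict.fromkeys(values))
    return values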
Example #12
def multiprocess(traces, commands, raw_config):
    # Get Simulator Configurations...
    config = SimulatorConfig(
        C=raw_config['C'],
        L=raw_config['L'],
        K=raw_config['K'],
        N=raw_config['N'],
        BIT_SIZE=constants.BIT_SIZE,
        input_label=commands.input_file_label,
    )
    simulation_results = run(traces, config)

    # Open CSV file to write...
    output_file = constants.OUTPUT_FOLDER_PATH + populate_output_file_label(
        commands.input_file_label,
        C=raw_config['C'],
        L=raw_config['L'],
        K=raw_config['K'],
        N=raw_config['N'],
    )
    with open(output_file, 'w+') as csv_file:
        # Print out result file as CSV
        csv_manager = CsvManager(csv_file, [
            'Input',
            'Cache-Capacity',
            'L',
            'K',
            'N',
            'Hit-Ratio',
            'Miss-Ratio',
            'AMAT',
            'Hit-Count',
            'Miss-Count',
            'Access-Count',
        ])
        csv_manager.write_row(simulation_results)
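
This CsvManager differs from the task-log one above: it wraps an already-open file handle plus a fieldname list and exposes write_row. A minimal sketch of that assumed interface:

import csv

class CsvManager:
    '''Sketch of the writer-style CsvManager, inferred from its call sites.'''

    def __init__(self, csv_file, fieldnames):
        self.writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        self.writer.writeheader()

    def write_row(self, row):
        # `row` is a dict keyed by the fieldnames passed to __init__.
        self.writer.writerow(row)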
Example #13
def run(commands):
  input_file = constants.INPUT_FOLDER_PATH + commands.input_file_label
  # Parse trace file to programmable.
  traces = []
  with open(input_file, 'r') as trace_file:
    traces = trace_parser.parse(trace_file, constants.BIT_SIZE)

  # Config for L1 I/D, L2 (Fixed)
  config_L1_inst = CacheConfig(
    C=L1_CACHE_SIZE, L=BLOCK_SIZE, K=1, N=512,
    BIT_SIZE=constants.BIT_SIZE,
    input_label=commands.input_file_label,
    HIT_TIME=4,
    MISS_PENALTY=16,
  )
  config_L1_data = CacheConfig(
    C=L1_CACHE_SIZE, L=BLOCK_SIZE, K=1, N=512,
    BIT_SIZE=constants.BIT_SIZE,
    input_label=commands.input_file_label,
    HIT_TIME=4,
    MISS_PENALTY=16,
  )
  config_L2 = CacheConfig(
    C=L2_CACHE_SIZE, L=BLOCK_SIZE, K=8, N=512,
    BIT_SIZE=constants.BIT_SIZE,
    input_label=commands.input_file_label,
    HIT_TIME=16,
    MISS_PENALTY=32,
  )

  raw_configs_dicts_L3 = {}
  with open('configs/project.json', 'r') as raw_config_file:
    raw_configs_dicts_L3 = json.load(raw_config_file)
  raw_configs_L3 = [
    {
      'C': L3_CACHE_SIZE,
      'L': BLOCK_SIZE,
      'K': raw_config['K'],
      'N': raw_config['N'],
      'INST_PREFETCHER': raw_config['INST_PREFETCHER'],
      'DATA_PREFETCHER': raw_config['DATA_PREFETCHER'],
      'REPLACEMENT': raw_config['REPLACEMENT'],
    }
    for raw_config in cartesian_dict_product(raw_configs_dicts_L3)
    if check_raw_config({
      'C': L3_CACHE_SIZE,
      'L': BLOCK_SIZE,
      'K': raw_config['K'],
      'N': raw_config['N'],
    })
  ]
  validate_raw_configs(raw_configs_L3)
  del raw_configs_dicts_L3

  for raw_config_L3 in raw_configs_L3:
    # Config for L3 (Dynamic)
    config_L3 = CacheConfig(
      C=raw_config_L3['C'],
      L=raw_config_L3['L'],
      K=raw_config_L3['K'],
      N=raw_config_L3['N'],
      BIT_SIZE=constants.BIT_SIZE,
      input_label=commands.input_file_label,
      HIT_TIME=32,
      MISS_PENALTY=120,
      # inst_prefetcher=constants.PREFETCHER_TYPE['STREAM_BUFFER'],
      inst_prefetcher=constants.PREFETCHER_TYPE[raw_config_L3['INST_PREFETCHER']],
      # data_prefetcher=constants.PREFETCHER_TYPE['WRITE_BUFFER'],
      data_prefetcher=constants.PREFETCHER_TYPE[raw_config_L3['DATA_PREFETCHER']],
      replacement_policy=constants.REPLACEMENT_POLICY_TYPE[raw_config_L3['REPLACEMENT']],
    )

    # TODO(totorody): Implement running the caches
    cache_L1_inst = Cache(config_L1_inst)
    cache_L1_data = Cache(config_L1_data)
    cache_L2 = Cache(config_L2)
    cache_L3 = Cache(config_L3)

    cache_L1_inst.set_low_cache(cache_L2)
    cache_L1_data.set_low_cache(cache_L2)
    cache_L2.set_low_cache(cache_L3)

    print('Start to run caching...')
    index = 0
    for trace in traces:
      if index % 10000 == 0:
        print('trace #:', index)
      index += 1
      if trace['type'] not in constants.ACCESS_TYPE.values():
        continue
      if trace['type'] == constants.ACCESS_TYPE['INST_READ']:
        cache_L1_inst.access(trace)
      else:
        cache_L1_data.access(trace)

    print('Prints cache simulation results...')
    inst_result = cache_L1_inst.get_result('L1-Inst')
    data_result = cache_L1_data.get_result('L1-Data')
    L2_result = cache_L2.get_result('L2')
    L3_result = cache_L3.get_result('L3')

    output_file = constants.OUTPUT_FOLDER_PATH \
        + populate_output_file_label(config_L3)
    with open(output_file, 'w+') as csv_file:
      csv_manager = CsvManager(csv_file, inst_result.keys())
      csv_manager.write_row(inst_result)
      csv_manager.write_row(data_result)
      csv_manager.write_row(L2_result)
      csv_manager.write_row(L3_result)
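
cartesian_dict_product is not defined in this excerpt; assuming it expands a dict of option lists into every combination of single values, itertools.product covers it:

import itertools

def cartesian_dict_product(options):
    # {'K': [4, 8], 'N': [256]} -> {'K': 4, 'N': 256}, {'K': 8, 'N': 256}
    keys = list(options.keys())
    for values in itertools.product(*(options[key] for key in keys)):
        yield dict(zip(keys, values))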
Example #14
    async def crawl_pages(self, category):
        cat_id = self.categories.get(category)
        offset = 0
        max_results = 50
        auctions = list()

        while True:
            url = self.search_category_url_format.format(
                cat_id=cat_id, skip=offset, max_num_of_results=max_results)
            _, page_content = await self.extract_async(url)
            items = []
            if page_content is not None:
                # json.loads dropped its encoding argument in Python 3.9;
                # decoding the bytes first is sufficient
                json_obj = json.loads(page_content.decode("utf-8"))

                items = json_obj.get("Items", [])
                auctions.extend(items)

            offset += max_results

            # the final page returns fewer items than requested
            if len(items) < max_results:
                break

        log.debug("Found: %d auctions of category: %s" %
                  (len(auctions), category))

        output_dir = self.output_dir_path_format.format(category=category)
        csv_file_path = os.path.join(
            output_dir, "{category}.csv".format(category=category))

        log.info("Csv output directory path: %s, csv file: %s" %
                 (output_dir, csv_file_path))

        Util.create_directory(output_dir)

        csv_manager = CsvManager(csv_file_path, self.fields, "id")
        csv_manager.open_file()

        # parse every collected auction, not just the last page's items
        tasks = (self.parse_item(category, item) for item in auctions)
        for res in AsyncCrawler.limited_as_completed(tasks, 5):
            extracted_data = await res

            if csv_manager.check_row_exist(extracted_data):
                extracted_data["flag"] = self.flags.get("updated")
            else:
                extracted_data["flag"] = self.flags.get("new")

            csv_manager.update_row(extracted_data)

            auction_output_dir = os.path.join(output_dir,
                                              extracted_data.get("id"))
            Util.create_directory(auction_output_dir)

            if extracted_data.get("images") is not None:
                images_urls = extracted_data.get("images").split('|')

                local_img = list()

                for img_url in images_urls:
                    local_img_file_path = os.path.join(
                        auction_output_dir, "{img_id}.jpg".format(
                            img_id=self.get_image_id(img_url)))

                    if not Util.check_file_exist(local_img_file_path):
                        local_img.append((img_url, local_img_file_path))

                download_tasks = (self.download_file(img_url, img_file_path)
                                  for img_url, img_file_path in local_img)

                for r in AsyncCrawler.limited_as_completed(download_tasks):
                    await r

        csv_manager.close_file()
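
AsyncCrawler.limited_as_completed caps how many coroutines run at once while yielding awaitables in completion order. Its implementation is not shown; a semaphore-based sketch with the same observable behavior:

import asyncio

async def _bounded(semaphore, coro):
    async with semaphore:
        return await coro

def limited_as_completed(coros, limit=10):
    # At most `limit` coroutines run concurrently; results are yielded
    # as awaitables in the order the coroutines finish.
    semaphore = asyncio.Semaphore(limit)
    return asyncio.as_completed(
        [_bounded(semaphore, coro) for coro in coros])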
Example #15
import sys

def parse_commands(argv):
  from optparse import OptionParser
  parser = OptionParser()
  parser.add_option('-i', '--inputFile', dest='input_file')
  parser.add_option('-o', '--outputFile', dest='output_file')

  options, otherjunk = parser.parse_args(argv)
  return options

options = parse_commands(sys.argv[1:])

parser = None
weka_objects = None
with open(options.input_file, 'r') as weka_file:
  parser = WekaParser(weka_file)
  weka_objects = parser.parse()

weka_manager = WekaManager(weka_objects)
weka_manager.filter_objects()
weka_manager.normalize()

analyze_result = weka_manager.analyze()

with open(options.output_file, 'w+') as csv_file:
  csv_manager = CsvManager(csv_file, ['Type', 'Source', 'Target', 'Weight'])
  for relationship, weight in analyze_result.items():
    csv_manager.write_row({
      'Type': 'Undirected',
      'Source': relationship[0],
      'Target': relationship[1],
      'Weight': weight,
    })
Example #16
from input_manager import InputManager
from list_manager import ListManager
from csv_manager import CsvManager
from ui_manager import UiManager
inputManager = InputManager()
listManager = ListManager()
csvManager = CsvManager()
uiManager = UiManager()

inputManager.prompt_user_for_country_iso_code()
inputManager.prompt_user_for_type_of_data()
inputManager.prompt_user_for_interval_in_days()

covid_cases_filtered_by_country = listManager.filter_by_country_iso_code(
    csvManager.ALL_COVID_CASES_IN_CSV, inputManager.chosen_country_iso_code)

list_of_dates = listManager.get_date_list(covid_cases_filtered_by_country)
list_of_cases = listManager.get_type_of_data_list(
    covid_cases_filtered_by_country, inputManager.chosen_type_of_data)

list_of_dates = listManager.get_cases_by_interval_in_days(
    list_of_dates, inputManager.chosen_interval_in_days)
list_of_cases = listManager.get_cases_by_interval_in_days(
    list_of_cases, inputManager.chosen_interval_in_days)

uiManager.create_plot_from_two_lists(list_of_cases, list_of_dates)
uiManager.define_labels_for_both_axis(
    "Cases",
    f"Date ({uiManager.day_message_plural_check(inputManager.chosen_interval_in_days)})"
)
uiManager.show_plot()
Example #17
    async def crawl_pages(self, category, max_pages):
        pages = (self.search_category_url_format.format(
            category=category, page_number=page_number)
                 for page_number in range(1, max_pages + 1))

        auctions_links = list()

        tasks = (self.extract_async(url) for url in pages)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page
            if url is not None and page_content is not None:
                auctions_links.extend(
                    self.parse_search_result_page(page_content))

        if not auctions_links:
            log.warning("No results found for category: %s" % category)
            return

        log.debug("Found: %d auctions in %d pages of category: %s" %
                  (len(auctions_links), max_pages, category))

        output_dir = self.output_dir_path_format.format(category=category)
        csv_file_path = os.path.join(
            output_dir, "{category}.csv".format(category=category))

        Util.create_directory(output_dir)

        csv_manager = CsvManager(csv_file_path, self.fields, "id")
        csv_manager.open_file()

        for auction_url in auctions_links:
            self.driver.get(auction_url)

            extracted_data = self.parse_data(category, auction_url,
                                             self.driver.page_source)
            if csv_manager.check_row_exist(extracted_data):
                log.debug("row already existed in csv")
                extracted_data["flag"] = self.flags.get("updated")
            else:
                log.debug("row in new")
                extracted_data["flag"] = self.flags.get("new")

            csv_manager.update_row(extracted_data)

            auction_output_dir = os.path.join(output_dir,
                                              extracted_data.get("id"))
            Util.create_directory(auction_output_dir)

            if extracted_data.get("images") is not None:
                images_urls = extracted_data.get("images").split('|')

                local_img = list()

                for img_url in images_urls:
                    local_img_file_path = os.path.join(
                        auction_output_dir, "{img_id}.png".format(
                            img_id=self.get_image_id(img_url)))

                    if not Util.check_file_exist(local_img_file_path):
                        local_img.append((img_url, local_img_file_path))

                download_tasks = (self.download_file(img_url, img_file_path)
                                  for img_url, img_file_path in local_img)

                for r in AsyncCrawler.limited_as_completed(download_tasks):
                    await r

        csv_manager.close_file()
Example #18
 def setup_csv_file_manager(self):
     tracklist = self.config_manager.get_config('PATHS', 'tracklist_file')
     self.csv_manager = CsvManager(tracklist)
Example #19
class Karaoke:
    """
    This class is basically the main class.
    It uses all other classes to execute commands inserted from the ui
    If it has a valid config, it will start
    If it does not have a valid config
    """

    def __init__(self):
        self.config_manager = ConfigManager()

        if self.config_manager.has_valid_config():
            print("Valid config file is detected")
            self.setup()
        else:
            print("No valid config file is found")

    def has_valid_config(self):
        if not self.config_manager.has_valid_config():
            print("Invalid config")
            return False
        if not self.csv_manager.is_csv_valid():
            print("Invalid tracklist")
            return False
        
        return True

    def get_video_directory(self):
        return self.config_manager.get_config('PATHS', 'video_directory')

    def set_video_directory(self, video_dir):
        return self.config_manager.set_config('PATHS', 'video_directory', video_dir)

    def get_tracklist_file(self):
        return self.config_manager.get_config('PATHS', 'tracklist_file')

    def set_tracklist_file(self, tracklist_file):
        return self.config_manager.set_config('PATHS', 'tracklist_file', tracklist_file)

    def setup(self):
        self.setup_csv_file_manager()
        self.setup_mpv_manager()

    def setup_csv_file_manager(self):
        tracklist = self.config_manager.get_config('PATHS', 'tracklist_file')
        self.csv_manager = CsvManager(tracklist)

    def setup_mpv_manager(self):
        video_dir = self.config_manager.get_config('PATHS', 'video_directory')
        song_completed_callback = self.csv_manager.record_song_played
        self.mpv_manager = MpvManger(song_completed_callback, video_dir)

    def test(self):
        playlist = self.csv_manager.generate_red_playlist()
        self.mpv_manager.play_playlist(playlist)

    def get_all_playlists(self):
        try:
            playlists = self.csv_manager.get_all_playlist_names()
        except AttributeError:
            playlists = []
        return playlists

    def play_playlist(self, playlist_name, shuffle=False, longest_not_played_first=False):
        playlist = self.csv_manager.get_playlist_by_name(playlist_name, shuffle, longest_not_played_first)
        self.mpv_manager.play_playlist(playlist)

    def allowed_to_alter_csv_file(self):
        return self.csv_manager.allowed_to_alter_csv_file()
Example #20
    async def crawl_pages(self, category, max_pages):
        pages = (self.search_link_format.format(category=category,
                                                page_number=page_number)
                 for page_number in range(1, max_pages + 1))

        auctions_links = list()

        tasks = (self.extract_async(url) for url in pages)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page
            if url is not None and page_content is not None:
                auctions_links.extend(
                    self.parse_search_result_page(page_content))

        if not auctions_links:
            log.warning("No results found for category: %s" % category)
            return

        log.debug("Found: %d auctions in %d pages of category: %s" %
                  (len(auctions_links), max_pages, category))

        output_dir = self.output_dir_path_format.format(category=category)
        csv_file_path = os.path.join(
            output_dir, "{category}.csv".format(category=category))

        Util.create_directory(output_dir)

        csv_manager = CsvManager(csv_file_path, self.fields, "id")
        csv_manager.open_file()
        '''
        tasks = (self.extract_multi_async([url.replace("aukcja", "zdjecia"), url]) for url in auctions_links)
        for pages in AsyncCrawler.limited_as_completed(tasks):
            results = await pages
            images_url, images_page_content = results[0]
            url, page_content = results[1]
        '''
        tasks = (self.extract_async(url) for url in auctions_links)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page
            if url is not None and page_content is not None:
                extracted_data = self.parse_data(category, url, page_content)

                images_links = list()
                images_url = url.replace("aukcja", "zdjecia")
                _, images_page_content = await self.extract_async(images_url)
                if images_url is not None and images_page_content is not None:
                    images_links = self.parse_full_images_page(
                        images_page_content)
                    extracted_data["images"] = '|'.join(images_links)

                if csv_manager.check_row_exist(extracted_data):
                    if _translate.get("finished") in extracted_data.get(
                            "stop").lower():
                        extracted_data["flag"] = self.flags.get("sold")
                    else:
                        extracted_data["flag"] = self.flags.get("updated")
                else:
                    extracted_data["flag"] = self.flags.get("new")

                csv_manager.update_row(extracted_data)

                auction_output_dir = os.path.join(output_dir,
                                                  extracted_data.get("id"))
                Util.create_directory(auction_output_dir)

                if extracted_data.get("images") is not None:
                    images_urls = extracted_data.get("images").split('|')

                    local_img = list()

                    for img_url in images_urls:
                        local_img_file_path = os.path.join(
                            auction_output_dir, "{img_id}.jpg".format(
                                img_id=self.get_image_id(img_url)))

                        if not Util.check_file_exist(local_img_file_path):
                            local_img.append((img_url, local_img_file_path))

                    download_tasks = (self.download_file(
                        img_url, img_file_path)
                                      for img_url, img_file_path in local_img)

                    for r in AsyncCrawler.limited_as_completed(download_tasks):
                        await r

            else:
                log.error("Url or page_content none: %s" % url)

        csv_manager.close_file()