Example #1
def test_to_filename_csv():
    expected = "read-this-file.csv"

    assert expected == CsvFile.to_filename_csv("read this file")
    assert expected == CsvFile.to_filename_csv("read-this-file")
    assert expected == CsvFile.to_filename_csv("read-this-file.csv")
    assert expected == CsvFile.to_filename_csv("read          this        file")
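
These assertions imply that to_filename_csv collapses runs of whitespace into single hyphens and appends a .csv extension only when the name does not already carry a recognised extension (Example #24 shows an existing .tsv suffix being left alone). Below is a hypothetical sketch consistent with these assertions, not the project's actual implementation; the optional delimiter parameter mirrors the call in Example #17 and its handling here is an assumption:

import re


def to_filename_csv(filename: str, delimiter: str = None) -> str:
    # Hypothetical sketch only: collapse whitespace runs into hyphens and
    # append an extension when none is present. Choosing ".tsv" based on a
    # tab delimiter is an assumption, not confirmed by these examples.
    converted = re.sub(r"\s+", "-", filename.strip())
    if not converted.endswith(".csv") and not converted.endswith(".tsv"):
        converted += ".tsv" if delimiter == "\t" else ".csv"
    return converted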
Example #2
    def test_sort(self, csv_test1: CsvFile, columns, reverse, result_column, expected_results):
        csv_test1.sort_data(columns, reverse)

        sorted_data = csv_test1.get_data()

        for result in expected_results:
            assert sorted_data[result[0]][result_column] == result[1]
Example #3
def temp_csv_test1(tmpdir, csv_test1) -> CsvFile:
    file = tmpdir.mkdir("json").join("file.csv")
    file.write("blank")

    csv = CsvFile.create_or_get(file.strpath)
    csv.read_file()
    return csv
Example #4
    def encode_filter(self, key, value):
        if not super().encode_filter(key, value):
            return False
        if key == 'delimiter' and all(
                CsvFile.delimiter_matches_file_type(value, f)
                for f in single_item_to_list(self.source)):
            return False
        return True
Example #5
    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        rep: cls.Representation = data if isinstance(
            data, cls.Representation) else cls.Representation.from_dict(data)
        csv_files = [
            CsvFile.create_or_get(csv)
            for csv in single_item_to_list(rep.source)
        ]
        for c in csv_files:
            c.set_delimiter(rep.delimiter)
            c.read_file()
        return cls(rep=rep, sources=csv_files, columns=rep.columns)
Example #6
    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data)
        csv = CsvFile.create_or_get(rep.file)
        csv.set_delimiter(rep.delimiter)
        csv.read_file()
        return cls(
            rep=rep,
            csv_file=csv,
            note_model=rep.note_model.strip() if rep.note_model else None,
            sort_by_columns=single_item_to_list(rep.sort_by_columns),
            reverse_sort=rep.reverse_sort or False,
            case_insensitive_sort=rep.case_insensitive_sort if rep.case_insensitive_sort is not None else True,
            derivatives=list(map(cls.from_repr, rep.derivatives)) if rep.derivatives is not None else [],
        )
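
For reference, a hedged sketch of the kind of mapping dict this from_repr accepts, based only on the fields read above (file, note_model, sort_by_columns, reverse_sort, case_insensitive_sort, derivatives); all values are illustrative, and the FileMapping class name is inferred from Example #17 rather than stated in this snippet:

file_mapping_rep = {
    "file": "src/data/words.csv",       # illustrative path
    "note_model": "LL Word",            # illustrative note model name
    "sort_by_columns": ["english"],     # a single value also works, via single_item_to_list
    "reverse_sort": False,
    "case_insensitive_sort": True,
    "derivatives": [],
}
# mapping = FileMapping.from_repr(file_mapping_rep)  # class name inferred from Example #17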
Example #7
    def __init__(self, config_data, read_now=True):
        super().__init__()

        self.setup_config_with_subconfig_replacement(config_data)
        self.verify_config_entry()

        self.csv_file = CsvFile.create(self.get_config(
            CsvFileMappingKeys.CSV_FILE),
                                       read_now=read_now)

        self.sort_by_columns = single_item_to_list(
            self.get_config(CsvFileMappingKeys.SORT_BY_COLUMNS, []))
        self.reverse_sort = self.get_config(CsvFileMappingKeys.REVERSE_SORT,
                                            False)

        self.note_model_name = self.get_config(CsvFileMappingKeys.NOTE_MODEL,
                                               "")
        self.note_model_name = None if self.note_model_name == "" else self.note_model_name

        self.derivatives = [
            CsvFileMappingDerivative.create_derivative(config,
                                                       read_now=read_now)
            for config in self.get_config(CsvFileMappingKeys.DERIVATIVES, [])
        ]
Example #8
    def test_data_override(self, column_headers):
        data_override = [{key: "value" for key in column_headers}]
        csv = CsvFile("file", data_override=data_override)

        assert csv.column_headers == column_headers
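
A minimal usage sketch of the data_override path shown above, using only the constructor and column_headers attribute from this test; the import path is an assumption about the package layout and the row values are illustrative:

# Import path is assumed, not confirmed by these examples.
from brain_brew.representation.generic.csv_file import CsvFile

rows = [{"guid": "g1", "word": "hello", "tags": "greeting"}]
csv = CsvFile("unused.csv", data_override=rows)  # presumably no file is read when data_override is given
assert csv.column_headers == ["guid", "word", "tags"]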
Example #9
def temp_csv_test1(tmpdir, csv_test1) -> CsvFile:
    file = tmpdir.mkdir("json").join("file.csv")
    file.write("blank")

    return CsvFile.create(file.strpath, data_override=csv_test1.get_data())
Example #10
def csv_test2_missing_guids():
    return CsvFile(TestFiles.CsvFiles.TEST2_MISSING_GUIDS)
Example #11
def csv_test1_not_read_initially_test():
    return CsvFile(TestFiles.CsvFiles.TEST1, read_now=False)
Example #12
def csv_test3():
    return CsvFile(TestFiles.CsvFiles.TEST3)
Example #13
    def test_tsv_same_data(self, temp_csv_test1: CsvFile, tsv_test1: CsvFile):
        temp_csv_test1.set_data(tsv_test1.get_data())
        temp_csv_test1.write_file()
        temp_csv_test1.read_file()

        assert temp_csv_test1.get_data() == tsv_test1.get_data()
Example #14
def csv_test3():
    csv = CsvFile(TestFiles.CsvFiles.TEST3)
    csv.read_file()
    return csv
Example #15
def csv_test1_split2():
    csv = CsvFile(TestFiles.CsvFiles.TEST1_SPLIT2)
    csv.read_file()
    return csv
Example #16
def csv_test1():
    return CsvFile(TestFiles.CsvFiles.TEST1)
Example #17
    def execute(self):
        self.setup_repo_structure()

        # Create the Deck Parts used
        headers_ca, note_models_all_ca, notes_ca, media_group_ca = self.parts_from_crowdanki(
            self.crowdanki_folder)

        headers = headers_ca.execute().part
        headers_name = LOC_HEADERS + "header1.yaml"
        headers.dump_to_yaml(headers_name)
        # TODO: desc file

        note_models = [m.part for m in note_models_all_ca.execute()]

        notes = notes_ca.execute().part
        used_note_models_in_notes = notes.get_all_known_note_model_names()

        media_group_ca.execute()

        note_model_mappings = [
            NoteModelMapping.Representation(
                [model.name for model in note_models])
        ]
        file_mappings: List[FileMapping.Representation] = []

        csv_files = []

        for model in note_models:
            if model.name in used_note_models_in_notes:
                csv_file_path = os.path.join(
                    LOC_DATA,
                    CsvFile.to_filename_csv(model.name, self.delimiter))
                column_headers = ["guid"
                                  ] + model.field_names_lowercase + ["tags"]
                CsvFile.create_file_with_headers(csv_file_path,
                                                 column_headers,
                                                 delimiter=self.delimiter)

                file_mappings.append(
                    FileMapping.Representation(file=csv_file_path,
                                               note_model=model.name,
                                               delimiter=self.delimiter))

                csv_files.append(csv_file_path)

        deck_path = os.path.join(
            LOC_BUILD, folder_name_from_full_path(self.crowdanki_folder))

        # Generate the Source files that will be kept in the repo
        save_note_models_to_folder = SaveNoteModelsToFolder.from_repr(
            SaveNoteModelsToFolder.Representation(
                [m.name for m in note_models], LOC_NOTE_MODELS, True))
        model_name_to_file_dict = save_note_models_to_folder.execute()

        save_media_to_folder = SaveMediaGroupsToFolder.from_repr(
            SaveMediaGroupsToFolder.Representation(parts=[RECIPE_MEDIA],
                                                   folder=LOC_MEDIA,
                                                   recursive=True,
                                                   clear_folder=True))
        save_media_to_folder.execute()

        generate_csvs = CsvsGenerate.from_repr({
            'notes': RECIPE_NOTES,
            'note_model_mappings': note_model_mappings,
            'file_mappings': file_mappings
        })
        generate_csvs.execute()

        # Create Recipes

        # Anki to Source
        headers_recipe, note_models_all_recipe, notes_recipe, media_group_recipe = self.parts_from_crowdanki(
            deck_path)

        build_part_tasks: List[BuildPartTask] = [
            headers_recipe,
            notes_recipe,
            note_models_all_recipe,
            media_group_recipe,
        ]
        dp_builder = PartsBuilder(build_part_tasks)

        top_level_tasks: List[TopLevelBuildTask] = [
            dp_builder, save_media_to_folder, generate_csvs
        ]
        self.create_yaml_from_top_level(
            top_level_tasks, os.path.join(LOC_RECIPES, "anki_to_source"))

        # Source to Anki
        note_models_from_yaml = [
            NoteModelsFromYamlPart.from_repr(
                NoteModelsFromYamlPart.Representation(name, file))
            for name, file in model_name_to_file_dict.items()
        ]

        media_group_from_folder = MediaGroupFromFolder.from_repr(
            MediaGroupFromFolder.Representation(part_id=RECIPE_MEDIA,
                                                source=LOC_MEDIA,
                                                recursive=True))

        headers_from_yaml = HeadersFromYamlPart.from_repr(
            HeadersFromYamlPart.Representation(part_id=RECIPE_HEADERS,
                                               file=headers_name))

        notes_from_csv = NotesFromCsvs.from_repr({
            'part_id': RECIPE_NOTES,
            'note_model_mappings': note_model_mappings,
            'file_mappings': file_mappings
        })

        build_part_tasks: List[BuildPartTask] = note_models_from_yaml + [
            headers_from_yaml,
            notes_from_csv,
            media_group_from_folder,
        ]
        dp_builder = PartsBuilder(build_part_tasks)

        generate_guids_in_csv = GenerateGuidsInCsvs.from_repr(
            GenerateGuidsInCsvs.Representation(source=csv_files,
                                               columns=["guid"],
                                               delimiter=self.delimiter))

        generate_crowdanki = CrowdAnkiGenerate.from_repr(
            CrowdAnkiGenerate.Representation(
                folder=deck_path,
                notes=NotesToCrowdAnki.Representation(
                    part_id=RECIPE_NOTES).encode(),
                headers=RECIPE_HEADERS,
                media=MediaGroupToCrowdAnki.Representation(
                    parts=[RECIPE_MEDIA]).encode(),
                note_models=NoteModelsToCrowdAnki.Representation(parts=[
                    NoteModelsToCrowdAnki.NoteModelListItem.Representation(
                        name).encode()
                    for name, file in model_name_to_file_dict.items()
                ]).encode()))

        top_level_tasks: List[TopLevelBuildTask] = [
            generate_guids_in_csv, dp_builder, generate_crowdanki
        ]
        source_to_anki_path = os.path.join(LOC_RECIPES, "source_to_anki")
        self.create_yaml_from_top_level(top_level_tasks, source_to_anki_path)

        print(
            f"\nRepo Init complete. You should now run `brainbrew run {source_to_anki_path}`"
        )
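
Within this longer repository-initialisation workflow, the CsvFile-specific steps are deriving a normalised filename from each note model name and then writing a header-only csv for it. A minimal sketch isolating just those two calls, with illustrative values for the folder, model name and headers, and an assumed import path:

import os

from brain_brew.representation.generic.csv_file import CsvFile  # import path is assumed

LOC_DATA = "src/data/"  # illustrative folder
delimiter = ","
model_name = "LL Word"  # illustrative note model name

csv_file_path = os.path.join(LOC_DATA, CsvFile.to_filename_csv(model_name, delimiter))
CsvFile.create_file_with_headers(csv_file_path, ["guid", "word", "tags"], delimiter=delimiter)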
Example #18
    def encode_filter(self, key, value):
        if not super().encode_filter(key, value):
            return False
        if key == 'delimiter' and CsvFile.delimiter_matches_file_type(value, self.file):
            return False
        return True
Example #19
def csv_test2_missing_guids():
    csv = CsvFile(TestFiles.CsvFiles.TEST2_MISSING_GUIDS)
    csv.read_file()
    return csv
Example #20
def csv_test1_split2():
    return CsvFile(TestFiles.CsvFiles.TEST1_SPLIT2)
Example #21
    def test_runs(self, temp_csv_test1: CsvFile, csv_test1: CsvFile):
        temp_csv_test1.write_file()
        temp_csv_test1.read_file()

        assert temp_csv_test1.get_data() == csv_test1.get_data()
Example #22
def csv_test2():
    return CsvFile(TestFiles.CsvFiles.TEST2)
Example #23
def tsv_test1():
    tsv = CsvFile(TestFiles.TsvFiles.TEST1, delimiter='\t')
    tsv.read_file()
    return tsv
Example #24
def test_to_filename_csv():
    assert "read-this-file.csv" == CsvFile.to_filename_csv("read-this-file")
    assert "read-this-file.csv" == CsvFile.to_filename_csv(
        "read-this-file.csv")
    assert "read-this-file.tsv" == CsvFile.to_filename_csv(
        "read-this-file.tsv")