def test_cache(self):
    """Round-trip a dict through the cache and remove the file afterwards."""
    payload = {'a': 123}
    fname = 'lingpy_test.CSV'

    cache.dump(payload, fname)

    cached = cache.path(fname)
    self.assertTrue(cached.exists())
    self.assertEqual(cache.load(fname), payload)

    # clean up so repeated test runs start from a pristine cache
    os.remove(str(cached))
def compile_dvt(path=''):
    """
    Function compiles diacritics, vowels, and tones.

    Notes
    -----
    Diacritics, vowels, and tones are defined in the :file:`data/models/dv/`
    directory of the LingPy package and automatically loaded when loading the
    LingPy library. The values are defined as the constants
    :py:obj:`rcParams['vowels']`, :py:obj:`rcParams['diacritics']`, and
    :py:obj:`rcParams['tones']`. Their core purpose is to guide the
    tokenization of IPA strings
    (cf. :py:func:`~lingpy.sequence.sound_classes.ipa2tokens`).

    To change the variables, edit the text files :file:`diacritics`,
    :file:`tones`, and :file:`vowels` in the :file:`data/models/dv`
    directory. Their structure is simple: each line contains a vowel or a
    diacritic character, diacritics being preceded by a dash.

    See also
    --------
    lingpy.data.model.Model
    lingpy.data.derive.compile_model
    """
    log.info("Compiling diacritics and vowels...")

    # resolve the directory holding the definition files
    if not path:
        file_path = util.data_path('models', 'dvt')
    elif path in ['evolaemp', 'el']:
        file_path = util.data_path('models', 'dvt_el')
    else:
        file_path = path

    def _read_string(name):
        # NFC-normalize the file content and strip all newlines.
        # TODO: this is potentially dangerous and it is important to decide whether
        # TODO: switching to NFD might not be a better choice
        raw = util.read_text_file(os.path.join(file_path, name), normalize='NFC')
        return raw.replace('\n', '')

    diacritics = _read_string('diacritics').replace('-', '')
    # vowels must not overlap with the diacritics set
    vowels = ''.join(v for v in _read_string('vowels') if v not in diacritics)
    tones = _read_string('tones')

    dvt = (diacritics, vowels, tones)
    if path in ['evolaemp', 'el']:
        cache.dump(dvt, 'dvt_el')
    else:
        cache.dump(dvt, 'dvt')
    log.info("Diacritics and sound classes were successfully compiled.")
def compile_dvt(path=''):
    """
    Function compiles diacritics, vowels, and tones.

    Notes
    -----
    Diacritics, vowels, and tones live in the :file:`data/models/dv/`
    directory of the LingPy package and are loaded automatically with the
    library, ending up as the constants :py:obj:`rcParams['vowels']`,
    :py:obj:`rcParams['diacritics']`, and :py:obj:`rcParams['tones']`.
    They primarily guide the tokenization of IPA strings
    (cf. :py:func:`~lingpy.sequence.sound_classes.ipa2tokens`).

    Customizing them only requires editing the text files
    :file:`diacritics`, :file:`tones`, and :file:`vowels` in
    :file:`data/models/dv`. Each line of these files holds one vowel or one
    diacritic character; diacritics are preceded by a dash.

    See also
    --------
    lingpy.data.model.Model
    lingpy.data.derive.compile_model
    """
    log.info("Compiling diacritics and vowels...")

    # pick the model directory: default bundle, the 'evolaemp' variant,
    # or a caller-supplied path
    if not path:
        file_path = util.data_path('models', 'dvt')
    elif path in ['evolaemp', 'el']:
        file_path = util.data_path('models', 'dvt_el')
    else:
        file_path = path

    def _read(fname):
        # TODO: this is potentially dangerous and it is important to decide whether
        # TODO: switching to NFD might not be a better choice
        text = util.read_text_file(os.path.join(file_path, fname), normalize='NFC')
        return text.replace('\n', '')

    diacritics = _read('diacritics').replace('-', '')
    vowels = ''.join([char for char in _read('vowels') if char not in diacritics])
    tones = _read('tones')

    dvt = (diacritics, vowels, tones)
    # the 'evolaemp' variant is cached under its own key
    cache.dump(dvt, 'dvt_el' if path in ['evolaemp', 'el'] else 'dvt')
    log.info("Diacritics and sound classes were successfully compiled.")
def pickle(self, filename=None): """ Store the QLCParser instance in a pickle file. Notes ----- The function stores a binary file called ``FILENAME.pkl`` with ``FILENAME`` corresponding to the name of the original file in the `user cache dir <https://github.com/ActiveState/appdirs#some-example-output>`_ for lingpy on your system. To restore the instance from the pickle call :py:meth:`~lingpy.basic.parser.QLCParser.unpickle`. """ # we reset the _class attribute, because it may contain unpicklable stuff, like # `eval`ed lambdas. self._class = {} self.log = None cache.dump(self, filename or self.filename) # after pickling we have to recreate the attribute. self._recreate_unpicklables()
def compile_model(model, path=None):
    """
    Function compiles customized sound-class models.

    Parameters
    ----------
    model : str
        A string indicating the name of the model which shall be created.
    path : str
        A string indication the path where the model-folder is stored.

    Notes
    -----
    A model is defined by a folder placed in the :file:`data/models`
    directory of the LingPy package. The name of the folder reflects the
    name of the model. It contains three files: the file :file:`converter`,
    the file :file:`INFO`, and the optional file :file:`scorer`. The format
    requirements for these files are as follows:

    :file:`INFO`
        Serves as a reference for a given sound-class model. It can contain
        arbitrary information (and also be empty). Specific characteristics,
        like the ``source``, the ``compiler``, the ``date``, or a
        ``description``, can be given in a key-value structure in which the
        key is preceded by an ``@`` and followed by a colon, e.g.::

            @source: Dolgopolsky (1986)

        This information is read from the ``INFO`` file and rendered when
        printing the model to screen with help of the :py:func:`print`
        function.

    :file:`converter`
        Contains all sound classes matched with their respective sound
        values. Each line is reserved for one class, preceded by the key
        (preferably an ASCII-letter) representing the class::

            B : ɸ, β, f, p͡f, p͜f, ƀ
            E : ɛ, æ, ɜ, ɐ, ʌ, e, ᴇ, ə, ɘ, ɤ, è, é, ē, ě, ê, ɚ
            D : θ, ð, ŧ, þ, đ
            G : x, ɣ, χ
            ...

    :file:`matrix`
        A scoring matrix indicating the alignment scores of all sound-class
        characters defined by the model, structured as a tab-delimited text
        file. The first cell contains the character names, the following
        cells contain the scores in redundant form (both triangles filled)::

            B  10.0 -10.0   5.0 ...
            E -10.0   5.0 -10.0 ...
            F   5.0 -10.0  10.0 ...
            ...

    :file:`scorer`
        Optional; contains the graph of class-transitions used for the
        calculation of the scoring dictionary. Each class is listed in a
        separate line, followed by the symbols ``v``, ``c``, or ``t``
        (vowels, consonants, tones), and by the classes it is directly
        connected to. The strength of this connection is indicated by digits
        (the smaller the value, the shorter the path between the classes)::

            A : v, E:1, O:1
            C : c, S:2
            B : c, W:2
            E : v, A:1, I:1
            D : c, S:2
            ...

        The information in such a file is automatically converted into a
        scoring dictionary (see :evobib:`List2012b` for details).

    Based on the information provided by the files, a dictionary for the
    conversion of IPA-characters to sound classes and a scoring dictionary
    are created and stored as a binary. The model can be loaded with help of
    the :py:class:`~lingpy.data.model.Model` class and used in the various
    classes and functions provided by the library.

    See also
    --------
    lingpy.data.model.Model
    compile_dvt
    """
    log.info("Compiling model <" + model + ">...")

    def new_path(*cmps):
        # Build a path inside this model's directory.
        return os.path.join(path or util.data_path('models'), model, *cmps)

    # BUGFIX: the helper must be *called* here — previously the function
    # object itself was interpolated, logging its repr instead of the path.
    log.debug("Model-Path: %s" % new_path())

    # load the sound classes
    sound_classes = _import_sound_classes(new_path('converter'))

    # dump the data
    cache.dump(sound_classes, model + '.converter')
    log.info("... successfully created the converter.")

    # try to load the scoring function or the score tree
    scorer = False

    if os.path.isfile(new_path('matrix')):
        scorer = read_scorer(new_path('matrix'))
    elif os.path.isfile(new_path('scorer')):
        score_tree = _import_score_tree(new_path('scorer'))

        # calculate the scoring dictionary
        score_dict = _make_scoring_dictionary(score_tree)

        # make score_dict a ScoreDict instance
        chars = sorted(set(s[0] for s in score_dict.keys()))
        matrix = [[0] * len(chars) for _ in chars]
        for (i, charA), (j, charB) in util.multicombinations2(enumerate(chars)):
            if i < j:
                # off-diagonal cells default to -100 when no score is defined
                matrix[i][j] = score_dict.get((charA, charB), -100)
                matrix[j][i] = score_dict.get((charB, charA), -100)
            elif i == j:
                # diagonal entries must exist; a missing self-score is an error
                matrix[i][j] = score_dict[charA, charB]
        scorer = misc.ScoreDict(chars, matrix)
        # persist the derived matrix so subsequent runs take the fast branch
        util.write_text_file(new_path('matrix'), scorer2str(scorer))

    if scorer:
        cache.dump(scorer, model + '.scorer')
        log.info("... successfully created the scorer.")
    else:
        log.info("... no scoring dictionary defined.")

    log.info("Model <" + model + "> was compiled successfully.")
def test_cache(tmppath):
    """Dumping and reloading through the cache round-trips a dict."""
    expected = {'a': 123}
    cache_dir = tmppath / 'cache'
    name = 'lingpy_test.CSV'

    cache.dump(expected, name, d=cache_dir)
    assert cache.load(name, d=cache_dir) == expected