def test_interval(self):
    """
    Check Chord.interval on a sample of chord pairs, and that swapping
    the pair only preserves the interval when the interval is its own
    inverse.
    """
    # (expected interval, lower chord, upper chord, self-inverse?)
    cases = [
        (0, "C", "C", True),
        (2, "F", "G", False),
        (4, "D", "F#", False),
        (6, "B", "F", True),
        (8, "F", "Db", False),
        (10, "Ab", "F#", False),
    ]
    for expected, low_name, up_name, self_inverse in cases:
        low = Chord(low_name)
        up = Chord(up_name)
        self.assertEqual(expected, Chord.interval(low, up))
        # The reversed pair gives the inverse interval, which only
        # equals the original for self-inverse intervals
        reverse = Chord.interval(up, low)
        if self_inverse:
            self.assertEqual(expected, reverse)
        else:
            self.assertNotEqual(expected, reverse)
def observation_from_chord_pair(crd1, crd2, chordmap):
    """
    Build an observation string of the form "interval-type" for a pair
    of chords.

    The interval is from crd1 to crd2 (0 when crd2 is None, e.g. at the
    end of a sequence) and the type of crd1 is mapped through chordmap.
    crd1/crd2 may be Chord/DbChord objects or anything whose str() is a
    parseable chord name.
    """
    if crd2 is None:
        # No following chord: treat the interval as 0
        interval = 0
    else:
        interval = Chord.interval(Chord.from_name(str(crd1)),
                                  Chord.from_name(str(crd2)))
    if not isinstance(crd1, Chord) and not isinstance(crd1, DbChord):
        # Convert via str() for consistency with the interval
        # computation above (previously from_name was called on crd1
        # directly, which only works when crd1 is already a string)
        crd1 = Chord.from_name(str(crd1))
    return "%d-%s" % (interval, chordmap[crd1.type])
def observation_from_chord_pair(crd1, crd2):
    """
    Return an observation of the form "interval-type" for a chord
    pair, using interval 0 when crd2 is None.
    """
    # Interval between the two chords (0 at the end of a sequence)
    if crd2 is None:
        interval = 0
    else:
        first = Chord.from_name(str(crd1))
        second = Chord.from_name(str(crd2))
        interval = Chord.interval(first, second)
    # Make sure we have a Chord object so we can read its type
    if not isinstance(crd1, Chord):
        crd1 = Chord.from_name(str(crd1))
    return "%d-%s" % (interval, crd1.type)
def test_from_name(self):
    """
    Spot-check Chord.from_name on a sample of textual chord names,
    verifying the root, type, additions and tetrad type attributes.
    By no means exhaustive.
    """
    # Each case: name, then the expected attribute values
    samples = [
        ("C", 0, "", "", ""),
        ("F#m7", 6, "m7", "", "m7"),
        ("G7(9)", 7, "7", "9", "7"),
        ("A(9)", 9, "", "9", "7"),
        ("Dsus4", 2, "sus4", "", "sus4"),
        ("Esus4,7", 4, "sus4,7", "", "sus4,7"),
        ("Esus4(9)", 4, "sus4", "9", "sus4,7"),
        ("Fm,M7(+11)", 5, "m,M7", "+11", "m,M7"),
    ]
    for name, exp_root, exp_type, exp_additions, exp_tetrad in samples:
        chord = Chord.from_name(name)
        self.assertEqual(exp_root, chord.root)
        self.assertEqual(exp_type, chord.type)
        self.assertEqual(exp_additions, chord.additions)
        self.assertEqual(exp_tetrad, chord.tetrad_type)
def test_set_root(self):
    """
    Set the root and the numeral on an existing chord and verify that
    each setter keeps both attributes consistent with each other.
    """
    chord = Chord(self.ALLOWED_NUMERALS[0][0])
    # Setting the root should update the numeral too
    for numeral, root, expected_numeral in self.ALLOWED_NUMERALS:
        chord.root = root
        self.assertEqual(root, chord.root)
        self.assertEqual(expected_numeral, chord.root_numeral)
    # Setting the numeral should update the root too
    for numeral, root, expected_numeral in self.ALLOWED_NUMERALS:
        chord.root_numeral = numeral
        self.assertEqual(root, chord.root)
        self.assertEqual(expected_numeral, chord.root_numeral)
def test_init_type(self):
    """
    Construct a chord with each known type symbol and check that the
    chord is created with that type.
    """
    for type_sym in Chord.TYPE_SYMBOLS.values():
        chord = Chord("C", type=type_sym)
        self.assertEqual(chord.type, type_sym)
def interval_observation_from_chord_string_pair(chord1, chord2, type_mapping=None):
    """
    Given two strings representing chords, produce an observation
    string of the form x-t, where x is the (numeric) interval between
    the chords — empty when chord2 is None — and t is the type of the
    first chord, optionally passed through type_mapping.
    """
    from jazzparser.data import Chord
    first = Chord.from_name(chord1)
    # No second chord (e.g. end of sequence): leave the interval blank
    if chord2 is None:
        interval = ""
    else:
        interval = "%d" % Chord.interval(first, Chord.from_name(chord2))
    # Optionally map the chord type
    ctype = first.type if type_mapping is None else type_mapping[first.type]
    return "%s-%s" % (interval, ctype)
def test_interval(self):
    """
    Check that Chord.interval gives the expected interval for a
    selection of chord pairs, and that reversing a pair changes the
    interval except where the interval is its own inverse.
    """
    cases = [(0, "C", "C", True),
             (2, "F", "G", False),
             (4, "D", "F#", False),
             (6, "B", "F", True),
             (8, "F", "Db", False),
             (10, "Ab", "F#", False)]
    for ival, name0, name1, own_inverse in cases:
        chord0, chord1 = Chord(name0), Chord(name1)
        self.assertEqual(ival, Chord.interval(chord0, chord1))
        # Swap the chords: only self-inverse intervals stay the same
        swapped = Chord.interval(chord1, chord0)
        if own_inverse:
            self.assertEqual(ival, swapped)
        else:
            self.assertNotEqual(ival, swapped)
def interval_observation_from_chord_string_pair(chord1, chord2, type_mapping=None):
    """
    Build a chord observation string "x-t" from two chord name
    strings: x is the interval between them (blank when chord2 is
    None) and t is the first chord's type, mapped through
    type_mapping when one is supplied.
    """
    from jazzparser.data import Chord
    chord1 = Chord.from_name(chord1)
    interval = "" if chord2 is None else \
        "%d" % Chord.interval(chord1, Chord.from_name(chord2))
    # Apply a mapping to the chord type if one was given
    if type_mapping is not None:
        ctype = type_mapping[chord1.type]
    else:
        ctype = chord1.type
    return "%s-%s" % (interval, ctype)
def _tree_probs(trace):
    """
    Recursively print the model's probability for every decision made
    at each node of a derivation trace.

    NOTE(review): the original docstring said "Add counts to the
    model", but this function only reads probabilities from ``model``'s
    distributions and prints them — it does not modify any counts.
    Closes over ``model`` from the enclosing scope.
    """
    parent = trace.result
    # Get prob for the parent category
    parent_rep = model_category_repr(parent.category)
    print "%s : %s" % (parent_rep, model._parent_dist.prob(parent_rep))
    if len(trace.rules) == 0:
        # Leaf node - lexical generation
        # Get prob for this parent expanding as a leaf
        print "%s -leaf- : %s" % (
            parent_rep, model._expansion_type_dist[parent_rep].prob('leaf'))
        # Interpret the word as a chord
        chord = Chord.from_name(trace.word)
        chord = category_relative_chord(chord, parent.category)
        observation = model.chord_observation(chord)
        # Prob of this parent producing this word
        # The chord root is now relative to the base pitch of the category
        print "%s -leaf-> %s : %s" % \
            (parent_rep, observation,
             model._lexical_dist[parent_rep].prob(observation))
    else:
        # Internal node - rule application
        # There should only be one rule application, but just in case...
        for rule, args in trace.rules:
            if rule.arity == 1:
                # Unary rule: not supported by this model
                raise ModelTrainingError, "we don't currently support "\
                    "unary rule application, but one was found in "\
                    "the training data"
            if rule.arity == 2:
                # Binary rule: heads are assumed to come from the right
                expansion = 'right'
                print "%s -right- : %s" % \
                    (parent_rep,
                     model._expansion_type_dist[parent_rep].prob(expansion))
                # Prob of this parent expanding to the head daughter
                head_rep = model_category_repr(args[1].result.category,
                                               parent.category)
                print "%s -right-> %s : %s" % \
                    (parent_rep, head_rep,
                     model._head_expansion_dist[(expansion,parent_rep)].prob(head_rep))
                # Prob of this parent with this head expansion expanding
                # to the non-head daughter
                non_head_rep = model_category_repr(args[0].result.category,
                                                   parent.category)
                print "%s -right-> %s | %s : %s" % \
                    (parent_rep, head_rep, non_head_rep,
                     model._non_head_expansion_dist[(
                         head_rep, expansion, parent_rep)].prob(non_head_rep))
            # Recurse to print probs for the daughters' derivations
            for arg in args:
                _tree_probs(arg)
def test_from_numerals(self):
    """
    Construct a chord from every allowed numeral and check that both
    the numeral and the root get set correctly.
    """
    for numeral, expected_root, expected_numeral in self.ALLOWED_NUMERALS:
        chord = Chord(numeral)
        # The numeral should come out in its normalised target form
        self.assertEqual(expected_numeral, chord.root_numeral)
        # and the root should be the corresponding pitch number
        self.assertEqual(expected_root, chord.root)
def test_back_conversion(self): """ Creates a tonal space path, converts it to state labels and converts it back again. This should produce the original path if all goes well. Note that the result of the back conversion will always have the path shifted so it starts as close as possible to the origin. This is correct behaviour: the state labels don't encode the enharmonic block that the path starts in and it is merely by convention that we assume the start point. Each path-chord sequence pair also gives the expected output, which may differ from the original path only in this respect. @todo: update this test """ # Just return for now: I've not had a chance to update this # lf_chords_to_states no longer exists return self.longMessage = True # Run the test on a whole set of paths for (coords,chords,output) in self.PATHS: # Build a CoordinateList for the path ens = [EnharmonicCoordinate.from_harmonic_coord((x,y)) for (x,y,fun) in coords] pcs = [PathCoordinate.from_enharmonic_coord(en) for en in ens] time = 0 for pc,(__,__,fun) in zip(pcs,coords): pc.function = fun pc.duration = 1 pc.time = time time += 1 path = Semantics(CoordinateList(items=pcs)) # Build the list of chords chords = [Chord.from_name(crd).to_db_mirror() for crd in chords] for chord in chords: chord.duration = 1 # Try converting it to states states = lf_chords_to_states(path, chords) # Now try converting it back back = states_chords_to_lf(zip(states,chords)) # Check that we got the same coordinates out in_coords = [(x,y) for (x,y,fun) in output] in_funs = [fun for (x,y,fun) in output] out_coords = [point.harmonic_coord for point in back.lf] out_funs = [point.function for point in back.lf] self.assertEqual(in_coords, out_coords, msg="coordinates converted to states and back produced something different.\nState labels:\n%s" % (states)) self.assertEqual(in_funs, out_funs, msg="coordinates converted to states and back produced different functions.\nState labels:\n%s" % (states))
def generalise_chord_name(chord_name):
    """
    The grammar generalises over chord names, using X to mean "any
    roman numeral chord root": e.g. for the input "IIm" we look up
    "Xm", not "IIm". Given any chord name, return the generalised
    chord name to look up in the grammar.
    """
    from jazzparser.data import Chord
    # Parse the name so we can read off the tetrad type, which is all
    # the generalised form keeps
    parsed = Chord.from_name(chord_name)
    return "X%s" % parsed.tetrad_type
def _generate(parent, depth=0, pitch=0):
    """
    Recursively generate a chord sequence from the category ``parent``.

    Returns a list of Chord objects whose roots are absolute pitches
    (the category-relative pitches are shifted by ``pitch`` plus the
    parent's base pitch, mod 12). Closes over ``self``, ``max_depth``,
    ``logger``, ``base_pitch`` and ``generate_from_prob_dist`` from the
    enclosing scope. Non-deterministic: draws from the model's
    probability distributions.
    """
    # Transform the parent category so it's relative to itself
    # All generated categories will be relative to this,
    # so we need to make the parent self-relative at the
    # start of each recursion
    parent_rep = model_category_repr(parent)
    parent_pitch = (pitch + base_pitch(parent)) % 12
    logger.debug("%sGenerating from parent: %s" % (" " * depth, parent_rep))
    if max_depth is not None and depth >= max_depth and \
            len(self._lexical_dist[parent_rep].samples()) != 0:
        # Don't go any deeper than this if we can stop here
        # Only possible if the parent has generated a leaf before
        exp = 'leaf'
        logger.debug("%sForcing leaf" % (" " * depth))
    else:
        # Otherwise freely generate an expansion type
        exp = generate_from_prob_dist(
            self._expansion_type_dist[parent_rep])
        logger.debug("%sExpansion: %s" % (" " * depth, exp))
    exp_parent = (exp, parent_rep)
    if exp == 'leaf':
        # Generate a leaf node (word) and shift its root to an
        # absolute pitch
        word = generate_from_prob_dist(self._lexical_dist[parent_rep])
        logger.debug("%sWord: %s, pitch: %d" % (" " * depth, word,
                                                parent_pitch))
        chord = Chord.from_name(word)
        chord.root = (chord.root + parent_pitch) % 12
        return [chord]
    else:
        # First generate a head node
        head = generate_from_prob_dist(
            self._head_expansion_dist[exp_parent])
        logger.debug("%sHead: %s" % (" " * depth, head))
        # Continue to expand this recursively to a word sequence
        head_generated = _generate(head, depth=depth+1, \
                                   pitch=parent_pitch)
        head_exp_parent = (head, exp, parent_rep)
        # Now generate a non-head node, conditioned on the head
        non_head = generate_from_prob_dist(
            self._non_head_expansion_dist[head_exp_parent])
        logger.debug("%sNon-head: %s" % (" " * depth, non_head))
        # Continue to expand this too
        non_head_generated = _generate(non_head, depth=depth+1, \
                                       pitch=parent_pitch)
        # Non-head (left) daughter's words precede the head's (right)
        return non_head_generated + head_generated
def _add_counts(trace):
    """
    Add counts to the model from a derivation trace.

    Recursively walks the trace, incrementing the count distributions
    on ``self`` (closed over from the enclosing scope) for each parent
    category, expansion type, lexical observation and daughter
    expansion seen.

    @raise ModelTrainingError: if a unary rule application is found
    """
    parent = trace.result
    # Add a count for the parent category
    parent_rep = model_category_repr(parent.category)
    self._parent_counts.inc(parent_rep)
    if len(trace.rules) == 0:
        # Leaf node - lexical generation
        # Count this parent expanding as a leaf
        self._expansion_type_counts[parent_rep].inc('leaf')
        # Interpret the word as a chord
        chord = Chord.from_name(trace.word)
        chord = category_relative_chord(chord, parent.category)
        observation = self.chord_observation(chord)
        # Count this parent producing this word
        # The chord root is now relative to the base pitch of the category
        self._lexical_counts[parent_rep].inc(observation)
    else:
        # Internal node - rule application
        # There should only be one rule application, but just in case...
        for rule,args in trace.rules:
            if rule.arity == 1:
                # Unary rule: not supported for training
                raise ModelTrainingError, "we don't currently support "\
                    "unary rule application, but one was found in "\
                    "the training data"
            if rule.arity == 2:
                # Binary rule
                # Assume all heads come from the right
                expansion = 'right'
                self._expansion_type_counts[parent_rep].inc(expansion)
                # Count this parent expanding to the head daughter
                head_rep = model_category_repr(args[1].result.category,
                                               parent.category)
                self._head_expansion_counts[
                    (expansion,parent_rep)].inc(head_rep)
                # Count this parent with this head expansion expanding
                # to the non-head daughter
                non_head_rep = model_category_repr(
                    args[0].result.category, parent.category)
                self._non_head_expansion_counts[
                    (head_rep,expansion,parent_rep)
                ].inc(non_head_rep)
            # Recurse to count derivations from the daughters
            for arg in args:
                _add_counts(arg)
def _add_counts(trace):
    """
    Add counts to the model from a derivation trace.

    Walks the trace top-down, incrementing ``self``'s count
    distributions (parent, expansion type, lexical, head and non-head
    expansion) for every node, then recursing into the daughters.

    @raise ModelTrainingError: on unary rule applications, which this
        model does not support
    """
    parent = trace.result
    # Add a count for the parent category
    parent_rep = model_category_repr(parent.category)
    self._parent_counts.inc(parent_rep)
    if len(trace.rules) == 0:
        # Leaf node - lexical generation
        # Count this parent expanding as a leaf
        self._expansion_type_counts[parent_rep].inc('leaf')
        # Interpret the word as a chord
        chord = Chord.from_name(trace.word)
        chord = category_relative_chord(chord, parent.category)
        observation = self.chord_observation(chord)
        # Count this parent producing this word
        # The chord root is now relative to the base pitch of the category
        self._lexical_counts[parent_rep].inc(observation)
    else:
        # Internal node - rule application
        # There should only be one rule application, but just in case...
        for rule, args in trace.rules:
            if rule.arity == 1:
                # Unary rule: refuse to train on it
                raise ModelTrainingError, "we don't currently support "\
                    "unary rule application, but one was found in "\
                    "the training data"
            if rule.arity == 2:
                # Binary rule
                # Assume all heads come from the right
                expansion = 'right'
                self._expansion_type_counts[parent_rep].inc(expansion)
                # Count this parent expanding to the head daughter
                head_rep = model_category_repr(args[1].result.category,
                                               parent.category)
                self._head_expansion_counts[(expansion,
                                             parent_rep)].inc(head_rep)
                # Count this parent with this head expansion expanding
                # to the non-head daughter
                non_head_rep = model_category_repr(
                    args[0].result.category, parent.category)
                self._non_head_expansion_counts[(
                    head_rep, expansion, parent_rep)].inc(non_head_rep)
            # Recurse to count derivations from the daughters
            for arg in args:
                _add_counts(arg)
def _generate(parent, depth=0, pitch=0):
    """
    Recursively generate a list of Chords from the category ``parent``.

    ``pitch`` accumulates the absolute base pitch down the recursion;
    generated chord roots are shifted by it (mod 12). Uses ``self``'s
    probability distributions, plus ``max_depth``, ``logger``,
    ``base_pitch`` and ``generate_from_prob_dist`` from the enclosing
    scope, so results are random draws, not deterministic.
    """
    # Transform the parent category so it's relative to itself
    # All generated categories will be relative to this,
    # so we need to make the parent self-relative at the
    # start of each recursion
    parent_rep = model_category_repr(parent)
    parent_pitch = (pitch + base_pitch(parent)) % 12
    logger.debug("%sGenerating from parent: %s" % (" "*depth,parent_rep))
    if max_depth is not None and depth >= max_depth and \
            len(self._lexical_dist[parent_rep].samples()) != 0:
        # Don't go any deeper than this if we can stop here
        # Only possible if the parent has generated a leaf before
        exp = 'leaf'
        logger.debug("%sForcing leaf" % (" "*depth))
    else:
        # Otherwise freely generate an expansion type
        exp = generate_from_prob_dist(self._expansion_type_dist[parent_rep])
        logger.debug("%sExpansion: %s" % (" "*depth, exp))
    exp_parent = (exp,parent_rep)
    if exp == 'leaf':
        # Generate a leaf node (word) and make its root absolute
        word = generate_from_prob_dist(self._lexical_dist[parent_rep])
        logger.debug("%sWord: %s, pitch: %d" % (" "*depth, word,
                                                parent_pitch))
        chord = Chord.from_name(word)
        chord.root = (chord.root + parent_pitch) % 12
        return [chord]
    else:
        # First generate a head node
        head = generate_from_prob_dist(self._head_expansion_dist[exp_parent])
        logger.debug("%sHead: %s" % (" "*depth, head))
        # Continue to expand this recursively to a word sequence
        head_generated = _generate(head, depth=depth+1, \
                                   pitch=parent_pitch)
        head_exp_parent = (head,exp,parent_rep)
        # Now generate a non-head node, conditioned on the head choice
        non_head = generate_from_prob_dist(
            self._non_head_expansion_dist[head_exp_parent])
        logger.debug("%sNon-head: %s" % (" "*depth, non_head))
        # Continue to expand this too
        non_head_generated = _generate(non_head, depth=depth+1, \
                                       pitch=parent_pitch)
        # Left (non-head) daughter's output comes before the head's
        return non_head_generated + head_generated
def inside_probability(self, expansion, parent, left, right=None):
    """
    Probability of a (non-leaf) subtree, computed from the probability
    of its expansions. This doesn't include the probabilities of the
    subtrees of the daughters: to get the full inside probability,
    multiply the returned value with the daughters' inside
    probabilities.
    """
    parent_rep = model_category_repr(parent.category)
    # Probability of choosing this expansion type for the parent
    exp_prob = self._expansion_type_dist[parent_rep].prob(expansion)
    if expansion == 'leaf':
        if not self.lexical:
            # Model doesn't generate words: word probability is 1
            return exp_prob * 1.0
        # The word is given as the left branch; interpret it as a
        # chord label
        chord = Chord.from_name(left)
        observation = self.chord_observation(
            category_relative_chord(chord, category=parent.category))
        return exp_prob * self._lexical_dist[parent_rep].prob(observation)
    # We currently only recognise one other case: right-head
    assert right is not None, "pcfg model only supports binary branches"
    head, non_head = right, left
    # P(head daughter | expansion, parent)
    head_rep = model_category_repr(head.category, parent.category)
    head_prob = self._head_expansion_dist[
        (expansion, parent_rep)].prob(head_rep)
    # P(non-head daughter | head daughter, expansion, parent)
    non_head_rep = model_category_repr(non_head.category, parent.category)
    non_head_prob = self._non_head_expansion_dist[
        (head_rep, expansion, parent_rep)].prob(non_head_rep)
    return exp_prob * head_prob * non_head_prob
def __init__(self, inputs, durations=None, times=None, roman=False,
        *args, **kwargs):
    """
    Input wrapper for a sequence of textual chord names.

    Exactly one of durations and times may be omitted: the missing one
    is computed from the other. When only times are given, the final
    chord gets a default duration of 1.

    @param inputs: list of chord name strings
    @param durations: duration of each chord (parallel to inputs)
    @param times: onset time of each chord (parallel to inputs)
    @param roman: interpret the names as roman numeral chords
    @raise ValueError: if neither times nor durations is given
    """
    super(ChordInput, self).__init__(*args, **kwargs)
    self.inputs = inputs
    self.durations = durations
    self.times = times
    self.roman = roman
    # Compute the durations from times or vice versa
    if durations is None and times is None:
        raise ValueError("cannot create a ChordInput with neither "
                         "times nor durations given")
    elif times is None:
        # Onset times are the running totals of the preceding durations
        self.times = [
            sum(durations[:i], Fraction(0)) for i in range(len(durations))
        ]
    elif durations is None:
        from jazzparser.utils.base import group_pairs
        # Each duration is the gap to the next onset. BUG FIX: the pair
        # was previously unpacked as (time1, time0), which negates every
        # duration — group_pairs yields (current, next) pairs, as its
        # other uses in this codebase show
        self.durations = [
            time1 - time0 for (time0, time1) in group_pairs(times)
        ] + [Fraction(1)]
    # Convert all strings to internal chord representation
    # Done now so we check the chords can all be understood before doing
    # anything else
    self.chords = [
        Chord.from_name(name, roman=roman).to_db_mirror()
        for name in inputs
    ]
    for chord, dur in zip(self.chords, self.durations):
        chord.duration = dur
def __init__(self, inputs, durations=None, times=None, roman=False,
        *args, **kwargs):
    """
    Input wrapper for a sequence of textual chord names.

    One of durations and times may be omitted and is then derived from
    the other (the last chord defaults to duration 1 when only times
    are supplied).

    @param inputs: list of chord name strings
    @param durations: duration of each chord (parallel to inputs)
    @param times: onset time of each chord (parallel to inputs)
    @param roman: interpret the names as roman numeral chords
    @raise ValueError: if neither times nor durations is given
    """
    super(ChordInput, self).__init__(*args, **kwargs)
    self.inputs = inputs
    self.durations = durations
    self.times = times
    self.roman = roman
    # Compute the durations from times or vice versa
    if durations is None and times is None:
        raise ValueError("cannot create a ChordInput with neither "
                         "times nor durations given")
    elif times is None:
        # Onset times: cumulative sum of the durations so far
        self.times = [sum(durations[:i], Fraction(0))
                      for i in range(len(durations))]
    elif durations is None:
        from jazzparser.utils.base import group_pairs
        # Duration = next onset - this onset. BUG FIX: the unpacking
        # was (time1, time0), negating every duration; group_pairs
        # yields (current, next) pairs (see its other uses here)
        self.durations = [time1 - time0
                          for (time0, time1) in group_pairs(times)] + [Fraction(1)]
    # Convert all strings to internal chord representation
    # Done now so we check the chords can all be understood before doing
    # anything else
    self.chords = [Chord.from_name(name, roman=roman).to_db_mirror()
                   for name in inputs]
    for chord, dur in zip(self.chords, self.durations):
        chord.duration = dur
# Output audio files from the harmonical if (options.harmonical is not None or \ options.enharmonical is not None) and len(results) > 0: path = grammar.formalism.sign_to_coordinates(results[0]) # Assuming we used a temporal formalism, the times should be # available as a list from the semantics times = results[0].semantics.get_path_times() point_durations = [next-current for current,next in group_pairs(times)] + [0] # Get 3d coordinates as well path3d = zip(add_z_coordinates(path, pitch_range=2), point_durations) path2d = zip(path,point_durations) # Get chord types out of the input chords = tagger.get_string_input() chord_durs = [tagger.get_word_duration(i) for i in range(tagger.input_length)] chord_types = [(Chord.from_name(c).type,dur) for c,dur in zip(chords,chord_durs)] if options.midi: # Maybe set this as a CL option or a setting # 73 - flute # 0 - piano # 4 - e-piano instrument = 73 # TODO: make these filenames different for multiple inputs if options.harmonical is not None: filename = os.path.abspath(options.harmonical) render_path_to_midi_file(filename, path3d, chord_types=chord_types, tempo=options.tempo, instrument=instrument, bass_root=True, root_octave=-1) messages.append("Output JI MIDI data to %s" % filename) if options.enharmonical is not None: filename = os.path.abspath(options.enharmonical) render_path_to_midi_file(filename, path3d, chord_types=chord_types, equal_temperament=True, tempo=options.tempo, instrument=instrument, bass_root=True, root_octave=-1)
def get_signs_for_word(self, word, tags=None, extra_features=None):
    """
    Given a word string, returns a list of the possible signs (as
    CCGSigns) that the grammar can assign to it. word may also be a
    Chord object.

    For now, this assumes that the input is a single chord in roman
    numeral notation and that spelling issues have already been
    resolved (e.g. that 6s have been removed).

    If tags is given it should be a list of strings. Signs will be
    restricted to those whose entry's tag name/POS is in the list.

    If you need to get an instantiated category from a lexical entry,
    use the methods on L{EntriesItem} directly, or L{get_signs_for_tag}.

    @raise GrammarLookupError: if word is of an unsupported type, is
        not in the lexicon, or its POS has no family
    """
    # Normalise the input to a db-mirror chord object
    if isinstance(word, Chord):
        chord = word.to_db_mirror()
    elif isinstance(word, basestring):
        chord = Chord.from_name(word, permissive=True).to_db_mirror()
    elif isinstance(word, DbChord):
        chord = word
    else:
        raise GrammarLookupError, "Tried to get signs for a word of type "\
            "'%s': %s" % (type(word), word)
    # Get a chord type string to look up in the grammar
    chord_lookup = "X%s" % chord.type
    # Check whether we know this word
    if not chord_lookup in self.morph_items:
        # Word not recognised
        raise GrammarLookupError, "The word \"%s\" was not found in the "\
            "lexicon. (Looked up %s in %s)" \
            % (word, chord_lookup,
               ",".join(["%s" % item for item in self.morph_items.keys()]))
    # Get the list of interpretations of this word
    morphs = self.morph_items[chord_lookup]
    # Limit to tag list if one was given
    if tags is not None:
        morphs = [m for m in morphs if m.pos in tags]
    # Build a sign for each morph-family pair
    category_list = []
    for morph in morphs:
        # Look for families corresponding to the POS
        if not morph.pos in self.families:
            raise GrammarLookupError, "There is no family in the lexicon "\
                "for the POS %s." % morph.pos
        for family in self.families[morph.pos]:
            # Build a CCGCategory for each entry in each family found
            for entry in family.entries:
                sign = entry.sign.copy()
                sign.tag = entry.tag_name
                # Instantiate the sign with this chord's root and morph
                features = { 'root' : chord.root, 'morph' : morph }
                if extra_features is not None:
                    features.update(extra_features)
                sign.apply_lexical_features(features)
                category_list.append(sign)
    return category_list
def observation_from_chord_pair(crd1, crd2):
    """
    Return the interval between two chords as a string, or "0" when
    either chord is missing.
    """
    if crd1 is None or crd2 is None:
        return "0"
    first = Chord.from_name(str(crd1))
    second = Chord.from_name(str(crd2))
    return "%d" % Chord.interval(first, second)
def get_signs_for_word(self, word, tags=None, extra_features=None):
    """
    Given a word string, returns a list of the possible signs (as
    CCGSigns) that the grammar can assign to it. word may also be a
    Chord object.

    For now, this assumes that the input is a single chord in roman
    numeral notation and that spelling issues have already been
    resolved (e.g. that 6s have been removed).

    If tags is given it should be a list of strings. Signs will be
    restricted to those whose entry's tag name/POS is in the list.

    If you need to get an instantiated category from a lexical entry,
    use the methods on L{EntriesItem} directly, or L{get_signs_for_tag}.

    @raise GrammarLookupError: for unsupported word types, unknown
        words, or a POS with no lexicon family
    """
    # Accept Chord, string or DbChord; reduce all to a db-mirror chord
    if isinstance(word, Chord):
        chord = word.to_db_mirror()
    elif isinstance(word, basestring):
        chord = Chord.from_name(word, permissive=True).to_db_mirror()
    elif isinstance(word, DbChord):
        chord = word
    else:
        raise GrammarLookupError, "Tried to get signs for a word of type "\
            "'%s': %s" % (type(word), word)
    # Get a chord type string to look up in the grammar
    chord_lookup = "X%s" % chord.type
    # Check whether we know this word
    if not chord_lookup in self.morph_items:
        # Word not recognised
        raise GrammarLookupError, "The word \"%s\" was not found in the "\
            "lexicon. (Looked up %s in %s)" \
            % (word, chord_lookup,
               ",".join(["%s" % item for item in self.morph_items.keys()]))
    # Get the list of interpretations of this word
    morphs = self.morph_items[chord_lookup]
    # Limit to tag list if one was given
    if tags is not None:
        morphs = [m for m in morphs if m.pos in tags]
    # Build a sign for each morph-family pair
    category_list = []
    for morph in morphs:
        # Look for families corresponding to the POS
        if not morph.pos in self.families:
            raise GrammarLookupError, "There is no family in the lexicon "\
                "for the POS %s." % morph.pos
        for family in self.families[morph.pos]:
            # Build a CCGCategory for each entry in each family found
            for entry in family.entries:
                sign = entry.sign.copy()
                sign.tag = entry.tag_name
                # Apply the chord's root and morph (plus any extras)
                # as lexical features on the copied sign
                features = {'root': chord.root, 'morph': morph}
                if extra_features is not None:
                    features.update(extra_features)
                sign.apply_lexical_features(features)
                category_list.append(sign)
    return category_list
def observation_from_chord(crd):
    """
    Observation for a single chord: just its type string.
    """
    return Chord.from_name(crd).type
def test_back_conversion(self): """ Creates a tonal space path, converts it to state labels and converts it back again. This should produce the original path if all goes well. Note that the result of the back conversion will always have the path shifted so it starts as close as possible to the origin. This is correct behaviour: the state labels don't encode the enharmonic block that the path starts in and it is merely by convention that we assume the start point. Each path-chord sequence pair also gives the expected output, which may differ from the original path only in this respect. @todo: update this test """ # Just return for now: I've not had a chance to update this # lf_chords_to_states no longer exists return self.longMessage = True # Run the test on a whole set of paths for (coords, chords, output) in self.PATHS: # Build a CoordinateList for the path ens = [ EnharmonicCoordinate.from_harmonic_coord((x, y)) for (x, y, fun) in coords ] pcs = [PathCoordinate.from_enharmonic_coord(en) for en in ens] time = 0 for pc, (__, __, fun) in zip(pcs, coords): pc.function = fun pc.duration = 1 pc.time = time time += 1 path = Semantics(CoordinateList(items=pcs)) # Build the list of chords chords = [Chord.from_name(crd).to_db_mirror() for crd in chords] for chord in chords: chord.duration = 1 # Try converting it to states states = lf_chords_to_states(path, chords) # Now try converting it back back = states_chords_to_lf(zip(states, chords)) # Check that we got the same coordinates out in_coords = [(x, y) for (x, y, fun) in output] in_funs = [fun for (x, y, fun) in output] out_coords = [point.harmonic_coord for point in back.lf] out_funs = [point.function for point in back.lf] self.assertEqual( in_coords, out_coords, msg= "coordinates converted to states and back produced something different.\nState labels:\n%s" % (states)) self.assertEqual( in_funs, out_funs, msg= "coordinates converted to states and back produced different functions.\nState labels:\n%s" % (states))