  def testExtractChords(self):
    music_testing_lib.add_chords_to_sequence(
        self.note_sequence, [('C', 2), ('G7', 6), ('F', 8)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)
    quantized_sequence.total_quantized_steps = 10
    chord_progressions, _ = chord_pipelines.extract_chords(quantized_sequence)
    expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'F', 'F']]
    self.assertEqual(expected, [list(chords) for chords in chord_progressions])

 def testExtractChordsAllTranspositions(self):
   music_testing_lib.add_chords_to_sequence(
       self.note_sequence, [('C', 1)])
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   quantized_sequence.total_quantized_steps = 2
   chord_progressions, _ = chord_pipelines.extract_chords(
       quantized_sequence, all_transpositions=True)
   expected = list(zip([NO_CHORD] * 12, ['Gb', 'G', 'Ab', 'A', 'Bb', 'B',
                                         'C', 'Db', 'D', 'Eb', 'E', 'F']))
   self.assertEqual(expected, [tuple(chords) for chords in chord_progressions])
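For reference, a minimal setUp sketch consistent with the expectations above. It assumes note_seq's music_pb2 proto and a music_testing_lib.add_track_to_sequence helper taking (pitch, velocity, start_time, end_time) tuples; with qpm 60 and steps_per_quarter 1, one quantized step equals one second, which matches the chord steps the tests expect.

  def setUp(self):
    # music_pb2 comes from note_seq.protobuf; add_track_to_sequence and its
    # (pitch, velocity, start_time, end_time) tuple format follow the
    # magenta/note_seq testing helpers (an assumption, check your version).
    self.steps_per_quarter = 1
    self.note_sequence = music_pb2.NoteSequence()
    self.note_sequence.ticks_per_quarter = 220
    self.note_sequence.tempos.add(qpm=60.0)
    # One sustained note so quantization has content; with qpm=60 and
    # steps_per_quarter=1, one step equals one second.
    music_testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 10.0)])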
Example 3
    def cleanDataset(self):
        # Requires: glob, os, plus the magenta modules musicxml_parser,
        # musicxml_reader, sequences_lib, chord_pipelines, melody_pipelines.
        # Use a raw string for the Windows path and a wildcard so glob
        # matches the files inside the directory, not the directory itself.
        path = r"D:\FAI\Wikifonia\*"
        count = 0
        for file in glob.glob(path):
            try:
                mxlObject = musicxml_parser.MusicXMLDocument(file)
                mxlSequence = musicxml_reader.musicxml_to_sequence_proto(mxlObject)
                quantizedNoteSequence = sequences_lib.quantize_note_sequence(mxlSequence, 1)
                chord_prog, stats = chord_pipelines.extract_chords(quantizedNoteSequence)
                melodies, stats = melody_pipelines.extract_melodies(quantizedNoteSequence)
                ac, stats = chord_pipelines.extract_chords_for_melodies(quantizedNoteSequence, melodies)
            except Exception:
                # Drop any file that fails to parse, quantize, or extract.
                os.remove(file)
                print(file)
                count += 1
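Deleting on any exception is destructive. A hypothetical stand-alone variant that quarantines failures instead might look like this; the paths, the quarantine directory name, and the note_seq import packaging are assumptions:

import glob
import os
import shutil

from note_seq import musicxml_parser

src = r"D:\FAI\Wikifonia"
bad = r"D:\FAI\Wikifonia_bad"  # hypothetical quarantine directory
os.makedirs(bad, exist_ok=True)
for f in glob.glob(os.path.join(src, "*")):
    try:
        musicxml_parser.MusicXMLDocument(f)  # parse check only
    except Exception:
        shutil.move(f, bad)  # keep the file instead of deleting it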
Example 4
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise sequence_generator.SequenceGeneratorError(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise sequence_generator.SequenceGeneratorError(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    if input_sequence and input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm
    else:
      qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
    steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)
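    # steps_per_quarter_to_steps_per_second is steps_per_quarter * qpm / 60;
    # e.g. 4 steps per quarter at qpm=120 gives 8 quantized steps per second.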

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      # Use primer melody from input section only. Take backing chords from
      # beginning of input section through end of generate section.
      input_section = generator_options.input_sections[0]
      primer_sequence = note_seq.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
      backing_sequence = note_seq.trim_note_sequence(input_sequence,
                                                     input_section.start_time,
                                                     generate_section.end_time)
      input_start_step = note_seq.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0.0)
    else:
      # No input section. Take primer melody from the beginning of the sequence
      # up until the start of the generate section.
      primer_sequence = note_seq.trim_note_sequence(input_sequence, 0.0,
                                                    generate_section.start_time)
      backing_sequence = note_seq.trim_note_sequence(input_sequence, 0.0,
                                                     generate_section.end_time)
      input_start_step = 0

    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    if last_end_time >= generate_section.start_time:
      raise sequence_generator.SequenceGeneratorError(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the input section. This model can only extend melodies. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming and backing sequences.
    quantized_primer_sequence = note_seq.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)
    quantized_backing_sequence = note_seq.quantize_note_sequence(
        backing_sequence, self.steps_per_quarter)

    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = melody_pipelines.extract_melodies(
        quantized_primer_sequence, search_start_step=input_start_step,
        min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = note_seq.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0.0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    end_step = note_seq.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      steps_per_bar = int(
          note_seq.steps_per_bar_in_quantized_sequence(
              quantized_primer_sequence))
      melody = note_seq.Melody([],
                               start_step=max(0, start_step - 1),
                               steps_per_bar=steps_per_bar,
                               steps_per_quarter=self.steps_per_quarter)

    extracted_chords, _ = chord_pipelines.extract_chords(
        quantized_backing_sequence)
    chords = extracted_chords[0]

    # Make sure that chords and melody start on the same step.
    if chords.start_step < melody.start_step:
      chords.set_length(len(chords) - melody.start_step + chords.start_step)

    assert chords.end_step == end_step

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)
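    # Callers supply these through generator_options.args, e.g.
    # generator_options.args['temperature'].float_value = 0.9; any argument
    # not present falls back to the model's default.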

    generated_melody = self._model.generate_melody(melody, chords, **args)
    generated_lead_sheet = note_seq.LeadSheet(generated_melody, chords)
    generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
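As a usage sketch: _generate is normally reached through the public generate() wrapper with a GeneratorOptions proto. The proto field names below follow note_seq's generator_pb2; the primer content and the generator instance itself are placeholders:

from note_seq.protobuf import generator_pb2, music_pb2

primer = music_pb2.NoteSequence()
primer.tempos.add(qpm=120)
# ... primer notes and chord text annotations would be added here ...

options = generator_pb2.GeneratorOptions()
options.args['temperature'].float_value = 1.0
section = options.generate_sections.add()
section.start_time = 4.0  # must lie after the last primer note ends
section.end_time = 8.0

# `generator` stands in for a SequenceGenerator subclass that wraps the
# method above; its public generate() forwards to _generate().
generated_sequence = generator.generate(primer, options)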
Example 5
    def observationModelTrainer(self):
        # Requires: glob, os, re, sklearn.preprocessing.normalize, plus the
        # magenta modules musicxml_parser, musicxml_reader, sequences_lib
        # and chord_pipelines. self.ct_matrix is a 25x25 count matrix
        # indexed by positions in ALL_CHORD_LIST.
        ALL_CHORD_LIST = [
            'N.C', 'C', 'Cm', 'C#', 'C#m', 'D', 'Dm', 'Eb', 'Ebm', 'E', 'Em',
            'F', 'Fm', 'F#', 'F#m', 'G', 'Gm', 'G#', 'G#m', 'A', 'Am', 'A#',
            'A#m', 'B', 'Bm'
        ]
        # Fold enharmonic spellings onto the canonical names above.
        Same_Chord = {
            'Db': 'C#',
            'Dbm': 'C#m',
            'D#': 'Eb',
            'D#m': 'Ebm',
            'Gb': 'F#',
            'Gbm': 'F#m',
            'Ab': 'G#',
            'Abm': 'G#m',
            'Bb': 'A#',
            'Bbm': 'A#m'
        }

        def simplify(name):
            # Reduce a chord symbol to its canonical ALL_CHORD_LIST entry:
            # trim non-minor qualities down to the root, fold enharmonic
            # spellings, and rewrite Cb/Fb roots (Cb = B, Fb = E).
            if name == 'N.C':
                return name
            if len(name) == 3 and name[2] != 'm':
                name = name[:2]
                if name[1] not in ['#', 'b']:
                    name = name[:1]
            name = Same_Chord.get(name, name)
            return {'Cb': 'B', 'Fb': 'E', 'Cbm': 'Bm', 'Fbm': 'Em'}.get(
                name, name)

        path = "Wikifonia"
        # Use a wildcard so glob iterates over the files inside the
        # directory rather than matching just the directory name.
        for file in glob.glob(os.path.join(path, "*")):
            mxlObject = musicxml_parser.MusicXMLDocument(file)
            mxlSequence = musicxml_reader.musicxml_to_sequence_proto(mxlObject)
            quantizedNoteSequence = sequences_lib.quantize_note_sequence(
                mxlSequence, 1)

            chord_prog, stats = chord_pipelines.extract_chords(
                quantizedNoteSequence)
            previous = None
            for chord in list(chord_prog[0]):
                if previous is None:
                    previous = chord
                    continue

                # Strip octave/extension digits and keep at most root +
                # accidental + an optional minor 'm' (e.g. 'C#m7' -> 'C#m').
                curChord = simplify(re.sub(r'\d+', '', chord)[:3])
                prevChord = simplify(re.sub(r'\d+', '', previous)[:3])

                # Count the transition; transitions out of 'N.C' and
                # self-transitions are skipped.
                if prevChord != 'N.C' and prevChord != curChord:
                    a = ALL_CHORD_LIST.index(prevChord)
                    b = ALL_CHORD_LIST.index(curChord)
                    self.ct_matrix[a][b] += 1
                previous = chord

        # Row-normalize the counts into transition probabilities.
        self.ct_matrix = normalize(self.ct_matrix, axis=1, norm='l1')
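For completeness, a minimal sketch of the state this trainer assumes. The class name and constructor are hypothetical; the 25x25 shape follows ALL_CHORD_LIST, and normalize comes from sklearn.preprocessing:

import numpy as np
from sklearn.preprocessing import normalize

class ChordTransitionModel:
    # Hypothetical owner of the observationModelTrainer method above.
    def __init__(self):
        # One row/column per ALL_CHORD_LIST entry (25 chords incl. 'N.C').
        self.ct_matrix = np.zeros((25, 25))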