def melody_rnn(input_sequence):
    # Initialize the model.
    print("Initializing Melody RNN...")
    bundle = sequence_generator_bundle.read_bundle_file('/content/basic_rnn.mag')
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    generator = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
    generator.initialize()

    # Model options. Change these to get different generated sequences!
    num_steps = 128  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature, the more random the sequence

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    # Ask the model to continue the sequence.
    return generator.generate(input_sequence, generator_options)
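# A minimal usage sketch for melody_rnn() above, assuming the Magenta imports
# used in its body (sequence_generator_bundle, generator_pb2,
# melody_rnn_sequence_generator, music_pb2) are in scope and that
# basic_rnn.mag has been downloaded to /content/. The primer below is a
# hypothetical stand-in for the colab's twinkle_twinkle sequence.
# Timing check: at qpm=60 and steps_per_quarter=4, seconds_per_step is
# 60/60/4 = 0.25 s, so 128 steps come to 32 seconds of music.
twinkle = music_pb2.NoteSequence()
for i, pitch in enumerate([60, 60, 67, 67, 69, 69, 67]):
    twinkle.notes.add(pitch=pitch, start_time=i * 0.5,
                      end_time=(i + 1) * 0.5, velocity=80)
twinkle.total_time = 3.5
twinkle.tempos.add(qpm=60)

continuation = melody_rnn(twinkle)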
def generate(self, num_steps=128, temperature=1.0,
             steps_per_second_avail=False, empty=False):
    """Generates a continuation of self.sequence and renders it to MP3."""
    if hasattr(self, 'num_steps'):
        num_steps = self.num_steps
    if hasattr(self, 'temperature'):
        temperature = self.temperature
    input_sequence = self.sequence
    if empty:
        # Start from a blank sequence at a fixed tempo instead of the primer.
        input_sequence = music_pb2.NoteSequence()
        input_sequence.tempos.add(qpm=80)
    qpm = input_sequence.tempos[0].qpm

    if steps_per_second_avail:
        # Models quantized by wall-clock time expose steps_per_second;
        # derive an equivalent steps_per_quarter from the tempo.
        steps_per_quarter = int(self.model.steps_per_second * (60.0 / qpm))
        seconds_per_step = 1.0 / self.model.steps_per_second
    else:
        seconds_per_step = 60.0 / qpm / self.model.steps_per_quarter
        steps_per_quarter = self.model.steps_per_quarter

    quantized_sequence = mm.quantize_note_sequence(input_sequence,
                                                   steps_per_quarter)
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    primer_sequence_steps = quantized_sequence.total_quantized_steps
    if primer_sequence_steps > num_steps:
        # If the primer is already longer than the requested length, grow
        # num_steps to four times the primer length instead of truncating.
        num_steps = primer_sequence_steps * 4

    total_seconds = num_steps * seconds_per_step
    input_sequence.total_time = min(total_seconds, input_sequence.total_time)

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    self.output_sequence = self.model.generate(input_sequence,
                                               generator_options)
    request_dict = self.put_request_dict
    utils.generated_sequence_2_mp3(self.output_sequence, f"{self.unique_id}",
                                   use_salamander=True,
                                   request_dict=request_dict)
def setup_generator(input_sequence, melody_rnn, num_steps, temperature):
    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)
    return generator_options
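# Hedged usage sketch for setup_generator(): `generator` is assumed to be an
# initialized MelodyRnnSequenceGenerator and `primer` an existing
# NoteSequence with at least one tempo; both names are hypothetical.
options = setup_generator(primer, generator, num_steps=128, temperature=1.0)
continuation = generator.generate(primer, options)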
def generate_1(self, duration):
    # Prepare the note sequence.
    midi = notes_to_midi(self.notes, self.t0)
    primer_seq = magenta.music.midi_io.midi_to_note_sequence(midi)

    # Predict the tempo; halve implausibly fast estimates.
    if len(primer_seq.notes) > 4:
        estimated_tempo = midi.estimate_tempo()
        if estimated_tempo > 240:
            qpm = estimated_tempo / 2
        else:
            qpm = estimated_tempo
    else:
        qpm = 120
    primer_seq.tempos[0].qpm = qpm

    # Generate.
    gen_options = generator_pb2.GeneratorOptions()
    last_end_time = max(
        n.end_time for n in primer_seq.notes) if primer_seq.notes else 0
    gen_start_time = last_end_time + steps_to_seconds(1, qpm)
    gen_end_time = gen_start_time + duration
    gen_options.generate_sections.add(start_time=gen_start_time,
                                      end_time=gen_end_time)
    gen_seq = self.generator.generate(primer_seq, gen_options)
    gen_midi = magenta.music.midi_io.note_sequence_to_pretty_midi(gen_seq)

    # The primer sequence is included in the generated data, so strip it
    # and shift the remaining notes back to start at time zero.
    new_notes = []
    for note in gen_midi.instruments[0].notes:
        if note.start >= gen_start_time:
            new_note = pretty_midi.Note(note.velocity, note.pitch,
                                        note.start - gen_start_time,
                                        note.end - gen_start_time)
            new_notes.append(new_note)
    gen_midi.instruments[0].notes = new_notes
    gen_notes = midi_to_notes(gen_midi)

    # Add the new notes to the play queue.
    self.queue.put(gen_notes)
def generate_continue_for_polyphony(input_sequence, model_name, name,
                                    num_steps, polyphony_rnn, temperature):
    start = time.time()
    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / polyphony_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    filename = name + '_' + model_name + '_output.mid'
    sequence = polyphony_rnn.generate(input_sequence, generator_options)
    mm.sequence_proto_to_midi_file(sequence, './output/' + filename)

    end = time.time()
    print(filename + " created")
    print(f"That took: {end - start:.2f} s")
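# Hedged setup sketch for the helper above, mirroring the basic_rnn
# initialization earlier in this file. The bundle path, the
# polyphony_sequence_generator import, the generator map key, and the
# `primer` sequence are all assumptions not present in the original snippet;
# an ./output directory is also assumed to exist.
from magenta.models.polyphony_rnn import polyphony_sequence_generator

bundle = sequence_generator_bundle.read_bundle_file('/content/polyphony_rnn.mag')
polyphony_rnn = polyphony_sequence_generator.get_generator_map()['polyphony'](
    checkpoint=None, bundle=bundle)
polyphony_rnn.initialize()
generate_continue_for_polyphony(primer, 'polyphony_rnn', 'demo',
                                num_steps=128, polyphony_rnn=polyphony_rnn,
                                temperature=1.0)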
def _generate(self, input_sequence, zero_time, response_start_time,
              response_end_time):
    """Generates a response sequence with the currently-selected generator.

    Args:
      input_sequence: The NoteSequence to use as a generation seed.
      zero_time: The float time in seconds to treat as the start of the input.
      response_start_time: The float time in seconds for the start of
          generation.
      response_end_time: The float time in seconds for the end of generation.

    Returns:
      The generated NoteSequence.
    """
    # Generation is simplified if we always start at 0 time.
    response_start_time -= zero_time
    response_end_time -= zero_time

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.input_sections.add(
        start_time=0,
        end_time=response_start_time)
    generator_options.generate_sections.add(
        start_time=response_start_time,
        end_time=response_end_time)

    # Get current temperature setting.
    generator_options.args['temperature'].float_value = self._temperature

    # Generate response.
    tf.logging.info(
        "Generating sequence using '%s' generator.",
        self._sequence_generator.details.id)
    tf.logging.debug('Generator Details: %s', self._sequence_generator.details)
    tf.logging.debug('Bundle Details: %s',
                     self._sequence_generator.bundle_details)
    tf.logging.debug('Generator Options: %s', generator_options)
    response_sequence = self._sequence_generator.generate(
        adjust_sequence_times(input_sequence, -zero_time), generator_options)
    response_sequence = magenta.music.trim_note_sequence(
        response_sequence, response_start_time, response_end_time)
    return adjust_sequence_times(response_sequence, zero_time)
def hello():
    body = request.get_json()

    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add(qpm=body['tempo'])
    input_sequence.total_time = body['totalTime']

    # Add the notes to the sequence.
    for note in body['notes']:
        input_sequence.notes.add(pitch=note['pitch'],
                                 start_time=note['startTime'],
                                 end_time=note['endTime'],
                                 velocity=note['velocity'])

    # change this for shorter or longer sequences
    num_steps = body.get('numSteps', 128)
    # the higher the temperature the more random the sequence.
    temperature = body.get('temperature', 1.0)

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    seconds_per_step = 60.0 / qpm / app.melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    # Ask the model to continue the sequence.
    print(f"Evaluating {len(body['notes'])} notes...")
    data = app.melody_rnn.generate(input_sequence, generator_options)
    print("Sending back response")
    return app.response_class(response=MessageToJson(data),
                              status=200,
                              mimetype='application/json')
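# Hedged client-side sketch for the Flask handler above; the route path and
# port are assumptions, since the @app.route decorator is not shown in the
# original snippet.
import requests

payload = {
    'tempo': 120,
    'totalTime': 1.0,
    'notes': [{'pitch': 60, 'startTime': 0.0, 'endTime': 0.5, 'velocity': 80},
              {'pitch': 64, 'startTime': 0.5, 'endTime': 1.0, 'velocity': 80}],
    'numSteps': 64,      # optional, defaults to 128
    'temperature': 1.0,  # optional, defaults to 1.0
}
resp = requests.post('http://localhost:5000/', json=payload)
print(resp.json())  # the generated NoteSequence, serialized as JSON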
def generate_melody(input_list):
    # Build the NoteSequence from the input list.
    input_ns = _pitchList2NoteSequence(_list2Pitch(input_list))

    # the higher the temperature the more random the sequence.
    temperature = 1.0
    # The end time of the input.
    last_end_time = (max(n.end_time for n in input_ns.notes)
                     if input_ns.notes else 0)
    # Length of the generated piece.
    input_length = len(input_list)
    total_seconds = input_length * 0.5 * 3 - last_end_time

    # Options.
    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + 0.5,
        end_time=total_seconds)

    gen_sequence = melody_rnn.generate(input_ns, generator_options)

    output = tempfile.NamedTemporaryFile()
    mm.sequence_proto_to_midi_file(gen_sequence, output.name)
    output.seek(0)
    return output
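# Hedged usage sketch for generate_melody(): the module-level helpers
# (_list2Pitch, _pitchList2NoteSequence) and the melody_rnn generator are
# assumed to be defined elsewhere in the module, and the input is assumed to
# be a list those helpers understand (e.g. MIDI pitch numbers). The function
# returns an open temporary file holding the rendered MIDI bytes.
midi_tmp = generate_melody([60, 62, 64, 65, 67])
with open('melody_out.mid', 'wb') as f:
    f.write(midi_tmp.read())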
def run_with_flags(generator):
    """Generates melodies and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The MelodyRnnSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)

    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
    if FLAGS.primer_melody:
        primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to a single middle C.')
        primer_melody = magenta.music.Melody([60])
        primer_sequence = primer_melody.to_sequence(qpm=qpm)

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and the num_steps flag.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at
    # the end of the priming sequence and continuing until the sequence is
    # num_steps long.
    generator_options = generator_pb2.GeneratorOptions()
    if primer_sequence:
        input_sequence = primer_sequence
        # Set the start time to begin on the next step after the last note
        # ends.
        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        generate_section = generator_options.generate_sections.add(
            start_time=last_end_time + seconds_per_step,
            end_time=total_seconds)

        if generate_section.start_time >= generate_section.end_time:
            tf.logging.fatal(
                'Priming sequence is longer than the total number of steps '
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s',
                generate_section.start_time, total_seconds)
            return
    else:
        input_sequence = music_pb2.NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generate_section = generator_options.generate_sections.add(
            start_time=0,
            end_time=total_seconds)
    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
    tf.logging.debug('input_sequence: %s', input_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(input_sequence,
                                                generator_options)
        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(FLAGS.output_dir, midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s',
                    FLAGS.num_outputs, FLAGS.output_dir)
def generate_sequence(self, primer_melody=[60], num_steps=128,
                      temperature=1.0, beam_size=1, branch_factor=1,
                      steps_per_iteration=1):
    """Generates a melody sequence and returns it as a NoteSequence.

    Args:
      primer_melody (list): The starting point for the sequence. It is always
          part of the returned sequence; e.g. if primer_melody is
          [60, 61, 62], the generated sequence starts with the notes
          [60, 61, 62, ...]. Default is [60].
      num_steps (int): Number of steps in the generated sequence.
          Default is 128.
      temperature (float): Controls the amount of randomness in the sequence
          generation. Default is 1.0.
      beam_size (int): Default is 1.
      branch_factor (int): Default is 1.
      steps_per_iteration (int): Default is 1.
    """
    qpm = magenta.music.DEFAULT_QUARTERS_PER_MINUTE
    primer_melody = magenta.music.Melody(primer_melody)
    primer_sequence = primer_melody.to_sequence(qpm=qpm)

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and num_steps.
    seconds_per_step = 60.0 / qpm / self.generator.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at
    # the end of the priming sequence and continuing until the sequence is
    # num_steps long.
    generator_options = generator_pb2.GeneratorOptions()
    input_sequence = primer_sequence
    # Set the start time to begin on the next step after the last note ends.
    if primer_sequence.notes:
        last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
        last_end_time = 0
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
        raise ValueError(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Generation length '
            'requested: %s' % (generate_section.start_time, total_seconds))

    generator_options.args['temperature'].float_value = temperature
    generator_options.args['beam_size'].int_value = beam_size
    generator_options.args['branch_factor'].int_value = branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = steps_per_iteration
    tf.logging.debug('input_sequence: %s', input_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request and return the result.
    return self.generator.generate(input_sequence, generator_options)
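# Hedged usage sketch for generate_sequence(); `sequence_generator` stands in
# for an instance of the class this method belongs to, with self.generator
# already initialized (both are assumptions):
seq = sequence_generator.generate_sequence(primer_melody=[60, 64, 67],
                                           num_steps=64, temperature=0.9)
magenta.music.sequence_proto_to_midi_file(seq, 'generated.mid')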
def run_with_flags(generator):
    """Generates pianoroll tracks and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The PianorollRnnNadeSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else 60
    if FLAGS.primer_pitches:
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.tempos.add().qpm = qpm
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            note = primer_sequence.notes.add()
            note.start_time = 0
            note.end_time = 60.0 / qpm
            note.pitch = pitch
            note.velocity = 100
        primer_sequence.total_time = primer_sequence.notes[-1].end_time
    elif FLAGS.primer_pianoroll:
        primer_pianoroll = magenta.music.PianorollSequence(
            events_list=ast.literal_eval(FLAGS.primer_pianoroll),
            steps_per_quarter=4, shift_range=True)
        primer_sequence = primer_pianoroll.to_sequence(qpm=qpm)
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.tempos.add().qpm = qpm
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ

    # Derive the total number of seconds to generate.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    generate_end_time = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at
    # the end of the priming sequence and continuing until the sequence is
    # num_steps long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time,
        end_time=generate_end_time)

    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s',
            generate_section.start_time, generate_end_time)
        return

    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor

    tf.logging.info('primer_sequence: %s', primer_sequence)
    tf.logging.info('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)
        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(output_dir, midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)
def run_with_flags(generator):
    """Generates performance tracks and saves them as MIDI files.

    Uses the options specified by the flags defined in this module.

    Args:
      generator: The PerformanceRnnSequenceGenerator to use for generation.
    """
    if not FLAGS.output_dir:
        tf.logging.fatal('--output_dir required')
        return
    output_dir = os.path.expanduser(FLAGS.output_dir)

    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)

    if not tf.gfile.Exists(output_dir):
        tf.gfile.MakeDirs(output_dir)

    primer_sequence = None
    if FLAGS.primer_pitches:
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
        for pitch in ast.literal_eval(FLAGS.primer_pitches):
            note = primer_sequence.notes.add()
            note.start_time = 0
            note.end_time = 60.0 / magenta.music.DEFAULT_QUARTERS_PER_MINUTE
            note.pitch = pitch
            note.velocity = 100
            primer_sequence.total_time = note.end_time
    elif FLAGS.primer_melody:
        primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence()
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
    else:
        tf.logging.warning(
            'No priming sequence specified. Defaulting to empty sequence.')
        primer_sequence = music_pb2.NoteSequence()
        primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ

    # Derive the total number of seconds to generate.
    seconds_per_step = 1.0 / generator.steps_per_second
    generate_end_time = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at
    # the end of the priming sequence and continuing until the sequence is
    # num_steps long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=primer_sequence.total_time,
        end_time=generate_end_time)

    if generate_section.start_time >= generate_section.end_time:
        tf.logging.fatal(
            'Priming sequence is longer than the total number of steps '
            'requested: Priming sequence length: %s, Total length '
            'requested: %s',
            generate_section.start_time, generate_end_time)
        return

    for control_cls in magenta.music.all_performance_control_signals:
        if FLAGS[control_cls.name].value is not None and (
                generator.control_signals is None or not any(
                    control.name == control_cls.name
                    for control in generator.control_signals)):
            tf.logging.warning(
                'Control signal requested via flag, but generator is not set '
                'up to condition on this control signal. Request will be '
                'ignored: %s = %s',
                control_cls.name, FLAGS[control_cls.name].value)

    if (FLAGS.disable_conditioning is not None and
            not generator.optional_conditioning):
        tf.logging.warning(
            'Disable conditioning flag set, but generator is not set up for '
            'optional conditioning. Requested disable conditioning flag will '
            'be ignored: %s', FLAGS.disable_conditioning)

    if generator.control_signals:
        for control in generator.control_signals:
            if FLAGS[control.name].value is not None:
                generator_options.args[control.name].string_value = (
                    FLAGS[control.name].value)
    if FLAGS.disable_conditioning is not None:
        generator_options.args['disable_conditioning'].string_value = (
            FLAGS.disable_conditioning)

    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
    tf.logging.debug('primer_sequence: %s', primer_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save the output as midi
    # files.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(primer_sequence,
                                                generator_options)
        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(output_dir, midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)

    tf.logging.info('Wrote %d MIDI files to %s', FLAGS.num_outputs, output_dir)
bundle = sequence_generator_bundle.read_bundle_file('content/attention_rnn.mag')
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['attention_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()

# Model options. Change these to get different generated sequences!
input_sequence = twinkle_twinkle  # change this to teapot if you want
num_steps = 128  # change this for shorter or longer sequences
temperature = 1.0  # the higher the temperature the more random the sequence.

# Set the start time to begin on the next step after the last note ends.
last_end_time = (max(n.end_time for n in input_sequence.notes)
                 if input_sequence.notes else 0)
qpm = input_sequence.tempos[0].qpm
seconds_per_step = 60.0 / qpm / melody_rnn.steps_per_quarter
total_seconds = num_steps * seconds_per_step

generator_options = generator_pb2.GeneratorOptions()
generator_options.args['temperature'].float_value = temperature
generator_options.generate_sections.add(
    start_time=last_end_time + seconds_per_step,
    end_time=total_seconds)

# Ask the model to continue the sequence.
sequence = melody_rnn.generate(input_sequence, generator_options)
# print(type(sequence.notes[1]))
print(sequence)
print('🎉 Done!')
def generate(self, empty=False, backup_seq=None):
    """Improv RNN needs its own generation routine: chords are inferred from
    the primer and the model is conditioned on the resulting progression.
    """
    if backup_seq is not None:
        self.sequence = copy.deepcopy(backup_seq)
    input_sequence = copy.deepcopy(self.sequence)
    num_steps = self.num_steps  # change this for shorter/longer sequences
    temperature = self.temperature

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm

    input_sequence = mm.quantize_note_sequence(input_sequence,
                                               self.model.steps_per_quarter)
    primer_sequence_steps = input_sequence.total_quantized_steps
    if primer_sequence_steps > num_steps:
        # If the primer is already longer than the requested length, grow
        # num_steps to four times the primer length instead of truncating.
        num_steps = primer_sequence_steps * 4

    # Infer a chord progression from the primer and repeat it to cover the
    # requested number of phrases.
    mm.infer_chords_for_sequence(input_sequence)
    raw_chord_string = ""
    for annotation in input_sequence.text_annotations:
        if annotation.annotation_type == CHORD_SYMBOL:
            chord_name = annotation.text
            raw_chord_string += f'{chord_name} '
    raw_chord_string = raw_chord_string[:-1]
    raw_chords = raw_chord_string.split()
    repeated_chords = [chord for chord in raw_chords
                       for _ in range(16)] * self.phrase_num
    self.backing_chords = mm.ChordProgression(repeated_chords)

    # Copy the chord annotations onto the primer sequence.
    chord_sequence = self.backing_chords.to_sequence(sequence_start_time=0.0,
                                                     qpm=qpm)
    for text_annotation in chord_sequence.text_annotations:
        if text_annotation.annotation_type == CHORD_SYMBOL:
            chord = self.sequence.text_annotations.add()
            chord.CopyFrom(text_annotation)

    seconds_per_step = 60.0 / qpm / self.model.steps_per_quarter
    total_seconds = len(self.backing_chords) * seconds_per_step
    self.sequence.total_time = total_seconds

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    sequence = self.model.generate(self.sequence, generator_options)

    # Render the backing chords as notes and export the result to MP3.
    renderer = mm.BasicChordRenderer(velocity=CHORD_VELOCITY)
    renderer.render(sequence)
    request_dict = self.put_request_dict
    generated_sequence_2_mp3(sequence, f"{self.unique_id}",
                             request_dict=request_dict)