Example no. 1
def make_music_vae(input_midi_file_1, input_midi_file_2):
    input_midi_1 = os.path.expanduser(input_midi_file_1)
    input_midi_2 = os.path.expanduser(input_midi_file_2)
    if not os.path.exists(input_midi_1):
        raise ValueError('Input MIDI 1 not found: %s' % input_midi_file_1)

    if not os.path.exists(input_midi_2):
        raise ValueError('Input MIDI 2 not found: %s' % input_midi_file_2)
    input_1 = mm.midi_file_to_note_sequence(input_midi_1)
    input_2 = mm.midi_file_to_note_sequence(input_midi_2)

    _check_extract_examples(input_1, input_midi_file_1, 1)
    _check_extract_examples(input_2, input_midi_file_2, 2)

def piano_continuation(primer):
    print("Does this function not work either?")
    primer_ns = mm.midi_file_to_note_sequence(primer)

    # Handle sustain pedal in the primer.
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    max_primer_seconds = 20  #@param {type:"slider", min:1, max:120}
    if primer_ns.total_time > max_primer_seconds:
        print('Primer is longer than %d seconds, truncating.' %
              max_primer_seconds)
        primer_ns = mm.extract_subsequence(primer_ns, 0, max_primer_seconds)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        print('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    #note_sequence_to_midi_file(primer_ns, 'modified_'+primer)

    targets = uncondi_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
    targets = targets[:-1]

    decode_length = max(0, 4096 - len(targets))
    if len(targets) >= 4096:
        print(
            'Primer has more events than maximum sequence length; nothing will be generated.'
        )

    # Generate sample events. (`uncondi_samples` and `uncondi_encoders` are
    # defined at module level by the estimator-setup fragment near the end
    # of this listing.)
    sample_ids = next(uncondi_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids, encoder=uncondi_encoders['targets'])
    ns = mm.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer.
    continuation_ns = mm.concatenate_sequences([primer_ns, ns])

    note_sequence_to_midi_file(continuation_ns, 'continuated_' + primer)
Example no. 3
def test():
    test_path = os.path.join(cfg.SEQ_SAMPLE_PATH, str(cfg.FRAME_TIME) + '_ms', 'test')
    test_files = list_files(test_path, 'pickle')
    idx = 40
    print(test_files[idx])
    sample = data.read_sample(test_files[idx])
    
    # Load the trained Onsets-and-Frames drum model from its checkpoint.
    m = model.OaF_Drum()
    m.load(os.path.join(m.checkpoint_dir, '8.h5'))

    # Predict drum onsets for the sample's input frames.
    onset, _ = m.predict(sample['Frames'])

    sequence1 = mm.midi_file_to_note_sequence(test_files[idx].split('.')[0] + '.mid')
    print(sequence1.tempos[0])
    fig = mm.plot_sequence(sequence1, show_figure=False)
    data.export_png(fig, filename="test.png")

    sequence2 = data.matrix2sequence(sample['Onset'], onset=sample['Onset'])
    mm.sequence_proto_to_midi_file(sequence2, 'test.mid')
    fig2 = mm.plot_sequence(sequence2, show_figure=False)
    data.export_png(fig2, filename="test_gen.png")

    sequence3 = data.matrix2sequence(onset[0], onset=onset[0])
    mm.sequence_proto_to_midi_file(sequence3, 'pred.mid')
    fig3 = mm.plot_sequence(sequence3, show_figure=False)
    data.export_png(fig3, filename="pred.png")
Example no. 4
    def __init__(self, *args, **kwargs):
        # Command-line arguments
        #self.args = kwargs.get('args')
        #self.model = kwargs.get('model')
        # Init model
        self.temperature = 1
        self._modelpath = "/Users/carsault/Dropbox/work/code/gitlab/cat-mel_2bar_big.tar"
        self.config_name = 'cat-mel_2bar_big'
        self.config = configs.CONFIG_MAP[self.config_name]
        self.config.data_converter.max_tensors_per_item = None
        checkpoint_dir_or_path = os.path.expanduser(self._modelpath)
        print('Loading model')
        self.model = TrainedModel(self.config, batch_size=1,
                                  checkpoint_dir_or_path=checkpoint_dir_or_path)
        # Init encoded files
        self.style_name = ['blues', 'classic', 'country', 'jazz', 'poprock',
                           'world', 'game', 'empty', 'RnB']
        input_files_list = ["./MVAE_input_valid/Blues1.mid",
                            "./MVAE_input_valid/classic1.mid",
                            "./MVAE_input_valid/country1.mid",
                            "./MVAE_input_valid/jazz1.mid",
                            "./MVAE_input_valid/poprock1.mid",
                            "./MVAE_input_valid/World1.mid",
                            "./MVAE_input_valid/game1.mid",
                            "./MVAE_input_valid/empty1.mid",
                            "./MVAE_input_valid/RnB1.mid"]
        self.input_z_list = []
        for file in input_files_list:
            input_midi = os.path.expanduser(file)
            inp = mm.midi_file_to_note_sequence(input_midi)
            z, mu, _ = self.model.encode([inp])
            self.input_z_list.append(z[0])
        # Init OSC server
        super(MVAEServer, self).__init__(*args)
        self.print('Server is ready.')
Example no. 5
def app(unused_argv):
    sequence = midi_file_to_note_sequence(
        os.path.join("primers", "52_jazz_125_beat_4-4.mid"))

    tapped_sequence = get_tapped_2bar(sequence)

    return 0
Example no. 6
def encode_midi(tm, input_midi_path, output_path, print_progress=False):
  """
  Convert midi file to latent vector file.
  If the midi file fails to convert to a NoteSequence, or the NoteSequence
    does not correspond to any tensors, do not output any file.
  tm: Trained model used for encoding
  input_midi_path: Path to midi file.
  output_path: Path to .npy file to save latent vectors.
    The output latent vector array has shape (# NoteSequences, 
    # latent vector dimensions)
  """
  try:
    ns = mm.midi_file_to_note_sequence(input_midi_path)
  except mm.MIDIConversionError as e:
    print(input_midi_path, 'Midi conversion error:', str(e))
    return
  tensors = tm._config.data_converter.to_tensors(ns)
  if len(tensors.inputs) == 0:
    if print_progress:
      print(input_midi_path, 'does not encode to any vectors')
    return
  _, mu, _ = tm.encode_tensors(
    list(tensors.inputs),
    list(tensors.lengths),
    list(tensors.controls))
  np.save(output_path, mu)
  if print_progress:
    print('Encoded', input_midi_path, '->', len(tensors.inputs), 'vectors at', output_path)
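
A minimal usage sketch for encode_midi (hedged: the config name, checkpoint
path, and folder names below are illustrative, not from the original project;
TrainedModel and configs are the same MusicVAE classes used in the other
examples):

# Hypothetical driver: encode every MIDI file in a folder to .npy latents.
# (Assumes the 'latents' output folder already exists.)
import os
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel

tm = TrainedModel(configs.CONFIG_MAP['cat-mel_2bar_big'], batch_size=8,
                  checkpoint_dir_or_path=os.path.expanduser('~/cat-mel_2bar_big.ckpt'))
for name in os.listdir('midi_in'):
    if name.endswith('.mid'):
        encode_midi(tm, os.path.join('midi_in', name),
                    os.path.join('latents', name + '.npy'), print_progress=True)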
Example no. 7
def create_song_prototype(song_path,
                          start_time,
                          stop_time,
                          model_used='attention_rnn',
                          temperature=1.0):
    magenta_model_path = '%s/magenta_models/%s.mag' % (MEDIA_ROOT, model_used)
    bundle = mm.sequence_generator_bundle.read_bundle_file(magenta_model_path)
    generator_map = melody_rnn_sequence_generator.get_generator_map()
    melody_rnn = generator_map[model_used](checkpoint=None, bundle=bundle)
    melody_rnn.initialize()

    base_sequence = midi_file_to_note_sequence(song_path)
    target_sequence = extract_subsequence(base_sequence, start_time, stop_time)

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=target_sequence.total_time,
        end_time=2 * target_sequence.total_time)
    generated_sequence = melody_rnn.generate(target_sequence,
                                             generator_options)

    proceed_sequence = extract_subsequence(generated_sequence,
                                           target_sequence.total_time,
                                           2 * target_sequence.total_time)

    return proceed_sequence
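
A hedged usage sketch (the song path and time window are illustrative):

# Hypothetical call: continue seconds 0-8 of a song with the attention RNN,
# generating an equally long continuation.
continuation = create_song_prototype('songs/demo.mid', start_time=0.0,
                                     stop_time=8.0,
                                     model_used='attention_rnn',
                                     temperature=1.1)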
Example no. 8
    def _read_midi(self, input_file):
        """
        Read midi file into note sequence.
        :param input_file: A string path to midi file.
        :return: Note sequence.
        """
        note_sequence = mm.midi_file_to_note_sequence(input_file)

        # Handle sustain pedal in primer.
        if self.config.sustain:
            note_sequence = mm.apply_sustain_control_changes(note_sequence)

        # Trim to desired number of seconds.
        if note_sequence.total_time > self.config.max_length:
            LOGGER.warning(
                'Note sequence duration %d is longer than max seconds %d, truncating.',
                note_sequence.total_time, self.config.max_length)
            note_sequence = mm.extract_subsequence(note_sequence, 0,
                                                   self.config.max_length)

        # Remove drums unless the config allows them.
        if any(note.is_drum
               for note in note_sequence.notes) and not self.config.use_drum:
            LOGGER.warning('Midi file contains drum sounds, removing.')
            notes = [note for note in note_sequence.notes if not note.is_drum]
            del note_sequence.notes[:]
            note_sequence.notes.extend(notes)

        # Set primer instrument and program.
        for note in note_sequence.notes:
            note.instrument = 1
            note.program = 0

        return note_sequence
Example no. 9
def get_primer_ns(filename, max_length):
    """
    Convert Midi file to note sequences for priming.
    :param filename: Midi file name.
    :param max_length: Maximum note sequence length for priming in seconds.
    :return:
        Note sequences for priming.
    """
    primer_ns = mm.midi_file_to_note_sequence(filename)

    # Handle sustain pedal in primer.
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    if primer_ns.total_time > max_length:
        LOGGER.warning(
            'Primer duration %d is longer than max seconds %d, truncating.',
            primer_ns.total_time, max_length)
        primer_ns = mm.extract_subsequence(primer_ns, 0, max_length)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        LOGGER.warning('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    return primer_ns
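
A hedged usage sketch for get_primer_ns (the file name and cap are
illustrative):

# Hypothetical call: prime with at most 30 seconds of a MIDI file.
primer = get_primer_ns(os.path.expanduser('~/midi/fur_elise.mid'),
                       max_length=30)
LOGGER.info('Primer: %.1f seconds, %d notes.',
            primer.total_time, len(primer.notes))
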
def run(config_map):
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

    if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
        raise ValueError(
            'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.'
        )
    if FLAGS.output_npy is None:
        raise ValueError('`--output_npy` is required.')
    # tf.gfile.MakeDirs(FLAGS.output_npy)

    if FLAGS.config not in config_map:
        raise ValueError('Invalid config name: %s' % FLAGS.config)
    config = config_map[FLAGS.config]
    config.data_converter.max_tensors_per_item = None

    # Check midi file
    if not os.path.exists(FLAGS.midi_dir):
        raise ValueError('MIDI dir not found: %s' % FLAGS.midi_dir)

    logging.info(
        'Attempting to extract examples from input MIDIs using config `%s`...',
        FLAGS.config)

    logging.info('Loading model...')
    if FLAGS.run_dir:
        checkpoint_dir_or_path = os.path.expanduser(
            os.path.join(FLAGS.run_dir, 'train'))
    else:
        checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
    model = TrainedModel(config,
                         batch_size=min(FLAGS.max_batch_size,
                                        FLAGS.num_outputs),
                         checkpoint_dir_or_path=checkpoint_dir_or_path)

    logging.info('Extracting latent parameters...')
    midi_files = [
        f for f in listdir(FLAGS.midi_dir) if isfile(join(FLAGS.midi_dir, f))
    ]
    extraction = np.zeros((len(midi_files), 3, 512))
    for i, _midi_file in tqdm(enumerate(midi_files)):
        midi_file = FLAGS.midi_dir + '/' + _midi_file
        try:
            input_midi = mm.midi_file_to_note_sequence(midi_file)
        except mm.MIDIConversionError:
            # Skip files that fail to parse as MIDI.
            continue
        tensor = config.data_converter.to_tensors(input_midi).outputs
        if not tensor:
            # logging.info('Skipping {:s}'.format(_midi_file))
            continue
        z, mu, sigma = model.encode([input_midi])
        extraction[i, 0, :] = z
        extraction[i, 1, :] = mu
        extraction[i, 2, :] = sigma

    np.save(FLAGS.output_npy, extraction)

    logging.info('Done.')
Example no. 11
    def download(self, request):
        is_required_params = 'song_id' in request.query_params
        if not is_required_params:
            raise RequiredParamsNotProvidedException

        file_path = self.get_song_file_path(
            request.query_params.get('song_id'))
        note_sequence = midi_file_to_note_sequence(file_path)
        prepared_song = note_sequence_to_note_list(note_sequence)

        return Response(data=prepared_song, status=status.HTTP_200_OK)
Example no. 12
    def generate_primer(self):
        """
        Put something important here.

        """
        if self.conditioned:
            raise ValueError("Should be using an unconditioned model!")

        primer_ns = self.sequence
        primer_ns = mm.apply_sustain_control_changes(primer_ns)
        max_primer_seconds = 10

        if primer_ns.total_time > max_primer_seconds:
            print(f'Primer is longer than {max_primer_seconds} seconds, truncating.')
            # cut primer if it's too long
            primer_ns = mm.extract_subsequence(
                primer_ns, 0, max_primer_seconds)

        if any(note.is_drum for note in primer_ns.notes):
            print('Primer contains drums; they will be removed.')
            notes = [note for note in primer_ns.notes if not note.is_drum]
            del primer_ns.notes[:]
            primer_ns.notes.extend(notes)

        for note in primer_ns.notes:
            # make into piano
            note.instrument = 1
            note.program = 0

        self.targets = self.encoders['targets'].encode_note_sequence(
                        primer_ns)
        # Remove the end token from the encoded primer.
        self.targets = self.targets[:-1]
        self.decode_length = max(0, 4096 - len(self.targets))

        if len(self.targets) >= 4096:
            print('Primer has more events than maximum sequence length; nothing will be generated.')
        # Generate sample events.
        sample_ids = next(self.samples)['outputs']

        midi_filename = self.decode(
                        sample_ids,
                        encoder=self.encoders['targets'])
        ns = mm.midi_file_to_note_sequence(midi_filename)
        # Append continuation to primer.
        continuation_ns = mm.concatenate_sequences([primer_ns, ns])

        request_dict = self.put_request_dict
        generated_sequence_2_mp3(continuation_ns, f"{self.unique_id}", use_salamander=True,
                                 request_dict=request_dict)
Example no. 13
def main():
    #load midi file
    loaded_sequence = mm.midi_file_to_note_sequence(base + "/input/input.mid")

    s = loaded_sequence
    s = change_tempo(get_tapped_2bar(s, velocity=85, ride=False),
                     s.tempos[0].qpm)

    h = drumify(s, groovae_2bar_tap)
    h = change_tempo(h, s.tempos[0].qpm)

    midi_io.note_sequence_to_midi_file(start_notes_at_0(h),
                                       base + "/output/output.mid")

    print("Generate Done")
Example no. 14
    def generate(self):
        # Generate from scratch: no primer, empty targets.
        self.targets = []
        self.decode_length = 1024

        # Generate sample events.
        sample_ids = next(self.samples)['outputs']
        # Decode to NoteSequence.
        midi_filename = self.decode(
            sample_ids,
            encoder=self.encoders['targets'])
        unconditional_ns = mm.midi_file_to_note_sequence(midi_filename)
        request_dict = self.put_request_dict
        generated_sequence_2_mp3(unconditional_ns, f"{self.unique_id}", use_salamander=True,
                                 request_dict=request_dict)
Example no. 15
def generate(
    estimator,
    unconditional_encoders,
    decode_length,
    targets,
    primer_note_sequence,
):
    """
    Generate unconditioned music samples from estimator
    :param estimator: Transformer estimator
    :param unconditional_encoders: A dictionary contains key and its encoder.
    :param decode_length: A number represents the duration of music snippet.
    :param targets: Target input for Transformer.
    :param primer_note_sequence: Notesequence represents the primer.
    :return:
    """

    # Output filename
    tf.gfile.MakeDirs(FLAGS.output_dir)
    date_and_time = time.strftime("%Y-%m-%d_%H%M%S")
    base_name = os.path.join(FLAGS.output_dir,
                             f"unconditioned_{date_and_time:s}.mid")

    # Generating sample
    LOGGER.info("Generating sample.")
    input_function = decoding.make_input_fn_from_generator(
        unconditional_input_generator(targets, decode_length))
    unconditional_samples = estimator.predict(input_function,
                                              checkpoint_path=FLAGS.model_path)

    # Sample events
    LOGGER.info("Generating sample events.")
    sample_ids = next(unconditional_samples)["outputs"]

    # Decode to note sequence
    LOGGER.info("Decoding sample ID")
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders["targets"])
    unconditional_note_seqs = mm.midi_file_to_note_sequence(midi_filename)

    # Append continuation to primer
    continuation_note_sequence = mm.concatenate_sequences(
        [primer_note_sequence, unconditional_note_seqs])

    # Saving MIDI file
    mm.sequence_proto_to_midi_file(continuation_note_sequence, base_name)
Example no. 16
    def generate_basic_notes(self, qpm=160, failsafe=False):
        """
        Requires melody conditioned model.
        """
        if not self.conditioned:
            raise ValueError("Model should be conditioned!")

        if failsafe:
            self.failsafe()

        else:
            melody_ns = copy.deepcopy(self.sequence)
            try:
                melody_instrument = mm.infer_melody_for_sequence(melody_ns)
                notes = [note for note in melody_ns.notes
                        if note.instrument == melody_instrument]

                melody_ns.notes.extend(
                    sorted(notes, key=lambda note: note.start_time))
                for i in range(len(melody_ns.notes) - 1):
                    melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time

                # Trim to one minute to keep inference time down.
                melody_ns = mm.extract_subsequence(melody_ns, 0, 60)
                self.inputs = self.encoders['inputs'].encode_note_sequence(
                            melody_ns)
                print("Melody successfully parsed and encoded!")
            except Exception as e:
                print(f"Error in encoding stage {e}")
                print("Resorting to a basic melody")
                self.failsafe()

        self.decode_length = 4096
        sample_ids = next(self.samples)['outputs']

        # Decode to NoteSequence.
        midi_filename = self.decode(
            sample_ids,
            encoder=self.encoders['targets'])
        accompaniment_ns = mm.midi_file_to_note_sequence(midi_filename)

        request_dict = self.put_request_dict
        generated_sequence_2_mp3(accompaniment_ns, f"{self.unique_id}", use_salamander=True,
                                 request_dict=request_dict)
Example no. 17
def get_primer_ns(filename):
    """
    Convert MIDI file to note sequences for priming.
    :param filename: MIDI file name.
    :return:
        Note sequences for priming.
    """
    primer_note_sequence = mm.midi_file_to_note_sequence(filename)

    # Handle sustain pedal in primer.
    primer_note_sequence = mm.apply_sustain_control_changes(
        primer_note_sequence)

    # Set primer instrument and program.
    for note in primer_note_sequence.notes:
        note.instrument = 1
        note.program = 0

    return primer_note_sequence
Example no. 18
def get_melody_ns(filename):
    """
    Convert melody Midi file to note sequence.
    :param filename: Midi file name.
    :return:
        Melody note sequences.
    """
    melody_ns = mm.midi_file_to_note_sequence(filename)
    melody_instrument = mm.infer_melody_for_sequence(melody_ns)
    # pylint: disable=no-member
    notes = [
        note for note in melody_ns.notes
        if note.instrument == melody_instrument
    ]
    del melody_ns.notes[:]
    melody_ns.notes.extend(sorted(notes, key=lambda note: note.start_time))
    for i in range(len(melody_ns.notes) - 1):
        melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time

    # pylint: disable=no-member

    return melody_ns
def generate():
    if FLAGS.run_dir is None:
        raise ValueError('You must specify `run_dir`!')
    train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train/')
    if FLAGS.load_model is None:
        raise ValueError('You must specify `load_model`!')
    checkpoints_dir = train_dir + FLAGS.load_model

    # configuration
    config = configs.CONFIG_MAP[FLAGS.config]
    hparams = config.hparams
    hparams.dropout_keep_prob = 1.0

    # params
    z_size = hparams.z_size
    batch_size = hparams.batch_size

    graph = tf.get_default_graph()
    with graph.as_default():
        sess = tf.Session()

        encoder = BidirectionalLstmEncoder(
            hparams, name_or_scope='vae-pg/bilstm-encoder')
        decoder_theta = LstmPolicyGradientDecoder(
            hparams, name_or_scope='vae-pg/PG-decoder')
        decoder_beta = LstmPolicyGradientDecoder(
            hparams, name_or_scope='vae-pg-copy/PG-decoder')
        dis = BidirectionalLstmDiscriminator(hparams)
        seq_vae = SeqVAE(hparams, encoder, decoder_theta, decoder_beta, dis)

        if FLAGS.mode == 'interpolate':
            if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
                raise ValueError(
                    '`--input_midi_1` and `--input_midi_2` must be specified in '
                    '`interpolate` mode.')
            logging.info('Interpolating...')
            input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
            input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
            input_1 = mm.midi_file_to_note_sequence(input_midi_1)
            input_2 = mm.midi_file_to_note_sequence(input_midi_2)

            inputs = []
            lengths = []
            for note_sequence in [input_1, input_2]:
                extracted_tensors = configs.mel_16bar_converter.to_tensors(
                    note_sequence)
                if len(extracted_tensors.inputs) > 0:
                    inputs.append(extracted_tensors.inputs[0])
                    lengths.append(extracted_tensors.lengths[0])
            inputs = np.array(inputs).astype(float)
            inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
            mu = encoder.get_mu(inputs, lengths)

        # used to sample z
        mvn = tfd.MultivariateNormalDiag(loc=[0] * z_size,
                                         scale_diag=[1] * z_size)
        z_op = mvn.sample(batch_size)
        # generate_op
        generate_op, _ = seq_vae.generate(z_op)

        saver = tf.train.Saver()
        sess.run(tf.local_variables_initializer())

        # load trained
        save_path = tf.train.latest_checkpoint(checkpoints_dir)
        logging.info('Load model from %s...' % save_path)
        saver.restore(sess, save_path)

        checkpoint = tf.train.get_checkpoint_state(checkpoints_dir)
        meta_graph_path = checkpoint.model_checkpoint_path + ".meta"
        step = int(meta_graph_path.split("-")[-1].split(".")[0])

        logging.info('Start...')

        num = FLAGS.num
        count = 0
        # generate music
        if FLAGS.mode == 'sample':
            basename = os.path.join(FLAGS.run_dir + FLAGS.output_dir,
                                    '%s-*.mid' % FLAGS.config)
            while count < num:
                f_ms = config.data_converter.to_items(sess.run(generate_op))
                for f_m in f_ms:
                    mm.sequence_proto_to_midi_file(
                        f_m, basename.replace('*', '%03d' % count))
                    count += 1
                    if count == num:
                        break
        elif FLAGS.mode == 'test':
            save_file_name = os.path.join(
                FLAGS.run_dir + FLAGS.output_dir,
                FLAGS.config + '_' + str(step // 1000) + 'k.npy')
            results = []
            while count < num:
                f_ms = sess.run(generate_op)
                for f_m in f_ms:
                    results.append(f_m)
                    count += 1
                    if count == num:
                        break
            np.save(save_file_name, results)
        elif FLAGS.mode == 'interpolate':
            mu_values = sess.run(mu)
            z = np.array([
                utils.slerp(mu_values[0], mu_values[1], t)
                for t in np.linspace(0, 1, num)
            ])
            results = sess.run(seq_vae.generate(z)[0])
            note_sequence_arr = config.data_converter.to_items(results)
            basename = os.path.join(FLAGS.run_dir + FLAGS.output_dir,
                                    'interpolate-*.mid')
            while count < num:
                mm.sequence_proto_to_midi_file(
                    note_sequence_arr[count],
                    basename.replace('*', '%03d' % count))
                count += 1

        logging.info('Done.')
Example no. 20
                        type=int,
                        default=DEFAULT_STEPS_PER_BAR)
    parser.add_argument("--num_output", type=int, default=6)
    parser.add_argument("--output_root_dir", type=str)
    return parser


if __name__ == "__main__":

    def _expanduser(_path):
        return str(Path(_path).expanduser())

    known_args, _ = get_parser().parse_known_args()
    output_root_dir = Path(known_args.output_root_dir).expanduser()
    if not output_root_dir.exists():
        output_root_dir.mkdir(parents=True)

    total_bars = known_args.num_output * known_args.num_bar_per_sample
    num_steps_per_sample = known_args.num_bar_per_sample * known_args.num_steps_per_bar
    core.interpolate(
        known_args.model_name,
        start_sequence=mm.midi_file_to_note_sequence(
            _expanduser(known_args.input_midi_1)),
        end_sequence=mm.midi_file_to_note_sequence(
            _expanduser(known_args.input_midi_2)),
        num_steps_per_sample=num_steps_per_sample,
        num_output=known_args.num_output,
        total_bars=total_bars,
        output_dir=output_root_dir,
    )
input_fn = decoding.make_input_fn_from_generator(input_generator())
uncondi_samples = estimator.predict(input_fn,
                                    checkpoint_path=uncondi_ckpt_path)

# "Burn" one.
_ = next(uncondi_samples)
print("ㅇㅣ건잘되지않아?")
targets = []
decode_length = 1024

# Generate sample events.
sample_ids = next(uncondi_samples)['outputs']

# Decode to NoteSequence.
midi_filename = decode(sample_ids, encoder=uncondi_encoders['targets'])
uncondi_ns = mm.midi_file_to_note_sequence(midi_filename)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).
note_sequence_to_midi_file(uncondi_ns, 'unconditional_piano_performance.mid')

piano_continuation('c_major_arpeggio.mid')
piano_continuation('c_major_scale.mid')
piano_continuation('clair_de_lune.mid')
piano_continuation('fur_elise.mid')
piano_continuation('moonlight_sonata.mid')
piano_continuation('prelude_in_c_major.mid')
piano_continuation('twinkle_twinkle_little_star.mid')
piano_continuation('mary_had_a_little_lamb.mid')

# ###############################################################################################
Example no. 22
twinkle_twinkle.notes.add(pitch=67, start_time=1.0, end_time=1.5, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=1.5, end_time=2.0, velocity=80)
twinkle_twinkle.notes.add(pitch=69, start_time=2.0, end_time=2.5, velocity=80)
twinkle_twinkle.notes.add(pitch=69, start_time=2.5, end_time=3.0, velocity=80)
twinkle_twinkle.notes.add(pitch=67, start_time=3.0, end_time=4.0, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
twinkle_twinkle.total_time = 8
twinkle_twinkle.tempos.add(qpm=60)

babyshark = mm.midi_file_to_note_sequence('./mid/babyshark.mid')
babyshark = mm.extract_subsequence(babyshark, 0, 8)

babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False

# This gives us a list of sequences.
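
The snippet is cut off after this comment. A hedged sketch of the step it
refers to (assuming a 2-bar melody MusicVAE TrainedModel named music_vae is
loaded; num_steps and length are illustrative):

# Hypothetical continuation: interpolate between the two 8-second melodies.
interp_seqs = music_vae.interpolate(twinkle_twinkle, babyshark,
                                    num_steps=8, length=32)
concatenated = mm.concatenate_sequences(interp_seqs)
mm.sequence_proto_to_midi_file(concatenated, 'twinkle_to_babyshark.mid')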
Example no. 23
def render_sequence_to_music_dict(midi_file,
                                  music_dict,
                                  model_string="melody_rnn"):
    sequence = mm.midi_file_to_note_sequence(midi_file)
    # scale to num steps.
    music_dict['num_steps'] = 1024 * music_dict['length']
    backup_sequence = None
    basic_models = [
        "melody_rnn", "performance_rnn", "polyphony_rnn", "pianoroll_rnn_nade"
    ]
    if model_string in basic_models:
        subsequence = mm.extract_subsequence(sequence, 0.0, C.SUBSEQUENCE_TIME)
        for note in subsequence.notes:
            # rnns can work with piano data.
            note.program = 0
            note.instrument = 1
        music_dict['sequence'] = subsequence
        if model_string == "performance_rnn":
            music_dict['num_steps'] = music_dict['num_steps'] * 4

    elif model_string == "improv_rnn" or model_string == "music_vae":
        subsequence = mm.extract_subsequence(sequence, 0.0, C.SUBSEQUENCE_TIME)
        melody = mm.infer_melody_for_sequence(subsequence)

        new_sequence = music_pb2.NoteSequence()
        backup_sequence = music_pb2.NoteSequence()
        new_val = 0.
        backup_val = 0.
        for note in subsequence.notes:
            # rnns can work with piano data.
            if note.instrument == melody:
                start = note.start_time
                end = note.end_time
                diff = end - start

                new_sequence.notes.add(pitch=note.pitch,
                                       start_time=new_val,
                                       end_time=new_val + diff,
                                       velocity=127)  # MIDI velocity caps at 127
                backup_sequence.notes.add(pitch=note.pitch,
                                          start_time=backup_val,
                                          end_time=backup_val + 0.5,
                                          velocity=127)

                new_val += diff
                backup_val += 0.5
            if model_string == "improv_rnn":
                note.program = 0
                note.instrument = 1
        new_sequence.total_time = new_val
        new_sequence.tempos.add(qpm=subsequence.tempos[0].qpm)
        backup_sequence.total_time = backup_val
        backup_sequence.tempos.add(qpm=60)
        music_dict['sequence'] = subsequence
        music_dict['backup_sequence'] = backup_sequence

    elif model_string == "music_transformer":
        # model generate will take care of things
        music_dict['sequence'] = sequence

    return music_dict
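
A hedged usage sketch (the dict key 'length' follows the function above; the
file name is illustrative):

# Hypothetical call: prepare a melody_rnn request dict from a MIDI file.
music_dict = {'length': 4}  # scaled to 4 * 1024 steps inside the function
music_dict = render_sequence_to_music_dict('upload.mid', music_dict,
                                           model_string='melody_rnn')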
Example no. 24
# Loading model
checkpoint_dir_or_path = os.path.expanduser(checkpoint_file)
model = TrainedModel(config,
                     batch_size=8,
                     checkpoint_dir_or_path=checkpoint_dir_or_path)

input_midi_1 = os.path.expanduser(input_files_list[style_name.index(
    arguments['<style1>'])])
input_midi_2 = os.path.expanduser(input_files_list[style_name.index(
    arguments['<style2>'])])
input_midi_3 = os.path.expanduser(input_files_list[style_name.index(
    arguments['<style3>'])])
input_midi_4 = os.path.expanduser(input_files_list[style_name.index(
    arguments['<style4>'])])
input_1 = mm.midi_file_to_note_sequence(input_midi_1)
input_2 = mm.midi_file_to_note_sequence(input_midi_2)
input_3 = mm.midi_file_to_note_sequence(input_midi_3)
input_4 = mm.midi_file_to_note_sequence(input_midi_4)
#_check_extract_examples(input_1, path_midi_1, 1)
#_check_extract_examples(input_2, path_midi_2, 2)

#_, mu, _ = model.encode([input_1, input_2])
z, mu, _ = model.encode([input_1, input_2, input_3, input_4])

# Get the new 'z' with the interpolation values
z_new = z[0] * float(arguments['<val1>']) + z[1] * float(
    arguments['<val2>']) + z[2] * float(arguments['<val3>']) + z[3] * float(
        arguments['<val4>'])
z_new_2 = z_new + 0.05
z_new = np.expand_dims(z_new, axis=0)
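
A hedged sketch of the step that typically follows (model.decode is the same
call used in the other MusicVAE examples; the temperature and output name are
illustrative):

# Hypothetical next step: decode the blended latent back to a NoteSequence.
results = model.decode(length=config.hparams.max_seq_len,
                       z=z_new, temperature=0.5)
mm.sequence_proto_to_midi_file(results[0], 'style_mix.mid')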
Example no. 25
def run(config_map):
  """Load model params, save config file and start trainer.

  Args:
    config_map: Dictionary mapping configuration name to Config object.

  Raises:
    ValueError: if required flags are missing or invalid.
  """
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

  if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
    raise ValueError(
        'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.')
  if FLAGS.output_dir is None:
    raise ValueError('`--output_dir` is required.')
  tf.gfile.MakeDirs(FLAGS.output_dir)
  if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':
    raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

  if FLAGS.config not in config_map:
    raise ValueError('Invalid config name: %s' % FLAGS.config)
  config = config_map[FLAGS.config]
  config.data_converter.max_tensors_per_item = None

  if FLAGS.mode == 'interpolate':
    if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
      raise ValueError(
          '`--input_midi_1` and `--input_midi_2` must be specified in '
          '`interpolate` mode.')
    input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
    input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
    if not os.path.exists(input_midi_1):
      raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
    if not os.path.exists(input_midi_2):
      raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
    input_1 = mm.midi_file_to_note_sequence(input_midi_1)
    input_2 = mm.midi_file_to_note_sequence(input_midi_2)

    def _check_extract_examples(input_ns, path, input_number):
      """Make sure each input returns exactly one example from the converter."""
      tensors = config.data_converter.to_tensors(input_ns).outputs
      if not tensors:
        print(
            'MusicVAE configs have very specific input requirements. Could not '
            'extract any valid inputs from `%s`. Try another MIDI file.' % path)
        sys.exit()
      elif len(tensors) > 1:
        basename = os.path.join(
            FLAGS.output_dir,
            '%s_input%d-extractions_%s-*-of-%03d.mid' %
            (FLAGS.config, input_number, date_and_time, len(tensors)))
        for i, ns in enumerate(config.data_converter.to_notesequences(tensors)):
          mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))
        print(
            '%d valid inputs extracted from `%s`. Outputting these potential '
            'inputs as `%s`. Call script again with one of these instead.' %
            (len(tensors), path, basename))
        sys.exit()
    logging.info(
        'Attempting to extract examples from input MIDIs using config `%s`...',
        FLAGS.config)
    _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
    _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

  logging.info('Loading model...')
  if FLAGS.run_dir:
    checkpoint_dir_or_path = os.path.expanduser(
        os.path.join(FLAGS.run_dir, 'train'))
  else:
    checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
  model = TrainedModel(
      config, batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),
      checkpoint_dir_or_path=checkpoint_dir_or_path)

  if FLAGS.mode == 'interpolate':
    logging.info('Interpolating...')
    _, mu, _ = model.encode([input_1, input_2])
    z = np.array([
        _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)])
    results = model.decode(
        length=config.hparams.max_seq_len,
        z=z,
        temperature=FLAGS.temperature)
  elif FLAGS.mode == 'sample':
    logging.info('Sampling...')
    results = model.sample(
        n=FLAGS.num_outputs,
        length=config.hparams.max_seq_len,
        temperature=FLAGS.temperature)

  basename = os.path.join(
      FLAGS.output_dir,
      '%s_%s_%s-*-of-%03d.mid' %
      (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
  logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
  for i, ns in enumerate(results):
    mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

  logging.info('Done.')
Example no. 26
    def start(self):

        init_contents()

        while True:
            self.print_controls()
            wait_for_user_input()

            if input.user_input == "empty":
                continue

            if input.user_input == "1":
                print()
                create_midi_samples()

            elif input.user_input == "2":
                print()

                input_sequence = twinkle_twinkle()
                num_steps = 128
                temperature = 1.0
                model_name = 'basic_rnn'
                generate_continue_for("twinkle_twinkle", input_sequence,
                                      model_name, num_steps, temperature)
                model_name = 'attention_rnn'
                generate_continue_for("twinkle_twinkle", input_sequence,
                                      model_name, num_steps, temperature)
                model_name = 'lookback_rnn'
                generate_continue_for("twinkle_twinkle", input_sequence,
                                      model_name, num_steps, temperature)
                model_name = 'mono_rnn'
                generate_continue_for("twinkle_twinkle", input_sequence,
                                      model_name, num_steps, temperature)

            elif input.user_input == "3":
                print()

                input_sequence = teapot()
                num_steps = 128
                temperature = 1.0
                model_name = 'basic_rnn'
                generate_continue_for("teapot", input_sequence, model_name,
                                      num_steps, temperature)
                model_name = 'attention_rnn'
                generate_continue_for("teapot", input_sequence, model_name,
                                      num_steps, temperature)
                model_name = 'lookback_rnn'
                generate_continue_for("teapot", input_sequence, model_name,
                                      num_steps, temperature)
                model_name = 'mono_rnn'
                generate_continue_for("teapot", input_sequence, model_name,
                                      num_steps, temperature)

            elif input.user_input == "4":
                print()

                if not os.path.isfile("./content/cat-mel_2bar_big.ckpt.data-00000-of-00001") \
                        or not os.path.isfile("./content/cat-mel_2bar_big.ckpt.index"):
                    print("Models for interpolation not found!")
                    return
                else:

                    model_name = 'cat-mel_2bar_big'
                    music_vae = init_music_vae(model_name)
                    num_steps = 8
                    length = 32
                    generate_interpolation_for(model_name, music_vae,
                                               num_steps, length,
                                               twinkle_twinkle(), teapot())

            elif input.user_input == "5":
                print()

                path = "./input"
                check_dir(path)
                check_dir("./output")

                for file in glob.glob(path + "/*.mid"):
                    name = os.path.splitext(os.path.basename(file))[0]
                    print(name)
                    input_sequence = mm.midi_file_to_note_sequence(file)
                    num_steps = 1028
                    temperature = 1.0
                    model_name = 'basic_rnn'
                    generate_continue_for(name, input_sequence, model_name,
                                          num_steps, temperature)
                    model_name = 'attention_rnn'
                    generate_continue_for(name, input_sequence, model_name,
                                          num_steps, temperature)
                    model_name = 'lookback_rnn'
                    generate_continue_for(name, input_sequence, model_name,
                                          num_steps, temperature)
                    model_name = 'mono_rnn'
                    generate_continue_for(name, input_sequence, model_name,
                                          num_steps, temperature)

            elif input.user_input == "6":
                print("6 input")

                path = "./input"
                check_dir(path)
                check_dir("./output")

                for file in glob.glob(path + "/*.mid"):
                    name = os.path.splitext(os.path.basename(file))[0]
                    print(name)
                    input_sequence = mm.midi_file_to_note_sequence(file)
                    num_steps = 1028
                    temperature = 1.0
                    model_name = 'polyphony'
                    polyphony_rnn = initialisation_polyphony(model_name)
                    generate_continue_for_polyphony(input_sequence, model_name,
                                                    name, num_steps,
                                                    polyphony_rnn, temperature)

            # elif input.user_input == "7":
            #     print("7 input")
            # elif input.user_input == "8":
            #     print("8 input")
            # elif input.user_input == "9":
            #     print("9 input")
            elif input.user_input == "0":
                print()
                print_versions()
            elif input.user_input == "esc":
                print("ESC pressed, exit")
                break
Example no. 27
#@markdown Because we use a 
#@markdown [representation](http://g.co/magenta/performance-rnn)
#@markdown where each event corresponds to a variable amount of
#@markdown time, the actual number of seconds generated may vary.

targets = []
decode_length = 1024

# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']

# Decode to NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=unconditional_encoders['targets'])
unconditional_ns = mm.midi_file_to_note_sequence(midi_filename)

# Play and plot.
mm.play_sequence(
    unconditional_ns,
    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(unconditional_ns)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).

mm.sequence_proto_to_midi_file(
    unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')

#@title Choose Priming Sequence
Example no. 28
def run(config_map):
    """Load model params, save config file and start trainer.

  Args:
    config_map: Dictionary mapping configuration name to Config object.

  Raises:
    ValueError: if required flags are missing or invalid.
  """
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

    if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
        raise ValueError(
            'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.'
        )
    if FLAGS.output_dir is None:
        raise ValueError('`--output_dir` is required.')
    tf.gfile.MakeDirs(FLAGS.output_dir)
    if FLAGS.mode not in ('sample', 'interpolate', 'isample'):
        raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

    if FLAGS.config not in config_map:
        raise ValueError('Invalid config name: %s' % FLAGS.config)
    config = config_map[FLAGS.config]
    config.data_converter.max_tensors_per_item = None

    if FLAGS.mode == 'interpolate':
        if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
            raise ValueError(
                '`--input_midi_1` and `--input_midi_2` must be specified in '
                '`interpolate` mode.')
        input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
        input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
        if not os.path.exists(input_midi_1):
            raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
        if not os.path.exists(input_midi_2):
            raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
        input_1 = mm.midi_file_to_note_sequence(input_midi_1)
        input_2 = mm.midi_file_to_note_sequence(input_midi_2)

        def _check_extract_examples(input_ns, path, input_number):
            """Make sure each input returns exactly one example from the converter."""
            tensors = config.data_converter.to_tensors(input_ns).outputs
            if not tensors:
                print(
                    'MusicVAE configs have very specific input requirements. Could not '
                    'extract any valid inputs from `%s`. Try another MIDI file.'
                    % path)
                sys.exit()
            elif len(tensors) > 1:
                basename = os.path.join(
                    FLAGS.output_dir,
                    '%s_input%d-extractions_%s-*-of-%03d.mid' %
                    (FLAGS.config, input_number, date_and_time, len(tensors)))
                for i, ns in enumerate(
                        config.data_converter.to_notesequences(tensors)):
                    mm.sequence_proto_to_midi_file(
                        ns, basename.replace('*', '%03d' % i))
                print(
                    '%d valid inputs extracted from `%s`. Outputting these potential '
                    'inputs as `%s`. Call script again with one of these instead.'
                    % (len(tensors), path, basename))
                sys.exit()

        logging.info(
            'Attempting to extract examples from input MIDIs using config `%s`...',
            FLAGS.config)
        _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
        _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

    logging.info('Loading model...')
    if FLAGS.run_dir:
        checkpoint_dir_or_path = os.path.expanduser(
            os.path.join(FLAGS.run_dir, 'train'))
    else:
        checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
    model = TrainedModel(config,
                         batch_size=min(FLAGS.max_batch_size,
                                        FLAGS.num_outputs),
                         checkpoint_dir_or_path=checkpoint_dir_or_path)

    if FLAGS.mode == 'interpolate':
        logging.info('Interpolating...')
        _, mu, _ = model.encode([input_1, input_2])
        z = np.array([
            _slerp(mu[0], mu[1], t)
            for t in np.linspace(0, 1, FLAGS.num_outputs)
        ])
        results = model.decode(length=config.hparams.max_seq_len,
                               z=z,
                               temperature=FLAGS.temperature)
    elif FLAGS.mode == 'sample':
        logging.info('Sampling...')
        results = model.sample(n=FLAGS.num_outputs,
                               length=config.hparams.max_seq_len,
                               temperature=FLAGS.temperature)
    elif FLAGS.mode == 'isample':
        assert FLAGS.input_image is not None, 'Provide an image to sample from'
        assert FLAGS.input_midi_1 is not None, 'Provide a music to sample from'
        logging.info('Sampling z from image vae...')
        img = cv2.imread(FLAGS.input_image) / 255.
        img = np.asarray(cv2.resize(img, (320, 240)))
        img = np.expand_dims(img, axis=0)
        print(img.shape)
        latent = None
        input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
        input_1 = mm.midi_file_to_note_sequence(input_midi_1)

        def _check_extract_examples(input_ns, path, input_number):
            """Make sure each input returns exactly one example from the converter."""
            tensors = config.data_converter.to_tensors(input_ns).outputs
            if not tensors:
                print(
                    'MusicVAE configs have very specific input requirements. Could not '
                    'extract any valid inputs from `%s`. Try another MIDI file.'
                    % path)
                sys.exit()
            elif len(tensors) > 1:
                basename = os.path.join(
                    FLAGS.output_dir,
                    '%s_input%d-extractions_%s-*-of-%03d.mid' %
                    (FLAGS.config, input_number, date_and_time, len(tensors)))
                for i, ns in enumerate(
                        config.data_converter.to_notesequences(tensors)):
                    mm.sequence_proto_to_midi_file(
                        ns, basename.replace('*', '%03d' % i))
                print(
                    '%d valid inputs extracted from `%s`. Outputting these potential '
                    'inputs as `%s`. Call script again with one of these instead.'
                    % (len(tensors), path, basename))
                sys.exit()

        logging.info(
            'Attempting to extract examples from input MIDIs using config `%s`...',
            FLAGS.config)

        _check_extract_examples(input_1, FLAGS.input_midi_1, 1)

        with model._sess as sess:
            z_music, mu_music, sigma_music = model.encode([input_1])
            dataset = tf.data.Dataset.from_tensors(img.astype(np.float32))
            img = dataset.repeat().make_one_shot_iterator().get_next()
            mu, sigma = model.vae.encode(img, config.hparams)
            mu = mu.eval()
            sigma = sigma.eval()
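            # Build a combined latent: shift the image posterior's mean by the
            # music encoding's mean, widen its scale likewise, then sample z.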
            latent = ds.MultivariateNormalDiag(loc=mu + mu_music,
                                               scale_diag=sigma +
                                               sigma_music).sample().eval()
            results = model.decode(length=config.hparams.max_seq_len,
                                   z=latent,
                                   temperature=FLAGS.temperature)
            print(results)

    basename = os.path.join(
        FLAGS.output_dir, '%s_%s_%s-*-of-%03d.mid' %
        (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
    logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
    for i, ns in enumerate(results):
        mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

    logging.info('Done.')
Example no. 29
def get_parser(_=None):
    parser = argparse.ArgumentParser("vae_groove")
    parser.add_argument("--model_name", type=str, default="groovae_2bar_humanize")
    parser.add_argument("--input_midi", type=str)
    parser.add_argument("--num_bar_per_sample", type=int, default=2)
    parser.add_argument("--num_steps_per_bar", type=int, default=DEFAULT_STEPS_PER_BAR)
    parser.add_argument("--num_output", type=int, default=6)
    parser.add_argument("--output_root_dir", type=str)
    return parser


if __name__ == "__main__":
    known_args, _ = get_parser().parse_known_args()
    output_root_dir = Path(known_args.output_root_dir).expanduser()
    if not output_root_dir.exists():
        output_root_dir.mkdir(parents=True)

    total_bars = known_args.num_output * known_args.num_bar_per_sample
    num_steps_per_sample = known_args.num_bar_per_sample * known_args.num_steps_per_bar
    core.groove(
        known_args.model_name,
        interpolate_sequence=mm.midi_file_to_note_sequence(
            str(Path(known_args.input_midi).expanduser())
        ),
        num_steps_per_sample=num_steps_per_sample,
        num_output=known_args.num_output,
        total_bars=total_bars,
        output_dir=output_root_dir,
    )
Example no. 30
def music_generator(primer='erik_gnossienne',
                    primer_begin_buffer=10,
                    primer_length=90,
                    output_path='.',
                    filename='./public/output'):
    SF2_PATH = './models/Yamaha-C5-Salamander-JNv5.1.sf2'
    SAMPLE_RATE = 16000

    # Upload a MIDI file and convert to NoteSequence.
    def upload_midi():
        data = list(files.upload().values())
        if len(data) > 1:
            print('Multiple files uploaded; using only one.')
        return mm.midi_to_note_sequence(data[0])

    # Decode a list of IDs.
    def decode(ids, encoder):
        ids = list(ids)
        if text_encoder.EOS_ID in ids:
            ids = ids[:ids.index(text_encoder.EOS_ID)]
        return encoder.decode(ids)

    model_name = 'transformer'
    hparams_set = 'transformer_tpu'
    ckpt_path = './models/checkpoints/unconditional_model_16.ckpt'

    class PianoPerformanceLanguageModelProblem(score2perf.Score2PerfProblem):
        @property
        def add_eos_symbol(self):
            return True

    problem = PianoPerformanceLanguageModelProblem()
    unconditional_encoders = problem.get_feature_encoders()

    # Set up HParams.
    hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
    trainer_lib.add_problem_hparams(hparams, problem)
    hparams.num_hidden_layers = 16
    hparams.sampling_method = 'random'

    # Set up decoding HParams.
    decode_hparams = decoding.decode_hparams()
    decode_hparams.alpha = 0.0
    decode_hparams.beam_size = 1

    # Create Estimator.
    run_config = trainer_lib.create_run_config(hparams)
    estimator = trainer_lib.create_estimator(model_name,
                                             hparams,
                                             run_config,
                                             decode_hparams=decode_hparams)

    # These values are reassigned later in this function.
    targets = []
    decode_length = 0

    # Create input generator (so we can adjust priming and
    # decode length on the fly).
    def input_generator():
        # Read the priming targets and decode length from the enclosing
        # function scope (they are reassigned after the primer is encoded).
        nonlocal targets
        nonlocal decode_length
        while True:
            yield {
                'targets': np.array([targets], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32)
            }

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    unconditional_samples = estimator.predict(input_fn,
                                              checkpoint_path=ckpt_path)

    # "Burn" one.
    _ = next(unconditional_samples)

    filenames = {
        'C major arpeggio': './models/primers/c_major_arpeggio.mid',
        'C major scale': './models/primers/c_major_scale.mid',
        'Clair de Lune': './models/primers/clair_de_lune.mid',
        'Classical':
        'audio_midi/Classical_Piano_piano-midi.de_MIDIRip/bach/bach_846_format0.mid',
        'erik_gymnopedie': 'audio_midi/erik_satie/gymnopedie_1_(c)oguri.mid',
        'erik_gymnopedie_2': 'audio_midi/erik_satie/gymnopedie_2_(c)oguri.mid',
        'erik_gymnopedie_3': 'audio_midi/erik_satie/gymnopedie_3_(c)oguri.mid',
        'erik_gnossienne': 'audio_midi/erik_satie/gnossienne_1_(c)oguri.mid',
        'erik_gnossienne_2': 'audio_midi/erik_satie/gnossienne_2_(c)oguri.mid',
        'erik_gnossienne_3': 'audio_midi/erik_satie/gnossienne_3_(c)oguri.mid',
        'erik_gnossienne_dery':
        'audio_midi/erik_satie/gnossienne_1_(c)dery.mid',
        'erik_gnossienne_dery_2':
        'audio_midi/erik_satie/gnossienne_2_(c)dery.mid',
        'erik_gnossienne_dery_3':
        'audio_midi/erik_satie/gnossienne_3_(c)dery.mid',
        'erik_gnossienne_dery_5':
        'audio_midi/erik_satie/gnossienne_5_(c)dery.mid',
        'erik_gnossienne_dery_6':
        'audio_midi/erik_satie/gnossienne_6_(c)dery.mid',
        '1': 'audio_midi/erik_satie/1.mid',
        '2': 'audio_midi/erik_satie/2.mid',
        '3': 'audio_midi/erik_satie/3.mid',
        '4': 'audio_midi/erik_satie/4.mid',
        '5': 'audio_midi/erik_satie/5.mid',
        '6': 'audio_midi/erik_satie/6.mid',
        '7': 'audio_midi/erik_satie/7.mid',
        '8': 'audio_midi/erik_satie/8.mid',
        '9': 'audio_midi/erik_satie/9.mid',
        '10': 'audio_midi/erik_satie/10.mid',
    }
    # `primer` is expected to be one of the keys in `filenames` above
    # (e.g. 'C major scale'); the original notebook also offered an
    # "Upload your own!" option here.
    primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
    # Handle sustain pedal in the primer.
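    # (Sustain-pedal CC 64 events are folded in by extending note durations,
    # so the event encoding does not need explicit pedal events.)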
    primer_ns = mm.apply_sustain_control_changes(primer_ns)

    # Trim to desired number of seconds.
    max_primer_seconds = primer_length
    if primer_ns.total_time > max_primer_seconds:
        print('Primer is longer than %d seconds, truncating.' %
              max_primer_seconds)
        primer_ns = mm.extract_subsequence(
            primer_ns, primer_begin_buffer,
            max_primer_seconds + primer_begin_buffer)

    # Remove drums from primer if present.
    if any(note.is_drum for note in primer_ns.notes):
        print('Primer contains drums; they will be removed.')
        notes = [note for note in primer_ns.notes if not note.is_drum]
        del primer_ns.notes[:]
        primer_ns.notes.extend(notes)

    # Set primer instrument and program.
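    # (General MIDI program 0 is Acoustic Grand Piano.)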
    for note in primer_ns.notes:
        note.instrument = 1
        note.program = 0

    ## Play and plot the primer.
    #mm.play_sequence(
    #    primer_ns,
    #    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    #mm.plot_sequence(primer_ns)
    mm.sequence_proto_to_midi_file(
        primer_ns, join(output_path, 'primer_{}.mid'.format(filename)))

    targets = unconditional_encoders['targets'].encode_note_sequence(primer_ns)

    # Remove the end token from the encoded primer.
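    # (Dropping EOS makes the model treat the primer as an unfinished
    # performance to continue, rather than a completed one.)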
    targets = targets[:-1]

    # NOTE: the decode budget of 10000 exceeds the 4096-event limit that the
    # check below warns about, so generation may run past the sequence length
    # the model was trained on.
    decode_length = max(0, 10000 - len(targets))
    if len(targets) >= 4096:
        print('Primer has more events than maximum sequence length; '
              'nothing will be generated.')

    # Generate sample events.
    sample_ids = next(unconditional_samples)['outputs']

    # Decode to NoteSequence.
    midi_filename = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
    ns = mm.midi_file_to_note_sequence(midi_filename)
    print('Sample IDs: {}'.format(sample_ids))
    print('Sample IDs length: {}'.format(len(sample_ids)))
    print('Encoder: {}'.format(unconditional_encoders['targets']))
    print('Unconditional Samples: {}'.format(unconditional_samples))
    # print('{}'.format(ns))

    # The sampled output is used as-is here; uncomment to prepend the primer.
    # continuation_ns = mm.concatenate_sequences([primer_ns, ns])
    continuation_ns = ns
    # mm.play_sequence(
    #     continuation_ns,
    #     synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
    # mm.plot_sequence(continuation_ns)
    # Synthesize the result to raw audio with FluidSynth.
    audio = mm.fluidsynth(continuation_ns,
                          sample_rate=SAMPLE_RATE,
                          sf2_path=SF2_PATH)

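    # Scale FluidSynth's float waveform (roughly in [-1, 1]) to 16-bit PCM.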
    normalizer = float(np.iinfo(np.int16).max)
    array_of_ints = np.array(np.asarray(audio) * normalizer, dtype=np.int16)

    wavfile.write(join(output_path, filename + '.wav'), SAMPLE_RATE,
                  array_of_ints)
    print('[+] Output stored as {}'.format(filename + '.wav'))
    mm.sequence_proto_to_midi_file(
        continuation_ns,
        join(output_path, 'continuation_{}.mid'.format(filename)))
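
Because estimator.predict here is fed by an endless generator, re-priming is just a matter of reassigning `targets` and `decode_length` before pulling the next prediction; the TensorFlow graph is built only once. A minimal sketch under that assumption (the wrapper name `sample_continuations` is hypothetical, and `targets`, `decode_length`, `unconditional_encoders`, `unconditional_samples`, and `decode` are assumed to be in scope as set up in the snippet above):

def sample_continuations(primer_ns, n=3, budget=4096):
    # Hypothetical helper, not from the source. If this lives inside the same
    # enclosing function as the snippet above, declare these `nonlocal`
    # instead of `global`.
    global targets, decode_length
    sequences = []
    for _ in range(n):
        # Encode the primer and drop its EOS token, as above.
        targets = unconditional_encoders['targets'].encode_note_sequence(
            primer_ns)[:-1]
        decode_length = max(0, budget - len(targets))
        # Each next() yields a fresh sample without rebuilding the graph.
        sample_ids = next(unconditional_samples)['outputs']
        midi_path = decode(sample_ids,
                           encoder=unconditional_encoders['targets'])
        sequences.append(mm.midi_file_to_note_sequence(midi_path))
    return sequences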
Esempio n. 31
0
def run(config_map):
    """Load model params, save config file and start trainer.

    Args:
      config_map: Dictionary mapping configuration name to Config object.

    Raises:
      ValueError: if required flags are missing or invalid.
    """
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

    if (FLAGS.run_dir is None) == (FLAGS.checkpoint_file is None):
        raise ValueError(
            'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.'
        )
    if FLAGS.output_dir is None:
        raise ValueError('`--output_dir` is required.')
    tf.gfile.MakeDirs(FLAGS.output_dir)
    if FLAGS.mode not in ('sample', 'interpolate'):
        raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

    if FLAGS.config not in config_map:
        raise ValueError('Invalid config name: %s' % FLAGS.config)
    config = config_map[FLAGS.config]
    config.data_converter.max_tensors_per_item = None
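    # (None removes the cap on how many tensors the converter may extract per
    # input sequence.)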

    if FLAGS.mode == 'interpolate':
        if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
            raise ValueError(
                '`--input_midi_1` and `--input_midi_2` must be specified in '
                '`interpolate` mode.')
        input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
        input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
        if not os.path.exists(input_midi_1):
            raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
        if not os.path.exists(input_midi_2):
            raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
        input_1 = mm.midi_file_to_note_sequence(input_midi_1)
        input_2 = mm.midi_file_to_note_sequence(input_midi_2)

        def _check_extract_examples(input_ns, path, input_number):
            """Make sure each input returns exactly one example from the converter."""
            tensors = config.data_converter.to_tensors(input_ns).outputs
            if not tensors:
                print(
                    'MusicVAE configs have very specific input requirements. Could not '
                    'extract any valid inputs from `%s`. Try another MIDI file.'
                    % path)
                sys.exit()
            elif len(tensors) > 1:
                basename = os.path.join(
                    FLAGS.output_dir,
                    '%s_input%d-extractions_%s-*-of-%03d.mid' %
                    (FLAGS.config, input_number, date_and_time, len(tensors)))
                for i, ns in enumerate(
                        config.data_converter.from_tensors(tensors)):
                    mm.sequence_proto_to_midi_file(
                        ns, basename.replace('*', '%03d' % i))
                print(
                    '%d valid inputs extracted from `%s`. Outputting these potential '
                    'inputs as `%s`. Call script again with one of these instead.'
                    % (len(tensors), path, basename))
                sys.exit()

        logging.info(
            'Attempting to extract examples from input MIDIs using config `%s`...',
            FLAGS.config)
        _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
        _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

    logging.info('Loading model...')
    if FLAGS.run_dir:
        checkpoint_dir_or_path = os.path.expanduser(
            os.path.join(FLAGS.run_dir, 'train'))
    else:
        checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
    model = TrainedModel(config,
                         batch_size=min(FLAGS.max_batch_size,
                                        FLAGS.num_outputs),
                         checkpoint_dir_or_path=checkpoint_dir_or_path)

    if FLAGS.mode == 'interpolate':
        logging.info('Interpolating...')
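        # Encode both inputs, spherically interpolate between the two latent
        # means to get num_outputs evenly spaced codes, then decode each one.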
        _, mu, _ = model.encode([input_1, input_2])
        z = np.array([
            _slerp(mu[0], mu[1], t)
            for t in np.linspace(0, 1, FLAGS.num_outputs)
        ])
        results = model.decode(length=config.hparams.max_seq_len,
                               z=z,
                               temperature=FLAGS.temperature)
    elif FLAGS.mode == 'sample':
        logging.info('Sampling...')
        results = model.sample(n=FLAGS.num_outputs,
                               length=config.hparams.max_seq_len,
                               temperature=FLAGS.temperature)

    basename = os.path.join(
        FLAGS.output_dir, '%s_%s_%s-*-of-%03d.mid' %
        (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))
    logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
    for i, ns in enumerate(results):
        mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

    logging.info('Done.')
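
run() calls a _slerp helper that is defined elsewhere in the script. For reference, a standard spherical-linear-interpolation sketch that matches the call signature used above (the script's actual helper may differ in details):

import numpy as np

def _slerp(p0, p1, t):
    """Spherical linear interpolation between two latent vectors."""
    # Clip the cosine to [-1, 1] to guard against floating-point drift.
    cos_omega = np.clip(np.dot(p0 / np.linalg.norm(p0),
                               p1 / np.linalg.norm(p1)), -1.0, 1.0)
    omega = np.arccos(cos_omega)
    so = np.sin(omega)
    if so == 0.0:  # (anti)parallel vectors: fall back to linear interpolation
        return (1.0 - t) * p0 + t * p1
    return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1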