def __init__(self, root, verbose=False):
    """Build an in-memory dataset from every ``.data`` file under *root*.

    Each file is expected to hold a ``(eventseq, controlseq)`` pair saved
    with ``torch.save``; the control sequence is stored compressed and is
    expanded on load.

    Args:
        root: directory to scan recursively for ``.data`` files.
        verbose: when True, wrap the file iteration in a progress bar.
    """
    assert os.path.isdir(root), root
    found = utils.find_files_by_extensions(root, ['.data'])

    self.root = root
    self.samples = []   # list of (eventseq, controlseq) pairs
    self.seqlens = []   # length of each event sequence, parallel to samples

    # Optionally show progress; Bar needs a concrete list to know the total.
    iterable = Bar(root).iter(list(found)) if verbose else found
    for sample_path in iterable:
        events, compressed = torch.load(sample_path)
        controls = ControlSeq.recover_compressed_array(compressed)
        # Events and controls must stay aligned one-to-one.
        assert len(events) == len(controls)
        self.samples.append((events, controls))
        self.seqlens.append(len(events))

    # NOTE(review): np.mean of an empty list yields nan with a warning if
    # no .data files exist under root — presumably callers guarantee a
    # non-empty dataset; confirm.
    self.avglen = np.mean(self.seqlens)
def main(_):
    """Sample an event sequence from a trained CharRNN checkpoint and
    write it to a MIDI file under ``output/``.

    The priming events and (compressed) control sequence are loaded from
    ``FLAGS.control``, which may be a single ``.data`` file or a directory
    from which one file is chosen at random.
    """
    events = None  # priming event sequence, loaded from the control file
    max_len = FLAGS.max_length

    if os.path.isfile(FLAGS.control) or os.path.isdir(FLAGS.control):
        control_path = FLAGS.control
        if os.path.isdir(control_path):
            # Pick one data file at random from the directory.
            files = list(utils.find_files_by_extensions(control_path))
            assert len(files) > 0, 'no file in "{control}"'.format(
                control=control_path)
            control_path = np.random.choice(files)
        # Fix: load the chosen file. The original always loaded
        # FLAGS.control, so when a directory was given the randomly
        # chosen file was ignored and torch.load received a directory
        # path and failed.
        events, compressed_controls = torch.load(control_path)
        controls = ControlSeq.recover_compressed_array(compressed_controls)
        if FLAGS.max_length == 0:
            max_len = controls.shape[0]
        # Removed dead code: the original built
        # np.expand_dims(controls, 1).repeat(1, 1) into `control` and then
        # immediately overwrote it with a description string that was
        # never read — a leftover from the torch-based variant of this
        # routine.

    assert max_len > 0, \
        'either max length or control sequence length should be given'
    # Fail with a clear message instead of a NameError at model.sample()
    # when FLAGS.control is neither a file nor a directory.
    assert events is not None, \
        'control file "{control}" not found; it is required for priming'.format(
            control=FLAGS.control)

    # FLAGS.start_string = FLAGS.start_string.decode('utf-8')

    if os.path.isdir(FLAGS.checkpoint_path):
        # A directory was given: resolve it to the latest checkpoint file.
        FLAGS.checkpoint_path = \
            tf.train.latest_checkpoint(FLAGS.checkpoint_path)

    model = CharRNN(EventSeq.dim(), ControlSeq.dim(),
                    sampling=True,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.sess.run(tf.global_variables_initializer())
    model.load(FLAGS.checkpoint_path)

    # Prime the sampler with the first 100 loaded events, then draw 1000
    # steps from the model.
    outputs = model.sample(1000, prime=events[0:100],
                           vocab_size=EventSeq.dim())
    outputs = outputs.reshape([-1, 1])
    print(outputs)

    name = 'output-{i:03d}.mid'.format(i=0)
    path = os.path.join("output/", name)
    n_notes = utils.event_indeces_to_midi_file(outputs[:, 0], path)
    print('===> {path} ({n_notes} notes)'.format(path=path, n_notes=n_notes))
# NOTE(review): this is a fragment of a larger generation routine — the
# enclosing function (which defines use_beam_search, greedy_ratio,
# beam_size, sess_path, control, max_len, batch_size and device) is not
# visible here, and the final `else` branch may continue beyond this span.

# For display/logging purposes, mark whichever sampling knob is unused.
if use_beam_search:
    greedy_ratio = 'DISABLED'
else:
    beam_size = 'DISABLED'

assert os.path.isfile(sess_path), f'"{sess_path}" is not a file'

if control is not None:
    if os.path.isfile(control) or os.path.isdir(control):
        # `control` is a path: load a saved (events, compressed controls)
        # pair; a directory means "pick one contained file at random".
        if os.path.isdir(control):
            files = list(utils.find_files_by_extensions(control))
            assert len(files) > 0, f'no file in "{control}"'
            control = np.random.choice(files)
        _, compressed_controls = torch.load(control)
        controls = ControlSeq.recover_compressed_array(compressed_controls)
        if max_len == 0:
            # No explicit limit: generate as long as the control sequence.
            max_len = controls.shape[0]
        controls = torch.tensor(controls, dtype=torch.float32)
        # Broadcast the single control sequence across the whole batch:
        # (seq_len, dim) -> (seq_len, batch_size, dim) on the target device.
        controls = controls.unsqueeze(1).repeat(1, batch_size, 1).to(device)
        # Repurpose `control` as a human-readable description of the source.
        control = f'control sequence from "{control}"'
    else:
        # `control` is an inline spec: "<pitch_histogram>;<note_density>",
        # where the histogram is a comma-separated list of 12 weights.
        pitch_histogram, note_density = control.split(';')
        pitch_histogram = list(filter(len, pitch_histogram.split(',')))
        if len(pitch_histogram) == 0:
            # Empty histogram spec: fall back to a uniform distribution.
            pitch_histogram = np.ones(12) / 12
        else:
            pitch_histogram = np.array(list(map(float, pitch_histogram)))
            assert pitch_histogram.size == 12
            assert np.all(pitch_histogram >= 0)