Example #1
0
def main():
    """Generate a MIDI file by continuing a short primer with a saved model."""
    model_dir = 'result/result_model'
    output_midi = 'generated.mid'
    target_length = 2048

    # Restore the trained decoder from its checkpoint directory.
    model = MusicTransformerDecoder(loader_path=model_dir)

    # Use the first 20 encoded events of the test piece as the primer.
    primer = encode_midi('Midiset/test.mid')[:20]

    generated = model.generate(primer, length=target_length)
    decode_midi(generated, file_path=output_midi)
Example #2
0
def preprocess_midi_files_under(midi_root, save_dir):
    """Encode every MIDI file found under *midi_root* and pickle each result
    into *save_dir* as '<second path component>.pickle'.

    Files whose encoding raises ``EOFError`` are reported and skipped.
    """
    midi_paths = list(find_midi_files(midi_root))
    os.makedirs(save_dir, exist_ok=True)

    for path in Bar('Processing').iter(midi_paths):
        print(' ', end='[{}]'.format(path), flush=True)

        try:
            data = encode_midi(path)
        except EOFError:
            # BUG FIX: the original fell through after a failed encode and
            # pickled an undefined (or stale, from the previous iteration)
            # `data`. Skip this file instead.
            print('EOF Error')
            continue

        # NOTE(review): path.split('/')[1] keeps only the *second* path
        # component and is POSIX-only; os.path.basename(path) looks like the
        # intent — kept as-is to preserve the output file names.
        with open('{}/{}.pickle'.format(save_dir,
                                        path.split('/')[1]), 'wb') as f:
            pickle.dump(data, f)
def encode_midi_files(midi_dir_path, save_dir_path, extension):
  """Encode every file in *midi_dir_path* whose name ends with one of
  *extension* and pickle each result into *save_dir_path* as '<name>.encoded'.

  KeyboardInterrupt aborts the whole run; a file raising ``EOFError`` is
  reported and skipped.
  """
  # create directory for saving files
  os.makedirs(save_dir_path, exist_ok=True)
  # get all midi files from midi_directory
  for file in os.listdir(midi_dir_path):
    if file.endswith(tuple(extension)):
        # BUG FIX: the original printed os.path.join(...) but encoded
        # `midi_dir_path+file`, which breaks when the directory path has no
        # trailing separator. Join once and use it everywhere.
        in_path = os.path.join(midi_dir_path, file)
        print(in_path)
        print(file + ' is being processed', flush=True)
        try:
          encoded_file = encode_midi(in_path)
        except KeyboardInterrupt:
            print(' Stopped by keyboard')
            return
        except EOFError:
            # BUG FIX: was `return` — one corrupt file aborted the whole
            # batch; skip just that file.
            print('EOF Error')
            continue
        # Same concatenation fix on the output path.
        with open(os.path.join(save_dir_path, file + '.encoded'), 'wb') as f:
            pickle.dump(encoded_file, f)
Example #4
0
from model import MusicTransformerDecoder
import datetime
from processor import decode_midi, encode_midi


# --- Generation script: restore a saved decoder and synthesize a MIDI file ---

# Configuration.
max_seq = 1024
length = 1024
load_path = '.\\saved_model'
save_path = 'generated.mid'

# Load the trained model and encode the primer piece.
mt = MusicTransformerDecoder(loader_path=load_path)
inputs = encode_midi('dataset/midi/test.mid')

# Continue the first 20 primer events out to `length` tokens
# (with attention visualization enabled), then write the result.
result = mt.generate(inputs[:20], length=length, vis_enable=True)
decode_midi(result, file_path=save_path)

def prep_midi(maestro_root, output_dir):
    """
    ----------
    Author: Damon Gwinn
    ----------
    Pre-processes the maestro dataset, putting processed midi data (train, eval, test) into the
    given output folder

    Returns True on success, False if the maestro metadata file is missing or
    a piece has an unrecognized split type.
    ----------
    """

    # One output sub-folder per dataset split.
    train_dir = os.path.join(output_dir, "train")
    os.makedirs(train_dir, exist_ok=True)
    val_dir = os.path.join(output_dir, "val")
    os.makedirs(val_dir, exist_ok=True)
    test_dir = os.path.join(output_dir, "test")
    os.makedirs(test_dir, exist_ok=True)

    maestro_json_file = os.path.join(maestro_root, JSON_FILE)
    if (not os.path.isfile(maestro_json_file)):
        print("ERROR: Could not find file:", maestro_json_file)
        return False

    # FIX: close the metadata file instead of leaking the handle
    # (was json.load(open(...))).
    with open(maestro_json_file, "r") as json_in:
        maestro_json = json.load(json_in)
    print("Found", len(maestro_json), "pieces")
    print("Preprocessing...")

    total_count = 0
    train_count = 0
    val_count = 0
    test_count = 0

    for piece in maestro_json:
        mid = os.path.join(maestro_root, piece["midi_filename"])
        split_type = piece["split"]
        # os.path.basename is separator-agnostic; for the maestro metadata's
        # POSIX-style paths it matches the original split("/")[-1].
        f_name = os.path.basename(mid) + ".pickle"

        if (split_type == "train"):
            o_file = os.path.join(train_dir, f_name)
            train_count += 1
        elif (split_type == "validation"):
            o_file = os.path.join(val_dir, f_name)
            val_count += 1
        elif (split_type == "test"):
            o_file = os.path.join(test_dir, f_name)
            test_count += 1
        else:
            print("ERROR: Unrecognized split type:", split_type)
            return False

        prepped = midi_processor.encode_midi(mid)

        # FIX: guarantee the output stream is closed even if pickling raises
        # (was a bare open/close pair).
        with open(o_file, "wb") as o_stream:
            pickle.dump(prepped, o_stream)

        total_count += 1
        if (total_count % 50 == 0):
            print(total_count, "/", len(maestro_json))

    print("Num Train:", train_count)
    print("Num Val:", val_count)
    print("Num Test:", test_count)
    return True
Example #6
0
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Generates music from a model specified by command line arguments
    ----------
    """

    args = parse_generate_args()
    print_generate_args(args)

    if (args.force_cpu):
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")

    os.makedirs(args.output_dir, exist_ok=True)

    # Grabbing dataset if needed
    _, _, dataset = create_epiano_datasets(args.midi_root,
                                           args.num_prime,
                                           random_seq=False)

    # Can be None, an integer index to dataset, or a file path
    if (args.primer_file is None):
        f = str(random.randrange(len(dataset)))
    else:
        f = args.primer_file

    if (f.isdigit()):
        # Primer given as a dataset index.
        idx = int(f)
        primer, _ = dataset[idx]
        primer = primer.to(get_device())

        print("Using primer index:", idx, "(", dataset.data_files[idx], ")")

    else:
        # Primer given as a MIDI file path.
        raw_mid = encode_midi(f)
        if (len(raw_mid) == 0):
            print("Error: No midi messages in primer file:", f)
            return

        primer, _ = process_midi(raw_mid, args.num_prime, random_seq=False)
        primer = torch.tensor(primer,
                              dtype=TORCH_LABEL_TYPE,
                              device=get_device())

        print("Using primer file:", f)

    model = MusicTransformer(n_layers=args.n_layers,
                             num_heads=args.num_heads,
                             d_model=args.d_model,
                             dim_feedforward=args.dim_feedforward,
                             max_sequence=args.max_sequence,
                             rpr=args.rpr).to(get_device())

    # BUG FIX: map_location lets GPU-saved weights load on a CPU-only host
    # (including the --force_cpu path above); without it torch.load raises.
    model.load_state_dict(torch.load(args.model_weights,
                                     map_location=get_device()))

    # Saving primer first
    f_path = os.path.join(args.output_dir, "primer.mid")
    decode_midi(primer[:args.num_prime].cpu().numpy(), file_path=f_path)

    # GENERATION (inference only — gradients disabled)
    model.eval()
    with torch.set_grad_enabled(False):
        if (args.beam > 0):
            print("BEAM:", args.beam)
            beam_seq = model.generate(primer[:args.num_prime],
                                      args.target_seq_length,
                                      beam=args.beam)

            f_path = os.path.join(args.output_dir, "beam.mid")
            decode_midi(beam_seq[0].cpu().numpy(), file_path=f_path)
        else:
            print("RAND DIST")
            rand_seq = model.generate(primer[:args.num_prime],
                                      args.target_seq_length,
                                      beam=0)

            f_path = os.path.join(args.output_dir, "rand.mid")
            decode_midi(rand_seq[0].cpu().numpy(), file_path=f_path)