def trainClassifier(m, realFilesDir, genFilesDir):
    # Reading real music and generating features for training
    numSamples = 100
    realFiles = multi_training.loadPieces(realFilesDir)
    i, o = zip(*[multi_training.getPieceSegment(realFiles) for _ in range(numSamples)])
    i = np.array(i)
    realFeat = []
    for row in range(len(i)):
        realFeat.append(i[row, :].flatten())
    realFeat = np.array(realFeat)

    # Reading generated music and generating features for training
    genFiles = multi_training.loadPieces(genFilesDir)
    i, o = zip(*[multi_training.getPieceSegment(genFiles) for _ in range(numSamples)])
    i = np.array(i)
    genFeat = []
    for row in range(len(i)):
        genFeat.append(i[row, :].flatten())
    genFeat = np.array(genFeat)

    # Fitting classifier (real = 1, generated = 0) to training data
    dataTrain = np.concatenate((realFeat, genFeat), axis=0)
    labelsTrain = np.array([1] * numSamples + [0] * numSamples)
    classifier = SVC()
    classifier.fit(dataTrain, labelsTrain)
    return classifier
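A hedged usage sketch for the function above (directory paths and the held-out folder are illustrative; `m` is assumed to be the trained biaxial model used throughout these examples):

import numpy as np
import multi_training

# Fit the real-vs-generated SVC, then score one batch of unseen segments.
classifier = trainClassifier(m, "../DataSet/liszt", "../test_data/gen_train")
testPcs = multi_training.loadPieces("../test_data/gen_test")   # hypothetical held-out set
i, o = zip(*[multi_training.getPieceSegment(testPcs) for _ in range(100)])
testFeat = np.array([np.asarray(seg).flatten() for seg in i])
print(np.mean(classifier.predict(testFeat) == 1))              # fraction labelled "real"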
Example #2
def gen_adaptive(m,
                 pcs,
                 times,
                 keep_thoughts=False,
                 name=multi_training.ofile):
    xIpt, xOpt = map(lambda x: numpy.array(x, dtype='int8'),
                     multi_training.getPieceSegment(pcs))
    all_outputs = [xOpt[0]]
    if keep_thoughts:
        all_thoughts = []
    m.start_slow_walk(xIpt[0])
    cons = 1
    for time in range(multi_training.batch_len * times):
        resdata = m.slow_walk_fun(cons)
        nnotes = numpy.sum(resdata[-1][:, 0])
        print(cons)
        if nnotes < 3:
            if cons > 1:
                cons = 1
            cons -= 0.1
        else:
            cons += (1 - cons) * 0.1
        all_outputs.append(resdata[-1])
        if keep_thoughts:
            all_thoughts.append(resdata)
    noteStateMatrixToMidi(numpy.array(all_outputs), 'output/' + name)
    if keep_thoughts:
        pickle.dump(all_thoughts, open('output/' + name + '.p', 'wb'))
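A short invocation sketch, mirroring the driver code in Example #11 (checkpoint file, music folder, and output name are illustrative):

import pickle
import multi_training

# Load trained parameters into the biaxial model `m`, then render
# ten batches of adaptive output to a MIDI file under output/.
m.learned_config = pickle.load(open("params4500.p", "rb"))
pcs = multi_training.loadPieces("music")   # hypothetical training-data folder
gen_adaptive(m, pcs, 10, name="demo_gen")  # writes output/demo_gen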
Example #3
def gen_adaptive(m, pcs, times, keep_thoughts=False, name="final", rbm=None):
    xIpt, xOpt = map(lambda x: np.array(x, dtype="int8"), multi_training.getPieceSegment(pcs))
    all_outputs = [xOpt[0]]
    if keep_thoughts:
        all_thoughts = []
    m.start_slow_walk(xIpt[0])
    cons = 1
    for time in range(multi_training.batch_len * times):
        resdata = m.slow_walk_fun(cons)
        nnotes = np.sum(resdata[-1][:, 0])
        if nnotes < 2:
            if cons > 1:
                cons = 1
            cons -= 0.02
        else:
            cons += (1 - cons) * 0.3
        all_outputs.append(resdata[-1])
        if keep_thoughts:
            all_thoughts.append(resdata)
    from RBM import rbm_train, rbm_reconstruct

    if not rbm:
        rbm = rbm_train(
            np.array(list(pcs.values())), window_len=16, learning_rate=0.1, training_epochs=1, batch_size=20, n_hidden=1500
        )
    rbm_outputs, rbm_outputs_discrete = rbm_reconstruct(rbm, np.array(all_outputs), window_len=16)
    # pickle.dump(np.array(all_outputs), open('output/composition.sm', 'wb'))

    noteStateMatrixToMidi(np.array(rbm_outputs), "output/" + name + " rbm")
    noteStateMatrixToMidi(np.array(rbm_outputs_discrete), "output/" + name + " discrete rbm")
    noteStateMatrixToMidi(np.array(all_outputs), "output/" + name)
    print ("Saved generated music and reconstruction.")

    if keep_thoughts:
        pickle.dump(all_thoughts, open("output/" + name + ".p", "wb"))
Example #4
def gen_adaptive(m, pcs, times, keep_thoughts=False, name="final"):
    pcs = multi_training.loadPieces("./music/單一小節資料夾")  # folder of single-bar pieces
    xIpt, xOpt = map(lambda x: numpy.array(x, dtype='int8'), multi_training.getPieceSegment(pcs))
    all_outputs = [xOpt[0]]
    if keep_thoughts:
        all_thoughts = []
    m.start_slow_walk(xIpt[0])
    cons = 1
Example #5
def test_predict_one_step_model():
    pcs = multi_training.loadPieces("Train_data")
    data, _ = multi_training.getPieceSegment(pcs)
    data = torch.Tensor(data[0])
    print(data.shape)

    model = m.BiaxialRNNModel([300, 300], [100, 50])
    print(model)

    out = model(data)  # Done in order to print
Example #6
def test_predict_n_step_model():
    pcs = multi_training.loadPieces("Train_data")
    data, _ = multi_training.getPieceSegment(pcs)
    data = torch.Tensor(data[0])
    print(data.shape)

    model = m.BiaxialRNNModel([300, 300], [100, 50])
    print(model)

    out = model(data, 5)
    print(out)
    print(torch.Tensor(np.array(out)).shape)
Example #7
def classifyGenerated(classifier):
    testingDirs = [f for f in os.listdir('/home/ubuntu/test/output/') if re.match(r'gen', f)]
    numSamples = 100
    for ind in range(len(testingDirs)):
        genPcsTrain = multi_training.loadPieces(os.path.join('/home/ubuntu/test/output/', testingDirs[ind]))
        i, o = zip(*[multi_training.getPieceSegment(genPcsTrain) for _ in range(numSamples)])
        i = np.array(i)
        genFeat = []
        for row in range(len(i)):
            genFeat.append(i[row, :].flatten())
        genFeat = np.array(genFeat)
        pred_label = classifier.predict(genFeat)
        # Percentage of generated segments that the classifier labels as real (label 1)
        accuracy = np.mean(pred_label == 1) * 100
        print(testingDirs[ind], accuracy)
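A hedged chaining sketch: build the classifier with trainClassifier from Example #1, then score every generated directory found by the function above (directory arguments are illustrative).

# real = 1, generated = 0, as in Example #1.
classifier = trainClassifier(m, "../DataSet/liszt", "../test_data/gen_train")
classifyGenerated(classifier)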
Example #8
def gen_adaptive(m, pcs, times, name="final"):
    xIpt, xOpt = map(lambda x: numpy.array(x, dtype="int8"), multi_training.getPieceSegment(pcs))
    all_outputs = [xOpt[0]]
    m.start_slow_walk(xIpt[0])
    cons = 1
    for time in range(multi_training.batch_len * times):
        resdata = m.slow_walk_fun(cons)
        nnotes = numpy.sum(resdata[-1][:, 0])
        if nnotes < 2:
            if cons > 1:
                cons = 1
            cons -= 0.02
        else:
            cons += (1 - cons) * 0.3

        all_outputs.append(resdata[-1])

    noteStateMatrixToMidi(numpy.array(all_outputs), "/home/azureuser/hackzurich/output/" + name)
Example #9
def gen_adaptive(m, pcs, times, keep_thoughts=False, name="final", rbm=None):
    xIpt, xOpt = map(lambda x: np.array(x, dtype='int8'),
                     multi_training.getPieceSegment(pcs))
    all_outputs = [xOpt[0]]
    if keep_thoughts:
        all_thoughts = []
    m.start_slow_walk(xIpt[0])
    cons = 1
    for time in range(multi_training.batch_len * times):
        resdata = m.slow_walk_fun(cons)
        nnotes = np.sum(resdata[-1][:, 0])
        if nnotes < 2:
            if cons > 1:
                cons = 1
            cons -= 0.02
        else:
            cons += (1 - cons) * 0.3
        all_outputs.append(resdata[-1])
        if keep_thoughts:
            all_thoughts.append(resdata)
    from RBM import rbm_train, rbm_reconstruct
    if not rbm:
        rbm = rbm_train(np.array(list(pcs.values())),
                        window_len=16,
                        learning_rate=0.1,
                        training_epochs=1,
                        batch_size=20,
                        n_hidden=1500)
    rbm_outputs, rbm_outputs_discrete = rbm_reconstruct(rbm,
                                                        np.array(all_outputs),
                                                        window_len=16)
    #pickle.dump(np.array(all_outputs), open('output/composition.sm', 'wb'))

    noteStateMatrixToMidi(np.array(rbm_outputs), 'output/' + name + ' rbm')
    noteStateMatrixToMidi(np.array(rbm_outputs_discrete),
                          'output/' + name + ' discrete rbm')
    noteStateMatrixToMidi(np.array(all_outputs), 'output/' + name)
    print('Saved generated music and reconstruction.')

    if keep_thoughts:
        pickle.dump(all_thoughts, open('output/' + name + '.p', 'wb'))
Example #10
def gen_adaptive(m,pcs,times,keep_thoughts=False,name="final"):
	xIpt, xOpt = map(lambda x: numpy.array(x, dtype='int8'), multi_training.getPieceSegment(pcs))
	all_outputs = [xOpt[0]]
	if keep_thoughts:
		all_thoughts = []
	m.start_slow_walk(xIpt[0])
	cons = 1
	for time in range(multi_training.batch_len*times):
		resdata = m.slow_walk_fun( cons )
		nnotes = numpy.sum(resdata[-1][:,0])
		if nnotes < 2:
			if cons > 1:
				cons = 1
			cons -= 0.02
		else:
			cons += (1 - cons)*0.3
		all_outputs.append(resdata[-1])
		if keep_thoughts:
			all_thoughts.append(resdata)
	noteStateMatrixToMidi(numpy.array(all_outputs),'output/'+name)
	if keep_thoughts:
		pickle.dump(all_thoughts, open('output/'+name+'.p','wb'))
Example #11
# Loading model at 4500 epochs to generate music for training and testing
m.learned_config = pickle.load(open("params4500.p", "rb"))
for i in range(1,15):
    main.gen_adaptive(m,pcs,10,name="beeth_train_gen_"+str(i))

for i in range(1,15):
    main.gen_adaptive(m,pcs,10,name="beeth_gen"+str(i))

# Loading model at 8000 epochs to generate music for testing
m.learned_config = pickle.load(open("params8000.p", "rb"))
for i in range(1,15):
    main.gen_adaptive(m,pcs,10,name="beeth_gen"+str(i))

# Reading real music and generating features for training
realPcs = multi_training.loadPieces("../DataSet/liszt")
i, o = zip(*[multi_training.getPieceSegment(realPcs) for _ in range(100)])
i = np.array(i)
realFeat = []
for row in range(len(i)):
    realFeat.append(i[row, :].flatten())
realFeat = np.array(realFeat)


# Reading generated music and generating features for training
genPcsTrain = multi_training.loadPieces("../test_data/gen_train")
i, o = zip(*[multi_training.getPieceSegment(genPcsTrain) for _ in range(100)])
i = np.array(i)
genFeat = []
for row in range(len(i)):
    genFeat.append(i[row, :].flatten())
genFeat = np.array(genFeat)
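These features feed the same SVC pipeline as Example #1; a minimal continuation sketch, assuming `from sklearn.svm import SVC` is available:

# Fit the real (1) vs. generated (0) classifier on the features above,
# mirroring the body of trainClassifier in Example #1.
dataTrain = np.concatenate((realFeat, genFeat), axis=0)
labelsTrain = np.array([1] * 100 + [0] * 100)
classifier = SVC()
classifier.fit(dataTrain, labelsTrain)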
Example #12
    for time in range(三小節):  # 三小節 = "three bars": number of steps to generate
        resdata = m.slow_walk_fun(cons)
        nnotes = numpy.sum(resdata[-1][:, 0])
        if nnotes < 2:
            if cons > 1:
                cons = 1
            cons -= 0.02
        else:
            cons += (1 - cons) * 0.3
        all_outputs.append(resdata[-1])
        if keep_thoughts:
            all_thoughts.append(resdata)

    # Start attaching the final bar
    pcs = multi_training.loadPieces("./music/過門資料夾")  # folder of transition (fill) bars

    xIpt, xOpt = map(lambda x: numpy.array(x, dtype='int8'), multi_training.getPieceSegment(pcs))
    all_outputs = [xOpt[0]]
    if keep_thoughts:
        all_thoughts = []
    m.start_slow_walk(xIpt[0])
    cons = 1

    # Use the model to generate the transition (fill)
    resdata = m.slow_walk_fun(cons)
    nnotes = numpy.sum(resdata[-1][:, 0])
    if nnotes < 2:
        if cons > 1:
            cons = 1
        cons -= 0.02
    else:
        cons += (1 - cons) * 0.3