def test_getPrediction(self):
    """
    Check getPrediction on models trained on a cyclic 0..9 sequence.

    Trains jump models of several orders/depths on ``np.arange(1000) % 10``
    and asserts that every sub-model predicts the deterministic successor
    of the state with probability 1.
    """
    X = np.arange(1000) % 10
    state = [1, 2, 3, 4, 5]
    for depth in range(1, 10):
        for order in range(1, 5):
            M = jumpModel.jumpModel("pitch", maxOrder=order, maxDepth=depth)
            M.train(X)
            # Forward models at jump distance j must predict (6 + j) % 10.
            for j, layer in enumerate(M.models):
                for model in layer:
                    proba = model.getPrediction(state[-model.order:])[str((6 + j) % 10)]
                    self.assertEqual(round(proba, 2), 1.0)
            # Reverse models at distance j must predict (0 - j) % 10.
            for j, rev in enumerate(M.reverse):
                proba = rev.getPrediction(state[:rev.order])[str((0 - j) % 10)]
                self.assertEqual(round(proba, 2), 1.0)
            # The merged prediction must give the direct successor '6'.
            self.assertEqual(round(M.getPrediction(state)['6'], 2), 1.0)
def test_getLikelihood(self):
    """
    Check getLikelihood on models trained on a cyclic 0..9 sequence.

    For every pair of symbols in the learned alphabet, the likelihood of
    ``note`` after ``state`` must be 1.0 exactly when note is the cyclic
    successor of state, and 0.0 otherwise.
    """
    X = np.arange(1000) % 10
    for depth in range(1, N // 2):
        for order in range(1, N):
            M = jumpModel.jumpModel("pitch", maxOrder=order, maxDepth=depth)
            M.train(X)
            # Union of the alphabets of all first-layer models, sorted.
            symbols = set()
            for model in M.models[0]:
                symbols.update(model.alphabet)
            alphabet = sorted(symbols)
            for state in alphabet:
                for note in alphabet:
                    expected = 1.0 if (int(state) + 1) % 10 == int(note) % 10 else 0.0
                    self.assertEqual(M.getLikelihood([int(state)], note), expected)
def eval(self, data, k_fold=1):
    """
    Evaluate the model on a dataset using k-fold style hold-out training.

    For each fold, the long-term models are rebuilt from scratch and
    trained on all the data except the current fold.

    :param data: dataset object exposing getData(viewPoint)
    :param k_fold: size of each held-out fold (default 1)
    :type k_fold: int
    :return: list of likelihoods (currently left empty; the likelihood
             computation appears unfinished — TODO complete it)
    """
    # BUG FIX(review): the original body contained leftover debug
    # print(...) calls and a quit() that terminated the interpreter on
    # the first fold; they have been removed.
    Likelihood = []
    for i in range(len(data.getData(self.viewPoints[0])) // k_fold):
        # Re-initialize one long-term model per viewpoint.
        self.LTM = []
        for viewPoint in self.viewPoints:
            if self.jump is False:
                self.LTM.append(
                    longTermModel.longTermModel(viewPoint,
                                                maxOrder=self.maxOrder))
            else:
                self.LTM.append(
                    jumpModel.jumpModel(viewPoint,
                                        maxOrder=self.maxOrder,
                                        maxDepth=self.maxDepth))
        # Train each model on everything except the current fold.
        for k, viewPoint in enumerate(self.viewPoints):
            self.LTM[k].train(
                data.getData(viewPoint)[:i * k_fold] +
                data.getData(viewPoint)[(i + 1) * k_fold:])
    return Likelihood
def test_saveAndLoad(self):
    """
    Check whether a loaded model is identical to the one that was saved.
    """
    for depth in range(1, N // 2):
        for order in range(1, N):
            M1 = jumpModel.jumpModel("pitch", maxOrder=order, maxDepth=depth)
            X = np.arange(500) % 10
            M1.train(X)
            # Round-trip through the on-disk representation.
            M1.save("longterm.s")
            M2 = jumpModel.jumpModel("pitch")
            M2.load("longterm.s")
            os.remove("longterm.s")
            # Compare the internal state of every sub-model pairwise.
            for layer1, layer2 in zip(M1.models, M2.models):
                self.assertEqual(layer1[0].__dict__, layer2[0].__dict__)
def setUp(self):
    """
    Construct a collection of jump models (all depth/order/viewpoint
    combinations) used by the tests.
    """
    self.models = [
        jumpModel.jumpModel(viewPoint, maxOrder=order, maxDepth=depth)
        for depth in range(1, N // 2)
        for order in range(1, N)
        for viewPoint in viewPoints
    ]
def test_train(self):
    """
    Check that a trained model yields a proper probability distribution:
    for any context slice, the likelihoods over the learned alphabet must
    sum to 1.
    """
    # Ten shuffled copies of the cycle -1..8.
    X = []
    for _ in range(10):
        seq = np.arange(200) % 10 - 1
        np.random.shuffle(seq)
        X.append(seq)
    for depth in range(2, N // 2):
        for order in range(N):
            M = jumpModel.jumpModel("pitch", maxOrder=order, maxDepth=depth)
            M.train(X)
            if order == 0:
                # order 0 means "use the default maximal order".
                M = jumpModel.jumpModel("pitch")
                M.train(X)
            x = X[0]
            # The alphabet only depends on M, so build it once per model.
            symbols = set()
            for model in M.models[0]:
                symbols.update(model.alphabet)
            alphabet = sorted(symbols)
            for start in tqdm(range(len(x) - 2 * N)):
                for end in range(start + order + 1, start + order + N):
                    total = sum(M.getLikelihood(x[start:end], z)
                                for z in alphabet)
                    if round(total, 2) != 1:
                        print(total, x[start:end])
                    self.assertEqual(round(total, 2), 1.0)
def test_sample(self):
    """
    Check that sampling from a deterministic model always returns the
    unique continuation (likelihood 1.0) for every known state.
    """
    X = np.arange(1000) % 10
    for depth in range(1, N // 2):
        for order in range(2, N):
            M = jumpModel.jumpModel("pitch", maxOrder=order, maxDepth=depth)
            M.train(X)
            # stateAlphabet keys are string-encoded states; decode them.
            for key in M.models[0][0].stateAlphabet:
                state = ast.literal_eval(key)
                sampled = M.sample(state)
                self.assertEqual(round(M.getLikelihood(state, sampled), 2), 1.0)
def cleanWeights(self, order=None):
    """
    Delete all trained models and rebuild them, optionally fixing an order.

    :param order: maximal order for the rebuilt models; defaults to
                  self.maxOrder when None
    :type order: int or None
    """
    if order is None:
        order = self.maxOrder
    # BUG FIX(review): the original computed `order` but then built every
    # model with self.maxOrder, silently ignoring the argument.
    self.LTM = []
    for viewPoint in self.viewPoints:
        if self.jump is False:
            self.LTM.append(
                longTermModel.longTermModel(viewPoint, maxOrder=order))
        else:
            self.LTM.append(
                jumpModel.jumpModel(viewPoint,
                                    maxOrder=order,
                                    maxDepth=self.maxDepth))
def test_generate(self):
    """
    Check generation via a random walk: a model trained on shifted 0..9
    cycles must generate a sequence whose sorted content matches the
    sorted cycle of the requested length.
    """
    X = [(np.arange(100) + shift) % 10 for shift in range(10)]
    for depth in range(1, N // 2):
        for order in range(1, N):
            M = jumpModel.jumpModel("pitch", maxOrder=order, maxDepth=depth)
            M.train(X)
            generated = M.generate(400)
            generated.sort()
            expected = list(np.sort(np.arange(400) % 10))
            self.assertEqual(generated, expected)
def __init__(self,
             maxOrder=None,
             viewPoints=None,
             dataTrain=None,
             dataTrial=None,
             jump=False,
             maxDepth=10,
             stm=True):
    """
    Build the model and one long-term model per viewpoint.

    :param maxOrder: maximal order for the markov chains (None = unbounded)
    :param viewPoints: viewpoints to use; defaults to ["pitch", "length"]
    :param dataTrain: training dataset (unused here — TODO confirm usage elsewhere)
    :param dataTrial: trial dataset (unused here — TODO confirm usage elsewhere)
    :param jump: whether to use jump models instead of plain long-term models
    :param maxDepth: maximal depth for the jump models
    :param stm: whether to also use a short-term model
    """
    # BUG FIX(review): the original used a mutable default argument
    # (viewPoints=["pitch", "length"]), shared across all instances;
    # replaced with a None sentinel — default behavior is unchanged.
    if viewPoints is None:
        viewPoints = ["pitch", "length"]
    # viewpoints to use for the model
    self.viewPoints = viewPoints
    # maximal order for the markov chains
    self.maxOrder = maxOrder
    # maximal depth for the jump model
    self.maxDepth = maxDepth
    # whether we use the jump model
    self.jump = jump
    # whether we also use a short term model
    self.stm = stm
    # one long-term model per viewpoint
    self.LTM = []
    for viewPoint in self.viewPoints:
        if self.jump is False:
            self.LTM.append(
                longTermModel.longTermModel(viewPoint,
                                            maxOrder=self.maxOrder))
        else:
            self.LTM.append(
                jumpModel.jumpModel(viewPoint,
                                    maxOrder=self.maxOrder,
                                    maxDepth=self.maxDepth))
# Demo script: train a jump model on a MIDI dataset, generate a new
# sequence, plot it and write it to a MIDI file.
import sys
sys.path.append('../')
from idyom import jumpModel
from idyom import data
from idyom import score
import numpy as np
import matplotlib.pyplot as plt

# Jump model over the "pitch" viewpoint.
L = jumpModel.jumpModel("pitch", maxDepth=10, maxOrder=20)

# Parse the dataset folder (NOTE(review): hard-coded relative path —
# presumably a local dataset; verify it exists before running).
M = data.data()
M.parse("../datasetprout/")
#M.parse("dataBaseTest/")

# Train on the pitch data, then sample a 500-note sequence.
L.train(M.getData("pitch"))
G = L.generate(500)
print(G)

# Render the generated sequence: plot and MIDI export.
s = score.score(G)
s.plot()
s.writeToMidi("exGen.mid")

# Persist the trained model.
L.save("jumpModel.save")
def getLikelihoodfromFile(self, file):
    """
    Return note-by-note likelihoods over a score file.

    For each note, the long-term model likelihood is optionally merged
    (entropy-weighted) with a short-term model retrained on the piece
    prefix seen so far.

    :param file: path of the file to compute likelihoods on
    :type file: string
    :return: np.array(length) of per-note likelihoods
    """
    D = data.data()
    D.addFile(file)
    probas = np.ones(D.getSizeofPiece(0))
    # First note: uniform probability over the model's alphabet.
    if self.jump is False:
        probas[0] = 1 / len(self.LTM[0].models[0].alphabet)
    else:
        probas[0] = 1 / len(self.LTM[0].models[0][0].alphabet)
    for model in self.LTM:
        dat = D.getData(model.viewPoint)[0]
        # Short-term model for the current viewpoint; the jump variant is
        # rebuilt from scratch at every step inside the loop below.
        if self.jump is False:
            STM = longTermModel.longTermModel(model.viewPoint,
                                              maxOrder=20,
                                              STM=True,
                                              init=dat)
        else:
            STM = jumpModel.jumpModel(model.viewPoint, maxOrder=20)
        for i in tqdm(range(1, len(dat))):
            if self.jump is True:
                STM = jumpModel.jumpModel(model.viewPoint, maxOrder=20)
                STM.train([dat[:i]])
            else:
                STM.train([dat[:i]], shortTerm=True)
            p = model.getLikelihood(dat[:i], dat[i])
            # ltm_known is False when the state never occurred in the
            # training data (the LTM returns None).
            ltm_known = True
            if p is None:
                p = 0
                ltm_known = False
            p2 = STM.getLikelihood(dat[:i], dat[i])
            if self.stm and p2 is not None:
                if ltm_known:
                    # Entropy-weighted merge of LTM and STM estimates.
                    p = self.mergeProbas(
                        [p, p2],
                        [model.getEntropy(dat[:i]),
                         STM.getEntropy(dat[:i])])
                else:
                    p = p2
            probas[i] *= p
            # BUG FIX(review): removed dead debug branch
            # `if probas[i] == 563540:` (leftover magic-constant check
            # with prints and commented-out code).
    return probas