def test_train(self):
    """
    Check that training fills the model correctly.

    On the deterministic sequence 0,1,...,9,0,1,... every state must
    predict its successor with probability 1.0; on a shuffled sequence
    each state's prediction distribution must still sum to 1.
    """
    # Deterministic cycle: the successor of every state is fully determined.
    X = np.arange(1000) % 10
    for i in range(1, N):
        M = markovChain.markovChain(i)
        M.train(X)
        for state in M.stateAlphabet:
            # Successor of the last element of the state, modulo 10.
            target = str((ast.literal_eval(state)[-1] + 1) % 10)
            self.assertEqual(M.getPrediction(state)[target], 1.0)
    # Shuffled data: predictions are no longer deterministic, but every
    # state's distribution must still be (approximately) normalized.
    X = np.arange(1000) % 10
    np.random.shuffle(X)
    for i in range(1, N):
        M = markovChain.markovChain(i)
        M.train(X)
        for state in M.stateAlphabet:
            P = list(M.getPrediction(state).values())
            self.assertLessEqual(abs(np.sum(P) - 1), 0.00001)
def test_train(self):
    """
    Check that training fills the transition dictionary correctly.

    On the deterministic sequence 0,1,...,9,0,1,... every state must
    transition to its successor state with probability 1.0; on shuffled
    data each state's prediction must still sum to 1.
    """
    # Deterministic cycle: each state has exactly one successor state.
    X = np.arange(1000) % 10
    for i in range(1, N):
        M = markovChain.markovChain(i)
        M.train(X)
        T = M.transitions
        for state in T:
            # Successor *state* (length i), each element shifted by 1 mod 10.
            target = str(
                list((np.arange(
                    ast.literal_eval(state)[-1],
                    ast.literal_eval(state)[-1] + i) + 1) % 10))
            self.assertEqual(T[state][target], 1.0)
    X = np.arange(1000) % 10
    # BUG FIX: np.shuffle does not exist; shuffling lives in np.random.
    np.random.shuffle(X)
    for i in range(1, N):
        M = markovChain.markovChain(i)
        M.train(X)
        # NOTE(review): getPrediction is called without a state here,
        # unlike the other tests — presumably it returns a mapping over
        # all states; verify against the markovChain API.
        P = M.getPrediction()
        for state in M.stateAlphabet:
            self.assertEqual(np.sum(P[state]), 1.0)
def train(self, data):
    """
    Fill the matrix from data
    :param data: data to train from
    :type data: list of np.array or list of list of int
    """
    # The usable order is bounded by the shortest training sequence.
    if isinstance(data, list):
        maxOrder = len(data[0])
        for i in range(1, len(data)):
            maxOrder = min(len(data[i]), maxOrder)
    else:
        maxOrder = len(data)
    # If no maximum order was requested, use half the shortest sequence
    # length as a heuristic cap; otherwise honor the requested order.
    if self.maxOrder is None:
        maxOrder = maxOrder//2
    else:
        maxOrder = self.maxOrder
    self.maxOrder = maxOrder
    print("The maximal order is:", self.maxOrder)
    # list containing, for each depth, Markov chains of increasing order
    self.models = []
    for depth in range(self.maxDepth+1):
        self.models.append([])
        for order in range(1, self.maxOrder+1):
            self.models[depth].append(markovChain.markovChain(order, depth=depth))
    # First-order chains trained on the reversed data, one per depth.
    self.reverse =[]
    for depth in range(self.maxDepth):
        self.reverse.append(markovChain.markovChain(1, depth=depth))
        self.reverse[depth].train(data, reverse=True)
    # training all the models; stop raising the order as soon as a chain
    # finds no usable score (the data is too short for that order).
    # NOTE(review): break only exits the inner (order) loop — training
    # resumes from order 1 at the next depth; confirm this is intended.
    for depth in range(self.maxDepth+1):
        for order in range(maxOrder):
            self.models[depth][order].train(data)
            if self.models[depth][order].usedScores == 0:
                print("The order is too high for these data, we stop the training here.")
                break
    # Merge the alphabets of all depth-0 chains into a sorted, unique list.
    self.alphabet = []
    for model in self.models[0]:
        self.alphabet.extend(model.alphabet)
    self.alphabet = list(set(self.alphabet))
    self.alphabet.sort()
def test_getLikelihood(self):
    """
    Check getLikelihood on a deterministic training sequence.

    After training on 0,1,...,9,0,1,..., the likelihood of the unique
    successor of a state must be 1.0 and that of every other note 0.0.
    """
    X = np.arange(1000) % 10
    for i in range(1, N):
        M = markovChain.markovChain(i)
        M.train(X)
        for state in M.stateAlphabet:
            # The only possible continuation of this state; computed once
            # per state instead of once per (state, note) pair.
            target = str((ast.literal_eval(state)[-1] + 1) % 10)
            for note in M.alphabet:
                if target == note:
                    self.assertEqual(M.getLikelihood(state, note), 1.0)
                else:
                    self.assertEqual(M.getLikelihood(state, note), 0.0)
def test_getPrediction(self):
    """
    Check the probability distribution returned for each state.

    After training on 0,1,...,9,0,1,..., both the internal probability
    table and getPrediction must assign probability 1.0 to the unique
    successor of every state.
    """
    X = np.arange(1000) % 10
    for i in range(1, N):
        M = markovChain.markovChain(i)
        M.train(X)
        # Check the raw probability table first ...
        T = M.probabilities
        for state in T:
            target = str((ast.literal_eval(state)[-1] + 1) % 10)
            self.assertEqual(T[state][target], 1.0)
        # ... then the public accessor.
        for state in M.stateAlphabet:
            target = str((ast.literal_eval(state)[-1] + 1) % 10)
            self.assertEqual(M.getPrediction(state)[target], 1.0)
def test_getStatesMatrix(self):
    """
    Check the state-to-state transition matrix.

    After training on 0,1,...,9,0,1,..., each state transitions with
    probability 1.0 to the single state whose elements are all shifted
    by one (mod 10), and with probability 0.0 to every other state.
    """
    X = np.arange(1000) % 10
    for order in range(1, 2):
        M = markovChain.markovChain(order)
        M.train(X)
        matrix = M.getStatesMatrix()
        for i in range(len(M.stateAlphabet)):
            # The expected successor state depends only on row i, so
            # compute it once per row instead of once per cell.
            target = str(
                list((np.arange(
                    ast.literal_eval(M.stateAlphabet[i])[-1],
                    ast.literal_eval(M.stateAlphabet[i])[-1] + order) + 1) % 10))
            for j in range(len(M.stateAlphabet)):
                if target == M.stateAlphabet[j]:
                    self.assertEqual(matrix[i][j], 1.0)
                else:
                    self.assertEqual(matrix[i][j], 0.0)
def __init__(self, viewPoint, maxOrder=None, STM=False, init=None):
    """
    Build a model for one viewPoint, optionally pre-sizing its Markov
    chains from an initial sequence.

    :param viewPoint: name of the viewPoint this model works on
    :param maxOrder: maximum Markov order to use, or None to derive it
    :param STM: True if this model is a short-term model
    :param init: optional sequence used to derive maxOrder and build
        the chains immediately
    """
    # ViewPoint to use
    self.viewPoint = viewPoint
    # maximum order if given
    self.maxOrder = maxOrder
    # to track if is LTM or STM
    self.STM = STM
    # in order to compute model entropy directly from MC entropies
    self.entropies = {}
    # NOTE(review): self.models is only created when init is given —
    # presumably train() builds it otherwise; confirm against train().
    if init is not None:
        maxOrder = len(init)
        if self.maxOrder is None:
            maxOrder = maxOrder // 2  # CHANGE IT TO maxOrder - 1, maybe
        else:
            maxOrder = self.maxOrder
        self.maxOrder = maxOrder
        if VERBOSE:
            print("The maximal order is:", self.maxOrder)
        # list containing different order markov chains
        self.models = []
        for order in range(1, self.maxOrder+1):
            self.models.append(markovChain.markovChain(order, STM=self.STM))
    # timing/accounting slots — semantics not visible here; see callers
    self.benchmark = [0, 0, 0]
def setUp(self):
    """
    Build one Markov chain per order 1..N-1 for the tests to use.
    """
    self.models = [markovChain.markovChain(order) for order in range(1, N)]
def test_saveAndLoad(self):
    """
    Check whether the loaded object is the same as the saved one.
    """
    for i in range(1, N):
        M1 = markovChain.markovChain(i)
        X = np.arange(500) % 10
        M1.train(X)
        M1.save("unittest.s")
        # Load into a chain built with a different order so every
        # attribute must really come from the file, not the constructor.
        M2 = markovChain.markovChain(1)
        M2.load("unittest.s")
        os.remove("unittest.s")
        self.assertEqual(M1.__dict__, M2.__dict__)
def test_sample(self):
    """
    Sampling from a chain trained on a deterministic cycle must always
    return the unique successor of the given state.
    """
    data = np.arange(1000) % 10
    for order in range(1, N):
        chain = markovChain.markovChain(order)
        chain.train(data)
        for key in chain.stateAlphabet:
            drawn = chain.sample(ast.literal_eval(key))
            self.assertEqual(chain.getLikelihood(key, drawn), 1.0)
def train(self, data, shortTerm=False):
    """
    Fill the matrix from data
    :param data: data to train from
    :type data: list of np.array or list of list of int
    """
    if shortTerm is True:
        # Short-term update: feed each existing chain only the tail of
        # the first sequence (the last order+1 elements).
        for i in range(len(self.models)):
            self.models[i].train([data[0][-self.models[i].order - 1:]])
            if self.models[i].usedScores == 0:
                if VERBOSE:
                    print(
                        "The order is too high for these data, we stop the training here."
                    )
                break
        return
    # NOTE(review): here the order bound grows with the *longest*
    # sequence (max), while the sibling train() elsewhere uses min —
    # chains too deep for short sequences rely on the usedScores break
    # below; confirm this asymmetry is intended.
    if isinstance(data, list):
        maxOrder = len(data[0])
        for i in range(1, len(data)):
            maxOrder = max(len(data[i]), maxOrder)
    else:
        maxOrder = len(data)
    # Without an explicit cap, use half the data length as the order.
    if self.maxOrder is None:
        maxOrder = maxOrder // 2
    else:
        maxOrder = self.maxOrder
    self.maxOrder = maxOrder
    if VERBOSE:
        print("The maximal order is:", self.maxOrder)
    # list containing different order markov chains
    self.models = []
    for order in range(1, self.maxOrder + 1):
        self.models.append(markovChain.markovChain(order, STM=self.STM))
    # training all the models, lowest order first; stop as soon as an
    # order is too high to find any usable score in the data.
    for i in range(len(self.models)):
        self.models[i].train(data)
        if self.models[i].usedScores == 0:
            if VERBOSE:
                print(
                    "The order is too high for these data, we stop the training here."
                )
            break
def test_generate(self):
    """
    A chain trained on the full digit cycle must be able to generate a
    sequence of length 10 containing every digit exactly once.
    """
    data = np.arange(1000) % 10
    for order in range(1, N):
        chain = markovChain.markovChain(order)
        chain.train(data)
        generated = sorted(chain.generate(10).getData())
        self.assertEqual(generated, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
import sys
sys.path.append('../')
from idyom import markovChain
from idyom import data
from idyom import score
import numpy as np

# Manual smoke-test script: train a 3rd-order Markov chain on the pitch
# viewpoint of the test database, then generate a short piece from it.
M = markovChain.markovChain(3)
D = data.data()
D.parse("dataBaseTest/")
M.train(D.getData("pitch"))
print(D.getData("pitch"))

# Generate 500 notes and export them as MIDI and WAV.
S = M.generate(500)
S.writeToMidi("generation1.mid")
S.toWaveForm("generation1.wav")
print(S.getData())
quit()

# NOTE(review): everything below is unreachable because of quit() above —
# apparently kept for quick inspection of the transition matrix.
matrix = M.getStatesMatrix()
print(M.transitions)