Code example #1
File: idyom.py Project: GuiMarion/IDyOM
    def __init__(self,
                 maxOrder=24,
                 viewPoints=["pitch", "length"],
                 dataTrain=None,
                 dataTrial=None,
                 stm=True,
                 evolutive=False):

        # viewpoints to use for the model
        self.viewPoints = viewPoints

        # maximal order for the Markov chains
        self.maxOrder = maxOrder

        # whether we also use the short term model or not
        self.stm = stm

        self.evolutive = evolutive

        # list of models, one for each viewpoint
        self.LTM = []
        for viewPoint in self.viewPoints:
            self.LTM.append(
                longTermModel.longTermModel(viewPoint,
                                            maxOrder=self.maxOrder,
                                            evolutive=evolutive))
Code example #2
    def test_getLikelihood(self):
        """
		Return the likelihood of a note given a state
		
		:param state: a sequence of viewPoints of size order
		:param note: integer or name of the note

		:type state: np.array(order)
		:type note: int or string

		:return: float value of the likelihood
		"""

        X = np.arange(1000) % 10

        for i in range(1, N):
            M = longTermModel.longTermModel("pitch", i)
            M.train(X)

            alphabet = []
            for model in M.models:
                alphabet.extend(model.alphabet)

            alphabet = list(set(alphabet))
            alphabet.sort()

            for state in alphabet:
                for note in alphabet:
                    if (int(state) + 1) % 10 == int(note) % 10:
                        self.assertEqual(M.getLikelihood([int(state)], note),
                                         1.0)
                    else:
                        self.assertEqual(M.getLikelihood([int(state)], note),
                                         0.0)
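A minimal sketch of the property this test exercises; it only uses calls that appear in the test itself, with a separate import block so the snippet stands alone.

# With the cyclic training data 0, 1, ..., 9, 0, 1, ... every state has
# exactly one possible successor, so the likelihoods are 0 or 1.
import numpy as np
from idyom import longTermModel

M = longTermModel.longTermModel("pitch", 1)
M.train(np.arange(1000) % 10)

alphabet = sorted({a for m in M.models for a in m.alphabet})
for note in alphabet:
    print(note, M.getLikelihood([3], note))   # 1.0 only for the successor of 3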
Code example #3
    def test_generate(self):
        """
		Implement a very easy random walk in order to generate a sequence

		:param length: length of the generated sequence
		:type length: int

		:return: sequence (np.array()) 
		"""

        X = []
        for i in range(10):
            X.append(np.arange(300) % 10)

        for order in range(1, N):
            M = longTermModel.longTermModel("pitch", order)
            M.train(X)

            S = M.generate(400)
            S.sort()
            target = list(np.sort(np.arange(400) % 10))
            self.assertEqual(S, target)


#unittest.main()
Code example #4
File: idyom.py Project: GuiMarion/IDyOM
    def eval(self, data, k_fold=1):

        Likelihood = []

        for i in range(len(data.getData(self.viewPoints[0])) // k_fold):

            # We initialize the models
            self.LTM = []
            for viewPoint in self.viewPoints:
                self.LTM.append(
                    longTermModel.longTermModel(viewPoint,
                                                maxOrder=self.maxOrder))

            # We train them with the given dataset
            k = 0
            for viewPoint in self.viewPoints:
                self.LTM[k].train(
                    data.getData(viewPoint)[:i * k_fold] +
                    data.getData(viewPoint)[(i + 1) * k_fold:])
                # debugging output left in the original source; the quit() below
                # stops eval after the first fold
                print(data.getData(viewPoint))
                print()
                print(
                    data.getData(viewPoint)[:i * k_fold] +
                    data.getData(viewPoint)[(i + 1) * k_fold:])
                quit()
                k += 1
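A hedged call sketch for eval, reusing the corpus-parsing pattern from code example #15 below. The import path and class name are assumptions, and so is the idea that the full method (the excerpt above stops at a debugging quit()) returns the collected Likelihood list.

# Hypothetical k-fold evaluation sketch.
from idyom import idyom, data

D = data.data()
D.parse("dataBaseTest/")   # folder name taken from code example #15

model = idyom.idyom(maxOrder=24, viewPoints=["pitch", "length"])
likelihoods = model.eval(D, k_fold=5)   # assumed to return per-fold likelihoods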
Code example #5
    def test_saveAndLoad(self):
        """
		Check whether the loaded object is the same as the saved one
		"""

        for i in range(1, N):
            M1 = longTermModel.longTermModel("pitch", i)
            X = np.arange(500) % 10
            M1.train(X)
            M1.save("longterm.s")

            M2 = longTermModel.longTermModel("pitch", 1)
            M2.load("longterm.s")

            os.remove("longterm.s")
            for i in range(len(M1.models)):
                self.assertEqual(M1.models[i].__dict__, M2.models[i].__dict__)
Code example #6
    def setUp(self):
        """
		Construct some models for testing
		"""

        self.models = []
        for i in range(1, N):
            for viewPoint in viewPoints:
                self.models.append(longTermModel.longTermModel(viewPoint, i))
Code example #7
    def test_sample(self):
        X = np.arange(1000) % 10

        for order in range(2, N):
            M = longTermModel.longTermModel("pitch", order)
            M.train(X)

            for z in M.models[order - 2].stateAlphabet:
                state = ast.literal_eval(z)
                s = M.sample(state)
                self.assertEqual(round(M.getLikelihood(state, s), 2), 1.0)
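A small sampling sketch built directly from the test above; only the choice of a single state and the print are new, everything else mirrors the test (it assumes stateAlphabet is an iterable of literal state strings, as the test implies).

import ast
import numpy as np
from idyom import longTermModel

M = longTermModel.longTermModel("pitch", 2)
M.train(np.arange(1000) % 10)

state = ast.literal_eval(next(iter(M.models[0].stateAlphabet)))  # first stored state
note = M.sample(state)                                           # draw a continuation
print(note, M.getLikelihood(state, note))                        # expected to be ~1.0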
Code example #8
File: idyom.py Project: GuiMarion/IDyOM
    def cleanWeights(self, order=None):
        """
		Delete all trained models and fix an order if given
		"""

        if order is None:
            order = self.maxOrder

        self.LTM = []
        for viewPoint in self.viewPoints:
            self.LTM.append(
                longTermModel.longTermModel(viewPoint, maxOrder=order))
Code example #9
    def test_train(self):
        """
		Fill the matrix from data
		
		:param data: pre-processed data to train with
		:type data: data object
		"""

        X = []
        for i in range(10):
            X.append(np.arange(200) % 10 - 1)
            np.random.shuffle(X[i])

        for i in range(N):
            M = longTermModel.longTermModel("pitch", i)
            M.train(X)

            if i == 0:
                M = longTermModel.longTermModel("pitch")
                M.train(X)

            for start in range(len(X) - 2 * N):
                for end in range(start + i, len(X) - N):

                    alphabet = []
                    for model in M.models:
                        alphabet.extend(model.alphabet)

                    alphabet = list(set(alphabet))
                    alphabet.sort()

                    p = 0
                    for z in alphabet:
                        p += M.getLikelihood(X[start:end], z)

                    if round(p, 2) != 1:
                        print(p, X[start:end])
                    self.assertEqual(round(p, 2), 1.0)
Code example #10
    def test_getPrediction(self):
        """
		Return the probability distribution of notes from a given state
		
		:param state: a sequence of viewPoints of size order
		:type state: np.array(order)

		:return: np.array(alphabetSize).astype(float)
		"""

        X = np.arange(1000) % 10

        for i in range(1, 5):
            M = longTermModel.longTermModel("pitch", i)
            M.train(X)

            state = [1, 2, 3, 4, 5]

            for model in M.models:
                self.assertEqual(
                    model.getPrediction(state[-model.order:])['6'], 1.0)

            self.assertEqual(M.getPrediction(state)['6'], 1.0)
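A small sketch of the behaviour the assertions above rely on. The dict-like return value and the string keys come from the test itself; the sum-to-one check mirrors test_train in code example #9 and is otherwise an assumption.

import numpy as np
from idyom import longTermModel

M = longTermModel.longTermModel("pitch", 4)
M.train(np.arange(1000) % 10)

dist = M.getPrediction([1, 2, 3, 4, 5])      # distribution over the next note
print(dist['6'])                             # 1.0 for this deterministic corpus
print(round(sum(dist.values()), 2))          # assumed to be 1.0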
Code example #11
    def __init__(self,
                 maxOrder=None,
                 viewPoints=["pitch", "length"],
                 dataTrain=None,
                 dataTrial=None,
                 jump=False,
                 maxDepth=10,
                 stm=True):

        # viewpoints to use for the model
        self.viewPoints = viewPoints

        # maximal order for the Markov chains
        self.maxOrder = maxOrder

        # maximal depth for the jump model
        self.maxDepth = maxDepth

        # whether we use the jump model
        self.jump = jump

        # whether we also use the short term model or not
        self.stm = stm

        # list of models, one for each viewpoint
        self.LTM = []
        for viewPoint in self.viewPoints:
            if self.jump is False:
                self.LTM.append(
                    longTermModel.longTermModel(viewPoint,
                                                maxOrder=self.maxOrder))
            else:
                self.LTM.append(
                    jumpModel.jumpModel(viewPoint,
                                        maxOrder=self.maxOrder,
                                        maxDepth=self.maxDepth))
Code example #12
File: idyom.py Project: GuiMarion/IDyOM
    def getDistributionsfromFile(self,
                                 file,
                                 threshold,
                                 short_term_only=False,
                                 long_term_only=False,
                                 normalization=True):
        """
		Return the surprise (information content) over a score, together with
		the probabilities of candidate missing notes above the threshold

		:param file: file to compute the likelihood on
		:type file: string

		:return: (notes_surprise, missing_notes)

		"""

        D = data.data()
        D.addFile(file)

        distribution = []

        for model in self.LTM:
            if model.viewPoint == "length":
                dat = D.getData(model.viewPoint)[0]

                STM = longTermModel.longTermModel(model.viewPoint,
                                                  maxOrder=20,
                                                  STM=True,
                                                  init=dat)

                for i in tqdm(range(1, len(dat))):
                    # we instantiate a Short Term Model for the current viewpoint

                    STM.train([dat[:i]], shortTerm=True)
                    predictions_LTM = model.getPrediction(dat[:i])
                    predictions_STM = STM.getPrediction(dat[:i])

                    durations = []
                    for duration in predictions_LTM:
                        if duration not in durations and predictions_LTM[
                                duration] != 0:
                            durations.append(duration)

                    for duration in predictions_STM:
                        if duration not in durations and predictions_STM[
                                duration] != 0:
                            durations.append(duration)

                    distribution_note = {}
                    for duration in durations:
                        if duration in predictions_LTM:
                            p1 = predictions_LTM[duration]
                            flag = True
                        else:
                            p1 = 1 / 30
                            flag = None
                        if duration in predictions_STM:
                            p2 = predictions_STM[duration]
                        else:
                            p2 = None

                        if self.stm and p2 is not None:
                            if flag is not None:
                                p = self.mergeProbas([p1, p2], [
                                    model.getRelativeEntropy(dat[:i]),
                                    STM.getRelativeEntropy(dat[:i])
                                ])
                            else:
                                p = p2
                        else:
                            p = p1

                        if long_term_only:
                            p = p1
                        if short_term_only:
                            p = p2
                            if p is None:
                                p = 1 / 30
                        distribution_note[duration] = p

                    distribution.append(distribution_note)

        ### Time Representation

        D = data.data()
        D.addFile(file)

        probas, entropies = self.getLikelihoodfromFile(
            file,
            short_term_only=short_term_only,
            long_term_only=long_term_only)

        # We compute the surprise by using -log2(probas)
        probas = -np.log(probas + sys.float_info.epsilon) / np.log(2)

        # We get the length of the notes
        lengths = D.getData("length")[0]

        ret = []
        for i in range(len(probas)):
            ret.append(probas[i])
            for j in range(int(lengths[i])):
                ret.append(0)

        notes_surprise = ret

        indexes = []
        probas = []
        current_index = 1
        for i in range(len(distribution)):
            sum_distribution = sum(distribution[i].values())
            keys = np.array(list(distribution[i])).astype(int)
            keys.sort()
            for duration in keys:
                duration = str(duration)
                if int(duration) < int(
                        lengths[i]
                ) and distribution[i][duration] / sum_distribution > threshold:
                    indexes.append(current_index + int(duration))
                    probas.append(distribution[i][duration] / sum_distribution)

                if normalization:
                    sum_distribution -= distribution[i][duration]
            current_index += int(lengths[i]) + 1

        missing_notes = np.zeros(len(notes_surprise))
        missing_notes[indexes] = probas

        plt.plot(notes_surprise)
        plt.plot(missing_notes)
        plt.legend(["surprise", "missing notes"])
        plt.show()

        return notes_surprise, missing_notes
Code example #13
File: idyom.py Project: GuiMarion/IDyOM
    def getLikelihoodfromFile(self,
                              file,
                              short_term_only=False,
                              long_term_only=False):
        """
		Return the likelihood and the entropy over a score

		:param file: file to compute the likelihood on
		:type file: string

		:return: (np.array(length), np.array(length)) likelihoods and entropies

		"""

        D = data.data()
        D.addFile(file)

        probas = np.ones(D.getSizeofPiece(0))
        probas[0] = 1 / len(self.LTM[0].models[0].alphabet)

        entropies = np.zeros(D.getSizeofPiece(0))
        L = np.ones(len(self.LTM[0].models[0].alphabet)) / len(
            self.LTM[0].models[0].alphabet)
        entropies[0] = -np.sum(L * np.log2(L))

        for model in self.LTM:
            dat = D.getData(model.viewPoint)[0]
            if long_term_only is False:
                STM = longTermModel.longTermModel(model.viewPoint,
                                                  maxOrder=20,
                                                  STM=True,
                                                  init=dat)

            for i in range(1, len(dat)):
                # we instantiate a Short Term Model for the current viewpoint

                if long_term_only is False:
                    STM.train([dat[:i]], shortTerm=True)

                p1 = model.getLikelihood(dat[:i], dat[i])
                if p1 is None:
                    e1 = 4.9
                else:
                    e1 = model.getEntropy(dat[:i])

                flag = True

                # This happens when the state never occurred in the training data
                if p1 is None:
                    p1 = 1 / 30
                    e1 = 4.9
                    flag = None
                if long_term_only is False:
                    p2 = STM.getLikelihood(dat[:i], dat[i])
                    if p2 is None:
                        e2 = 4.9
                    else:
                        e2 = STM.getEntropy(dat[:i])
                if long_term_only:
                    p = p1
                    e = e1
                elif short_term_only:
                    p = p2
                    e = e2
                    if p is None:
                        p = 1 / 30
                        e = 4.9
                elif self.stm and p2 is not None:
                    if flag is not None:
                        p = self.mergeProbas([p1, p2], [
                            model.getRelativeEntropy(dat[:i]),
                            STM.getRelativeEntropy(dat[:i])
                        ])
                        e = self.mergeProbas([e1, e2], [
                            model.getRelativeEntropy(dat[:i]),
                            STM.getRelativeEntropy(dat[:i])
                        ])
                    else:
                        p = p2
                        e = e2
                else:
                    p = p1
                    e = e1

                probas[i] *= p
                entropies[i] += e

        return probas, entropies
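getDistributionsfromFile in code example #12 turns these likelihoods into surprisal with a -log2 transform; here is a self-contained sketch of that post-processing (the helper name is mine).

import sys
import numpy as np

def surprisal(probas):
    """Per-note information content, -log2(p), as computed in code example #12."""
    return -np.log2(np.asarray(probas, dtype=float) + sys.float_info.epsilon)

print(surprisal([0.5, 0.25, 1.0]))   # -> [1., 2., ~0.]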
Code example #14
File: idyom.py Project: kdoelling1919/IDyOM
    def getLikelihoodfromFile(self,
                              file,
                              short_term_only=False,
                              long_term_only=False):
        """
		Return the likelihood over a score

		:param file: file to compute the likelihood on
		:type file: string

		:return: np.array(length)

		"""

        D = data.data()
        D.addFile(file)

        probas = np.ones(D.getSizeofPiece(0))
        probas[0] = 1 / len(self.LTM[0].models[0].alphabet)

        for model in self.LTM:
            dat = D.getData(model.viewPoint)[0]

            STM = longTermModel.longTermModel(model.viewPoint,
                                              maxOrder=20,
                                              STM=True,
                                              init=dat)

            for i in tqdm(range(1, len(dat))):
                # we instantiate a Short Term Model for the current viewpoint

                STM.train([dat[:i]], shortTerm=True)

                p1 = model.getLikelihood(dat[:i], dat[i])

                flag = True

                # This happens when the state never occurred in the training data
                if p1 is None:
                    p1 = 1 / 30
                    flag = None

                p2 = STM.getLikelihood(dat[:i], dat[i])

                if self.stm and p2 is not None:

                    if flag is not None:
                        p = self.mergeProbas([p1, p2], [
                            model.getRelativeEntropy(dat[:i]),
                            STM.getRelativeEntropy(dat[:i])
                        ])
                    else:
                        p = p2
                else:
                    p = p1

                if long_term_only:
                    p = p1
                if short_term_only:
                    p = p2
                    if p is None:
                        p = 1 / 30

                probas[i] *= p

                # leftover debugging check from the original source
                if probas[i] == 563540:
                    print("LTM:", model.getLikelihood(dat[:i], dat[i]))
                    print("STM:", p2)
                    #print("ret:", self.mergeProbas([p, p2], [model.getEntropy(dat[:i]), STM.getEntropy(dat[:i])]))
                    print()

        return probas
Code example #15
import sys
sys.path.append('../')

from idyom import longTermModel
from idyom import data
from idyom import score

import numpy as np
import matplotlib.pyplot as plt

# build a long term model over the pitch viewpoint, with no maximum order
L = longTermModel.longTermModel("pitch", maxOrder=None)

# parse a folder of scores into a data object
M = data.data()

#M.parse("../dataset/")
M.parse("dataBaseTest/")

# train on the pitch data of the corpus
L.train(M.getData("pitch"))

# generate a new sequence of 500 notes
G = L.generate(500)

print(G)

# wrap the generated sequence in a score object, plot it and export it as MIDI
s = score.score(G)

s.plot()

s.writeToMidi("exGen.mid")

# save the trained model to disk
L.save("longTermModel.save")