Example No. 1
    def train(self):
        # Two HMMs, each with 12 hidden states over (noteRange + 1) possible observations.
        hmmModel = HMM(12, noteRange + 1)   # hidden state: interval from the previous note (option 3 below)
        hmmModel2 = HMM(12, noteRange + 1)  # hidden state: first note of the quad (option 2 below)
        obs = []
        ground = []
        obs2 = []
        ground2 = []
        actions = list(range(minNote, maxNote))  # one action per playable note
        qModel = QLearner(actions, epsilon=0.1, alpha=0.2, gamma=0.9)

#HMM hidden-state configurations tried:
#2. Many four-note runs may be produced by a particular class of these short sequences, so the first note of each 4-note run was tried as the hidden state (ground2 below).
#3. An interval sounds much the same wherever it starts (a tritone, for instance), so the next note may be generated from the difference between the previous note and the note before that (ground below).
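#For example, with minNote = 48 (an assumed value) and quad = [60, 63, 66, 70]:
#   tempquad  = [12, 15, 18, 22]  -> obs gets [15, 18, 22]
#   tempquad2 = [0, 3, 6, 10]     (pitch classes relative to minNote)
#   ground    gets [3, 3, 4]      (option 3: absolute pitch-class differences)
#   ground2   gets [0, 0, 0]      (option 2: first pitch class, repeated)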
        for ls in self.clusterData:
            for quadidx, quad in enumerate(ls):
                tempquad = [x - minNote for x in quad]
                obs.append(tempquad[1:])
                obs2.append(tempquad[1:])
                tempquad2 = [(x - minNote) % 12 for x in quad]
                # Option 3: absolute difference between each pitch class and the one before it.
                notediff = [abs(tempquad2[0] - tempquad2[1]),
                            abs(tempquad2[1] - tempquad2[2]),
                            abs(tempquad2[2] - tempquad2[3])]
                ground.append(notediff)
                # Option 2: the first pitch class of the quad, repeated for each observation.
                ground2.append([tempquad2[0]] * 3)
                if quad:
                    for idx, note in enumerate(quad):
                        if idx > 0:
                            prevNote = quad[idx - 1]
                            # state = absolute interval from the previous note; action and next state = the note itself
                            qModel.learn(abs(prevNote - note), note, 1, note)
        hmmModel.learn(obs, ground)
        hmmModel2.learn(obs2, ground2)
        return (hmmModel, hmmModel2, qModel)
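The QLearner is exercised only through its constructor and learn(state1, action1, reward, state2) (see the comment in Example No. 2). As a rough sketch of what such a tabular learner might look like, assuming an epsilon-greedy policy and a dict-backed Q-table (TabularQLearner and choose_action are illustrative names, not the class used above):

import random
from collections import defaultdict

class TabularQLearner:
    """Minimal tabular Q-learning sketch matching the learn(s, a, r, s') call style."""
    def __init__(self, actions, epsilon=0.1, alpha=0.2, gamma=0.9):
        self.actions = actions
        self.epsilon = epsilon       # exploration rate for choose_action
        self.alpha = alpha           # learning rate
        self.gamma = gamma           # discount factor
        self.q = defaultdict(float)  # (state, action) -> estimated value

    def learn(self, state1, action1, reward, state2):
        # Standard Q-learning update: Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
        best_next = max(self.q[(state2, a)] for a in self.actions)
        self.q[(state1, action1)] += self.alpha * (
            reward + self.gamma * best_next - self.q[(state1, action1)])

    def choose_action(self, state):
        # Epsilon-greedy: explore with probability epsilon, otherwise exploit.
        if random.random() < self.epsilon:
            return random.choice(self.actions)
        return max(self.actions, key=lambda a: self.q[(state, a)])

Note that in Example No. 1 the Q-learning state is the absolute interval to the previous note, so whatever is learned is transposition-invariant, mirroring the interval-based HMM configuration.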
Example No. 2
    def train(self):
        # First- and second-order Markov models stored as raw transition-count tables.
        mmModel = np.zeros((noteRange + 1, noteRange + 1))
        mm3Model = np.zeros((noteRange + 1, noteRange + 1, noteRange + 1))
        hmmModel = HMM(12, noteRange + 1)
        obs = []
        ground = []
        actions = list(range(minNote, maxNote))
        qModel = QLearner(actions, epsilon=0.1, alpha=0.2, gamma=0.9)
        for ls in self.clusterData:
            for quadidx, quad in enumerate(ls):
                tempquad = [x - minNote for x in quad]  # take this out for prevNote-based states
                obs.append(tempquad[1:])  # HMM observations; the same rescaling could also be used as the Q-learning state
                tempquad = [(x - minNote) % 12 for x in quad]
                ground.append(tempquad[:3])  # hidden state: pitch class of the preceding note
                if quad:
                    for idx, note in enumerate(quad):
                        if idx > 0:
                            currNote = note
                            prevNote = quad[idx - 1]
                            # Q-learning: q.learn(state1, action1, reward, state2)
                            qModel.learn(prevNote, note, 1, note)
                            # First-order Markov model: count the prevNote -> currNote transition.
                            mmModel[currNote - minNote, prevNote - minNote] += 1
                        if idx > 1:
                            # Second-order Markov model: needs two previous notes.
                            currNote = note - minNote
                            prevNote = quad[idx - 1] - minNote
                            prevNote2 = quad[idx - 2] - minNote
                            mm3Model[currNote, prevNote, prevNote2] += 1
        hmmModel.learn(obs, ground)
        return (mmModel, mm3Model, hmmModel, qModel)
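Example No. 2 returns mmModel and mm3Model as raw transition counts, indexed [current, previous]. To generate notes with them, each conditional slice has to be normalized into a probability distribution first. A minimal sketch for the first-order table, assuming numpy and a uniform fallback for unseen contexts (sample_next_note is an illustrative helper, not part of the original code):

import numpy as np

def sample_next_note(mmModel, prev_note, minNote, rng=None):
    # mmModel is indexed [current, previous], so condition on the column.
    if rng is None:
        rng = np.random.default_rng()
    counts = mmModel[:, prev_note - minNote]
    total = counts.sum()
    if total == 0:
        # Previous note never seen in training: fall back to uniform (assumption).
        probs = np.full(len(counts), 1.0 / len(counts))
    else:
        probs = counts / total
    return int(rng.choice(len(counts), p=probs)) + minNote

The same normalization applies to mm3Model, conditioning on the last two notes instead of one.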