def build_and_save_model(self, encoded_chord_string, encoded_duration_string):
    """Train Markov chains for chords and durations and pickle them to disk.

    Args:
        encoded_chord_string: sequence of encoded chord symbols to fit on.
        encoded_duration_string: sequence of encoded note durations to fit on.

    Side effects:
        Stores the fitted chains on self.chordsmc / self.durationmc and
        overwrites 'ml_models/markov_bass_chords' and
        'ml_models/markov_bass_durations' with their pickled forms.
    """
    # The train-then-pickle sequence was duplicated verbatim for both
    # models; it now lives in a single private helper.
    self.chordsmc = self._fit_and_pickle(
        encoded_chord_string, 'ml_models/markov_bass_chords')
    self.durationmc = self._fit_and_pickle(
        encoded_duration_string, 'ml_models/markov_bass_durations')

def _fit_and_pickle(self, encoded_data, path):
    """Fit a MarkovChain on encoded_data, pickle it to path (overwriting
    any existing file) and return the fitted model."""
    model = mc.MarkovChain().from_data(encoded_data)
    with open(path, 'wb') as outp:  # Overwrites any existing file.
        pickle.dump(model, outp, pickle.HIGHEST_PROTOCOL)
    return model
def forecast_markov(trainingdata, evaldata, start=10):
    """Fit a Markov chain on the training counts, forecast as many steps as
    there are evaluation rows, then plot and score the forecast.

    Args:
        trainingdata: frame with a 'count' column used to fit the chain.
        evaldata: frame with a 'count' column; its length sets the forecast
            horizon and its values are the ground truth for plotting/scoring.
        start: initial state for MarkovChain.simulate. Defaults to 10, the
            value previously hard-coded, so existing callers are unchanged.
    """
    n = len(evaldata)
    actual = evaldata['count'].values  # ground truth, used twice below
    mod = mc.MarkovChain().from_data(trainingdata['count'])
    # simulate() returns (ids, states); only the state sequence is needed.
    _, states = mod.simulate(n, start=start)
    plot(f'markov_{n}.png', states, 'Forecast based on Markov Chains', actual)
    print('Accuracy Scores for Markov Chains:')
    accuracy.eval_model(states, 1, actual)
def predict(prev_play, oppnent_history, memoryLength):
    """Predict the opponent's next move from their bounded history and
    return the move that beats it.

    Mutates oppnent_history in place: the oldest entry is dropped once the
    history grows past memoryLength.
    """
    # Keep the history window bounded to the configured memory length.
    if len(oppnent_history) > memoryLength:
        del oppnent_history[0]
    model = mc.MarkovChain().from_data(oppnent_history)
    predicted = giveMostProbableNextItem(model, prev_play)
    return winDict[predicted]
def getMarkovProbas(observations):
    """Fit a Markov chain on observations, print its transition-probability
    table (states x states) and return the raw probability matrix."""
    chain = mc.MarkovChain().from_data(observations)
    probas = chain.observed_p_matrix
    table = pandas.DataFrame(probas,
                             index=chain.states,
                             columns=chain.states,
                             dtype=float)
    print(table)
    return probas
def listen():
    """Main keyboard loop: records keystrokes into `observations` while the
    user is typing and, after `playThreeshold` seconds of inactivity, fits a
    Markov chain on the recorded keys and "plays" simulated keystrokes back
    through `keyboardController`.

    Runs forever. Relies on the module-level globals declared below, plus
    read-only globals set elsewhere (`continuousLearning`,
    `keysCurrentlyPressed`, `lastKeyPressed`) — presumably maintained by the
    on_press/on_release callbacks; confirm against their definitions.
    """
    global keyboardListener, observations, playBuffer, keyboardController, lastKeyPlayed, period, playThreeshold, lastKeyPressedTime, silentEnding, silentEndingRemoved
    keyboardListener = keyboard.Listener(on_press=on_press, on_release=on_release)
    keyboardListener.start()
    print("starting listening to keyboard events")
    while True:
        timeElapsedSinceKeyboard = datetime.now() - lastKeyPressedTime
        if timeElapsedSinceKeyboard.seconds > playThreeshold:  # play mode, uninterrupted
            if len(playBuffer) == 0:  # nothing to play yet
                if not silentEndingRemoved:
                    # strip the trailing run of silence recorded while waiting
                    observations = observations[:-silentEnding]
                    silentEndingRemoved = True
                if len(observations) > 500:  # only play once the database is big enough
                    # ~ print(observations)
                    markovChain = mc.MarkovChain().from_data(observations)
                    samplesToPlay = min(
                        int(len(observations) / 2), int(10000 / period)
                    )  # will play longer on stronger databases, 10s max
                    # ~ ids, playBuffer = markovChain.simulate(samplesToPlay, start=lastKeyPressed)
                    # NOTE(review): samplesToPlay is computed but unused — the
                    # call below hard-codes 10000 samples instead.
                    ids, playBuffer = markovChain.simulate(
                        10000, start=lastKeyPressed)
            else:  # we are continuing to play previously computed predictions
                if not continuousLearning and len(observations) > 0:
                    observations = numpy.array(
                        [])  # clear database when playing
                if playBuffer[0] == "0":  # this is a pause
                    print("PAUSE")
                    if lastKeyPlayed is not None:  # we've pressed a key
                        keyboardController.release(lastKeyPlayed)
                        print("OFF_" + lastKeyPlayed)
                        lastKeyPlayed = None
                elif playBuffer[
                        0] != lastKeyPlayed and lastKeyPlayed is not None:  # we're changing from one key to another
                    keyboardController.release(lastKeyPlayed)
                    print("OFF_" + lastKeyPlayed)
                    lastKeyPlayed = playBuffer[0]
                    keyboardController.press(playBuffer[0])
                    print("ON_" + lastKeyPlayed)
                elif lastKeyPlayed is None:  # we are pressing a key coming from a pause
                    lastKeyPlayed = playBuffer[0]
                    keyboardController.press(playBuffer[0])
                    print("ON_" + lastKeyPlayed)
                # consume the sample just handled
                playBuffer = numpy.delete(playBuffer, 0)
        elif len(keysCurrentlyPressed) > 0:  # record mode : a key is pressed
            observations = numpy.append(observations, lastKeyPressed)
            if len(playBuffer) > 0:
                # user started typing again: drop the pending predictions
                playBuffer = numpy.array([])
        else:  # record mode : nothing is pressed
            observations = numpy.append(observations, '0')
        # NOTE(review): both branches sleep the same amount; this looks like a
        # leftover from distinct play/record tick rates — confirm intent.
        if len(playBuffer) > 0:
            time.sleep(period / 1000)
        else:
            time.sleep(period / 1000)
def getMarkovProbas(observations):
    """Build a Markov chain from observations and return its transition
    probabilities as a JSON-serialisable dict, or False when fewer than two
    states were observed."""
    global removeSilenceFromVisualisation
    if removeSilenceFromVisualisation:
        # drop silence markers before fitting the chain
        observations = observations[observations != "0"]
    chain = mc.MarkovChain().from_data(observations)
    # plain lists so the result can be serialised to JSON
    names = list(chain.states)
    weights = [list(row) for row in chain.observed_p_matrix.tolist()]
    if len(names) <= 1:
        return False
    return {"weights": weights, "names": names}
def getMarkovProbas(observations):
    """Fit a Markov chain on observations and return its transition table
    as a JSON-serialisable dict keyed by readable note names, or False when
    fewer than two states were observed."""
    global keyToNote, removeSilenceFromVisualisation
    if removeSilenceFromVisualisation:
        # drop silence markers before fitting the chain
        observations = observations[observations != "0"]
    chain = mc.MarkovChain().from_data(observations)
    weights = [list(row) for row in chain.observed_p_matrix.tolist()]
    # translate keystrokes into corresponding readable note names
    names = [keyToNote[key] for key in chain.states]
    print("states :", names)
    print("probas :", weights)
    if len(names) <= 1:
        return False
    return {"weights": weights, "names": names}
#%% import mchmm as mc #%% chain = mc.MarkovChain().from_data('RSPPSRRRSSSSPRSSPRSSRPRPPSR') # %% chain.observed_matrix # %% graph = chain.graph_make(format="png", graph_attr=[("rankdir", "LR")], node_attr=[("fontname", "Roboto bold"), ("fontsize", "20")], edge_attr=[("fontname", "Iosevka"), ("fontsize", "12")]) graph.render() # %% # %%
return markovChain.observed_p_matrix  # NOTE(review): tail of a function whose `def` line is cut off above this view


if __name__ == '__main__':
    # Start the keyboard listener; on_press/on_release presumably maintain
    # the globals read below (lastKeyPressedTime, observations,
    # lastKeyPressed, ...) — confirm against their definitions.
    keyboardListener = keyboard.Listener(on_press=on_press, on_release=on_release)
    keyboardListener.start()
    while True:
        timeElapsedSinceKeyboard = datetime.now() - lastKeyPressedTime
        if timeElapsedSinceKeyboard.seconds > playThreeshold:  # play mode, uninterrupted
            if len(playBuffer) == 0:  # nothing to play yet
                if len(observations) > 500:  # only play once enough keys were recorded
                    print(observations)
                    markovChain = mc.MarkovChain().from_data(observations)
                    ids, playBuffer = markovChain.simulate(
                        10000, start=lastKeyPressed)
            else:  # we are continuing to play previously computed predictions
                if playBuffer[0] == "0":  # this is a pause
                    print("PAUSE")
                    if lastKeyPlayed is not None:  # we've pressed a key
                        keyboardController.release(lastKeyPlayed)
                        print("OFF_" + lastKeyPlayed)
                        lastKeyPlayed = None
                elif playBuffer[
                        0] != lastKeyPlayed and lastKeyPlayed is not None:  # we're changing from one key to another
                    keyboardController.release(lastKeyPlayed)
                    print("OFF_" + lastKeyPlayed)
                    lastKeyPlayed = playBuffer[0]
                    keyboardController.press(playBuffer[0])
                    # NOTE(review): the chunk appears truncated here — no
                    # buffer consumption, record branches, or sleep follow.
# Slicing our requests searchPath = searchPath + '&from=' + str(begIndex) + '&size=' + str(batchSize) response = requests.get(CONSUMER_URL + searchPath, headers=headers, verify=False) # I: https://docs.python.org/3/library/http.client.html#httpresponse-objects respData = response.json() observationsArray = respData["hits"]["hits"] for observation in observationsArray: # Important to convert to seconds, datetime cannot handle ms # TODO: consider rounding to minutes timestamp_s = int(observation['_source']['timestamp'] / 1000) count = int(observation['_source']['value']) cur_date = datetime.fromtimestamp(timestamp_s) df_row = pd.DataFrame([[cur_date, count]], columns=['t', 'count']) retrievedData = retrievedData.append(df_row) begIndex = begIndex + batchSize countLeft = countLeft - batchSize print(retrievedData) retrievedData.index = retrievedData.t # Create the model from scratch mod = mc.MarkovChain().from_data(retrievedData['count']) print(mod.expected_matrix) pickle.dump(mod, open(modelFilepath, 'wb')) else: print("Not enough arguments. The call should be: python3 markovderivator.py <sensor id> <model file name> <number of weeks|0> <batch size>")