def find_difficulties(sample_dir_path, r_chain_path, m_chain_path, output_path, mode):
    r"""
    Compute per-measure (rhythm, melody) log-likelihoods for every sample.

    `sample_dir_path`: the path to the directory containing samples
    `r_chain_path`: the path to the pickled rhythm chain used
    `m_chain_path`: the path to the pickled melody chain used
    `output_path`: the path to a file in which results will be pickled
    `mode`: 'Temperley' scores melodies via the external Temperley model;
        any other value uses the pickled melody Markov chain.

    Note that result is encoded as a list of tuples, each of which
    represents the likelihood of the rhythm and melody (respectively) of a
    single measure. This function also assumes that samples are being
    taken, implying that measures are uncorrelated.
    """
    with open(r_chain_path, 'rb') as fh:
        r_chain = pickle.load(fh)
    _, r_chain = maximum_likelihood_probabilities(r_chain)
    if mode != 'Temperley':
        with open(m_chain_path, 'rb') as fh:
            m_chain = pickle.load(fh)
        _, m_chain = maximum_likelihood_probabilities(m_chain)
    else:
        # Temperley mode scores melodies externally, so no melody chain.
        m_chain = None
    results = []
    samples = os.listdir(sample_dir_path)
    for sample_num, sample_path in enumerate(samples, start=1):
        try:
            LOG.info('{num} of {total}'.format(num=sample_num, total=len(samples)))
            full_path = os.path.join(sample_dir_path, sample_path)
            measures = _assemble_measures(full_path)
            # note that closing measures are ignored because they are simpler
            LOG.info(sample_path)
            for m_count, measure in enumerate(measures[0:12], start=1):
                LOG.info('{num} of {total}'.format(num=m_count, total=12))
                likelihood_rhythm = _log_likelihood(
                    [str(rhythm) for rhythm in _measure_rhythms(measure)],
                    r_chain)[0]
                if mode != 'Temperley':
                    likelihood_melody = _log_likelihood(
                        [str(melody) for melody in _measure_melodies(measure)],
                        m_chain)[0]
                else:
                    measure.write('midi', '/home/vincent/temp_measure.midi')
                    likelihood_melody = sight_reading.temperley.likelihood_melody(
                        '/home/vincent/temp_measure.midi',
                        '/home/vincent/test/temp.midi')
                results.append((likelihood_rhythm, likelihood_melody))
        except Exception as e:
            # BUG FIX: `e.message` was removed in Python 3 (deprecated since
            # 2.6); str(e) is portable and carries the same information.
            LOG.error(str(e))
    with open(output_path, 'wb') as fh:
        pickle.dump(results, fh, pickle.HIGHEST_PROTOCOL)
def find_difficulties(sample_dir_path, r_chain_path, m_chain_path, output_path, mode):
    r"""
    Compute per-measure (rhythm, melody) log-likelihoods for every sample.

    `sample_dir_path`: the path to the directory containing samples
    `r_chain_path`: the path to the pickled rhythm chain used
    `m_chain_path`: the path to the pickled melody chain used
    `output_path`: the path to a file in which results will be pickled
    `mode`: "Temperley" scores melodies via the external Temperley model;
        any other value uses the pickled melody Markov chain.

    Note that result is encoded as a list of tuples, each of which
    represents the likelihood of the rhythm and melody (respectively) of a
    single measure. This function also assumes that samples are being
    taken, implying that measures are uncorrelated.
    """
    with open(r_chain_path, "rb") as fh:
        r_chain = pickle.load(fh)
    _, r_chain = maximum_likelihood_probabilities(r_chain)
    if mode != "Temperley":
        with open(m_chain_path, "rb") as fh:
            m_chain = pickle.load(fh)
        _, m_chain = maximum_likelihood_probabilities(m_chain)
    else:
        # Temperley mode scores melodies externally, so no melody chain.
        m_chain = None
    results = []
    samples = os.listdir(sample_dir_path)
    for sample_num, sample_path in enumerate(samples, start=1):
        try:
            LOG.info("{num} of {total}".format(num=sample_num, total=len(samples)))
            full_path = os.path.join(sample_dir_path, sample_path)
            measures = _assemble_measures(full_path)
            # note that closing measures are ignored because they are simpler
            LOG.info(sample_path)
            for m_count, measure in enumerate(measures[0:12], start=1):
                LOG.info("{num} of {total}".format(num=m_count, total=12))
                likelihood_rhythm = _log_likelihood([str(rhythm) for rhythm in _measure_rhythms(measure)], r_chain)[0]
                if mode != "Temperley":
                    likelihood_melody = _log_likelihood(
                        [str(melody) for melody in _measure_melodies(measure)], m_chain
                    )[0]
                else:
                    measure.write("midi", "/home/vincent/temp_measure.midi")
                    likelihood_melody = sight_reading.temperley.likelihood_melody(
                        "/home/vincent/temp_measure.midi", "/home/vincent/test/temp.midi"
                    )
                results.append((likelihood_rhythm, likelihood_melody))
        except Exception as e:
            # BUG FIX: `e.message` was removed in Python 3 (deprecated since
            # 2.6); str(e) is portable and carries the same information.
            LOG.error(str(e))
    with open(output_path, "wb") as fh:
        pickle.dump(results, fh, pickle.HIGHEST_PROTOCOL)
def createChain(self, inputData, fromFile=False):
    """Estimate a first-order Markov chain from a trajectory.

    `inputData` is either a trajectory sequence, or — when `fromFile` is
    True — a path to a trajectory file readable by `pykov.readtrj`.
    Returns the `(initial_distribution, transition_matrix)` pair produced
    by pykov's maximum-likelihood estimation.
    """
    trajectory = pykov.readtrj(inputData) if fromFile else inputData
    # lag_time=1 gives a first-order chain; the separator token splits
    # independent sub-trajectories.
    p, P = pykov.maximum_likelihood_probabilities(
        trajectory, lag_time=1, separator='a')
    return p, P
def create(self, state):
    """
    Create the Markov chain itself.
    We use the parameter instead of the attribute so we can compute the
    matrix for different states.
    """
    # First-order model: each individual object (letter, number, symbol)
    # is its own state, e.g. "88,a,b," yields '8' '8' ',' 'a' ',' 'b' ','.
    try:
        observed_states = list(state)
    except AttributeError:
        print_error('There is no state yet')
        return False
    # Estimate the initial vector and transition matrix from the sequence.
    self.init_vector, self.matrix = pykov.maximum_likelihood_probabilities(
        observed_states, lag_time=1, separator='#')
def create(self, state):
    """
    Create the Markov chain itself.
    We use the parameter instead of the attribute so we can compute the
    matrix for different states.
    """
    # First-order model: every single object (letter, number, symbol) is
    # treated as a state, so the string is simply split into characters.
    try:
        observed = list(state)
    except AttributeError:
        print_error("There is no state yet")
        return False
    # Estimate initial vector and transition matrix via pykov's MLE.
    self.init_vector, self.matrix = pykov.maximum_likelihood_probabilities(
        observed,
        lag_time=1,
        separator="#",
    )
def _get_dependencies(mode, key_mode):
    """
    Load the resources needed for difficulty analysis.

    `mode`: one of 'Temperley', 'Relative' or 'Mixed' — selects which
        melody chain/percentile files are loaded ('Temperley' loads no
        melody chain).
    `key_mode`: suffix selecting key-mode-specific percentile files.

    Returns `(music_path, r_chain, r_percentiles, m_chain, m_percentiles)`
    where the chains are pykov transition matrices (initial vectors are
    discarded) and `m_chain` is None in Temperley mode.
    """
    my_dir = os.path.dirname(os.path.realpath(__file__))
    # The runtime tree mirrors the source tree with 'src' swapped for
    # 'runtime'.
    runtime_dir = my_dir.replace('src', 'runtime')
    parameter_path = os.path.join(runtime_dir, 'parameters.ini')
    config = ConfigParser.ConfigParser()
    config.read(parameter_path)
    music_path = config.get('APOPCALEAPS', 'music_folder')
    data_path = config.get('Analysis', 'data_root')
    # BUG FIX: pickle files must be opened in binary mode. The original
    # opened several of them in the default text mode, which fails with
    # pickle.load on Python 3 (and corrupts data on Windows Python 2);
    # every open below now uses 'rb', consistent with the percentile load.
    with open(os.path.join(data_path, 'pickled_r_percentiles_' + key_mode), 'rb') as fh:
        r_percentiles = pickle.load(fh)
    with open(os.path.join(data_path, 'pickled_rhythm'), 'rb') as fh:
        r_chain = pickle.load(fh)
    _, P = maximum_likelihood_probabilities(r_chain)
    r_chain = P
    if mode == 'Temperley':
        m_percentiles_path = os.path.join(
            data_path, 'pickled_m_percentiles_temperley_' + key_mode)
        m_chain = None
    elif mode == 'Relative':
        m_chain_path = os.path.join(data_path, 'pickled_melody_relative')
        m_percentiles_path = os.path.join(data_path, 'pickled_m_percentiles_relative')
    elif mode == 'Mixed':
        m_chain_path = os.path.join(data_path, 'pickled_melody_mixed')
        m_percentiles_path = os.path.join(data_path, 'pickled_m_percentiles_mixed')
    else:
        # BUG FIX: an unknown mode previously fell through and crashed
        # later with an opaque UnboundLocalError; fail fast instead.
        raise ValueError('Unknown mode: {0}'.format(mode))
    with open(m_percentiles_path, 'rb') as fh:
        m_percentiles = pickle.load(fh)
    if mode != 'Temperley':
        with open(m_chain_path, 'rb') as fh:
            m_chain = pickle.load(fh)
        _, P = maximum_likelihood_probabilities(m_chain)
        m_chain = P
    return music_path, r_chain, r_percentiles, m_chain, m_percentiles
def create(self, state):
    """
    Create the Markov chain itself.
    We use the parameter instead of the attribute so we can compute the
    matrix for different states.
    """
    try:
        # Second-order model: each state is a window of two consecutive
        # objects. NOTE(review): the window advances by ONE position, so
        # pairs overlap and the final window holds a single object —
        # preserved exactly as the original behaved.
        separated_letters = [state[i:i + 2] for i in range(len(state))]
        # Store the vectorized state separated every 2 letters
        self.set_vectorstate(separated_letters)
    except AttributeError:
        print_error('There is no state yet')
        return False
    # Generate the MC. The lag_time is the second order parameter
    self.init_vector, self.matrix = pykov.maximum_likelihood_probabilities(
        separated_letters, lag_time=2, separator='#')
    # BUG FIX: `print self.matrix` is Python 2 statement syntax and a
    # SyntaxError under Python 3; the call form behaves identically on both.
    print(self.matrix)
def _log_likelihood(entries, original_chain):
    r"""
    Find the log likelihood of a sequence of entries, given a Markov chain
    representing a corpus.

    #. `entries`: a sequence of events in the part, in string form
    #. `original_chain`: the pykov chain used to evaluate likelihood

    Returns a ``(log_likelihood, smoothed_chain)`` pair; the likelihood is
    ``-inf`` when a state never occurs in the corpus chain at all.
    """
    LOG.debug("Checking likelihood for {entries}".format(**locals()))
    # Smooth the corpus chain with one estimated from this very sequence so
    # transitions unseen in the corpus do not zero out the product.
    _, local_chain = maximum_likelihood_probabilities(entries)
    mod_chain = alt_combine_markov_chains(original_chain, local_chain, 0.02)
    total = 0
    state = entries[0]
    for entry in entries[1:]:
        LOG.debug("Current entry: {entry}".format(**locals()))
        try:
            # Probe the corpus chain: an unknown state means -inf likelihood.
            original_chain.succ(state)
        except KeyError:
            return -float("inf"), mod_chain
        total += log(mod_chain.succ(state)[entry])
        state = entry
    return total, mod_chain
def _log_likelihood(entries, original_chain):
    r"""
    Find the log likelihood of a sequence of entries, given a Markov chain
    representing a corpus.

    #. `entries`: a sequence of events in the part, in string form
    #. `original_chain`: the pykov chain used to evaluate likelihood

    Returns ``(log_likelihood, smoothed_chain)``; the likelihood is
    ``-inf`` whenever some state never occurs in the corpus chain.
    """
    LOG.debug('Checking likelihood for {entries}'.format(**locals()))
    # Blend the corpus chain with a chain fitted to this sequence, so that
    # transitions absent from the corpus still get a small probability.
    _, local_chain = maximum_likelihood_probabilities(entries)
    mod_chain = alt_combine_markov_chains(original_chain, local_chain, 0.02)
    accumulated = 0
    previous = entries[0]
    for entry in entries[1:]:
        LOG.debug('Current entry: {entry}'.format(**locals()))
        try:
            # A state missing from the corpus chain makes the whole
            # sequence impossible under the corpus model.
            original_chain.succ(previous)
        except KeyError:
            return -float("inf"), mod_chain
        accumulated += log(mod_chain.succ(previous)[entry])
        previous = entry
    return accumulated, mod_chain
#usage: # SCRIPT source_file steps if len(sys.argv) < 3: print("Usage:") print("THIS-SCRIPT source_file steps") sys.exit(1) #Building the tuple of all the words, in order, to be fed to Pykov s = open(sys.argv[1], 'r').read() s = s.replace('\n', ' ') t = tuple(s.split(' ')) #Gets the vector and markov chain from the tuple built. vec, chn = pykov.maximum_likelihood_probabilities(t) #Next, we need to randomly pick a good starting point for the chain. #More or less ripped from the original pykov source, but the code is broken. # So here, we write a fix. n = random.uniform(0, 1) inital = None for state, prob in six.iteritems(chn): if n < prob: inital = state break n = n - prob #Subtracting two steps since we have to hardcode the first two steps. steps = int(sys.argv[2]) - 2
# Flat analysis script: sanity-checks pykov on random data, then loads worm
# behaviour data and pre-allocates per-worm, per-stage-bin arrays.
import os

# Hard-coded project paths; os.chdir is used so the local-module imports
# below resolve. NOTE(review): manipulating sys.path would be less fragile.
dir_other = '/home/ckirst/Science/Projects/CElegansBehaviour/Experiment/DwellingRoaming/Scripts/Other/'
dir_base = '/home/ckirst/Science/Projects/CElegansBehaviour/Experiment/DwellingRoaming'
os.chdir(dir_other)
import pykov as pk
import numpy as np

# Quick smoke test: fit a two-state chain to a random 0/1 sequence.
t = np.random.rand(100) > 0.5
t = np.array(t, dtype=int)
p, c = pk.maximum_likelihood_probabilities(t)

os.chdir(dir_base)
import experiment as exp

strain = 'N2'
feat = 'roam'  # which behavioural feature of the data to analyse
data = exp.load_data(strain)
d = getattr(data, feat)
# 2**4 = 16 developmental-stage bins per worm.
sbins = exp.stage_bins(data, nbins=2**4)
# NOTE(review): these rebind `c` and `p`, discarding the pykov smoke-test
# results above — the arrays are (worms x bins x 2) accumulators,
# presumably for per-bin chain parameters; confirm against later code.
c = np.zeros((data.nworms, sbins[0].shape[1], 2))
p = np.zeros((data.nworms, sbins[0].shape[1], 2))