def note_scan( filename, beats, channel ) :
    """Detect one note per beat interval of a WAV file.

    Parameters:
        filename -- path to the input WAV file.
        beats    -- increasing frame offsets of beat boundaries; each
                    interval between consecutive beats becomes one note.
        channel  -- channel number stored on the returned Composition.

    Returns a Composition whose notes carry the autocorrelation-detected
    frequency and FFT amplitude of each beat-sized chunk of samples.
    """
    print( "detecting frequencies..." )
    wave_ifile = wave.open( filename, 'r' )
    print( wave_ifile.getparams())
    frame_rate = wave_ifile.getframerate()
    composition = Composition()
    composition.set_channel( channel )
    start_time = 0
    try:
        prev_beat = 0
        for beat in beats :
            # Frames between the previous beat and this one form one note.
            note_length = beat - prev_beat
            prev_beat = beat
            iframes = wave_ifile.readframes( note_length )
            if not iframes:
                break
            # np.fromstring is deprecated (removed for binary input);
            # frombuffer is the supported equivalent.  .copy() keeps the
            # array writable, matching fromstring's behavior.
            data = np.frombuffer( iframes, np.int16 ).copy()
            end_time = start_time + note_length
            frequency = autocorrelation_frequency( data, frame_rate )
            amplitude = fft_amplitude( data )
            composition.add_note( Note( start_time, end_time, frequency, amplitude ) )
            start_time += note_length
    finally:
        # Close the wave file even if analysis raises.
        wave_ifile.close()
    return composition
def basic_note_scan( filename, channel ) :
    """Detect one note per fixed 1024-frame window of a WAV file.

    Parameters:
        filename -- path to the input WAV file.
        channel  -- channel number stored on the returned Composition.

    Returns a Composition with one Note per 1024-frame chunk, carrying
    the detected frequency and amplitude of that chunk.
    """
    print( "detecting frequencies..." )
    wave_ifile = wave.open( filename, 'r' )
    print( wave_ifile.getparams())
    frame_rate = wave_ifile.getframerate()
    composition = Composition()
    composition.set_channel( channel )
    start_time = 0
    frame_size = 1024
    try:
        while True :
            iframes = wave_ifile.readframes( frame_size )
            if not iframes:
                break
            # np.fromstring is deprecated (removed for binary input);
            # frombuffer + .copy() is the writable equivalent.
            data = np.frombuffer( iframes, np.int16 ).copy()
            end_time = start_time + frame_size
            frequency = autocorrelation_frequency( data, frame_rate )
            amplitude = fft_amplitude( data )
            composition.add_note( Note( start_time, end_time, frequency, amplitude ) )
            start_time += frame_size
    finally:
        # Close the wave file even if analysis raises.
        wave_ifile.close()
    return composition
def main(args, config): wDir = os.getcwd() #Instance Preprocessing class window = Preprocessing(args.fasta_file, config['win_length'], config['win_step']) window.output_window() print >> sys.stderr, "Creating windows_sequence.fasta" #Instance Similarity and Composition class sim = Similarity(args.fasta_file, config['score_adj'],wDir) sim_matrix = sim.mcl_perform() comp_results = Composition(config['kmer_len']) comp_matrix = comp_results.joined() #Join similarity and composition matrix for PCA join = pd.concat([comp_matrix, sim_matrix], axis= 1, join='inner') print >> sys.stderr, "Calculating similarity and composition matrix" #Instance Reduction class pca = Reduction(join, config['pca_comp']) pca_data = pca.perform_pca() print >> sys.stderr, "Performing PCA" #Instance Clustering class cluster = Clustering(pca_data) clust_obj = cluster.plot() print >> sys.stderr, "Performing clustering plot" #Instance ClusterReport class report = ClusterReport(clust_obj) file_name, querySeq = report.output_queryseq() print >> sys.stderr, "Doing report of clusters" #Instance Validate class valid = Validate(file_name, args.fasta_file,wDir) jfileComp, jfileMinus = valid.roundTwo() print >> sys.stderr, "Validation of results" #Instance ParseJplace Class parsing = ParseJplace(jfileComp, jfileMinus) corrMat = parsing.correlation() print >> sys.stderr, "Doing profiles" #Instance Profile Class ttest = Profiles(corrMat, querySeq) bestWin = ttest.windowsAssigment() print >>sys.stderr, "Doing permutations" #Instance StatsBinom finalResult = StatsBinom(args.fasta_file, config['win_length'],bestWin) finalResult.binomial() cleaning(file_name)
def remove_note(self, line):
    """Remove one note identified by a "measure, index" text line.

    Returns True when the note was removed, None on blank input or on
    any parse/lookup failure (which is reported by printing).
    """
    try:
        if not line.strip():
            return None
        fields = line.split(",")
        bar = int(fields[0].strip())
        bar_notes = self.sort_measurenotes(bar)
        target = bar_notes[int(fields[1].strip())]
        Composition.remove_note(self.comp, bar, target.start, target.pitch)
        return True
    except:
        print("Huono nuotti.")
def parse_sanoitus(self, line):
    """Parse one lyric line: "measure, numerator/denominator, syllable".

    Adds a Lyrics object to the composition and returns True; returns
    None (after printing) on blank input or any parse failure.
    """
    try:
        if not line.strip():
            return None
        fields = line.split(",")
        bar = int(fields[0].strip())
        # The start is always written as a fraction "a/b".
        frac = fields[1].split("/")
        onset = float(int(frac[0].strip()) / int(frac[1].strip()))
        syllable = fields[2].strip()
        Composition.add_lyric(self.comp, Lyrics(bar, onset, syllable))
        return True
    except:
        print("Huono sanoitustavu.")
def remove_beam(self, line):
    """Remove the beam(s) at the position named by a "measure, start" line.

    `start` may be a plain number or a fraction "a/b"; anything else
    raises CorruptedCompositionFileError (swallowed by the handler below).
    Returns True when the line parsed cleanly, None on blank input or
    any failure (which is reported by printing).
    """
    try:
        if line.strip() != "":
            parts = line.split(",")
            measure = int(parts[0].strip())
            start = parts[1].split("/")
            if len(start) == 1:
                start = float(start[0].strip())
            elif len(start) == 2:
                start = float(
                    int(start[0].strip()) / int(start[1].strip()))
            else:
                raise CorruptedCompositionFileError("Huono alkamishetki.")
            # Iterate over a snapshot: Composition.remove_beam mutates
            # self.comp.beams, and removing from a list while iterating
            # it directly skips elements (bug fix).
            for beam in list(self.comp.beams):
                if beam.start == start and beam.measure == measure:
                    Composition.remove_beam(self.comp, measure, start)
            return True
    except:
        print("Huono palkki.")
def __init__(self):
    """Build the widget: composition view stacked above the typewriter panel."""
    QWidget.__init__(self)
    self._ui_model = None
    self._composition = Composition()
    self._typewriter_panel = TypewriterPanel()
    self.setOrientation(Qt.Vertical)
    layout = QVBoxLayout()
    for child in (self._composition, self._typewriter_panel):
        layout.addWidget(child)
    self.setLayout(layout)
class MainSplitter(QSplitter):
    """Vertical splitter holding the composition view above the typewriter panel."""

    def __init__(self):
        QWidget.__init__(self)
        self._ui_model = None
        self._composition = Composition()
        self._typewriter_panel = TypewriterPanel()
        self.setOrientation(Qt.Vertical)
        layout = QVBoxLayout()
        for child in (self._composition, self._typewriter_panel):
            layout.addWidget(child)
        self.setLayout(layout)

    def set_ui_model(self, ui_model):
        """Store the UI model and propagate it to both child widgets."""
        self._ui_model = ui_model
        for child in (self._composition, self._typewriter_panel):
            child.set_ui_model(ui_model)

    def unregister_updaters(self):
        """Detach the children from the updater machinery, newest first."""
        self._typewriter_panel.unregister_updaters()
        self._composition.unregister_updaters()
def parse_tauot(self, line):
    """Parse one rest line: "measure, start-fraction, duration".

    The start must be a fraction "a/b"; the duration may be a plain
    number or a fraction.  Adds a Rest to the composition and returns
    True; returns None (after printing) on blank input or any failure.
    """
    try:
        if not line.strip():
            return None
        fields = line.split(",")
        bar = int(fields[0].strip())  # measure
        frac = fields[1].split("/")
        onset = float(int(frac[0].strip()) / int(frac[1].strip()))  # start
        dur_parts = fields[2].split("/")
        if len(dur_parts) == 1:
            length = float(dur_parts[0].strip())
        elif len(dur_parts) == 2:
            length = float(
                int(dur_parts[0].strip()) / int(dur_parts[1].strip()))
        else:
            raise CorruptedCompositionFileError("Huono kesto")
        Composition.add_rest(self.comp, Rest(bar, onset, length))
        return True
    except:
        print("Huono tauko.")
def getComposition(self):
    """Assemble a Composition from the builder: collection, then bag, then extra."""
    composition = Composition()
    # Start with the collection, then add the bag, and finish with the extra.
    composition.setCollection(self.__builder.getCollection())
    composition.setBag(self.__builder.getBag())
    composition.setExtra(self.__builder.getExtra())
    return composition
def parse_palkit(self, line):
    """Parse one beam line: "measure, i:j:k" (colon-separated note indices).

    Looks up each index in the sorted notes of the measure, builds a
    Beam from them and adds it to the composition.  Returns True on
    success, None (after printing) on blank input or any failure.
    """
    try:
        if not line.strip():
            return None
        fields = line.split(",")
        bar = int(fields[0].strip())
        bar_notes = self.sort_measurenotes(bar)
        members = []
        for token in fields[1].split(":"):
            try:
                index = int(token.strip())
            except ValueError:
                raise CorruptedCompositionFileError(
                    "Omituinen nuotti palkille")
            members.append(bar_notes[index])
        Composition.add_beam(self.comp, Beam(members))
        return True
    except:
        print("Huono palkki")
def find_items(self, comp):
    '''Collect the notes, rests, beams and lyrics that fall on this column.

    Notes, rests and lyrics must match both measure and start; beams
    match on measure alone.  When a note and a rest share a start, the
    rest is removed from the composition (and the removal announced).
    '''
    self.notes.extend(
        n for n in comp.notes
        if n.measure == self.measure and n.start == self.start)
    self.rests.extend(
        r for r in comp.rests
        if r.measure == self.measure and r.start == self.start)
    for n in self.notes:
        for r in self.rests:
            if n.start == r.start:
                Composition.remove_rest(comp, r.measure, r.start)
                print("Nuotti ja tauko paallekain. Tauko poistettu.")
    self.beams.extend(
        b for b in comp.beams if b.measure == self.measure)
    self.lyrics.extend(
        l for l in comp.lyrics
        if l.measure == self.measure and l.start == self.start)
def pulse_width_mod( composition, pulse_width ) :
    """Re-slice the composition's notes into pulse_width-sized segments,
    stamping each segment with a PWM value that cycles through 4 steps,
    one step per pulse.  The composition's note list is replaced in place.

    NOTE(review): `time` advances by each consumed note's duration, which
    assumes the input notes are contiguous (no gaps) -- confirm.
    """
    notes = composition.get_notes()
    new_composition = Composition()
    final_end_time = notes[-1].get_end_time()
    time = 0
    pwm = 0
    cni = 0 # composition note index
    while time < final_end_time :
        # One pulse covers [time, end_time).
        end_time = time + pulse_width
        # Emit every source note that finishes inside this pulse as its
        # own (shortened) segment, all sharing the current PWM step.
        while cni < len(notes) and notes[cni].get_end_time() < end_time :
            note = Note()
            note.set_start_time( time )
            note.set_end_time( notes[cni].get_end_time() )
            note.set_frequency( notes[cni].get_frequency() )
            note.set_amplitude( notes[cni].get_amplitude() )
            note.set_pwm( get_pwm_val( pwm ) )
            new_composition.add_note( note )
            time += notes[cni].get_end_time() - notes[cni].get_start_time()
            cni += 1
        if cni == len(notes) :
            break
        # The current source note spans past the pulse boundary: emit the
        # remainder of the pulse from it, then advance to the next pulse
        # and the next PWM step (4-step cycle).
        note = Note()
        note.set_start_time( time )
        note.set_end_time( end_time )
        note.set_frequency( notes[cni].get_frequency() )
        note.set_amplitude( notes[cni].get_amplitude() )
        note.set_pwm( get_pwm_val( pwm ) )
        new_composition.add_note( note )
        time = end_time
        pwm = (pwm + 1) % 4
    composition.set_notes( new_composition.get_notes() )
# NOTE(review): the plotting statements below look like the tail of a
# plotting routine defined outside this chunk (`main_seq_folder` is not
# defined here) -- confirm their enclosing scope before moving them.
plt.gca().set_yscale("log")
plt.gca().set_xscale("log")
plt.savefig(main_seq_folder + "RvM.pdf", format="pdf")
plt.show()

if __name__ == "__main__":
    # Remember to turn off logging in adaptive_bisection.py
    # composition = [Composition.fromZX(Z, 0.73) for Z in [0.00, 0.01, 0.015, 0.02, 0.03]]
    # v_main_seq = [MainSequence(min_core_temp=5e6, max_core_temp=3.5e7, composition=comp, num_stars=100) for comp in composition]
    # for main_seq in v_main_seq:
    #     main_seq.solve_stars()
    # plot_main_sequence(v_main_seq)
    # Earlier single-star experiments, kept for reference:
    # test_star = Star(temp_c = 1.5e7, density_c=1.6e5, composition=Composition.fromXY(0.69, 0.29))
    # test_star = Star(temp_c = 3e7, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 1.2e10, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 1e6, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 3.5e7, composition=Composition.fromXY(0.5, 0.1))
    # test_star = Star(temp_c = 1e8, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 3.5e7, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 3.5e7, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 8.23e6, composition=Composition.fromZX(1e-8, 0.73))
    # Solve and plot a single star (metallicity Z=0.2, X=0.73).
    test_star = Star(temp_c = 8.23e6, composition=Composition.fromZX(0.2, 0.73))
    test_star.solve()
    # # # test_star.log_raw(b=20)
    test_star.log_solved_properties()
    plot_star(test_star)
# NOTE(review): this plt.show() looks like the tail of a plotting routine
# defined outside this chunk -- confirm its enclosing scope.
plt.show()

if __name__ == "__main__":
    # Remember to turn off logging in adaptive_bisection.py
    # composition = [Composition.fromZX(Z, 0.73) for Z in [0.00, 0.01, 0.015, 0.02, 0.03]]
    # v_main_seq = [MainSequence(min_core_temp=5e6, max_core_temp=3.5e7, composition=comp, num_stars=100) for comp in composition]
    # for main_seq in v_main_seq:
    #     main_seq.solve_stars()
    # plot_main_sequence(v_main_seq)
    # Earlier single-star experiments, kept for reference:
    # test_star = Star(temp_c = 1.5e7, density_c=1.6e5, composition=Composition.fromXY(0.69, 0.29))
    # test_star = Star(temp_c = 3e7, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 1.2e10, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 1e6, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 3.5e7, composition=Composition.fromXY(0.5, 0.1))
    # test_star = Star(temp_c = 1e8, composition=Composition.fromXY(0.73, 0.25))
    # test_star = Star(temp_c = 3.5e7, composition=Composition.fromXY(0.73, 0.25))
    # Three stars with the same core temperature but different helium
    # fractions, plotted for comparison:
    test_star1 = Star(temp_c = 8.23e6, composition=Composition.fromXY(0.73, 0.25))
    test_star2 = Star(temp_c = 8.23e6, composition=Composition.fromXY(0.73, 0.27-0.00001))
    test_star3 = Star(temp_c = 8.23e6, composition=Composition.fromXY(0.73, 0.17))
    # test_star.solve()
    # # # test_star.log_raw(b=20)
    # test_star.log_solved_properties()
    # # # plot_step_sizes(test_star)
    plot_star(test_star1)
    plot_star(test_star2)
    plot_star(test_star3)
import graph_relations
import supervised
import test_lists
from composition import Composition
import sys

# Earlier experiments, kept for reference:
#print(lcs.process(example_list)) # test lcs
#print(relations.process(food_list50)) # test relations
# For very long lists, graph_relations works best with a slightly higher
# similarity parameter: it reduces the number of output terms and hence
# selects a smaller group of graph nodes.
#output1 = graph_relations.process(food_list50, 0.2) # test graph_relations
#print(output1)
#print(lcs.process(['pasta', 'vegetable', 'yogurt', 'meat', 'cheese', 'butter', 'chocolate', 'beef', 'seafood', 'bread', 'pork', 'fish']))
#print(supervised.process(" ".join(food_list50)))

# Candidate method pipelines to feed to Composition:
summary1 = ['relations', 'graph_relations'] # summarizing 1
summary2 = ['relations', 'lcs', 'graph_relations'] # summarizing 2
description1 = ['graph_relations', 'lcs', 'relations'] # descriptive 1
description2 = ['lcs', 'graph_relations', 'relations'] # descriptive 2

# Composition parameters: string list, method sequence, minimum similarity
# for term pairs, maximum number of synsets per term.
composition = Composition(test_lists.food_list50, summary2, 0.3, 10)
# TODO: write a function that automatically permutes the methods,
# generating all possible pipelines.
compositions.append( composition )
# Apply the per-channel effects requested on the command line.
for composition in compositions :
    if ( composition.get_channel() == 1 ):
        if ( 1 in args.echo ) :
            retro_conformer.single_channel_echo(composition)
        if ( 1 in args.mod ) :
            #retro_conformer.split_composition_notes(composition)
            retro_conformer.pulse_width_mod( composition, sampling_rate/8 )
        if ( args.reverb ) :
            # Remove other channel-2 compositions before adding the reverb
            # copy.  Iterate over a snapshot: removing items from a list
            # while iterating it directly skips elements (bug fix).
            for comp2 in list(compositions) :
                if comp2.get_channel() == 2 :
                    compositions.remove(comp2)
            composition_channel2 = Composition()
            composition_channel2.set_channel( 2 )
            composition_channel2.add_notes( composition.get_notes() )
            retro_conformer.reverb_composition(composition_channel2, 0.5, sampling_rate/4)
            # NOTE(review): appending while the outer loop iterates this
            # same list means the new channel-2 copy is visited later in
            # this loop (and so picks up channel-2 echo/mod) -- confirm
            # that is intended.
            compositions.append(composition_channel2)
    if composition.get_channel() == 2:
        if ( 2 in args.echo ) :
            retro_conformer.single_channel_echo(composition)
        if ( 2 in args.mod ) :
            #retro_conformer.split_composition_notes(composition)
            retro_conformer.pulse_width_mod( composition, sampling_rate/8 )
    if composition.get_channel() == 3:
        if ( args.trikick ) :
            retro_conformer.kick_drum_line(composition, 4)
data.bufferKanjiSimData(kanjisim_url1, kanjisim_url2, kanjissim_file_name) data.get(kanjis_url, kanjis_file_name) fetched = True except Exception as e: print("- retry") print(e) time.sleep(60) print("Printing similarity") sdot = Similarity.graph(data) sdisplay = random.random() < 0.7 sdot.render('D:\Japanese\jap_anki\graphs\similarity', view=sdisplay) print("Printing composition") cdot = Composition.graph(data) cdisplay = random.random() < 0.7 cdot.render('D:\Japanese\jap_anki\graphs\composition', view=cdisplay) print("Printing ORoots") odot = ORoots.graph(data) odisplay = random.random() < 0.5 odot.render('D:\Japanese\jap_anki\graphs\oroots', view=odisplay) print("Printing components") rdot = Components.graph(data) rdisplay = (not (sdisplay or cdisplay or odisplay)) or (random.random() < 0.4) rdot.render('D:\Japanese\jap_anki\graphs\components', view=(random.random() < 0.4)) print("All done")
from pydub.utils import make_chunks
from pydub.effects import *
from composition import Sound
from composition import Composition
# NOTE(review): this shadows the builtin open() for the rest of the module.
from composition import open
from composition import cut_tool
import numpy as np
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt
import array
import IPython

# A composition and its sample library, used by the scratch code below.
comp = Composition()
lib = comp.library

#make samp tool
#make a beat

# Frequency limits/zones (Hz), one octave per band:
#16 to 32
#32 to 512
#512 to 2048
#2048 to 8192
#8192 to 16384

# Reference pitches (Hz):
#C
#16.351
#32.703
def basicMode(config, fasta_file, profilePath):
    """Run the placement pipeline on fasta_file in "basic" mode.

    Creates pplacer/testing work folders, windows the input (forward and
    reverse), builds similarity + k-mer composition matrices, reduces
    them with PCA, clusters, reports, validates, parses jplace output,
    assigns windows and finishes with binomial statistics and cleanup.
    Progress is reported on stderr at each stage.

    NOTE(review): uses the Python 2 `print >>` statement, so this module
    must run under Python 2.
    """
    #create output folders
    wDir = os.getcwd()
    folders = ['pplacer', 'testing']
    for folder in folders:
        os.mkdir(os.path.join(wDir, folder))
    #Instance Preprocessing class (forward and reverse windowing)
    window = Preprocessing(fasta_file, config['win_length'], config['win_step'], "windows_sequence.fasta")
    window.output_window()
    reverseSeq = Preprocessing(fasta_file, config['win_length'], config['win_step'], "reverse_windows.fasta")
    reverseSeq.output_window()
    print >> sys.stderr, "Creating windows_sequence.fasta"
    #Instance Similarity and Composition class
    sim = Similarity(fasta_file, config['score_adj'], wDir)
    sim_matrix = sim.mcl_perform()
    comp_results = Composition(config['kmer_len'])
    comp_matrix = comp_results.joined()
    #Join similarity and composition matrix for PCA
    # (inner join keeps only rows present in both matrices)
    join = pd.concat([comp_matrix, sim_matrix], axis=1, join='inner')
    print >> sys.stderr, "Calculating similarity and composition matrix"
    #Instance Reduction class
    pca = Reduction(join, config['pca_comp'])
    pca_data = pca.perform_pca()
    print >> sys.stderr, "Performing PCA"
    #Instance Clustering class
    cluster = Clustering(pca_data)
    clust_obj = cluster.plot()
    print >> sys.stderr, "Performing clustering plot"
    #Instance ClusterReport class
    report = ClusterReport(clust_obj)
    file_name, querySeq = report.output_queryseq()
    print >> sys.stderr, "Doing report of clusters"
    #Instance Validate class
    valid = Validate(file_name, fasta_file, wDir)
    jfileComp, jfileMinus = valid.roundTwo()
    print >> sys.stderr, "Validation of results"
    #Instance ParseJplace Class
    parsing = ParseJplace(jfileComp, jfileMinus)
    corrMat = parsing.correlation()
    print >> sys.stderr, "Doing profiles"
    #Instance Profile Class
    ttest = Profiles(corrMat, querySeq, wDir, profilePath)
    bestWin = ttest.windowsAssigment()
    print >> sys.stderr, "Doing permutations"
    #Instance StatsBinom
    finalResult = StatsBinom(fasta_file, config['win_length'], bestWin)
    finalResult.binomial()
    print >> sys.stderr, "Calculating p-value"
    # Remove intermediate files produced along the way.
    cleaning(file_name)
def read_file(self):
    '''Read the composition save file and dispatch each "#section" to its parser.

    The file must start with a header line "SAVELLYS ... tallennustiedosto".
    Section headers begin with "#" (tiedot, kommentit, nuotit, tauot,
    palkit, sanoitus).  The `ref` flag signals that an inner loop already
    consumed the next section header, so the dispatch loop must run again
    without reading a new line; `count` caps that at 10 iterations as a
    guard against looping forever on an unrecognized header.
    Finishes by handing the parsed composition to CharGraphics.
    '''
    file = open(self.file_name, "r")
    current_line = file.readline()
    self.header = current_line
    header_parts = current_line.split(" ")
    if header_parts[0] != "SAVELLYS":
        raise CorruptedCompositionFileError("Unknown file type")
    if header_parts[2].strip().lower() != 'tallennustiedosto':
        raise CorruptedCompositionFileError("Unknown file type")
    for line in file:
        ref = False
        count = 0
        while line[0] == "#" or ref:
            count += 1
            if count > 10:
                break
            if line[1:7].lower() == 'tiedot':
                if self.comp != None:
                    raise CorruptedCompositionFileError("Monta tietoa")
                else:
                    # Creating the composition object
                    self.comp = Composition(
                        None, None, None, None, [], [])
                # parse_tiedot reads ahead and returns the next line.
                line = self.parse_tiedot(file)
            if line[1:10].lower() == 'kommentit':
                for line in file:
                    if line[0] == "#":
                        ref = True
                        break
                    else:
                        ref = False
                        self.commentblock = self.commentblock + line
            elif line[1:7].lower() == 'nuotit':
                for line in file:
                    if line[0] == "#":
                        ref = True
                        break
                    else:
                        ref = False
                        self.parse_nuotit(line)
            elif line[1:6].lower() == 'tauot':
                for line in file:
                    if line[0] == "#":
                        ref = True
                        break
                    else:
                        ref = False
                        self.parse_tauot(line)
            elif line[1:7].lower() == 'palkit':
                for line in file:
                    if line[0] == "#":
                        ref = True
                        break
                    else:
                        ref = False
                        self.parse_palkit(line)
            elif line[1:9].lower() == 'sanoitus':
                for line in file:
                    if line[0] == "#":
                        ref = True
                        break
                    else:
                        ref = False
                        self.parse_sanoitus(line)
    file.close()
    CharGraphics(self.comp)
def parse_nuotit(self, line):
    '''Parse a note line: "pitch, octave, flat, sharp, measure, start, duration".

    Adds a Note to the composition and returns True.  Returns None on
    blank input, on an unknown pitch letter (printing "Huono
    savelkorkeus"), or on any other failure (printing "Huono nuotti.").
    '''
    try:
        if not line.strip():
            return None
        parts = line.split(",")
        name = parts[0].strip().lower()
        if name not in {"a", "b", "c", "d", "e", "f", "g"}:
            print("Huono savelkorkeus")
            return None
        # Map the note letter to its base pitch constant.
        pitch = {"a": Note.A, "b": Note.B, "c": Note.C, "d": Note.D,
                 "e": Note.E, "f": Note.F, "g": Note.G}[name]
        octave = int(parts[1].strip())
        if octave not in {0, 1, 2}:
            raise CorruptedCompositionFileError("Outo oktaavi")
        pitch -= octave * 7  # shift the base pitch by whole octaves
        flat = parts[2].strip().lower() == "k"
        sharp = parts[3].strip().lower() == "k"
        measure = int(parts[4].strip())
        # Start: plain number or fraction "a/b".
        frac = parts[5].split("/")
        if len(frac) == 1:
            start = float(frac[0].strip())
        elif len(frac) == 2:
            start = float(int(frac[0].strip()) / int(frac[1].strip()))
        else:
            raise CorruptedCompositionFileError("Huono alkamishetki.")
        # Duration: plain number or fraction "a/b".
        frac = parts[6].split("/")
        if len(frac) == 1:
            duration = float(frac[0].strip())
        elif len(frac) == 2:
            duration = float(int(frac[0].strip()) / int(frac[1].strip()))
        else:
            raise CorruptedCompositionFileError("Huono kesto")
        note = Note(pitch, octave, flat, sharp, measure, start, duration)
        Composition.add_note(self.comp, note)
        return True
    except:
        print("Huono nuotti.")