Beispiel #1
0
    def getComposition(self):
        """Assemble and return a Composition built from the parts
        supplied by the builder.

        Parts are requested and attached in a fixed order:
        collection first, then bag, then extra.
        """
        product = Composition()

        # Pull each part from the builder and wire it in, in order.
        product.setCollection(self.__builder.getCollection())
        product.setBag(self.__builder.getBag())
        product.setExtra(self.__builder.getExtra())

        return product
Beispiel #2
0
from pydub.utils import make_chunks
from pydub.effects import *

from composition import Sound
from composition import Composition
from composition import open
from composition import cut_tool

import numpy as np
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt
import array

import IPython

# Build a Composition and keep a direct handle on its library.
comp = Composition()
lib = comp.library

# TODO: make a sampling tool
# TODO: make a beat

# Frequency limits/zones (Hz):
# 16 to 32
# 32 to 512
# 512 to 2048
# 2048 to 8192
# 8192 to 16384

# C note fundamentals (Hz) — presumably C0 and C1; verify:
# 16.351
# 32.703
Beispiel #3
0
    def read_file(self):
        '''Read the save file and parse it using the section parsers.

        The first line must be a header of the form
        "SAVELLYS <x> tallennustiedosto".  The remaining lines contain
        "#"-prefixed section markers (tiedot, kommentit, nuotit, tauot,
        palkit, sanoitus); each marker dispatches the following lines to
        the matching parse_* method or accumulates the comment block.
        When parsing finishes, the composition is handed to CharGraphics.

        Raises:
            CorruptedCompositionFileError: on an unrecognized header or
                when more than one 'tiedot' section is declared.
        '''
        # 'with' guarantees the handle is closed even if a parser raises
        # (the previous explicit close() leaked the file on exceptions).
        with open(self.file_name, "r") as file:
            current_line = file.readline()
            self.header = current_line
            header_parts = current_line.split(" ")

            if header_parts[0] != "SAVELLYS":
                raise CorruptedCompositionFileError("Unknown file type")

            if header_parts[2].strip().lower() != 'tallennustiedosto':
                raise CorruptedCompositionFileError("Unknown file type")

            for line in file:
                # 'ref' is set when an inner section loop stopped on a new
                # "#" marker, meaning 'line' still holds a marker that has
                # not been dispatched yet.
                ref = False
                count = 0
                while line[0] == "#" or ref:
                    count += 1
                    if count > 10: break  # guard against spinning on a bad marker

                    if line[1:7].lower() == 'tiedot':
                        # Only one 'tiedot' (info) section is allowed.
                        if self.comp is not None:
                            raise CorruptedCompositionFileError("Monta tietoa")
                        else:
                            self.comp = Composition(
                                None, None, None, None, [],
                                [])  # Creating the composition object
                            line = self.parse_tiedot(file)

                    if line[1:10].lower() == 'kommentit':
                        # Accumulate comment lines until the next marker.
                        for line in file:
                            if line[0] == "#":
                                ref = True
                                break
                            else:
                                ref = False
                                self.commentblock = self.commentblock + line

                    elif line[1:7].lower() == 'nuotit':
                        # Notes section.
                        for line in file:
                            if line[0] == "#":
                                ref = True
                                break
                            else:
                                ref = False
                                self.parse_nuotit(line)

                    elif line[1:6].lower() == 'tauot':
                        # Rests section.
                        for line in file:
                            if line[0] == "#":
                                ref = True
                                break
                            else:
                                ref = False
                                self.parse_tauot(line)

                    elif line[1:7].lower() == 'palkit':
                        # Bars section.
                        for line in file:
                            if line[0] == "#":
                                ref = True
                                break
                            else:
                                ref = False
                                self.parse_palkit(line)

                    elif line[1:9].lower() == 'sanoitus':
                        # Lyrics section.
                        for line in file:
                            if line[0] == "#":
                                ref = True
                                break
                            else:
                                ref = False
                                self.parse_sanoitus(line)

        CharGraphics(self.comp)
Beispiel #4
0
import graph_relations
import supervised
import test_lists
from composition import Composition
import sys

#print(lcs.process(example_list)) #test lcs
#print(relations.process(food_list50)) #test relations

# For very long lists it is worth slightly raising the graph_relations
# similarity parameter, to reduce the number of output terms and thus
# take a smaller group of graph nodes.
#output1 =  graph_relations.process(food_list50, 0.2) #test graph_relations
#print(output1)
#print(lcs.process(['pasta', 'vegetable', 'yogurt', 'meat', 'cheese', 'butter', 'chocolate', 'beef', 'seafood', 'bread', 'pork', 'fish']))
#print(supervised.process(" ".join(food_list50)))

# Method sequences tried below: two summary-style and two
# description-style orderings of the processing steps.
summary1 = ['relations', 'graph_relations'] # summary variant 1
summary2 = ['relations', 'lcs', 'graph_relations'] # summary variant 2
description1 = ['graph_relations', 'lcs', 'relations'] # descriptive variant 1
description2 = ['lcs', 'graph_relations', 'relations'] # descriptive variant 2

# Composition params: string list, method sequence, minimum similarity
# for term pairs, maximum number of synsets per term.
composition = Composition(test_lists.food_list50, summary2, 0.3, 10)


# TODO: write a function that automatically permutes the methods,
# generating all possible sequences.
Beispiel #5
0
def basicMode(config, fasta_file, profilePath):
    """Run the basic analysis pipeline on *fasta_file*.

    Pipeline: sequence windowing -> similarity + composition matrices ->
    PCA -> clustering -> cluster report -> validation -> jplace parsing ->
    profile window assignment -> binomial test.  Progress messages go to
    stderr and intermediate files are cleaned up at the end.
    """
    work_dir = os.getcwd()

    # Output folders expected by the later pipeline stages.
    for folder in ('pplacer', 'testing'):
        os.mkdir(os.path.join(work_dir, folder))

    # Window the input sequences, forward and reverse.
    forward_windows = Preprocessing(fasta_file, config['win_length'],
                                    config['win_step'], "windows_sequence.fasta")
    forward_windows.output_window()
    reverse_windows = Preprocessing(fasta_file, config['win_length'],
                                    config['win_step'], "reverse_windows.fasta")
    reverse_windows.output_window()
    print >> sys.stderr, "Creating windows_sequence.fasta"

    # Similarity (MCL) and k-mer composition matrices, joined for PCA.
    similarity = Similarity(fasta_file, config['score_adj'], work_dir)
    similarity_matrix = similarity.mcl_perform()
    composition = Composition(config['kmer_len'])
    composition_matrix = composition.joined()
    joined_matrix = pd.concat([composition_matrix, similarity_matrix],
                              axis=1, join='inner')
    print >> sys.stderr, "Calculating similarity and composition matrix"

    # Dimensionality reduction.
    reduction = Reduction(joined_matrix, config['pca_comp'])
    pca_data = reduction.perform_pca()
    print >> sys.stderr, "Performing PCA"

    # Cluster the reduced data and plot the result.
    clusterer = Clustering(pca_data)
    clust_obj = clusterer.plot()
    print >> sys.stderr, "Performing clustering plot"

    # Summarize clusters and extract the query sequences.
    report = ClusterReport(clust_obj)
    file_name, query_seq = report.output_queryseq()
    print >> sys.stderr, "Doing report of clusters"

    # Second-round validation of the clustering results.
    validator = Validate(file_name, fasta_file, work_dir)
    jfile_comp, jfile_minus = validator.roundTwo()
    print >> sys.stderr, "Validation of results"

    # Correlation matrix from the jplace placement files.
    placement_parser = ParseJplace(jfile_comp, jfile_minus)
    corr_mat = placement_parser.correlation()
    print >> sys.stderr, "Doing profiles"

    # Assign the best window per profile.
    profiles = Profiles(corr_mat, query_seq, work_dir, profilePath)
    best_window = profiles.windowsAssigment()
    print >> sys.stderr, "Doing permutations"

    # Final binomial statistics.
    stats = StatsBinom(fasta_file, config['win_length'], best_window)
    stats.binomial()
    print >> sys.stderr, "Calculating p-value"

    cleaning(file_name)