Example #1
import logging
from itertools import combinations
from pprint import pformat as pf


def find_answer(n, adj):
    paths = [SimplePath(i, adj) for i in xrange(0, n)]
    valid_paths = [p for p in paths if p.is_circle]
    #pp(paths)
    complete_circles = []
    lines = []
    for p in valid_paths:
        #pp((p, p.is_circle, len(p)))
        if p.is_complete_circle:
            complete_circles.append(p)
        else:
            lines.append(p)

    # TODO: optimize?
    complete_circles.sort(key=len, reverse=True)
    logging.debug(pf(complete_circles))
    lines.sort(key=len, reverse=True)
    logging.debug(pf(lines))
    comb = combinations(lines, 2)

    joinable_line_pairs = [p for p in comb if p[0].can_join_path(p[1])]
    #print 'joinable_line_pairs'
    #pp(joinable_line_pairs)
    joined_paths = [JoinedPath(*pair) for pair in joinable_line_pairs]
    joined_paths.sort(key=len, reverse=True)
    logging.debug('joined_paths')
    logging.debug(pf(joined_paths))

    longest_circle_path_len = len(complete_circles[0]) if complete_circles else -1
    longest_joined_path_len = len(joined_paths[0]) if joined_paths else -1
    longest_lines_len = len(lines[0]) if lines else -1

    return max(longest_circle_path_len, longest_joined_path_len, longest_lines_len)
Example #2
    def gen_data(self, tempdata):
        """Generates p-values for each marker"""

        self.dataset.group.get_markers()

        pheno_vector = np.array([np.nan if val == "x" else float(val) for val in self.vals])

        if self.dataset.group.species == "human":
            p_values, t_stats = self.gen_human_results(pheno_vector, tempdata)
        else:
            genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers]
            
            no_val_samples = self.identify_empty_samples()
            trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples)
            
            genotype_matrix = np.array(trimmed_genotype_data).T
            
            print("pheno_vector: ", pf(pheno_vector))
            print("genotype_matrix: ", pf(genotype_matrix))
            print("genotype_matrix.shape: ", pf(genotype_matrix.shape))
            
            t_stats, p_values = lmm.run(
                pheno_vector,
                genotype_matrix,
                restricted_max_likelihood=True,
                refit=False,
                temp_data=tempdata
            )
        
        self.dataset.group.markers.add_pvalues(p_values)

        self.qtl_results = self.dataset.group.markers.markers
Example #3
def find_outliers(vals):
    """Calculates the upper and lower bounds of a set of sample/case values


    >>> find_outliers([3.504, 5.234, 6.123, 7.234, 3.542, 5.341, 7.852, 4.555, 12.537])
    (11.252500000000001, 0.5364999999999993)

    >>> find_outliers([9,12,15,17,31,50,7,5,6,8])
    (32.0, -8.0)

    If there are no vals, returns None for the upper and lower bounds,
    which calling code will have to handle.
    >>> find_outliers([])
    (None, None)

    """

    logger.debug("xerxes vals is:", pf(vals))

    if vals:
        #logger.debug("vals is:", pf(vals))
        stats = corestats.Stats(vals)
        low_hinge = stats.percentile(25)
        up_hinge = stats.percentile(75)
        hstep = 1.5 * (up_hinge - low_hinge)

        upper_bound = up_hinge + hstep
        lower_bound = low_hinge - hstep

    else:
        upper_bound = None
        lower_bound = None

    logger.debug(pf(locals()))
    return upper_bound, lower_bound
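
For comparison, a minimal standalone sketch of the same 1.5 * IQR (Tukey fence) rule using numpy; note that np.percentile interpolates, so the hinges may differ slightly from corestats.Stats.percentile:

import numpy as np

def tukey_fences(vals):
    """Return (upper_bound, lower_bound), or (None, None) for empty input."""
    if not vals:
        return None, None
    low_hinge, up_hinge = np.percentile(vals, [25, 75])
    hstep = 1.5 * (up_hinge - low_hinge)
    return up_hinge + hstep, low_hinge - hstep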
Example #4
def search_page():
    print("in search_page")
    if 'info_database' in request.args:
        print("Going to sharing_info_page")
        template_vars = sharing_info_page()
        if template_vars.redirect_url:
            print("Going to redirect")
            return flask.redirect(template_vars.redirect_url)
        else:
            return render_template("data_sharing.html", **template_vars.__dict__)
    else:
        key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
        print("key is:", pf(key))
        with Bench("Loading cache"):
            result = Redis.get(key)

        if result:
            print("Cache hit!!!")
            with Bench("Loading results"):
                result = pickle.loads(result)
        else:
            print("calling search_results.SearchResultPage")
            print("request.args is", request.args)
            the_search = search_results.SearchResultPage(request.args)
            result = the_search.__dict__

            print("result: ", pf(result))
            Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
            Redis.expire(key, 60*60)

        if result['quick']:
            return render_template("quick_search.html", **result)
        else:
            return render_template("search_result_page.html", **result)
Example #5
    def __init__(self,
                 dataset,
                 sample_names,
                 this_trait,
                 sample_group_type,
                 header):

        self.dataset = dataset
        self.this_trait = this_trait
        self.sample_group_type = sample_group_type    # primary or other
        self.header = header

        self.sample_list = [] # The actual list of sample objects
        self.sample_attribute_values = {}

        self.get_attributes()
        print("camera: attributes are:", pf(self.attributes))

        if self.this_trait and self.dataset and self.dataset.type == 'ProbeSet':
            self.get_extra_attribute_values()

        for counter, sample_name in enumerate(sample_names, 1):
            sample_name = sample_name.replace("_2nd_", "")

            #ZS - If there's no value for the sample/strain, create the sample object (so samples with no value are still displayed in the table)
            try:
                sample = self.this_trait.data[sample_name]
            except KeyError:
                print("No sample %s, let's create it now" % sample_name)
                sample = webqtlCaseData.webqtlCaseData(sample_name)
            
            #sampleNameAdd = ''
            #if fd.RISet == 'AXBXA' and sampleName in ('AXB18/19/20','AXB13/14','BXA8/17'):
            #    sampleNameAdd = HT.Href(url='/mouseCross.html#AXB/BXA', text=HT.Sup('#'), Class='fs12', target="_blank")
            sample.extra_info = {}
            if self.dataset.group.name == 'AXBXA' and sample_name in ('AXB18/19/20','AXB13/14','BXA8/17'):   
                sample.extra_info['url'] = "/mouseCross.html#AXB/BXA"
                sample.extra_info['css_class'] = "fs12" 

            print("  type of sample:", type(sample))

            if sample_group_type == 'primary':
                sample.this_id = "Primary_" + str(counter)
            else:
                sample.this_id = "Other_" + str(counter)

            #### For extra attribute columns; currently only used by several datasets - Zach
            if self.sample_attribute_values:
                sample.extra_attributes = self.sample_attribute_values.get(sample_name, {})
                print("sample.extra_attributes is", pf(sample.extra_attributes))
            
            self.sample_list.append(sample)

        print("self.attributes is", pf(self.attributes))

        self.do_outliers()
        #do_outliers(the_samples)
        print("*the_samples are [%i]: %s" % (len(self.sample_list), pf(self.sample_list)))
        for sample in self.sample_list:
            print("apple:", type(sample), sample)
Example #6
    def search(self):
        self.search_terms = parser.parse(self.search_terms)
        print("After parsing:", self.search_terms)

        for a_search in self.search_terms:
            print("[kodak] item is:", pf(a_search))
            search_term = a_search['search_term']
            search_operator = a_search['separator']
            if a_search['key']:
                search_type = a_search['key'].upper()
            else:
                # We fall back to the dataset type as the key to get the right object
                search_type = self.dataset.type
                
            print("search_type is:", pf(search_type))

            # This is throwing an error when a_search['key'] is None, so I changed above    
            #search_type = string.upper(a_search['key'])
            #if not search_type:
            #    search_type = self.dataset.type

            search_ob = do_search.DoSearch.get_search(search_type)
            search_class = getattr(do_search, search_ob)
            print("search_class is: ", pf(search_class))
            the_search = search_class(search_term,
                                    search_operator,
                                    self.dataset,
                                    )
            self.results.extend(the_search.run())
            #print("in the search results are:", self.results)

        self.header_fields = the_search.header_fields
Example #7
    def get_cm_length_list(self):
        """Chromosome length in centimorgans
        
        Calculates the length in centimorgans by subtracting the centimorgan position
        of the first marker in a chromosome from that of the last marker
        
        """
        
        self.dataset.group.read_genotype_file()
        
        self.cm_length_list = []
        
        for chromosome in self.dataset.group.genotype:
            self.cm_length_list.append(chromosome[-1].cM - chromosome[0].cM)
            
        print("self.cm_length_list:", pf(self.cm_length_list))
        
        assert len(self.cm_length_list) == len(self.chromosomes), "Uh-oh lengths should be equal!"
        for counter, chromosome in enumerate(self.chromosomes.values()):
            chromosome.cm_length = self.cm_length_list[counter]
            #self.chromosomes[counter].cm_length = item
            
        for key, value in self.chromosomes.items():
            print("bread - %s: %s" % (key, pf(vars(value))))
        

# Testing                
#if __name__ == '__main__':    
#    foo = dict(bar=dict(length))
Example #8
    def _debug(self, op, path, args, ret):
        own = op in self.__class__.__dict__
        sys.stderr.write('%s:%s:%i/%i/%i\n' % (
            (op.upper(), own) + fuse_get_context()
            ))
        sys.stderr.write(':: %s\n' % path)
        if op not in ('read', 'write'):
            sys.stderr.write(':: %s\n' % pf(args))
            sys.stderr.write(':: %s\n' % pf(ret))
        sys.stderr.write('\n')
        sys.stderr.flush()
Example #9
    def __init__(self,Y,K,Kva=[],Kve=[],X0=None,verbose=True):
 
       """
       The constructor takes a phenotype vector or array of size n.
       It takes a kinship matrix of size n x n.  Kva and Kve can be computed as Kva,Kve = linalg.eigh(K) and cached.
       If they are not provided, the constructor will calculate them.
       X0 is an optional covariate matrix of size n x q, where there are q covariates.
       When this parameter is not provided, the constructor will set X0 to an n x 1 matrix of all ones to represent a mean effect.
       """

       if X0 is None: X0 = np.ones(len(Y)).reshape(len(Y),1)
       self.verbose = verbose
 
       #x = Y != -9
       x = ~np.isnan(Y)   # boolean mask of non-missing phenotype values
       #pdb.set_trace()
       if not x.sum() == len(Y):
          print("Removing %d missing values from Y\n" % ((~x).sum()))
          if self.verbose: sys.stderr.write("Removing %d missing values from Y\n" % ((~x).sum()))
          Y = Y[x]
          print("x: ", len(x))
          print("K: ", K.shape)
          #K = K[x,:][:,x]
          X0 = X0[x,:]
          Kva = []
          Kve = []
       self.nonmissing = x
 
       print("this K is:", K.shape, pf(K))
       
       if len(Kva) == 0 or len(Kve) == 0:
          # if self.verbose: sys.stderr.write("Obtaining eigendecomposition for %dx%d matrix\n" % (K.shape[0],K.shape[1]) )
          begin = time.time()
          # Kva,Kve = linalg.eigh(K)
          Kva,Kve = kvakve(K)
          end = time.time()
          if self.verbose: sys.stderr.write("Total time: %0.3f\n" % (end - begin))
          print("sum(Kva),sum(Kve)=",sum(Kva),sum(Kve))

       self.K = K
       self.Kva = Kva
       self.Kve = Kve
       print("self.Kva is: ", self.Kva.shape, pf(self.Kva))
       print("self.Kve is: ", self.Kve.shape, pf(self.Kve))
       self.Y = Y
       self.X0 = X0
       self.N = self.K.shape[0]

       # ----> Below moved to kinship.kvakve(K)
       # if sum(self.Kva < 1e-6):
       #    if self.verbose: sys.stderr.write("Cleaning %d eigen values\n" % (sum(self.Kva < 0)))
       #    self.Kva[self.Kva < 1e-6] = 1e-6
 
       self.transform()
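
A usage sketch for this constructor on toy data, caching the eigendecomposition as the docstring suggests (assumes the class is importable as LMM):

import numpy as np
from scipy import linalg

n = 100
Y = np.random.randn(n)                # phenotype vector of size n
G = np.random.randn(n, 500)
K = np.dot(G, G.T) / G.shape[1]       # kinship matrix of size n x n
Kva, Kve = linalg.eigh(K)             # cache the eigendecomposition
model = LMM(Y, K, Kva=Kva, Kve=Kve)   # X0 defaults to an n x 1 column of ones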
Example #10
def human_association(snp,
                      n,
                      keep,
                      lmm_ob,
                      pheno_vector,
                      covariate_matrix,
                      kinship_matrix,
                      refit):

    x = snp[keep].reshape((n,1))
    #x[[1,50,100,200,3000],:] = np.nan
    v = np.isnan(x).reshape((-1,))

    # Check SNPs for missing values
    if v.sum():
        keeps = ~v
        xs = x[keeps,:]
        # If no variation at this snp or all genotypes missing 
        if keeps.sum() <= 1 or xs.var() <= 1e-6:
            return np.nan, np.nan
            #p_values.append(np.nan)
            #t_stats.append(np.nan)
            #continue

        # It's OK to center the genotype - I used options.normalizeGenotype to
        # force the removal of missing genotypes as opposed to replacing them with MAF.

        #if not options.normalizeGenotype:
        #    xs = (xs - xs.mean()) / np.sqrt(xs.var())

        filtered_pheno = pheno_vector[keeps]
        filtered_covariate_matrix = covariate_matrix[keeps,:]
        
        print("kinship_matrix shape is: ", pf(kinship_matrix.shape))
        print("keeps is: ", pf(keeps.shape))
        filtered_kinship_matrix = kinship_matrix[keeps,:][:,keeps]
        filtered_lmm_ob = lmm.LMM(filtered_pheno,filtered_kinship_matrix,X0=filtered_covariate_matrix)
        if refit:
            filtered_lmm_ob.fit(X=xs)
        else:
            #try:
            filtered_lmm_ob.fit()
            #except: pdb.set_trace()
        ts, ps, beta, betaVar = filtered_lmm_ob.association(xs, returnBeta=True)
    else:
        if x.var() == 0:
            return np.nan, np.nan
            #p_values.append(np.nan)
            #t_stats.append(np.nan)
            #continue
        if refit:
            lmm_ob.fit(X=x)
        ts, ps, beta, betaVar = lmm_ob.association(x)
    return ps, ts
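
The missing-value handling above reduces to one boolean mask applied consistently to the SNP vector, the phenotypes, the covariates, and both axes of the kinship matrix; a toy sketch (variable names hypothetical):

import numpy as np

n = 6
x = np.array([[0.], [1.], [np.nan], [1.], [0.], [np.nan]])  # one SNP, two missing calls
pheno = np.random.randn(n)
kinship = np.eye(n)

keeps = ~np.isnan(x).reshape(-1)         # modern spelling of `True - v`
xs = x[keeps, :]                         # genotypes without missing values
pheno_f = pheno[keeps]                   # phenotypes for the kept samples
kinship_f = kinship[keeps, :][:, keeps]  # drop both rows and columns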
Example #11
def trim_strains(strains, probeset_vals):
    trimmed_strains = []
    #print("probeset_vals is:", pf(probeset_vals))
    first_probeset = list(probeset_vals.itervalues())[0]
    print("\n**** first_probeset is:", pf(first_probeset))
    for strain in strains:
        print("\n**** strain is:", pf(strain))
        if strain in first_probeset:
            trimmed_strains.append(strain)
    print("trimmed_strains:", pf(trimmed_strains))
    return trimmed_strains
Example #12
    def __init__(self, search_term, search_operator=None, dataset=None):
        self.search_term = search_term
        # Make sure search_operator is something we expect
        assert search_operator in (None, "=", "<", ">", "<=", ">="), "Bad search operator"
        self.search_operator = search_operator
        self.dataset = dataset

        if self.dataset:
            print("self.dataset is boo: ", type(self.dataset), pf(self.dataset))
            print("self.dataset.group is: ", pf(self.dataset.group))
            #Get group information for dataset and the species id
            self.species_id = webqtlDatabaseFunction.retrieve_species_id(self.dataset.group.name)
Example #13
def run_other_new(pheno_vector,
        genotype_matrix,
        restricted_max_likelihood=True,
        refit=False,
        tempdata=None      # <---- must not be None
        ):
    
    """Takes the phenotype vector and genotype matrix and returns a set of p-values and t-statistics
    
    restricted_max_likelihood -- whether to use restricted max likelihood; True or False
    refit -- whether to refit the variance component for each marker
    tempdata -- TempData object that stores the progress for each major step of the
    calculations ("calculate_kinship" and "GWAS" take the majority of time)
    
    """
    
    print("Running the new LMM2 engine in run_other_new")
    print("REML=",restricted_max_likelihood," REFIT=",refit)

    # Adjust phenotypes
    Y,G,keep = phenotype.remove_missing(pheno_vector,genotype_matrix,verbose=True)
    print("Removed missing phenotypes",Y.shape)

    # if options.maf_normalization:
    #     G = np.apply_along_axis( genotype.replace_missing_with_MAF, axis=0, arr=g )
    #     print "MAF replacements: \n",G
    # if not options.skip_genotype_normalization:
    # G = np.apply_along_axis( genotype.normalize, axis=1, arr=G)

    with Bench("Calculate Kinship"):
        K,G = calculate_kinship(G, tempdata)
    
    print("kinship_matrix: ", pf(K))
    print("kinship_matrix.shape: ", pf(K.shape))

    # with Bench("Create LMM object"):
    #     lmm_ob = lmm2.LMM2(Y,K)
    # with Bench("LMM_ob fitting"):
    #     lmm_ob.fit()

    print("run_other_new genotype_matrix: ", G.shape)
    print(G)

    with Bench("Doing GWAS"):
        t_stats, p_values = gwas.gwas(Y,
                                      G.T,
                                      K,
                                      restricted_max_likelihood=restricted_max_likelihood,
                                      refit=refit, verbose=True)
    Bench().report()
    return p_values, t_stats
Example #14
def marker_regression_page():
    initial_start_vars = request.form
    temp_uuid = initial_start_vars['temp_uuid']
    wanted = (
        'trait_id',
        'dataset',
        'suggestive'
    )

    start_vars = {}
    for key, value in initial_start_vars.iteritems():
        if key in wanted or key.startswith('value:'):
            start_vars[key] = value

    version = "v1"
    key = "marker_regression:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
    print("key is:", pf(key))
    with Bench("Loading cache"):
        result = Redis.get(key)

    #print("************************ Starting result *****************")
    #print("result is [{}]: {}".format(type(result), result))
    #print("************************ Ending result ********************")

    if result:
        print("Cache hit!!!")
        with Bench("Loading results"):
            result = pickle.loads(result)
    else:
        print("Cache miss!!!")
        template_vars = marker_regression.MarkerRegression(start_vars, temp_uuid)

        template_vars.js_data = json.dumps(template_vars.js_data,
                                           default=json_default_handler,
                                           indent="   ")

        result = template_vars.__dict__

        print("DATASET:", pf(result['dataset']))

        for item in template_vars.__dict__.keys():
            print("  ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))

        #causeerror
        Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
        Redis.expire(key, 60*60)

    with Bench("Rendering template"):
        rendered_template = render_template("marker_regression.html", **result)

    return rendered_template
Example #15
    def all_notif(self, *args, **kwargs):
        import time
        from pprint import pformat as pf
        extra = []
        if 'value' in kwargs and kwargs['value']:
            extra.append('value: %s' % self._make_uri(kwargs['node'], kwargs['value']))
        elif 'node' in kwargs and kwargs['node']:
            extra.append('node: %s' % (kwargs['node'].name if kwargs['node'].name else kwargs['node'].node_id))
        extra = ' ; '.join(extra)

        if args:
            logger.warning('>~>~># %f: %s ; %s ; %s', time.time(), pf(args), pf(kwargs), extra)
        else:
            logger.warning('>~>~># %f: %s ; %s', time.time(), pf(kwargs), extra)
Example #16
    def calculate_pca(self, corr_results, cols): 
        base = importr('base')
        stats = importr('stats')        
        print("checking:", pf(stats.rnorm(100)))

        corr_results_to_list = robjects.FloatVector([item for sublist in corr_results for item in sublist])
        print("corr_results:",  pf(corr_results_to_list))

        m = robjects.r.matrix(corr_results_to_list, nrow=len(cols))
        eigen = base.eigen(m)
        print("eigen:", eigen)
        pca = stats.princomp(m, cor = "TRUE")
        print("pca:", pca)

        return pca
Example #17
    def addheaders(self, environ):
        headers = {}
        self.credentials.apply(headers)
        for header, content in headers.items():
            environ["HTTP_"+header.upper()] = content
        logging.debug(pf(environ))
        return environ
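
For intuition, the loop above applies the CGI/WSGI convention of exposing request headers as HTTP_* keys in the environ; a toy sketch with a hypothetical header:

headers = {"Authorization": "Bearer abc123"}  # hypothetical output of credentials.apply()
environ = {}
for header, content in headers.items():
    environ["HTTP_" + header.upper()] = content
# environ == {"HTTP_AUTHORIZATION": "Bearer abc123"}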
Example #18
    def run(self):

        #self.search_term = [float(value) for value in self.search_term]
        #
        #if self.search_operator == "=":
        #    assert isinstance(self.search_term, (list, tuple))
        #    self.mean_min, self.mean_max = self.search_term[:2]
        #
        #    self.where_clause = """ %sXRef.mean > %s and
        #                     %sXRef.mean < %s """ % self.mescape(self.dataset.type,
        #                                                        min(self.mean_min, self.mean_max),
        #                                                        self.dataset.type,
        #                                                        max(self.mean_min, self.mean_max))
        #else:
        #    # Deal with >, <, >=, and <=
        #    self.where_clause = """ %sXRef.mean %s %s """ % self.mescape(self.dataset.type,
        #                                                                self.search_operator,
        #                                                                self.search_term[0])

        self.where_clause = self.get_where_clause()
        print("where_clause is:", pf(self.where_clause))

        self.query = self.compile_final_query(where_clause = self.where_clause)

        return self.execute(self.query)
Example #19
    def get_final_query(self):
        self.where_clause = self.get_where_clause()
        print("where_clause is:", pf(self.where_clause))

        self.query = self.compile_final_query(where_clause = self.where_clause)
        
        return self.query
Example #20
def search_page():
    logger.info("in search_page")
    logger.info(request.url)
    result = None
    if USE_REDIS:
        with Bench("Trying Redis cache"):
            key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
            logger.debug("key is:", pf(key))
            result = Redis.get(key)
            if result:
                logger.info("Redis cache hit on search results!")
                result = pickle.loads(result)
    else:
        logger.info("Skipping Redis cache (USE_REDIS=False)")

    logger.info("request.args is", request.args)
    the_search = search_results.SearchResultPage(request.args)
    result = the_search.__dict__
    valid_search = result['search_term_exists']

    logger.debugf("result", result)

    if USE_REDIS and valid_search:
        Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
        Redis.expire(key, 60*60)

    if valid_search:
        return render_template("search_result_page.html", **result)
    else:
        return render_template("search_error.html")
Example #21
    def compile_final_query(self, from_clause = '', where_clause = ''):
        """Generates the final query string"""

        from_clause = self.normalize_spaces(from_clause)

        if self.search_term[0] == "*":
            query = (self.base_query +
                    """%s
                        WHERE PublishXRef.InbredSetId = %s
                        and PublishXRef.PhenotypeId = Phenotype.Id
                        and PublishXRef.PublicationId = Publication.Id
                        and PublishFreeze.Id = %s""" % (
                            from_clause,
                            escape(str(self.dataset.group.id)),
                            escape(str(self.dataset.id))))
        else:
            query = (self.base_query +
                    """%s
                        WHERE %s
                        and PublishXRef.InbredSetId = %s
                        and PublishXRef.PhenotypeId = Phenotype.Id
                        and PublishXRef.PublicationId = Publication.Id
                        and PublishFreeze.Id = %s""" % (
                            from_clause,
                            where_clause,
                            escape(str(self.dataset.group.id)),
                            escape(str(self.dataset.id))))

        print("query is:", pf(query))

        return query
Example #22
    def run(self):
        self.where_clause = self.get_where_clause()
        logger.debug("where_clause is:", pf(self.where_clause))

        self.query = self.compile_final_query(where_clause=self.where_clause)

        return self.execute(self.query)
Example #23
    def get_where_clause(self):
        if self.search_operator == "=":
            assert isinstance(self.search_term, (list, tuple))
            self.range_min, self.range_max = self.search_term[:2]
            where_clause = """ (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s AND
                                    (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) < %s
                                    """ % self.mescape(
                min(self.range_min, self.range_max), max(self.range_min, self.range_max)
            )
        else:
            # Deal with >, <, >=, and <=
            where_clause = """ (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s
                                    """ % (
                escape(self.search_term[0])
            )

        logger.debug("where_clause is:", pf(where_clause))

        return where_clause
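
The subselect computes the fold range of a log2-scaled trait, i.e. Pow(2, max(value) - min(value)); a client-side equivalent for intuition (values hypothetical):

vals = [8.1, 9.4, 11.2]                    # log2-scale expression values
fold_range = 2 ** (max(vals) - min(vals))  # matches Pow(2, max(value) - min(value))
print(fold_range)                          # ~8.57, compared against the search term with > / <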
Example #24
    def run(self):

        self.search_term = [float(value) for value in self.search_term]

        if self.search_operator == "=":
            assert isinstance(self.search_term, (list, tuple))
            self.range_min, self.range_max = self.search_term[:2]
            self.where_clause = """ (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s AND
                                    (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) < %s
                                    """ % self.mescape(min(self.range_min, self.range_max),
                                                       max(self.range_min, self.range_max))
        else:
            # Deal with >, <, >=, and <=
            self.where_clause = """ (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s
                                    """ % (escape(self.search_term[0]))

        print("where_clause is:", pf(self.where_clause))

        self.query = self.compile_final_query(where_clause = self.where_clause)

        return self.execute(self.query)
Example #25
    def gen_search_result(self):
        """
        Get the info displayed in the search result table from the set of results computed in
        the "search" function

        """
        self.trait_list = []
        
        species = webqtlDatabaseFunction.retrieve_species(self.dataset.group.name)
        
        # result_set represents the results for each search term; a search of 
        # "shh grin2b" would have two sets of results, one for each term
        print("self.results is:", pf(self.results))
        for result in self.results:
            if not result:
                continue
            
            #### Excel file needs to be generated ####

            print("foo locals are:", locals())
            trait_id = result[0]
            this_trait = GeneralTrait(dataset=self.dataset, name=trait_id)
            this_trait.retrieve_info(get_qtl_info=True)
            self.trait_list.append(this_trait)

        self.dataset.get_trait_info(self.trait_list, species)
Example #26
    def run(self):

        self.search_term = [float(value) for value in self.search_term]

        if self.search_operator == "=":
            assert isinstance(self.search_term, (list, tuple))
            self.pvalue_min, self.pvalue_max = self.search_term[:2]
            self.where_clause = """ %sXRef.pValue > %s and %sXRef.pValue < %s
                                    """ % self.mescape(
                                        self.dataset.type,
                                        min(self.pvalue_min, self.pvalue_max),
                                        self.dataset.type,
                                        max(self.pvalue_min, self.pvalue_max))
        else:
            # Deal with >, <, >=, and <=
            self.where_clause = """ %sXRef.pValue %s %s
                                    """ % self.mescape(
                                        self.dataset.type,
                                        self.search_operator,
                                        self.search_term[0])

        print("where_clause is:", pf(self.where_clause))

        self.query = self.compile_final_query(where_clause = self.where_clause)

        return self.execute(self.query)
Example #27
def new_main():
    print("Running new_main")
    tests = [PrintAll, PrintSome, PrintNone]
    for test in tests:
        test()

    print(pf(TheCounter.Counters))
Example #28
    def execute(self, query):
        """Executes query and returns results"""
        query = self.normalize_spaces(query)
        print("in do_search query is:", pf(query))
        results = g.db.execute(query, no_parameters=True).fetchall()
        #results = self.cursor.fetchall()
        return results
Example #29
File: main.py Project: obreitwi/pydemx
def main_loop(argv=None):
    if argv is None:
        argv = sys.argv

    args = docopt.docopt(get_updated_docstring(), argv=argv[1:],
            version=".".join(map(str, __version__)))

    if not args["--silent"]:
        logcfg.set_loglevel(log, "INFO")
        for h in log.handlers:
            logcfg.set_loglevel(h, "INFO")
    elif args["--verbose"] > 0:
        logcfg.make_verbose()
        log.debug(pf(args))

    ext = args["--extension"]
    recursive = args["--recursive"]

    files_and_folders = []
    files_and_folders.extend(args["<file_or_folder>"])

    for faf in files_and_folders:
        if osp.isfile(faf):
            parse_file(faf, args)
        elif osp.isdir(faf):
            for entry in os.listdir(faf):
                path = osp.join(faf, entry)

                valid_file = osp.isfile(path)\
                    and osp.splitext(path)[-1] == ext\
                    and osp.basename(osp.splitext(path)[0]) != "cfg"
                valid_folder = recursive and osp.isdir(path)

                if valid_file or valid_folder:
                    files_and_folders.append(path)
Example #30
    def gen_search_result(self):
        """
        Get the info displayed in the search result table from the set of results computed in
        the "search" function

        """
        self.trait_list = []
        json_trait_list = []

        species = webqtlDatabaseFunction.retrieve_species(self.dataset.group.name)
        # result_set represents the results for each search term; a search of
        # "shh grin2b" would have two sets of results, one for each term
        logger.debug("self.results is:", pf(self.results))
        for index, result in enumerate(self.results):
            if not result:
                continue

            #### Excel file needs to be generated ####

            #logger.debug("foo locals are:", locals())
            trait_id = result[0]
            this_trait = trait.GeneralTrait(dataset=self.dataset, name=trait_id, get_qtl_info=True, get_sample_info=False)
            self.trait_list.append(this_trait)
            json_trait_list.append(trait.jsonable_table_row(this_trait, self.dataset.name, index + 1))

        self.json_trait_list = json.dumps(json_trait_list)
Example #31
    async def __call__(self, *args, **kwargs):
        """Call the method with given parameters.

        On error this call will raise a :class:`SongpalException`. If the error is
        reported by the device (e.g. not a problem doing the request), the exception
        will contain an `error` attribute with the device-reported error message.
        """
        try:
            res = await self.service.call_method(self, *args, **kwargs)
        except Exception as ex:
            raise SongpalException("Unable to make a request: %s" % ex) from ex

        if self.debug > 1:
            _LOGGER.debug("got payload: %s" % res)

        if "error" in res:
            _LOGGER.debug(self)
            raise SongpalException(
                "Got an error for {}: {}".format(self.name, res["error"]),
                error=res["error"],
            )

        if self.debug > 0:
            _LOGGER.debug("got res: %s" % pf(res))

        if "result" not in res:
            _LOGGER.error("No result in response, how to handle? %s" % res)
            return

        res = res["result"]
        if len(res) > 1:
            _LOGGER.warning("Got a response with len >  1: %s" % res)
            return res
        elif len(res) < 1:
            _LOGGER.debug("Got no response, assuming success")
            return True

        return res[0]
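
A sketch of the error contract described in the docstring: device-reported failures carry an `error` attribute, transport failures do not (the wrapper name and handling are hypothetical):

async def safe_call(method, *args, **kwargs):
    try:
        return await method(*args, **kwargs)
    except SongpalException as ex:
        device_error = getattr(ex, "error", None)
        if device_error is not None:
            print("device reported: %s" % device_error)  # handle gracefully
        else:
            raise  # transport-level problem, re-raise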
Example #32
def search_page():
    logger.info("in search_page")
    logger.error(request.url)
    if 'info_database' in request.args:
        logger.info("Going to sharing_info_page")
        template_vars = sharing_info_page()
        if template_vars.redirect_url:
            logger.info("Going to redirect")
            return flask.redirect(template_vars.redirect_url)
        else:
            return render_template("data_sharing.html", **template_vars.__dict__)
    else:
        result = None
        if USE_REDIS:
            with Bench("Trying Redis cache"):
                key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
                logger.debug("key is:", pf(key))
                result = Redis.get(key)
                if result:
                    logger.info("Redis cache hit on search results!")
                    result = pickle.loads(result)
        else:
            logger.info("Skipping Redis cache (USE_REDIS=False)")

        logger.info("request.args is", request.args)
        the_search = search_results.SearchResultPage(request.args)
        result = the_search.__dict__

        logger.debugf("result", result)

        if USE_REDIS:
            Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
            Redis.expire(key, 60*60)

        if result['search_term_exists']:
            return render_template("search_result_page.html", **result)
        else:
            return render_template("search_error.html")
Example #33
def pdict(d, nd=None, rdl=0):
    """
    To convert a frame, first call pframe() to convert it to a dict.
    d = dict
    nd = new dict (None by default, to avoid a shared mutable default argument)
    rdl = recursion depth limit
    """
    if nd is None:
        nd = {}
    for k, v in d.items():
        prefix = k.split('_')[0]
        if not k.startswith('_'):
            print('1')
            print(f'k: {k}')
            print(f'prefix: {prefix}')
            print(f'v: {v}')
            print(f'rdl: {rdl}')
            if prefix in ATTRS and k in ATTRS[prefix]:
                if isinstance(v, dict):
                    if rdl > 0:
                        print('1.1')
                        nv = rlimiter(v)
                        print('1.1.1')
                    else:
                        print('1.2')
                        rdl += 1
                        nv = pdict(v, {}, rdl)
                    print('1.1.2')
                    print('a')
                    nd.update({k: nv})
                    print(nd)
                elif type(v).__name__ in ATTRS:
                    print('b')
                    rdl += 1
                    nd.update({k: pdict(pframe(v), {}, rdl)})
                    print(nd)
                else:
                    print('c')
                    nd.update({k: pf(v)})
    return nd
Example #34
def get_ordered_spike_idx(spiketrains):
    """
        Take spike trains and return a (num_spikes,) record array that contains
        the spike ids ('id') on first and the spike times ('t') on second
        position. The spike times are sorted in ascending order.
    """
    num_spikes = sum((len(st) for st in spiketrains))
    spikes = np.zeros((num_spikes, ), dtype=[("id", int), ("t", float)])

    current = 0

    for i, st in enumerate(spiketrains):
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.debug("Raw spikes for #{}: {}".format(i, pf(st)))
        spikes["id"][current:current + len(st)] = i
        spikes["t"][current:current + len(st)] = np.array(st)

        current += len(st)

    sort_idx = np.argsort(spikes["t"])
    sorted_spikes = spikes[sort_idx].copy()

    return sorted_spikes
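
A quick usage sketch for the function above on two toy spike trains:

spiketrains = [[1.0, 3.0], [2.0, 0.5]]
ordered = get_ordered_spike_idx(spiketrains)
# ordered["t"]  -> [0.5, 1.0, 2.0, 3.0]  (ascending spike times)
# ordered["id"] -> [1, 0, 1, 0]          (which train each spike came from)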
Example #35
    def trace_lines(frame, event, arg):  #_apply_op_helper
        global lnl, lsl, etc, prev_filenamens, f
        if event != 'line':
            return
        co = frame.f_code
        func_name = co.co_name
        line_no = frame.f_lineno

        prev_filenamens.append(
            f'{line_no}, len(pfn): {len(prev_filenamens)}, in _apply_op_helper')

        lns = line_no_string = f"* {func_name:-<70}{line_no}"
        lnl.append(lns)

        _s = pf({k: v for k, v in frame.f_locals.items() if k not in EXCLUDE})
        _sl = _s.split('\n')
        _p = '** '
        _pl = [_p] + [' ' * len(_p)] * (len(_sl) - 1)
        _l = [''.join(_el) for _el in zip(_pl, _sl)]
        lss = locals_string = '\n'.join(_l)
        lsl.append(lss)

        etc.append(str(to_json(frame.f_locals)))
Example #36
    def run_plink(self):
        plink_output_filename = webqtlUtil.genRandStr("%s_%s_"%(self.dataset.group.name, self.this_trait.name))

        self.gen_pheno_txt_file_plink(pheno_filename = plink_output_filename)

        plink_command = PLINK_COMMAND + (
            ' --noweb --ped %s/%s.ped --no-fid --no-parents --no-sex --no-pheno'
            ' --map %s/%s.map --pheno %s%s.txt --pheno-name %s --maf %s'
            ' --missing-phenotype -9999 --out %s%s --assoc ' % (
                PLINK_PATH, self.dataset.group.name,
                PLINK_PATH, self.dataset.group.name,
                TMPDIR, plink_output_filename,
                self.this_trait.name, self.maf,
                TMPDIR, plink_output_filename))
        logger.debug("plink_command:", plink_command)

        os.system(plink_command)

        count, p_values = self.parse_plink_output(plink_output_filename)

        #for marker in self.dataset.group.markers.markers:
        #    if marker['name'] not in included_markers:
        #        logger.debug("marker:", marker)
        #        self.dataset.group.markers.markers.remove(marker)
        #        #del self.dataset.group.markers.markers[marker]

        logger.debug("p_values:", pf(p_values))

        self.dataset.group.markers.add_pvalues(p_values)

        return self.dataset.group.markers.markers
Example #37
    def get_where_clause(self):
        if self.search_operator == "=":
            assert isinstance(self.search_term, (list, tuple))
            self.range_min, self.range_max = self.search_term[:2]
            where_clause = """ (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s AND
                                    (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) < %s
                                    """ % self.mescape(
                min(self.range_min, self.range_max),
                max(self.range_min, self.range_max))
        else:
            # Deal with >, <, >=, and <=
            where_clause = """ (SELECT Pow(2, max(value) -min(value))
                                     FROM ProbeSetData
                                     WHERE ProbeSetData.Id = ProbeSetXRef.dataId) > %s
                                    """ % (escape(self.search_term[0]))

        logger.debug("where_clause is:", pf(where_clause))

        return where_clause
Example #38
    def run(self):

        self.search_term = [float(value) for value in self.search_term]

        if self.search_operator == "=":
            assert isinstance(self.search_term, (list, tuple))
            self.pvalue_min, self.pvalue_max = self.search_term[:2]
            self.where_clause = """ %sXRef.pValue > %s and %sXRef.pValue < %s
                                    """ % self.mescape(
                self.dataset.type, min(self.pvalue_min, self.pvalue_max),
                self.dataset.type, max(self.pvalue_min, self.pvalue_max))
        else:
            # Deal with >, <, >=, and <=
            self.where_clause = """ %sXRef.pValue %s %s
                                    """ % self.mescape(
                self.dataset.type, self.search_operator, self.search_term[0])

        logger.debug("where_clause is:", pf(self.where_clause))

        self.query = self.compile_final_query(where_clause=self.where_clause)

        logger.sql(self.query)
        return self.execute(self.query)
Example #39
    async def create_post_request(self, method, params):
        headers = {"Content-Type": "application/json"}
        payload = {"method": method,
                   "params": [params],
                   "id": next(self.idgen),
                   "version": "1.0"}
        req = requests.Request("POST", self.guide_endpoint,
                               data=json.dumps(payload), headers=headers)
        prepreq = req.prepare()
        s = requests.Session()
        try:
            response = s.send(prepreq)
            if response.status_code != 200:
                _LOGGER.error("Got !200 response: %s" % response.text)
                return None

            response = response.json()
        except requests.RequestException as ex:
            raise SongpalException("Unable to get APIs: %s" % ex) from ex

        if self.debug > 1:
            _LOGGER.debug("Got getSupportedApiInfo: %s", pf(response))

        return response
Example #40
File: main.py Project: obreitwi/pydemx
def main_loop(argv=None):
    if argv is None:
        argv = sys.argv

    args = docopt.docopt(get_updated_docstring(),
                         argv=argv[1:],
                         version=".".join(map(str, __version__)))

    if not args["--silent"]:
        logcfg.set_loglevel(log, "INFO")
        for h in log.handlers:
            logcfg.set_loglevel(h, "INFO")
    elif args["--verbose"] > 0:
        logcfg.make_verbose()
        log.debug(pf(args))

    ext = args["--extension"]
    recursive = args["--recursive"]

    files_and_folders = []
    files_and_folders.extend(args["<file_or_folder>"])

    for faf in files_and_folders:
        if osp.isfile(faf):
            parse_file(faf, args)
        elif osp.isdir(faf):
            for entry in os.listdir(faf):
                path = osp.join(faf, entry)

                valid_file = (osp.isfile(path)
                              and osp.splitext(path)[-1] == ext
                              and osp.basename(osp.splitext(path)[0]) != "cfg")
                valid_folder = recursive and osp.isdir(path)

                if valid_file or valid_folder:
                    files_and_folders.append(path)
Example #41
def handle_ecr_global(event_in):
    if event_in.get("eventName", "_") not in ("PutImage"):
        return True
    if event_in.get("errorCode", "_") in ("ImageAlreadyExistsException", "InvalidParameterException"):
        logger.info(
            " = Image problem, skipping ({!s})".format(event_in.get("errorMessage", ""))
        )
        return True

    if verbose:
        logger.debug(" = EcrInput: {}".format(pf(event_in)))

    repo = event_in.get("requestParameters", {}).get("repositoryName")
    logger.info(" = Repo: {}".format(pf(repo)))

    tag = event_in.get("requestParameters", {}).get("imageTag")
    logger.info(" = Tag: {}".format(pf(tag)))

    registry_id = event_in.get("requestParameters", {}).get("registryId")
    logger.debug(" = RegId: {}".format(pf(registry_id)))

    registry_region = event_in.get("awsRegion", "us-east-1")
    logger.debug(" = RegRegion: {}".format(pf(registry_region)))

    event_time = event_in.get(
        "eventTime", datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%SZ")
    )
    logger.debug(" = EventTime: {}".format(pf(event_time)))

    user_id_base = event_in.get("userIdentity", {})
    user_id = user_id_base.get("userName", user_id_base.get("arn", "*Unknown*"))
    logger.debug(" = UserId: {}".format(pf(user_id)))

    handle_image(
        registry_id, registry_region, repo, tag, user_id, event_time.replace("T", " ")
    )
    return True
Example #42
    def __repr__(self):
        return "<SampleList> --> %s" % (pf(self.__dict__))
Example #43
    def _do_request(self, rpcmethod, subsystem, method, **params):
        if len(params) == 0:
            params = {}
        retry_count = params.get("retry_count", 1)
        data = json.dumps({
            "jsonrpc": "2.0",
            "id": self.id,
            "method": rpcmethod,
            "params": [self.session_id, subsystem, method, params]
        })
        _LOGGER.debug(">> %s" % pf(data))

        try:
            res = requests.post(self.endpoint, data=data, timeout=self.timeout)
        except requests.exceptions.Timeout as ex:
            raise UbusException("Got timeout") from ex
        except requests.exceptions.ConnectionError as ex:
            raise UbusException(
                "Got error during post, false credentials?") from ex

        if res.status_code == 200:
            response = res.json()
            _LOGGER.debug("<< %s" % pf(response))

            if 'error' in response:
                # {'code': -32002, 'message': 'Access denied'}
                error = RPCError(response["error"]["code"])
                if error == RPCError.ERROR_ACCESS_DENIED:
                    if self._session_id == Ubus.EMPTY_SESSION:
                        raise UbusException(
                            "Access denied with empty session, please login() first.")
                    else:
                        if retry_count < 1:
                            raise UbusException(
                                "Access denied, '%s' has no permission to call '%s' on '%s'"
                                % (self.username, method, subsystem))

                        _LOGGER.warning(
                            "Got access denied, renewing a session and trying again.."
                        )
                        self.login()
                        params["retry_count"] = retry_count - 1
                        return self._do_request(rpcmethod, subsystem, method,
                                                **params)
                else:
                    raise UbusException("Got error from ubus: %s" %
                                        response['error'])

            if 'result' not in response:
                raise UbusException("Got no result: %s" % response)

            result = response["result"]
            if isinstance(result, dict):  # got payload, passing directly
                return result
            if isinstance(result, list):
                # got a list: first item is the error code, second the payload
                if len(result) == 1 and UbusError(result[0]) == UbusError.UBUS_STATUS_OK:
                    return result
                if len(result) == 1:
                    error = UbusError(result[0])
                    raise UbusException("Got error %s" % error)
                if len(result) != 2:
                    raise UbusException("Result length was not 2: %s" % result)

                error = UbusError(result[0])
                if error != UbusError.UBUS_STATUS_OK:
                    raise UbusException("Got an error: %s" % result)

                payload = result[1]
                if method in payload:  # unwrap if necessary
                    payload = payload[method]
                if isinstance(payload, bool):  # e.g. access() returns a boolean
                    return payload
                if 'results' in payload:
                    payload = payload['results']
                return payload
        else:
            raise UbusException("Got a non-200 retcode: %s" % res.status_code)
Example #44
def info(vac: mirobo.Vacuum):
    """Return device information."""
    res = vac.info()

    click.echo("%s" % res)
    _LOGGER.debug("Full response: %s", pf(res.raw))
Example #45
def sample_network(database,
                   num_samplers=5,
                   weights=None,
                   biases=None,
                   neuron_id=1,
                   sim_name="pyNN.nest",
                   duration=1e5,
                   savefilename=None,
                   numpy_seed=42):
    """
        Run and plot a sample network with the given weights and biases
    """
    db.setup(database)
    db.purge_incomplete_calibrations()

    np.random.seed(numpy_seed)

    if savefilename is not None:
        bm = network.BoltzmannMachine.load(savefilename)
    else:
        bm = None

    if bm is None:
        # no network loaded, we need to create it
        bm = network.BoltzmannMachine(num_samplers=num_samplers,
                                      sim_name=sim_name,
                                      neuron_parameters_db_ids=neuron_id)

        bm.load_calibration()

        if weights is None:
            weights = np.random.randn(bm.num_samplers, bm.num_samplers)
            weights = (weights + weights.T) / 2.
        bm.weights_theo = weights

        if biases is None:
            bm.biases_theo = np.random.randn(bm.num_samplers)
        else:
            bm.biases_theo = biases

        bm.saturating_synapses_enabled = True

        bm.gather_spikes(duration=duration, dt=0.1, burn_in_time=500.)
        if savefilename is not None:
            bm.save(savefilename)

    log.info("Weights (theo):\n" + pf(bm.weights_theo))
    log.info("Biases (theo):\n" + pf(bm.biases_theo))

    log.info("Weights (bio):\n" + pf(bm.weights_bio))
    log.info("Biases (bio):\n" + pf(bm.biases_bio))

    log.info("Spikes: {}".format(pf(bm.ordered_spikes)))

    log.info("Spike-data: {}".format(pf(bm.spike_data)))

    bm.selected_sampler_idx = range(bm.num_samplers)

    log.info("Marginal prob (sim):\n" + pf(bm.dist_marginal_sim))

    log.info("Joint prob (sim):\n" +
             pf(list(np.ndenumerate(bm.dist_joint_sim))))

    log.info("Marginal prob (theo):\n" + pf(bm.dist_marginal_theo))

    log.info("Joint prob (theo):\n" +
             pf(list(np.ndenumerate(bm.dist_joint_theo))))

    log.info("DKL marginal: {}".format(
        utils.dkl_sum_marginals(bm.dist_marginal_theo, bm.dist_marginal_sim)))

    log.info("DKL joint: {}".format(
        utils.dkl(bm.dist_joint_theo.flatten(), bm.dist_joint_sim.flatten())))

    bm.plot_dist_marginal(save=True)
    bm.plot_dist_joint(save=True)
Example #46
def main():
    parser = argparse.ArgumentParser(
        description="Process triage, network or memory dump evidence "
                    "file(s), sorted by projects for correlation")
    parser.add_argument('-d',
                        dest='directory',
                        type=str,
                        help="Directory containing evidence files")
    parser.add_argument(
        '-p',
        dest='projectname',
        type=str,
        required=True,
        help="Codename of the project that the evidence is part of")
    parser.add_argument(
        '-s',
        dest='split',
        type=int,
        required=False,
        help="Split timeline to a maximum of X rows per file.  Default 100k.")
    args = parser.parse_args()

    if args.split:
        split = args.split
    else:
        split = CONFIG['TIMELINE']['DEFAULT_SPLIT']

    #Initialize folders
    projectName = args.projectname
    searchDirectory = args.directory
    magnetodir = os.getcwd()
    resultsDir = magnetodir + "/Results"
    if not os.path.exists(resultsDir):
        try:
            os.makedirs(resultsDir)
        except OSError:
            logging.error("Unable to create results folder")
            sys.exit()

    projResultsDir = magnetodir + "/Results/" + projectName
    if not os.path.exists(projResultsDir):
        try:
            os.makedirs(projResultsDir)
        except OSError:
            logging.error("Unable to create Project results folder")
            sys.exit()

    logger.info("submit.py STARTED on %s with project %s" %
                (searchDirectory, args.projectname))

    #Initialize database
    DATABASE = CONFIG['DATABASE']
    dbhandle = db.databaseConnect(DATABASE['HOST'], DATABASE['DATABASENAME'],
                                  DATABASE['USER'], DATABASE['PASSWORD'])
    logger.debug("dbhandle is " + str(dbhandle))

    db.databaseInitiate()
    imagelist = []
    Schema = "project"
    Table = "project_image_mapping"
    insertProjectValue = collections.OrderedDict.fromkeys(
        ['projectname', 'imagename'])

    #Process Incident Folders
    if "Incident" in searchDirectory:
        pathParts = searchDirectory.split('\\')
        for part in pathParts:
            if "Incident" in part:
                insertProjectValue['projectname'] = args.projectname
                insertProjectValue['imagename'] = part
                logger.info("insertProjectValue is %s" %
                            pf(insertProjectValue))
                db.databaseExistInsert(dbhandle, Schema, Table,
                                       insertProjectValue)
        logger.info("postTriage.postTriage on %s" % searchDirectory)
        postTriage.postTriage(searchDirectory, projectName)
        logger.info("submitDatabase.dbprocess on %s" % searchDirectory)
        submitDatabase.dbprocess(dbhandle, searchDirectory)
        logger.info("summary.outputSummary on %s" % searchDirectory)
        summary.outputSummary(searchDirectory, projectName, projResultsDir)
        logger.info("timeline.outputTimeline on %s" % searchDirectory)
        timeline.outputTimeline(searchDirectory, projectName, projResultsDir,
                                split)

    else:
        for root, dirs, files in os.walk(searchDirectory):
            #searchDirectory cannot end with a slash!
            for directory in dirs:
                if "Incident" in directory:
                    if directory not in imagelist:
                        imagelist.append(directory)
                        insertProjectValue['projectname'] = args.projectname
                        insertProjectValue['imagename'] = directory
                        logger.info("insertProjectValue is %s" %
                                    pf(insertProjectValue))
                        db.databaseExistInsert(dbhandle, Schema, Table,
                                               insertProjectValue)

                        fulldirectory = os.path.join(root, directory)

                        logger.info("postTriage.postTriage on %s" %
                                    fulldirectory)
                        postTriage.postTriage(fulldirectory, projectName)
                        logger.info("submitDatabase.dbprocess on %s" %
                                    fulldirectory)
                        submitDatabase.dbprocess(dbhandle, fulldirectory)
                        logger.info("summary.outputSummary on %s" %
                                    fulldirectory)
                        summary.outputSummary(fulldirectory, projectName,
                                              projResultsDir)
                        logger.info("timeline.outputTimeline on %s" %
                                    fulldirectory)
                        timeline.outputTimeline(fulldirectory, projectName,
                                                projResultsDir, split)
Example #47
File: parser.py Project: obreitwi/pydemx
    def __init__(self, cfg, tokenizer):
        text_blocks = tokenizer.text_blocks
        repl_blocks = tokenizer.repl_blocks
        code_blocks = tokenizer.code_blocks

        log.debug("Parsing file.")
        self.cfg = {k: cfg[k] for k in self.config_keys}
        self.replacement_t = make_replacement_t()

        self._create_utils()

        self.text_blocks = text_blocks

        # scrape all text blocks for defined replacements,
        # and the contents of all replacement blocks as well
        for tb in text_blocks:
            self.read_replacements(tb.lines)

        # define replacements from replacement blocks
        known_repl_block_names = set()
        for rb in repl_blocks:
            self.read_replacements(rb.lines)
            match = self.matcher_repl_block_title.match(rb.title).groupdict()

            log.debug("Match object for replacement block: {}".format(pf(match)))

            # if we haven't seen a replacement block yet, it should be inserted
            # into regular text where first defined
            if match["name"] not in known_repl_block_names:
                known_repl_block_names.add(match["name"])
                text_repl = self.replacement_t.format.format(name=match["name"])
                if log.getEffectiveLevel() <= logging.DEBUG:
                    log.debug(
                        "Text inserted for replacement block: {}".format(pf(text_repl))
                    )
                if rb.index > 0:
                    self.text_blocks[rb.index - 1].lines.append(text_repl)
                else:
                    self.text_blocks[0].lines.insert(0, text_repl)

            repl = self.replacement_t(match["name"])
            combined_lines = os.linesep.join(rb.lines)
            if match["key"] is not None:
                # see if we have more than one key for which we have the same
                # replacement
                mks = self.cfg["multi_key_seperator"]
                if mks is not None and mks in match["key"]:
                    keys = match["key"].split(mks)
                else:
                    keys = [match["key"]]

                log.debug("Keys for {}: {}".format(match["name"], pf(keys)))

                for k in keys:
                    repl[k] = combined_lines
            else:
                repl.default = combined_lines

            if log.getEffectiveLevel() <= logging.DEBUG:
                log.debug(pf(repl))

        # execute the code from code blocks
        # include a dummy cfg dict to be compatible with the first cfg block

        # allow the code lines to pass data along
        context = {"R": self.replacement_t, "cfg": copy.deepcopy(cfg)}
        for cb in code_blocks:
            m.execute_code(cb.lines, context)
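
Worth noting: the pformat calls above sit behind a log.getEffectiveLevel() <= logging.DEBUG guard, so the (potentially expensive) string is only built when DEBUG records will actually be emitted. A minimal standalone sketch of the same pattern:

import logging
from pprint import pformat as pf

log = logging.getLogger(__name__)

def debug_dump(obj):
    # Only pay the pformat() cost if DEBUG output is enabled.
    if log.getEffectiveLevel() <= logging.DEBUG:
        log.debug(pf(obj))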
Example #48
0
    def test_sample_rtr_cond(self):
        """
            How to setup and evaluate a Boltzmann machine. Please note that in
            order to instantiate BMs all needed neuron parameters need to be in
            the database and calibrated.

            Does the same thing as sbs.tools.sample_network(...).
        """
        np.random.seed(422342134)

        # Networks can be saved outside of the database.
        duration = 1e4

        # Try to load the network from file. This function returns None if no
        # network could be loaded.
        # No network loaded, we need to create it. We need to specify how
        # many samplers we want and what neuron parameters they should
        # have. Refer to the documentation for all the different ways this
        # is possible.

        sampler_config = sbs.db.SamplerConfiguration.load(
            "test-calibration-cond.json")

        # expand regular neuron parameters to native nest parameters
        sampler_config.neuron_parameters =\
            sbs.db.ConductanceExponentialRandomRefractory.convert(
                sampler_config.neuron_parameters)

        # set a random distribution
        sampler_config.neuron_parameters.tau_refrac_dist = {
            "distribution": "uniform",
            "low": 20.,
            "high": 30.,
        }

        bm = sbs.network.ThoroughBM(num_samplers=5,
                                    sim_name=sim_name,
                                    sampler_config=sampler_config)

        # Set random symmetric weights.
        weights = np.random.randn(bm.num_samplers, bm.num_samplers)
        weights = (weights + weights.T) / 2.
        bm.weights_theo = weights

        # Set random biases.
        bm.biases_theo = np.random.randn(bm.num_samplers)

        # NOTE: By setting the theoretical weights and biases, the
        # biological ones automatically get calculated on-demand by
        # accessing bm.weights_bio and bm.biases_bio

        # NOTE: Saturating synapses are currently not enabled for nest
        # native models; a fix is in progress!
        bm.saturating_synapses_enabled = True
        bm.use_proper_tso = True

        if bm.sim_name == "pyNN.neuron":
            bm.saturating_synapses_enabled = False

        bm.gather_spikes(duration=duration, dt=0.1, burn_in_time=500.)

        # Now we just print out some information and plot the distributions.

        log.info("Weights (theo):\n" + pf(bm.weights_theo))
        log.info("Biases (theo):\n" + pf(bm.biases_theo))

        log.info("Weights (bio):\n" + pf(bm.weights_bio))
        log.info("Biases (bio):\n" + pf(bm.biases_bio))

        log.info("Spikes: {}".format(pf(bm.ordered_spikes)))

        log.info("Spike-data: {}".format(pf(bm.spike_data)))

        bm.selected_sampler_idx = range(bm.num_samplers)

        log.info("Marginal prob (sim):\n" + pf(bm.dist_marginal_sim))

        log.info("Joint prob (sim):\n" +
                 pf(list(np.ndenumerate(bm.dist_joint_sim))))

        log.info("Marginal prob (theo):\n" + pf(bm.dist_marginal_theo))

        log.info("Joint prob (theo):\n" +
                 pf(list(np.ndenumerate(bm.dist_joint_theo))))

        log.info("DKL marginal: {}".format(
            sbs.utils.dkl_sum_marginals(bm.dist_marginal_theo,
                                        bm.dist_marginal_sim)))

        log.info("DKL joint: {}".format(
            sbs.utils.dkl(bm.dist_joint_theo.flatten(),
                          bm.dist_joint_sim.flatten())))
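
The weight initialization above uses the usual symmetrization trick: averaging a random matrix with its transpose produces a symmetric matrix, as Boltzmann machine weights must be. A quick standalone check, independent of sbs:

import numpy as np

rng = np.random.default_rng(0)
w = rng.standard_normal((5, 5))
w_sym = (w + w.T) / 2.0
assert np.allclose(w_sym, w_sym.T)  # symmetric by construction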
Example #49
0
def handle_image(
    registry_id,
    registry_region,
    repo,
    tag,
    who=None,
    event_time=None,
):
    # Compute the default timestamp at call time: a strftime() call used as a
    # default argument would be evaluated once at import and then frozen.
    if event_time is None:
        event_time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%SZ")

    logger.debug(" = Running")

    dry_run = os.environ.get("DRY_RUN", "true") == "true"

    registry_address = os.environ.get(
        "ECR_REGISTRY_URL_FORMAT",
        "{registry_id}.dkr.ecr.{registry_region}.amazonaws.com",
    ).format(registry_id=registry_id, registry_region=registry_region)

    if str(tag) == "":
        tag = "latest"

    if tag == "latest" or tag == "":
        if os.environ.get("SKIP_UNTAGGED", "true") == "true":
            logger.info(' = Skipping "un-tagged" image: {}'.format(pf(repo)))
            return True

    # get ECR login
    ecrc = boto3.client(
        "ecr",
        region_name=registry_region,
        aws_access_key_id=os.environ.get("ECR_ACCESS_KEY", None),
        aws_secret_access_key=os.environ.get("ECR_SECRET_KEY", None),
        aws_session_token=os.environ.get("ECR_SESSION_TOKEN", None),
    )
    ecrauth = ecrc.get_authorization_token(registryIds=[registry_id])

    registry_username, registry_password = (
        base64.b64decode(
            ecrauth.get("authorizationData", [])[0].get(
                "authorizationToken", "user:pass"
            )
        )
        .decode("utf-8")
        .split(":")
    )

    logger.debug(
        ' = ECR Credentials: "{}" / "{}" (truncated to 12)'.format(
            registry_username[:12], registry_password[:12]
        )
    )

    log_html = "/tmp/log-{}.html".format(rid(12))
    robot_result = robot.run(
        "/opt/imagecheck/robot/basic-test-suite.robot",
        name="Containerized Service Compliance",
        log=log_html,
        report=None,
        output="/dev/null",
        stdout=None,
        stderr=None,
        console="quiet",
        removekeywords=["TAG:SECRET"],
        flattenkeywords=["NAME:*"],
        critical="CRITICAL",
        noncritical="robot:exit",
        tagstatexclude=["robot:exit", "CRITICAL"],
        logtitle="⌘ {}: {}".format(repo, tag),
        variable=[
            "PUSHED_BY:{}".format(who),
            "PUSHED_DATE:{}".format(event_time),
            "ECR_LOGIN_ADDRESS:https://{}".format(registry_address),
            "ECR_USERNAME:{}".format(registry_username),
            "ECR_PASSWORD:{}".format(registry_password),
            "IMAGE:{}/{}:{}".format(registry_address, repo, tag),
        ],
    )
    logger.debug(" = Robot result: {} ({})".format(pf(robot_result), log_html))

    url = None
    try:
        url = put_report_s3_presign(inject_custom_html(log_html))
    except Exception as e:
        logger.error(" = put_report_s3_presign: {}".format(pf(e)))
        url = None

    should_delete_image = robot_result != 0

    colours = {
        "ok": "#98ef8d",  # Screaming Green
        "warn": "#ffd000",  # Bright Yellow
        "concern": "#ff7400",  # Strong Orange
        "critical": "#ff003e",  # Danger Red
    }

    at_title = "⌘ {}:{}".format(repo, tag)

    at_colour = (
        colours["warn"]
        if (should_delete_image and dry_run)
        else colours["critical"]
        if should_delete_image
        else colours["ok"]
    )

    at_summary = (
        "Failed Compliance Tests"
        if (should_delete_image and dry_run)
        else "Failed Compliance Tests => Image Deleted"
        if should_delete_image
        else "Passed Compliance Tests"
    )

    at_msg = (
        "**Failed Compliance Tests**"
        if (should_delete_image and dry_run)
        else "**Failed Compliance Tests**\n\n * Image Deleted"
        if should_delete_image
        else "*Passed Compliance Tests*"
    )

    # Defaults must be JSON strings ("[]"): json.loads() rejects a bare list,
    # and the variables must exist even if parsing fails.
    teams_webhooks_delete = teams_webhooks_warning = teams_webhooks_ok = []
    try:
        teams_webhooks_delete = json.loads(os.environ.get("TEAMS_DELETE_URLS", "[]"))
        teams_webhooks_warning = json.loads(os.environ.get("TEAMS_WARNING_URLS", "[]"))
        teams_webhooks_ok = json.loads(os.environ.get("TEAMS_OK_URLS", "[]"))
    except Exception as e:
        logger.error(" = Error parsing Teams URLs: {}".format(e))

    # Check the dry-run case first; after "if should_delete_image" matched,
    # the "(should_delete_image and dry_run)" branch could never be reached.
    teams_webhooks = (
        teams_webhooks_warning
        if (should_delete_image and dry_run)
        else teams_webhooks_delete
        if should_delete_image
        else teams_webhooks_ok
    )

    alert_teams(at_title, at_colour, at_summary, at_msg, teams_webhooks, url)

    if should_delete_image and not dry_run:
        try:
            logger.debug(" = ECR Delete: {}/{}".format(repo, tag))
            del_tag = ecrc.batch_delete_image(
                registryId=registry_id,
                repositoryName=repo,
                imageIds=[{"imageTag": tag}],
            )
            logger.debug(" = ECR DeleteTag: {}".format(pf(del_tag)))
        except Exception as e:
            logger.error(" = ECR Delete Error: {}".format(e))
Example #50
0
    def __repr__(self):
        return pf(vars(self))
Example #51
0
def info(vac: mirobo.Vacuum):
    """Returns info"""
    res = vac.info()

    click.echo(res)
    _LOGGER.debug("Full response: %s" % pf(res.raw))
Example #52
0
#!/usr/bin/env python
# coding=utf-8
import sys
# Step 1
import pickle

from lifxlan import LifxLAN
from pprint import pformat as pf
import os

print(os.getcwd())
lifx = LifxLAN(1, verbose=False)
device = lifx.get_device_by_name(sys.argv[1])
print("Hardware: %s " % pf(device.get_color()))
with open(sys.argv[1] + '.light', 'wb') as fileChambre:
    # Step 3
    pickle.dump(device, fileChambre)
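
Loading the device back is the mirror image of the dump above; a minimal sketch assuming the same <name>.light file (whether a LifxLAN device object round-trips cleanly through pickle across library versions is not guaranteed):

import pickle
import sys

with open(sys.argv[1] + '.light', 'rb') as f:
    device = pickle.load(f)
print(device)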
Example #53
0
    def __repr__(self):
        return "<Method %s.%s(%s) -> %s>" % (self.service,
                                             self.name,
                                             pf(self.inputs),
                                             pf(self.outputs))
Example #54
0
    def dump_raw(dev):
        raw = {k: dev[k] for k in dev.keys()}
        _LOGGER.info(pf(raw))
Example #55
0
    def __str__(self):
        return pf(vars(self))
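
Examples #50 and #55 rely on the same idiom: pf(vars(self)) pretty-prints the instance's attribute dictionary, which makes a serviceable generic __repr__/__str__. A standalone illustration (class name hypothetical):

from pprint import pformat as pf

class Config:
    def __init__(self):
        self.host = "localhost"
        self.port = 8080

    def __repr__(self):
        # vars(self) is self.__dict__; pformat renders it sorted by key.
        return pf(vars(self))

print(Config())  # {'host': 'localhost', 'port': 8080}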
Example #56
0
    def do_request(self,
                   method,
                   path,
                   params=None,
                   data=None,
                   raw_content=False):
        url = '{}{}'.format(self.base_url, path)
        logger.debug('%s REST API %s req: %s data: %s', self.client_name,
                     method.upper(), path, data)
        try:
            if method.lower() == 'get':
                resp = self.session.get(url,
                                        headers=self.headers,
                                        params=params,
                                        auth=self.auth)
            elif method.lower() == 'post':
                resp = self.session.post(url,
                                         headers=self.headers,
                                         params=params,
                                         data=data,
                                         auth=self.auth)
            elif method.lower() == 'put':
                resp = self.session.put(url,
                                        headers=self.headers,
                                        params=params,
                                        data=data,
                                        auth=self.auth)
            elif method.lower() == 'delete':
                resp = self.session.delete(url,
                                           headers=self.headers,
                                           params=params,
                                           data=data,
                                           auth=self.auth)
            else:
                raise RequestException(
                    'Method "{}" not supported'.format(method.upper()), None)
            if resp.ok:
                logger.debug("%s REST API %s res status: %s content: %s",
                             self.client_name, method.upper(),
                             resp.status_code, resp.text)
                if raw_content:
                    return resp.content
                try:
                    return resp.json() if resp.text else None
                except ValueError:
                    logger.error(
                        "%s REST API failed %s req while decoding JSON "
                        "response : %s", self.client_name, method.upper(),
                        resp.text)
                    raise RequestException(
                        "{} REST API failed request while decoding JSON "
                        "response: {}".format(self.client_name, resp.text),
                        resp.status_code, resp.text)
            else:
                logger.error("%s REST API failed %s req status: %s",
                             self.client_name, method.upper(),
                             resp.status_code)
                from pprint import pformat as pf

                raise RequestException(
                    "{} REST API failed request with status code {}\n"
                    "{}"  # TODO remove
                    .format(self.client_name, resp.status_code,
                            pf(resp.content)),
                    resp.status_code,
                    resp.content)
        except ConnectionError as ex:
            if ex.args:
                if isinstance(ex.args[0], SSLError):
                    errno = "n/a"
                    strerror = "SSL error. Probably trying to access a non " \
                               "SSL connection."
                    logger.error("%s REST API failed %s, SSL error.",
                                 self.client_name, method.upper())
                else:
                    try:
                        match = re.match(r'.*: \[Errno (-?\d+)\] (.+)',
                                         ex.args[0].reason.args[0])
                    except AttributeError:
                        match = False
                    if match:
                        errno = match.group(1)
                        strerror = match.group(2)
                        logger.error(
                            "%s REST API failed %s, connection error: "
                            "[errno: %s] %s", self.client_name, method.upper(),
                            errno, strerror)
                    else:
                        errno = "n/a"
                        strerror = "n/a"
                        logger.error(
                            "%s REST API failed %s, connection error.",
                            self.client_name, method.upper())
            else:
                errno = "n/a"
                strerror = "n/a"
                logger.error("%s REST API failed %s, connection error.",
                             self.client_name, method.upper())

            if errno != "n/a":
                ex_msg = (
                    "{} REST API cannot be reached: {} [errno {}]. "
                    "Please check your configuration and that the API endpoint"
                    " is accessible".format(self.client_name, strerror, errno))
            else:
                ex_msg = ("{} REST API cannot be reached. Please check "
                          "your configuration and that the API endpoint is"
                          " accessible".format(self.client_name))
            raise RequestException(ex_msg,
                                   conn_errno=errno,
                                   conn_strerror=strerror)
        except InvalidURL as ex:
            logger.exception("%s REST API failed %s: %s", self.client_name,
                             method.upper(), str(ex))
            raise RequestException(str(ex))
        except Timeout as ex:
            msg = "{} REST API {} timed out after {} seconds (url={}).".format(
                self.client_name, ex.request.method,
                Settings.REST_REQUESTS_TIMEOUT, ex.request.url)
            logger.exception(msg)
            raise RequestException(msg)
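
A hedged usage sketch for a client exposing do_request as above; the constructor and endpoint are hypothetical, but the method clearly expects base_url, session, headers, auth and client_name attributes to exist:

import requests

class RestClient:
    def __init__(self, base_url, client_name="demo"):
        self.base_url = base_url
        self.client_name = client_name
        self.session = requests.Session()
        self.headers = {"Accept": "application/json"}
        self.auth = None

    # do_request(self, method, path, params=None, data=None,
    #            raw_content=False) as defined above.

# client = RestClient("https://api.example.com")
# health = client.do_request("get", "/health")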
Example #57
0
def sysinfo(dev):
    """Print out full system information."""
    click.echo(click.style("== System info ==", bold=True))
    click.echo(pf(dev.sys_info))
Example #58
0
def basicStatsTable(vals, trait_type=None, cellid=None, heritability=None):
    print("basicStatsTable called - len of vals", len(vals))
    st = {}  # This is the dictionary where we'll put everything for the template
    valsOnly = [item[1] for item in vals]

    (st['traitmean'],
     st['traitmedian'],
     st['traitvar'],
     st['traitstdev'],
     st['traitsem'],
     st['N']) = reaper.anova(valsOnly) #ZS: Should convert this from reaper to R in the future

    dataXZ = sorted(vals, webqtlUtil.cmpOrder)

    print("data for stats is:", pf(dataXZ))
    for num, item in enumerate(dataXZ):
        print(" %i - %s" % (num, item))
    print("  length:", len(dataXZ))

    st['min'] = dataXZ[0][1]
    st['max'] = dataXZ[-1][1]

    numbers = [x[1] for x in dataXZ]
    stats = Stats(numbers)

    at75 = stats.percentile(75)
    at25 = stats.percentile(25)
    print("should get a stack")
    traceback.print_stack()
    print("Interquartile:", at75 - at25)

    #tbl.append(HT.TR(HT.TD("Statistic",align="left", Class="fs14 fwb ffl b1 cw cbrb", width = 180),
    #                HT.TD("Value", align="right", Class="fs14 fwb ffl b1 cw cbrb", width = 60)))
    #tbl.append(HT.TR(HT.TD("N of Samples",align="left", Class="fs13 b1 cbw c222"),
    #                HT.TD(N,nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))
    #tbl.append(HT.TR(HT.TD("Mean",align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    #                HT.TD("%2.3f" % traitmean,nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))
    #tbl.append(HT.TR(HT.TD("Median",align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    #                HT.TD("%2.3f" % traitmedian,nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))
    ##tbl.append(HT.TR(HT.TD("Variance",align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    ##               HT.TD("%2.3f" % traitvar,nowrap="yes",align="left", Class="fs13 b1 cbw c222")))
    #tbl.append(HT.TR(HT.TD("Standard Error (SE)",align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    #                HT.TD("%2.3f" % traitsem,nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))
    #tbl.append(HT.TR(HT.TD("Standard Deviation (SD)", align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    #                HT.TD("%2.3f" % traitstdev,nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))
    #tbl.append(HT.TR(HT.TD("Minimum", align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    #                HT.TD("%s" % dataXZ[0][1],nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))
    #tbl.append(HT.TR(HT.TD("Maximum", align="left", Class="fs13 b1 cbw c222",nowrap="yes"),
    #                HT.TD("%s" % dataXZ[-1][1],nowrap="yes", Class="fs13 b1 cbw c222"), align="right"))



    if trait_type == 'ProbeSet':
        st['range_log2'] = dataXZ[-1][1] - dataXZ[0][1]
        st['range_fold'] = pow(2.0, st['range_log2'])
        st['interquartile'] = pow(2.0, (dataXZ[int((st['N'] - 1) * 3.0 / 4.0)][1]
                                        - dataXZ[int((st['N'] - 1) / 4.0)][1]))

        # XZ, 04/01/2009: don't try to get H2 value for probe.
        if not cellid and heritability:
            # This field still needs to be put into the Jinja2 template.
            st['heritability'] = heritability

        # Lei Yan
        # 2008/12/19

    return st
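
On the fold-range arithmetic above: 'ProbeSet' trait values are on a log2 scale, so a range of r log2 units corresponds to a 2**r fold change. For example:

range_log2 = 3.0
range_fold = pow(2.0, range_log2)
print(range_fold)  # 8.0, i.e. a log2 range of 3 is an 8-fold difference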
Example #59
0
    def __init__(self, dataset, sample_names, this_trait, sample_group_type,
                 header):

        self.dataset = dataset
        self.this_trait = this_trait
        self.sample_group_type = sample_group_type  # primary or other
        self.header = header

        self.sample_list = []  # The actual list
        self.sample_attribute_values = {}

        self.get_attributes()

        #self.sample_qnorm = get_transform_vals(self.dataset, this_trait)

        if self.this_trait and self.dataset and self.dataset.type == 'ProbeSet':
            self.get_extra_attribute_values()

        for counter, sample_name in enumerate(sample_names, 1):
            sample_name = sample_name.replace("_2nd_", "")

            # ZS: self.this_trait will be a list if it is a Temp trait
            if type(self.this_trait) is list:
                if (counter <= len(self.this_trait)
                        and self.this_trait[counter - 1] != 'X'):
                    sample = webqtlCaseData.webqtlCaseData(
                        name=sample_name,
                        value=float(self.this_trait[counter - 1]))
                else:
                    sample = webqtlCaseData.webqtlCaseData(name=sample_name)
            else:
                # ZS: If there's no value for the sample/strain, create the
                # sample object anyway, so samples with no value are still
                # displayed in the table
                try:
                    sample = self.this_trait.data[sample_name]
                except KeyError:
                    logger.debug("No sample %s, let's create it now" %
                                 sample_name)
                    sample = webqtlCaseData.webqtlCaseData(name=sample_name)

            sample.extra_info = {}
            if self.dataset.group.name == 'AXBXA' and sample_name in (
                    'AXB18/19/20', 'AXB13/14', 'BXA8/17'):
                sample.extra_info['url'] = "/mouseCross.html#AXB/BXA"
                sample.extra_info['css_class'] = "fs12"

            # logger.debug("  type of sample:", type(sample))

            if sample_group_type == 'primary':
                sample.this_id = "Primary_" + str(counter)
            else:
                sample.this_id = "Other_" + str(counter)

            #### For extra attribute columns; currently only used by several datasets - Zach
            if self.sample_attribute_values:
                sample.extra_attributes = self.sample_attribute_values.get(
                    sample_name, {})
                logger.debug("sample.extra_attributes is",
                             pf(sample.extra_attributes))

            self.sample_list.append(sample)

        logger.debug("self.attributes is", pf(self.attributes))

        self.do_outliers()
        #do_outliers(the_samples)
        logger.debug("*the_samples are [%i]: %s" %
                     (len(self.sample_list), pf(self.sample_list)))
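
The KeyError fallback above is the load-bearing pattern: samples with no recorded value still get a placeholder object, so every sample renders as a row in the table. A minimal sketch, independent of webqtlCaseData (names hypothetical):

def build_rows(sample_names, data):
    rows = []
    for name in sample_names:
        try:
            rows.append(data[name])      # existing measurement object
        except KeyError:
            rows.append({"name": name})  # placeholder so the row still shows
    return rows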
Example #60
0
    def dump_raw(dev):
        """Dump whole database."""
        raw = {k: dev[k] for k in dev.keys()}
        _LOGGER.info(pf(raw))