Example #1
def modifyPropertiesFile(propfilename, dic):
    # read existing file
    with open(propfilename, "r") as propfile:
        lines = propfile.readlines()
    # write new version
    with open(propfilename, "w") as propfile:
        # replace old values
        for line in lines:
            words = re.split("[=:]", line)
            # don’t touch lines without = or :
            if len(words) < 2:
                propfile.write(line)
                continue
            # get the key (part before first = or :)
            key = words[0].strip()
            if key in dic:
                val = dic[key]
                if not isinstance(val, basestring):
                    val = CONFIG.get(val[0], val[1])
                # beware =
                val = val.replace("=", "\\=")
                propfile.write(key + " = " + val + "\n")
                del dic[key]
            else:
                propfile.write(line)
        # add new variables
        for key in dic.keys():
            val = dic[key]
            if not isinstance(val, basestring):
                val = CONFIG.get(val[0], val[1])
            # beware =
            val = val.replace("=", "\\=")
            propfile.write(key + " = " + val + "\n")
    putDoneOK()
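
A minimal usage sketch (file name and keys invented; assumes `import re` and the surrounding CONFIG/putDoneOK helpers are in scope). Values may be plain strings or (section, option) pairs resolved through CONFIG.get, as the function shows:

# Hypothetical call, not part of the source:
changes = {
    "db.host": "localhost",        # plain string value
    "db.port": ("db", "port"),     # non-string: resolved via CONFIG.get("db", "port")
}
modifyPropertiesFile("app.properties", changes)
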
Example #2
    def __init__(self, classifier, score_columns, experiment, all_test_target_scores,
                 all_test_decoy_scores, merge_results):

        self.classifier = classifier
        self.score_columns = score_columns
        self.mu, self.nu = calculate_params_for_d_score(classifier, experiment)
        self.merge_results = merge_results
        final_score = classifier.score(experiment, True)
        experiment["d_score"] = (final_score - self.mu) / self.nu
        lambda_ = CONFIG.get("final_statistics.lambda")

        all_tt_scores = experiment.get_top_target_peaks()["d_score"]

        use_pemp = CONFIG.get("final_statistics.emp_p")
        self.error_stat, self.target_pvalues = calculate_final_statistics(all_tt_scores,
                                                                          all_test_target_scores,
                                                                          all_test_decoy_scores,
                                                                          lambda_,
                                                                          use_pemp,
                                                                          False)

        self.number_target_pg = len(experiment.df[experiment.df.is_decoy.eq(False)])
        self.number_target_peaks = len(experiment.get_top_target_peaks().df)
        self.dvals = experiment.df.loc[(experiment.df.is_decoy.eq(True)), "d_score"]
        self.target_scores = experiment.get_top_target_peaks().df["d_score"]
        self.decoy_scores = experiment.get_top_decoy_peaks().df["d_score"]
Example #3
def stopTomcat():
    putWait("Shutting down Tomcat engine")
    # verify if running
    if not isTomcatRunning():
        putDoneOK("(not running)")
        return
    # ask and wait for shutdown
    filename = CONFIG.get("tomcat", "catalina_home") + "logs/catalina.out"
    sh_touch(filename)
    with open(filename, 'r') as logfile:
        logfile.seek(0, 2)
        # stop Tomcat
        commandStop = CONFIG.get("tomcat", "catalina_home") + "bin/shutdown.sh"
        if not sh_exec(commandStop):
            putDoneFail()
            return
        # wait for Tomcat’s unloading of the webapps
        while True:
            where = logfile.tell()
            line = logfile.readline()
            if not line:
                time.sleep(1)
                logfile.seek(where)
            else:
                if line.find("destroy") != -1:
                    break
    putDoneOK()
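
This function and startTomcat (Example #5) share the same tail-follow idiom: seek to the end of the log, poll readline, and rewind on EOF until a marker line appears. A self-contained sketch of that idiom, assuming the log file is already open for reading:

import time

def follow_until(logfile, marker):
    # jump to the end so only lines written from now on are seen
    logfile.seek(0, 2)
    while True:
        where = logfile.tell()
        line = logfile.readline()
        if not line:
            time.sleep(1)          # nothing new yet: wait and rewind
            logfile.seek(where)
        elif marker in line:
            return line
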
Example #4
def parse_cmdline(args):

    options = dict()
    pathes = []

    if "--help" in args:
        print_help()
        return

    if "--version" in args:
        print_version()
        return

    for arg in args:
        if arg.startswith("--"):
            if "=" in arg:
                pre, __, post = arg.partition("=")
                options[pre[2:]] = post
            else:
                options[arg[2:]] = True
        else:
            pathes.append(arg)

    if not pathes:
        print_help()
        raise Exception("no input file given")

    CONFIG.update(options)
    dump_config(CONFIG.config)

    return pathes
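
To illustrate the option grammar, a hedged sketch (argument values invented; note the function also updates the global CONFIG as a side effect):

# "--key=value" becomes options["key"] = "value", a bare "--flag" becomes
# options["flag"] = True, and anything else is collected as an input path.
pathes = parse_cmdline(["--delim.in=tab", "--overwrite", "input1.csv", "input2.csv"])
# pathes == ["input1.csv", "input2.csv"]
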
Example #5
def startTomcat():
    putWait("Starting up Tomcat engine")
    # verify if running
    if isTomcatRunning():
        putDoneOK("(already running)")
        return
    # ask and wait for startup
    filename = CONFIG.get("tomcat", "catalina_home") + "logs/catalina.out"
    sh_touch(filename)
    with open(filename, 'r') as logfile:
        logfile.seek(0, 2)
        # start Tomcat
        commandStart = CONFIG.get("tomcat", "catalina_home") + "bin/startup.sh"
        if not sh_exec(commandStart):
            putDoneFail()
            return
        # wait for Tomcat’s loading of the webapps
        while True:
            where = logfile.tell()
            line = logfile.readline()
            if not line:
                time.sleep(1)
                logfile.seek(where)
            else:
                if line.find("Server startup in") != -1:
                    break
    putDoneOK()
Example #6
    def score(self, table):

        prepared_table, __ = prepare_data_table(table, score_columns=self.score_columns)
        texp = Experiment(prepared_table)
        score = self.classifier.score(texp, True)
        texp["d_score"] = (score - self.mu) / self.nu

        s_values, q_values = lookup_s_and_q_values_from_error_table(texp["d_score"].values,
                                                                    self.error_stat.df)
        texp["m_score"] = q_values
        texp["s_value"] = s_values
        logging.info("mean m_score = %e, std_dev m_score = %e" % (np.mean(q_values),
                                                                  np.std(q_values, ddof=1)))
        logging.info("mean s_value = %e, std_dev s_value = %e" % (np.mean(s_values),
                                                                  np.std(s_values, ddof=1)))
        texp.add_peak_group_rank()

        df = table.join(texp[["d_score", "m_score", "peak_group_rank"]])

        if CONFIG.get("compute.probabilities"):
            df = self.add_probabilities(df, texp)

        if CONFIG.get("target.compress_results"):
            to_drop = [n for n in df.columns if n.startswith("var_") or n.startswith("main_")]
            df.drop(to_drop, axis=1, inplace=True)

        return df
Example #7
def execLDAP(msg, ldiffile):
    putWait(msg)
    if sh_exec("ldapadd -x -D\"cn=" + CONFIG.get("ldap", "login") + "," + CONFIG.get("ldap", "base_dn") + "\" " +
               "-w \"" + CONFIG.get("ldap", "password") + "\" -f " + ldiffile):
       putDoneOK()
       return True
    putDoneFail()
    return False
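
For reference, a runnable trace of the command string this builds (CONFIG values invented):

login, base_dn, password, ldiffile = "admin", "dc=example,dc=com", "secret", "users.ldif"
cmd = ("ldapadd -x -D\"cn=" + login + "," + base_dn + "\" " +
       "-w \"" + password + "\" -f " + ldiffile)
# cmd == 'ldapadd -x -D"cn=admin,dc=example,dc=com" -w "secret" -f users.ldif'
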
Example #8
def getSetConfig(question, section, option, reader=getWithDefault):
    value = CONFIG.get(section, option)
    if CONFIG.isTrue("global", "accept_defaults"):
        putMessage(question + " : using <" + value + ">")
        return value
    new_value = reader(question, value)
    CONFIG.set(section, option, new_value)
    return new_value
Example #9
def isTomcatRunning():
    try:
        cnx = urllib.urlopen( "http://" + CONFIG.get("global", "host") +
                              ":" + CONFIG.get("tomcat", "http_port") + "/" )
        cnx.close()
        return True
    except IOError:
        return False
Example #10
def configurePropertiesWebApp(pretty_name, filename, section_name, dic):
    putMessage("Configuring " + pretty_name + " (" + filename + ".properties) ...")
    webapp = CONFIG.get(section_name, "name")
    propfilename = (CONFIG.get("tomcat", "catalina_home") + "webapps/" + webapp +
                    "/WEB-INF/classes/" + filename + ".properties")
    waitForFile(propfilename)
    putWait("Modifying file " + filename + ".properties")
    modifyPropertiesFile(propfilename, dic)
Example #11
def parse_config(filename):
    """
    Fnord
    """
    # {{{
    filename = expanduser(expandvars(filename))
    LOG.debug("Parsing configuration in file '%s'", filename)
    CONFIG.read(filename)
Example #12
    def iter_semi_supervised_learning(self, train):
        fdr = CONFIG.get("semi_supervised_learner.iteration_fdr")
        lambda_ = CONFIG.get("semi_supervised_learner.iteration_lambda")
        td_peaks, bt_peaks = self.select_train_peaks(train, "classifier_score", fdr, lambda_)

        model = self.inner_learner.learn(td_peaks, bt_peaks, True)
        w = model.get_parameters()
        clf_scores = model.score(train, True)
        return w, clf_scores
Example #13
    def apply_classifier(self, final_classifier, experiment, all_test_target_scores,
                         all_test_decoy_scores, table, p_score=False):

        lambda_ = CONFIG.get("final_statistics.lambda")

        mu, nu, final_score = self.calculate_params_for_d_score(final_classifier, experiment)
        experiment["d_score"] = (final_score - mu) / nu

        if (CONFIG.get("final_statistics.fdr_all_pg")):
            all_tt_scores = experiment.get_target_peaks()["d_score"]
        else:
            all_tt_scores = experiment.get_top_target_peaks()["d_score"]

        df_raw_stat, num_null, num_total = calculate_final_statistics(all_tt_scores, all_test_target_scores,
                                                 all_test_decoy_scores, lambda_)

        scored_table = self.enrich_table_with_results(table, experiment, df_raw_stat)

        if CONFIG.get("compute.probabilities"):
            logging.info( "" )
            logging.info( "Posterior Probability estimation:" )
            logging.info( "Estimated number of null %0.2f out of a total of %s. " % (num_null, num_total) )

            # Note that num_null and num_total are the sum of the
            # cross-validated statistics computed before, therefore the total
            # number of data points selected will be 
            #   len(data) /  xeval.fraction * xeval.num_iter
            # 
            prior_chrom_null = num_null * 1.0 / num_total
            number_true_chromatograms = (1.0-prior_chrom_null) * len(experiment.get_top_target_peaks().df)
            number_target_pg = len( Experiment(experiment.df[(experiment.df.is_decoy == False) ]).df )
            prior_peakgroup_true = number_true_chromatograms / number_target_pg

            logging.info( "Prior for a peakgroup: %s" % (number_true_chromatograms / number_target_pg))
            logging.info( "Prior for a chromatogram: %s" % str(1-prior_chrom_null) )
            logging.info( "Estimated number of true chromatograms: %s out of %s" % (number_true_chromatograms, len(experiment.get_top_target_peaks().df)) )
            logging.info( "Number of target data: %s" % len( Experiment(experiment.df[(experiment.df.is_decoy == False) ]).df ) )

            # pg_score = posterior probability for each peakgroup
            # h_score = posterior probability for the hypothesis that this peakgroup is true (and all other false)
            # h0_score = posterior probability for the hypothesis that no peakgroup is true

            pp_pg_pvalues = posterior_pg_prob(experiment, prior_peakgroup_true, lambda_=lambda_)
            experiment.df[ "pg_score"]  = pp_pg_pvalues
            scored_table = scored_table.join(experiment[["pg_score"]])

            allhypothesis, h0 = posterior_chromatogram_hypotheses_fast(experiment, prior_chrom_null)
            experiment.df[ "h_score"]  = allhypothesis
            experiment.df[ "h0_score"]  = h0
            scored_table = scored_table.join(experiment[["h_score", "h0_score"]])

        final_statistics = final_err_table(df_raw_stat)
        summary_statistics = summary_err_table(df_raw_stat)

        needed_to_persist = (final_classifier, mu, nu,
                             df_raw_stat.loc[:, ["svalue", "qvalue", "cutoff"]], num_null, num_total)
        return (summary_statistics, final_statistics, scored_table), needed_to_persist
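
A toy numeric trace of the prior computation in the compute.probabilities branch above (all numbers invented):

num_null, num_total = 600.0, 1000.0
prior_chrom_null = num_null / num_total                                    # 0.6
n_top_target_peaks, n_target_pg = 500, 2000
number_true_chromatograms = (1.0 - prior_chrom_null) * n_top_target_peaks  # 200.0
prior_peakgroup_true = number_true_chromatograms / n_target_pg             # 0.1
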
Example #14
def set_printer(vendor, product):
    """
    Set the printer now and forever
    """
    CONFIG['vendor'] = vendor
    CONFIG['product'] = product
    CONFIG.save()

    return redirect(url_for('administration'))
Example #15
 def start_semi_supervised_learning(self, train):
     fdr = CONFIG.get("semi_supervised_learner.initial_fdr")
     lambda_ = CONFIG.get("semi_supervised_learner.initial_lambda")
     td_peaks, bt_peaks = self.select_train_peaks(train, "main_score", fdr, lambda_)
     model = self.inner_learner.learn(td_peaks, bt_peaks, False)
     w = model.get_parameters()
     clf_scores = model.score(train, False)
     clf_scores -= np.mean(clf_scores)
     return w, clf_scores
Example #16
	def __init__(self):
		token = oauth.Token(
						key=CONFIG.get("auth_token_key"), 
						secret=CONFIG.get("auth_token_secret")
						)
		consumer = oauth.Consumer(key=CONFIG.get("consumer_key"), 
								secret=CONFIG.get("consumer_secret")
								)
		self.client = oauth.Client(consumer, token)
Example #17
def create(name, cash_percent=0.0, initial_position="{}"):
    if not CONFIG["portfolios"].has_key(name):
        CONFIG["portfolios"][name] = {}
        CONFIG["portfolios"][name]['$'] = cash_percent
        initial_position = yaml.load(initial_position)
        for sym, amt in initial_position.items():
            CONFIG["portfolios"][name][sym] = amt
        CONFIG.commit()
    else:
        raise StandardError, "Portfolio already exists"
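
A hedged usage sketch (portfolio name and symbols invented). Since initial_position goes through yaml.load, a YAML flow mapping works:

create("growth", cash_percent=0.1, initial_position="{AAPL: 0.5, MSFT: 0.4}")
# CONFIG["portfolios"]["growth"] == {'$': 0.1, 'AAPL': 0.5, 'MSFT': 0.4}
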
Example #18
 def __init__(self):
     AbstractLearner.__init__(self)
     c_size = int(CONFIG.get("classifier.cache_size", "500"))
     if CONFIG.get("classifier.weight_classes"):
         logging.info("===> doing weighted polySVM")
         self.classifier = sklearn.svm.SVC(cache_size=c_size, kernel="poly", class_weight="auto")
     else:
         logging.info("===> doing non-weighted polySVM")
         self.classifier = sklearn.svm.SVC(cache_size=c_size, kernel="poly")
     self.scaler = sklearn.preprocessing.StandardScaler()
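
Note: class_weight="auto" only exists in older scikit-learn releases; newer versions spell this option class_weight="balanced", so the snippet as written targets an old sklearn.
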
Example #19
def deployWar(pretty_name, section_name):
    webapp_name = CONFIG.get(section_name, "name")
    if CONFIG.isTrue("tomcat", "use_manager"):
        putWait("Deploying " + pretty_name)
        manageTomcat("deploy?path=/" + webapp_name + "&update=true&war=file://" + 
                     sh_pwd() + "/" + CONFIG.get(section_name, "repo"))
    else:
        putMessage("Deploying " + pretty_name + " ...")
        stopTomcat()
        copyWar(CONFIG.get(section_name, "repo"), webapp_name)
        startTomcat()
Example #20
    def _apply_scorer_out_of_core(self, pathes, delim, scorer):

        merge_results = CONFIG.get("multiple_files.merge_results")
        # TODO: merge_results has nothing to do with the scorer; we need an extra
        # class for writing results, maybe lazy:
        scorer.merge_results = merge_results
        delim_in = CONFIG.get("delim.in")
        scored_tables_lazy = scorer.score_many_lazy(pathes, delim_in)
        final_statistics, summary_statistics = scorer.get_error_stats()
        weights = scorer.classifier.get_parameters()
        return Result(None, None, scored_tables_lazy), None, weights
Example #21
def execDB(msg, db, query):
    putWait(msg)
    if sh_exec("mysql --host=" + CONFIG.get("db", "host") +
                   " --port=" + CONFIG.get("db", "port") +
                   " --user="******"db", "login") +
                   " --password="******"db", "password") +
                   " --database=" + db +
                   " --execute=\"" + query + "\""):
        putDoneOK()
        return True
    putDoneFail()
    return False
Example #22
    def build_instance_dict(self, ins_dict, notification):

        ins_dict['tenant_id']=notification['tenant_id']
        ins_dict['nova_instance_id']=notification[constants.nova_instance_id]
        ins_dict['name']=notification['display_name']
        ins_dict['user']=CONFIG.get("DEFAULT", "vadx_username")
        ins_dict['password']=CONFIG.get("DEFAULT", "vadx_password")
        ins_dict['status']=notification['state']
        ins_dict['communication_type']=CONFIG.get("DEFAULT", "vadx_communication_type")
        #ins_dict['created_time']=datetime.strptime(notification['created_at'],'%Y-%m-%d %H:%M:%S')
        ins_dict['created_time']=self._format_date(notification['created_at'])
        ins_dict['status_description']=notification['state_description']
        return ins_dict
Example #23
 def add_cors_headers(status, headers, exc_info=None):
     headers = Headers(headers)
     headers.add("Access-Control-Allow-Origin",
                 get_origin(status, headers))
     headers.add("Access-Control-Allow-Headers",
                 cfg.get("cors_headers"))
     headers.add("Access-Control-Allow-Credentials",
                 cfg.get("cors_credentials"))
     headers.add("Access-Control-Allow-Methods",
                 cfg.get("cors_methods"))
     headers.add("Access-Control-Expose-Headers",
                 cfg.get("cors_expose_headers"))
     return start_response(status, headers.to_list(), exc_info)
Example #24
    def _learn(self, experiment):
        is_test = CONFIG.get("is_test")
        if is_test:  # for reliable results
            experiment.df.sort("tg_id", ascending=True, inplace=True)

        learner = self.semi_supervised_learner
        ws = []

        neval = CONFIG.get("xeval.num_iter")
        num_processes = CONFIG.get("num_processes")
        all_test_target_scores = []
        all_test_decoy_scores = []

        logging.info("learn and apply scorer")
        logging.info("start %d cross evals using %d processes" % (neval, num_processes))

        if num_processes == 1:
            for k in range(neval):
                (ttt_scores, ttd_scores, w) = learner.learn_randomized(experiment)
                all_test_target_scores.extend(ttt_scores)
                all_test_decoy_scores.extend(ttd_scores)
                ws.append(w.flatten())
        else:
            pool = multiprocessing.Pool(processes=num_processes)
            while neval:
                remaining = max(0, neval - num_processes)
                todo = neval - remaining
                neval -= todo
                args = ((learner, "learn_randomized", (experiment, )), ) * todo
                res = pool.map(unwrap_self_for_multiprocessing, args)
                ttt_scores = [ti for r in res for ti in r[0]]
                ttd_scores = [ti for r in res for ti in r[1]]
                ws.extend([r[2] for r in res])
                all_test_target_scores.extend(ttt_scores)
                all_test_decoy_scores.extend(ttd_scores)
        logging.info("finished cross evals")
        logging.info("")

        # only use scores from the last iteration to build the statistical model:
        if CONFIG.get("semi_supervised_learner.stat_best"):
            all_test_target_scores = ttt_scores
            all_test_decoy_scores = ttd_scores

        # we only use weights from last iteration if indicated:
        if CONFIG.get("semi_supervised_learner.use_best"):
            ws = [ws[-1]]

        final_classifier = self.semi_supervised_learner.averaged_learner(ws)

        return final_classifier, all_test_target_scores, all_test_decoy_scores
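
The while loop above dispatches at most num_processes tasks to the pool per round. A short trace of the chunk sizes (numbers invented):

neval, num_processes = 10, 4
while neval:
    remaining = max(0, neval - num_processes)
    todo = neval - remaining
    neval -= todo
    print(todo)   # rounds of 4, 4, then 2
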
Example #25
def manageTomcat(query):
    try:
        url = ("http://" + CONFIG.get("tomcat", "login") + ":" + CONFIG.get("tomcat", "password") +
               "@" + CONFIG.get("global", "host") + ":" + CONFIG.get("tomcat", "http_port") +
               "/" + CONFIG.get("tomcat", "manager_path") + "/")
        cnx = urllib.urlopen(url + query)
        ret = cnx.readline()
        cnx.close()
        if ret[0:2] == "OK":
            putDoneOK()
        else:
            putDoneFail(error=ret)
    except IOError as e:
        putDoneFail(error=e)
Example #26
    def run(self):

        self.prefix = self.check_pathes()
        dirname = self.determine_output_dir_name()
        out_pathes = self.create_out_pathes(dirname)

        extra_writes = dict(self.extra_writes(dirname))

        to_check = list(v for p in out_pathes for v in p.values())
        to_check.extend(extra_writes.values())

        if not CONFIG.get("target.overwrite"):
            error = check_if_any_exists(to_check)
            if error:
                return False

        self.check_cols = ["transition_group_id", "run_id", "decoy"]
        if CONFIG.get("export.mayu"):
            self.check_cols += mayu_cols()

        logging.info("config settings:")
        for k, v in sorted(CONFIG.config.items()):
            logging.info("    %s: %s" % (k, v))

        start_at = time.time()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            (result, scorer, weights) = self.run_algo()

        compress = CONFIG.get("target.compress_results")
        needed = time.time() - start_at

        set_pandas_print_options()
        self.print_summary(result)
        self.save_results(result, extra_writes, out_pathes)

        self.save_scorer(scorer, extra_writes)
        self.save_weights(weights, extra_writes)

        seconds = int(needed)
        msecs = int(1000 * (needed - seconds))
        minutes = int(needed / 60.0)

        print "NEEDED",
        if minutes:
            print minutes, "minutes and",

        print "%d seconds and %d msecs wall time" % (seconds, msecs)
        print
Example #27
def http_retriable_request(verb, url, headers={}, authenticate=False, params={}):
    """
    Sends an HTTP request, with automatic retrying in case of HTTP Errors 500 or ConnectionErrors
    http_retriable_request('POST', 'http://cc.cloudcomplab.ch:8888/app/',
                           headers={'Content-Type': 'text/occi', [...]}, authenticate=True)
    :param verb: [POST|PUT|GET|DELETE] HTTP keyword
    :param url: The URL to use.
    :param headers: headers of the request
    :param params: query parameters of the request
    :param authenticate: set True for requests requiring authentication, e.g. CC requests
    :return: result of the request
    """
    LOG.debug(verb + ' on ' + url + ' with headers ' + headers.__repr__())

    auth = ()
    if authenticate:
        user = CONFIG.get('cloud_controller', 'user')
        pwd = CONFIG.get('cloud_controller', 'pwd')
        auth = (user, pwd)

    if verb in ['POST', 'DELETE', 'GET', 'PUT']:
        try:
            r = None
            if verb == 'POST':
                if authenticate:
                    r = requests.post(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.post(url, headers=headers, params=params)
            elif verb == 'DELETE':
                if authenticate:
                    r = requests.delete(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.delete(url, headers=headers, params=params)
            elif verb == 'GET':
                if authenticate:
                    r = requests.get(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.get(url, headers=headers, params=params)
            elif verb == 'PUT':
                if authenticate:
                    r = requests.put(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.put(url, headers=headers, params=params)
            r.raise_for_status()
            return r
        except requests.HTTPError as err:
            LOG.error('HTTP Error: should do something more here!' + err.message)
            raise err
Example #28
    def createLdifs(self):
        utils.writeFile("Creating the schema as a ldif file (user.ldif)", "user.ldif", """
dn: cn=user,cn=schema,cn=config
objectClass: olcSchemaConfig
cn: user
olcAttributeTypes: ( 1.1.2.1.1 NAME '%(owner)s' DESC 'Owner ID' SUP name )
olcAttributeTypes: ( 1.1.2.1.2 NAME '%(alias)s' DESC 'Alias DN' SUP name )
olcObjectClasses: ( 1.1.2.2.1 NAME 'user' DESC 'Define user' SUP top STRUCTURAL MUST ( %(uid)s $ %(owner)s ) MAY ( %(alias)s ) )
""" % {"uid": self.cget("user_id"), "owner": self.cget("attribute_owner"), "alias": self.cget("attribute_alias")})
        group_value = self.cget("user_group").split("=")[-1]
        utils.writeFile("Creating the user group as a ldif file (usergroup.ldif)", "usergroup.ldif", """
dn: %(group)s,%(dn)s
objectclass: top
objectclass: organizationalUnit
ou: %(group_val)s
description: users
""" % {"group": self.cget("user_group"), "group_val": group_value, "dn": self.cget("base_dn")} )
        utils.writeFile("Creating the user 'superadmin' as a ldif file (superadmin.ldif)", "superadmin.ldif", """
dn: %(uid)s=superadmin,%(group)s,%(dn)s
objectclass: top
objectclass: user
%(uid)s: superadmin
%(owner)s: superadmin
""" % {"uid": self.cget("user_id"), "group": self.cget("user_group"), "dn": self.cget("base_dn"), "owner": self.cget("attribute_owner")} )
        utils.writeFile("Creating the user '%(anonymous)s' as ldif file (anonymous.ldif)", "anonymous.ldif", """
dn: %(uid)s=%(anonymous)s,%(group)s,%(dn)s
objectclass: top
objectclass: user
%(uid)s: %(anonymous)s
%(owner)s: anonymous
""" % {"anonymous": CONFIG.get("global", "anonymous_user"), "uid": self.cget("user_id"), "group": self.cget("user_group"), "dn": self.cget("base_dn"), "owner": self.cget("attribute_owner")} )
Example #29
def create_app(config_name):
    """Creates the app object with the appropriate configuration settings.

    Args:
        {string} config_name: a string representing a dictionary key matched to
        specific configuration options applied to the initialized app.

    Returns:
        the initialized app object
    """

    # Reference global objects for clarity.
    global bootstrap
    global db

    # Create a blank Flask app.
    app = Flask(__name__)

    # Add configuration options to the app.
    config_settings = CONFIG.get(config_name)
    if config_settings:
        app.config.from_object(config_settings)

    # Initialize the extension objects using the newly configured Flask app.
    bootstrap.init_app(app)
    db.init_app(app)

    # Attach URL routes and custom error page handlers to the app.
    from createblueprint import bp_main
    app.register_blueprint(bp_main)

    # Return a fully configured app instance with routes and error handling.
    return app
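
Typical use of this app-factory pattern (sketch; the 'development' config key is hypothetical):

app = create_app('development')
app.run(debug=True)
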
Example #30
def isActiveMQRunning():
    try:
        cnx = urllib.urlopen(CONFIG.get("activemq", "admin_url"))
        cnx.close()
        return True
    except IOError:
        return False
Example #31
 def process_cpp_files(self, files):
     for fi in files:
         self.__parser_jni_cpp_register_func(fi)
     print(len(self.jni_bridge_map), self.jni_bridge_map)
     utils.write_json(self.jni_bridge_map, CONFIG.local_path("data/jni.json"))
Example #32
def set_config():
    DsCache.clear()
    return CONFIG.set_config(request.json['CONFIG'])
Example #33
    async def refresh_db(self):
        print('received request to refresh database')

        # Setting source data paths

        seed_data_path = CONFIG.get('api_gateway', 'SEED_DATA_PATH')

        # Cleaning up collections
        await db.entities.delete_many({})

        await db.projects.delete_many({})
        await db.domains.delete_many({})
        await db.intents.delete_many({})
        await db.responses.delete_many({})
        await db.stories.delete_many({})
        await db.conversations.delete_many({})
        await db.actions.delete_many({})

        # Inserting Data in collection

        with open(seed_data_path+'projects.json') as json_file:
            data = json.load(json_file)
            await db.projects.insert_many(data)

        # Get project ID

        project = await db.projects.find_one({})
        project_id = project.get('_id')
        print("project ID {}".format(project_id))

        with open(seed_data_path+'domains.json') as json_file:
            data = json.load(json_file)
            await db.domains.insert_many(data)

        await db.domains.update_many({}, {'$set': {'project_id': str(project_id)}})
        domain_id = await db.domains.find_one({})

        with open(seed_data_path+'intents.json') as json_file:
            data = json.load(json_file)
            await db.intents.insert_many(data)

        await db.intents.update_many({}, {'$set': {'project_id': str(project_id), 'domain_id': str(domain_id.get('_id'))}})

        with open(seed_data_path+'entities.json') as json_file:
            data = json.load(json_file)
            await db.entities.insert_many(data)

        await db.entities.update_many({}, {'$set': {'project_id': str(project_id)}})

        with open(seed_data_path+'responses.json') as json_file:
            data = json.load(json_file)
            await db.responses.insert_many(data)

        await db.responses.update_many({}, {'$set': {'project_id': str(project_id), 'domain_id': str(domain_id.get('_id'))}})

        with open(seed_data_path+'stories.json') as json_file:
            data = json.load(json_file)
            await db.stories.insert_many(data)

        await db.stories.update_many({}, {'$set': {'project_id': str(project_id), 'domain_id': str(domain_id.get('_id'))}})

        with open(seed_data_path+'actions.json') as json_file:
            data = json.load(json_file)
            await db.actions.insert_many(data)

        return "Success"
Example #34
    def get_event_list(cls):
        auth = cls.get_auth()

        url = 'http://' + str(CONFIG.get_rest_ip()) + ':' + str(
            CONFIG.get_rest_port()) + '/event'

        req_body = {'url': url}
        req_body_json = json.dumps(req_body)

        header = {
            'Content-Type': 'application/json',
            'Authorization': base64.b64encode(auth)
        }

        cls.CLI_LOG.cli_log(
            'get_event_list:: SEND CMD ---------------------------')

        try:
            url = CONFIG.get_event_list_uri()

            cls.CLI_LOG.cli_log('URL = ' + url)
            cls.CLI_LOG.cli_log('AUTH = ' + auth)

            myResponse = requests.get(url,
                                      headers=header,
                                      data=req_body_json,
                                      timeout=CONFIG.get_rest_timeout())

            cls.CLI_LOG.cli_log('HEADER = ' +
                                json.dumps(header, sort_keys=True, indent=4))
            cls.CLI_LOG.cli_log('BODY = ' +
                                json.dumps(req_body, sort_keys=True, indent=4))

        except:
            # req timeout
            LOG.exception_err_write()
            return False

        cls.CLI_LOG.cli_log(
            'get_event_list:: RECV RES ---------------------------')
        cls.CLI_LOG.cli_log('RESPONSE CODE = ' + str(myResponse.status_code))
        cls.CLI_LOG.cli_log('RESPONSE BODY = ' + str(myResponse.content))

        try:
            res = json.loads(myResponse.content.replace("\'", '"'))
            cls.CLI_LOG.cli_log('BODY = ' +
                                json.dumps(res, sort_keys=True, indent=4))

            cls.HISTORY_LOG.write_history(
                "[%s] --- Current Event History Begin ---",
                str(datetime.now()))

            for line in res['event_list']:
                reason_str = ''
                if type(line['reason']) == list:
                    if len(line['reason']) > 0:
                        reason_str = '\n-- ' + '\n-- '.join(line['reason'])
                else:
                    reason_str = str(line['reason'])

                cls.HISTORY_LOG.write_history(
                    '[%s] %s %s changed from %s to %s %s', line['time'],
                    line['system'], line['item'], line['pre_grade'],
                    line['grade'], reason_str)

            cls.HISTORY_LOG.write_history(
                "[%s] --- Current Event History End ---", str(datetime.now()))

        except:
            LOG.exception_err_write()

        result = json.loads(myResponse.content)

        if myResponse.status_code == 200 and result['Result'] == 'SUCCESS':
            return True
        else:
            return False
Example #35
 def set_cmd_list(cls):
     cls.command_list = CONFIG.get_cmd_list()
Example #36
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from webdriver import ChromeInstance
from perfutils import BrowserPerformance
from logger import Logger
from config import CONFIG
import os
import time
import unittest

Logger = Logger()
logger = Logger.getLogger(name='Mytableauloadtest',
                          level=CONFIG.get('logging').get('level'))


class LoadUrls(unittest.TestCase):
    def test_load_urls(self, event, context):
        # TODO implement

        def get_current_page(driver):
            return driver.title

        instance = ChromeInstance()
        browser = BrowserPerformance(webdriver=instance.driver,
                                     env=os.environ.get(
                                         'ENV', CONFIG.get('default_env')),
                                     application="Mytableau.com",
                                     inputlogger=logger)
        if 'homepage' in event.keys():
            instance.driver.get(event['homepage'])
            instance.wait_for(instance.page_has_loaded)
Example #37
import numpy as np
import logging
from config import CONFIG

logfile = CONFIG.get('Paths','log_file')
logger = logging.getLogger(__name__)
logging.basicConfig(
    #    filename=logfile,
    filemode='w',
    format='%(asctime)s %(message)s',
    datefmt='%m%d%Y %I:%M:%S',
    level=logging.DEBUG
)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)

logger.info('data generation module.')

logger.info(CONFIG['Paths']['author'])
logger.info(CONFIG.get('Paths','author'))

# data generation config
m = CONFIG.get('data_generation','num_samples')
n = CONFIG.get('data_generation', 'num_features')
X = np.random.random([int(m),int(n)])
y = np.dot(X, np.array(np.arange(1,int(n)+1)),).reshape((int(m),1)) + 3
x_test = np.asarray(int(n)*[22]).reshape((1,int(n)))
y_test = np.asarray(1*[.5]).reshape((1,1))

raw_data_path = CONFIG.get('Paths','raw_data_path') + '/raw_data.csv'
Example #38
from flask.ext.seasurf import SeaSurf
from flask.ext.assets import Environment, Bundle
from flask_debugtoolbar import DebugToolbarExtension
from markdown import markdown

from . import __version__
from config import CONFIG as cfg
import db
from models import Experiment
from analysis import ExportExperiment
import utils

import re

app = Flask(__name__)
app.config['CSRF_DISABLE'] = cfg.get('csrf_disable', False)

csrf = SeaSurf(app)

js = Bundle('js/vendor/jquery.js', 'js/vendor/d3.js',
            'js/vendor/bootstrap.js', 'js/experiment.js', 'js/chart.js',
            'js/sixpack.js', 'js/vendor/underscore-min.js', 'js/vendor/spin.min.js',
            'js/vendor/waypoints.min.js', 'js/vendor/zeroclipboard.min.js',
            output="{0}/sixpack.js".format(cfg.get('asset_path', 'gen')))

css = Bundle('css/vendor/bootstrap.css',
             'css/vendor/bootstrap-responsive.css', 'css/sixpack.css',
             output="{0}/sixpack.css".format(cfg.get('asset_path', 'gen')))

assets = Environment(app)
assets.register('js_all', js)
Example #39
def _gen_report_lines(connection, report_template, report_start, report_end,
                      osbn, rpn):
    """
    Генерирует строки отчёта.
    Парвметры:
    connection - соединение с СУБД
    report_template - имя файла шаблона отчёта
    report_start - дата начала периода
    report_end - дата конца периода
    osbn - номер отделения
    rpn - номер точки приёма платежей
    Используется следующий SQL:
    SELECT pay_date, bic, account, pay_n, COUNT(pay_sum), SUM(pay_sum), SUM(pay_c)
    FROM sbasp WHERE pay_date BETWEEN :date1 AND :date2 AND osbn=:osbn AND rpn=:rpn
    GROUP BY pay_date, bic, account, pay_n WITH ROLLUP
    """
    fields = [
        db.table.c.pay_date, db.table.c.bic, db.table.c.account,
        db.table.c.pay_n,
        func.count(db.table.c.pay_sum),
        func.sum(db.table.c.pay_sum),
        func.sum(db.table.c.pay_c)
    ]
    date_cond = db.table.c.pay_date.between(report_start, report_end)
    stmt = select(fields).where(date_cond).where(
        db.table.c.osbn == osbn).where(db.table.c.rpn == rpn).group_by(
            db.table.c.pay_date).group_by(db.table.c.bic).group_by(
                db.table.c.account).group_by(rollup(db.table.c.pay_n))
    result = connection.execute(stmt)
    report_file = open(report_template)
    template = load(report_file)
    report_file.close()
    date_format = CONFIG.get('date_display_format', CONFIG['date_format'])
    enc = CONFIG['report_encoding']
    report_data = {
        'osbn': osbn,
        'rpn': rpn,
        'report_start': report_start.strftime(date_format),
        'report_end': report_end.strftime(date_format)
    }
    report_line = lambda name, data: template[name].rstrip().rstrip(
        '|').format(**data).encode(enc)
    # Report header
    yield report_line('title', report_data)
    last_date, last_bic, last_account = None, None, None
    for pay_date, bic, account, pay_n, pay_count, pay_sum, pay_c in result:
        row_data = {
            'date': pay_date or last_date,
            'bic': bic or last_bic,
            'account': account or last_account,
            'n': pay_n,
            'count': pay_count,
            'sum': money_str(pay_sum),
            'commission': money_str(pay_c)
        }
        row_data['date'] = row_data['date'].strftime(date_format)
        row_data.update(report_data)
        if pay_date != last_date:
            if not pay_date:
                # Period total
                yield report_line('total', row_data)
                continue
            last_date = pay_date
            # Header for the date
            yield report_line('subtitle1', row_data)
        if bic != last_bic:
            if not bic:
                # Subtotal for the day
                yield report_line('subtotal1', row_data)
                continue
            last_bic = bic
            # Header for the bank
            yield report_line('subtitle2', row_data)
        if account != last_account:
            if not account:
                # Subtotal for the bank
                yield report_line('subtotal2', row_data)
                continue
            last_account = account
            # Header for the account
            yield report_line('subtitle3', row_data)
        if not pay_n:
            # Subtotal for the account
            yield report_line('subtotal3', row_data)
        else:
            # Payment
            yield report_line('detail', row_data)
    result.close()
Example #40
    report_template - report template file name
    report_start - period start date
    report_end - period end date
    osbn - branch (OSB) number
    rpn - payment collection point number
    """
    print 'Creating report file:', report_filename
    report = open(report_filename, 'w')
    report.writelines(
        _gen_report_lines(connection, report_template, report_start,
                          report_end, osbn, rpn))
    report.close()


# Output directory for reports
report_path = CONFIG.get('report_path', '')
# Create the output directory if needed
if report_path and not os.path.exists(report_path):
    os.makedirs(report_path)
# Report start date
report_start = datetime.strptime(CONFIG['report_start'],
                                 CONFIG['date_format']).date()
# Report end date
report_end = report_start + timedelta(CONFIG['report_length'] - 1)
print 'Generating report from {} to {}'.format(report_start, report_end)
conn = db.engine.connect()
report_name_template = report_start.strftime(CONFIG['report_name_format'])
# Loop over branches and payment collection points
for osbn, rpn in CONFIG['osb_rp_list']:
    report_name = report_name_template.format(
        CONFIG['report_length'], osbn, rpn) + CONFIG['report_file_format']
Example #41
from config import CONFIG

_REDIS = None
if CONFIG['cache.type'] == 'redis':
    import redis
    _REDIS = redis.StrictRedis(
        host=CONFIG['cache.redis.host'],
        port=CONFIG['cache.redis.port'],
        db=CONFIG['cache.redis.db'],
        password=CONFIG['cache.redis.password'] or None,
    )
    print(_REDIS, CONFIG['cache.redis.host'], CONFIG['cache.redis.port'],
          CONFIG['cache.redis.password'])

_REDIS_PREFIX = ''
if CONFIG.get("cache.redis.prefix", ""):
    _REDIS_PREFIX = CONFIG["cache.redis.prefix"] + ":"


def put(key, value):
    """
    Save `value` with `key`, and serialize it if needed
    """

    if _REDIS_PREFIX:
        key = _REDIS_PREFIX + key

    if CONFIG["cache.type"] == "redis" and _REDIS:
        if isinstance(value, (dict, list)):
            value = json.dumps(value)
Example #42
def links(handler):
    for p in CONFIG.keys():
        handler.response.write('<a href="login/{p}">{p}</a><br />'.format(p=p))
    handler.response.write('<br /><br />')
Example #43
app = Flask(__name__)
csrf = SeaSurf(app)

js = Bundle('js/vendor/jquery.js',
            'js/vendor/d3.js',
            'js/vendor/bootstrap.js',
            'js/experiment.js',
            'js/chart.js',
            'js/sixpack.js',
            'js/vendor/underscore-min.js',
            'js/vendor/spin.min.js',
            'js/vendor/waypoints.min.js',
            'js/vendor/zeroclipboard.min.js',
            filters=['closure_js'],
            output="{0}/sixpack.js".format(cfg.get('asset_path', 'gen')))

css = Bundle('css/vendor/bootstrap.css',
             'css/vendor/bootstrap-responsive.css',
             'css/sixpack.css',
             filters=['yui_css'],
             output="{0}/sixpack.css".format(cfg.get('asset_path', 'gen')))

assets = Environment(app)
assets.register('js_all', js)
assets.register('css_all', css)


@app.route('/_status')
@utils.service_unavailable_on_connection_error
def status():
Example #44
from awsapi import AwsAPI
from logger import Logger
from config import CONFIG
import inspect
import json
import os
import sys

__author__ = 'aftabalam01'

PERF_ENABLE_ENV = os.getenv("PERFORMANCE_ENABLE",
                            CONFIG.get('performance_enable'))
ENV = os.getenv("ENV", CONFIG.get('default_env'))
Logger = Logger()


class BrowserPerformance(object):
    """
    input Browser windows driver. this util will execute java script to capture performance 
    navigation API timing as json and then post it to aws sqs
    """
    def __init__(self,
                 webdriver,
                 env=ENV,
                 application="Enterprise Applications",
                 inputlogger=Logger.getLogger(
                     name=__name__, level=CONFIG.get('logging').get('level'))):
        self.awsapi = AwsAPI(environment=ENV)
        self.logger = inputlogger
        self.application_name = application
        self.webdriver = webdriver
Example #45
    def test_load_urls(self, event, context):
        # TODO implement

        def get_current_page(driver):
            return driver.title

        instance = ChromeInstance()
        browser = BrowserPerformance(webdriver=instance.driver,
                                     env=os.environ.get(
                                         'ENV', CONFIG.get('default_env')),
                                     application="Mytableau.com",
                                     inputlogger=logger)
        if 'homepage' in event.keys():
            instance.driver.get(event['homepage'])
            instance.wait_for(instance.page_has_loaded)
            page_data = get_current_page(instance.driver)
            browser.set_page_context(action='LoginPage',
                                     testname=__name__,
                                     pageContext='LoginPage')
            browser.capture_navigation_timing()
            if "Log In" in get_current_page(instance.driver):
                username = instance.driver.find_element(By.ID, "os_username")
                password = instance.driver.find_element(By.ID, "os_password")
                username.send_keys(os.getenv('LOGIN_ID'))
                password.send_keys(os.getenv('LOGIN_PASSWD'))
                login = instance.driver.find_element(By.ID, "loginButton")
                login.click()
                instance.wait_for(instance.page_has_loaded)
                page_data = get_current_page(instance.driver)
                print("logged {}".format(page_data))
                browser.set_page_context(action='loginButton',
                                         testname=__name__,
                                         pageContext='LoginPage')
                browser.capture_navigation_timing()
            url_count = len(event['uris'])
            i_count = 0
            for pages in event['uris']:
                page = instance.driver.get(event['homepage'] + pages)
                page_data = get_current_page(instance.driver)
                print("pages {}".format(page_data))
                browser.set_page_context(action=page_data,
                                         testname=__name__,
                                         pageContext='LoginPage')
                browser.capture_navigation_timing()

                if i_count % 5 == 0 or i_count == url_count:  # edit every fifth URL, or the last one in the list
                    try:
                        edit = instance.driver.find_element(
                            By.ID, "editPageLink")  # open edit
                        edit.click()
                        # check if "Edit" is in the title
                        if "Edit" in get_current_page(instance.driver):
                            # make a trivial edit
                            page.send_keys('loadtestedit')
                            # find publish button
                            publish = instance.driver.find_element(
                                By.ID, "rte-button-publish")
                            publish.click()
                    except:
                        print("edit link is not found")

                i_count += 1
                time.sleep(10)

        instance.driver.close()
        return page_data
Example #46
    cfg.use_mask = args.use_mask
    cfg.use_bg = args.use_bg
    cfg.elbo_weights = {
        'kl_latent': args.kl_latent,
        'kl_spatial': args.kl_spatial,
        'exp_attention': args.exp_attention,
        'exp_nll': args.exp_nll,
        'query_nll': args.query_nll
    }
    # I/O path configurations
    cfg.DATA_ROOT = args.input_dir
    cfg.ckpt_base = args.output_dir

    ###########################################
    # Config gpu usage
    ###########################################
    cfg.nodes = args.nodes
    cfg.gpus = args.gpus
    cfg.nrank = args.nrank
    cfg.gpu_start = args.gpu_start
    cfg.world_size = args.gpus * args.nodes  #

    cfg = running_cfg(cfg)
    train(cfg.gpu_start, cfg)


##############################################################################
if __name__ == "__main__":
    cfg = CONFIG()
    main(cfg)
Example #47
    def set_log_config(cls):
        if (CONFIG.get_cli_log().upper()) == 'ON':
            cls.cli_log_flag = True

        if (CONFIG.get_trace_log().upper()) == 'ON':
            cls.trace_log_flag = True
Example #48
from __future__ import division
from __future__ import print_function
from sklearn import metrics
from sklearn.metrics.pairwise import cosine_similarity
import time
import torch
import torch.nn as nn
from src.utils.utils import *
from src.models.gcn import GCN
from config import CONFIG

cfg = CONFIG()

if len(sys.argv) != 2:
    sys.exit("Use: python -m src.train <dataset>")

datasets = ['dblp', 'M10', 'covid', 'covid_title']
dataset = sys.argv[1]

if dataset not in datasets:
    sys.exit("wrong dataset name")
cfg.dataset = dataset

# Set random seed
seed = 1
np.random.seed(seed)
torch.manual_seed(seed)
# if torch.cuda.is_available():
#     torch.cuda.manual_seed(seed)

# Settings
Example #49
    def get_auth():
        id = CONFIG.get_rest_id().strip()
        pw = CONFIG.get_rest_pw().strip()
        auth = id + ':' + pw

        return auth
Example #50
def make_request():
    r = requests.get(CONFIG.HTTP_SERVER_URL())
    print(r.content)
Example #51
    def process_cmd(cls, cmd):
        try:
            # remove space
            cmd = cmd.strip()

            if len(cmd.strip()) == 0:
                cls.set_cli_ret_flag(True)
                return
            elif (cmd.startswith('onos-shell ')
                  or cmd.startswith('os-shell ')) and len(cmd.split(' ')) > 2:
                pass
            elif cmd not in cls.cli_validate_list:
                tmp = cmd.split(' ')

                if tmp[0] == 'sys':
                    if len(tmp) == 1:
                        print 'system name is missing.'
                    if len(tmp) >= 2:
                        print '[' + cmd[4:] + '] is invalid system name.'
                else:
                    print '[' + cmd + '] is undefined command.'
                cls.set_cli_ret_flag(True)
                return
            elif cmd.startswith('sys '):
                cls.set_cli_ret_flag(True)

                tmp = cmd.split(' ')
                if len(tmp) == 2:
                    cls.selected_sys = (cmd.split(' '))[1]
                    cls.CLI_LOG.cli_log('CHANGE TARGET SYSTEM = ' +
                                        cls.selected_sys)
                    return
            else:
                cls.set_cli_ret_flag(True)
                tmp = cmd.split(' ')

                if (len(tmp) == 1
                        and CONFIG.get_config_instance().has_section(cmd)):
                    param = CONFIG.cli_get_value(
                        cmd, CONFIG.get_cmd_opt_key_name()).replace(',', '|')
                    param = param.replace(' ', '')
                    print 'This command requires parameter.'
                    print cmd + ' [' + param + ']'
                    return

            cls.set_cli_ret_flag(False)

            tmr = threading.Timer(3, cls.check_timeout)
            tmr.start()

            cls.CLI_LOG.cli_log('START SEND COMMAND = ' + cmd)

            ret_code, myResponse = cls.send_rest(cmd)

            # rest timeout
            if ret_code == -1:
                return

            cls.set_cli_ret_flag(True)

            if (myResponse.status_code == 200):
                cls.parsingRet(myResponse.content)
            else:
                print 'response-code = ' + str(myResponse.status_code)
                print 'content = ' + myResponse.content
        except:
            LOG.exception_err_write()
Example #52
    )
    app.process_pool = process_pool
    logger.info('Services started')
    yield
    input_queue_listener_task.cancel()
    app.process_pool.shutdown(wait=True)
    logger.info('Services stopped')


if __name__ == '__main__':
    with suppress(KeyboardInterrupt):
        handler = logging.StreamHandler()
        logger.addHandler(handler)
        formatter = logging.Formatter('%(asctime)s  %(levelname)s: %(message)s')
        handler.setFormatter(formatter)
        if CONFIG.get('debug'):
            logger.setLevel(logging.DEBUG)
        app = web.Application()
        app.cleanup_ctx.append(repository_process)
        app.cleanup_ctx.append(files_storage_process)
        app.cleanup_ctx.append(queue_listener_process)
        setup_aiohttp_apispec(app)
        app.middlewares.append(validation_middleware)
        app.add_routes([
            web.post('/api/v1/image', load_image),
            web.get('/api/v1/image/{image_id}', get_image),
            web.get('/api/v1/image/{image_id}/check', check_status),
        ])
        web.run_app(
            app,
            host=CONFIG.get('host'),
Example #53
    def send_rest(cls, cmd):
        auth = cls.get_auth()

        tmp = cmd.split(' ')
        param = ''
        system = ''

        if cmd.startswith('onos-shell ') or cmd.startswith('os-shell '):
            sys_name = tmp[1]
            param = cmd[len(tmp[0]) + 1 + len(sys_name) + 1:]
            system = sys_name
        else:
            system = cls.selected_sys

            if len(tmp) == 2:
                param = tmp[1]

        cmd = tmp[0]

        req_body = {'command': cmd, 'system': system, 'param': param}
        req_body_json = json.dumps(req_body)

        header = {
            'Content-Type': 'application/json',
            'Authorization': base64.b64encode(auth)
        }

        cls.CLI_LOG.cli_log('send_rest:: SEND CMD ---------------------------')

        try:
            url = CONFIG.get_cmd_addr()
            cls.CLI_LOG.cli_log('URL = ' + url)
            cls.CLI_LOG.cli_log('AUTH = ' + auth)

            myResponse = requests.get(url,
                                      headers=header,
                                      data=req_body_json,
                                      timeout=CONFIG.get_rest_timeout())

            cls.CLI_LOG.cli_log('COMMAND = ' + cmd)
            cls.CLI_LOG.cli_log('SYSTEM = ' + cls.selected_sys)
            cls.CLI_LOG.cli_log('HEADER = ' +
                                json.dumps(header, sort_keys=True, indent=4))
            cls.CLI_LOG.cli_log('BODY = ' +
                                json.dumps(req_body, sort_keys=True, indent=4))

        except:
            # req timeout
            LOG.exception_err_write()
            return -1, None

        cls.CLI_LOG.cli_log('send_rest:: RECV RES ---------------------------')
        cls.CLI_LOG.cli_log('RESPONSE CODE = ' + str(myResponse.status_code))

        try:
            cls.CLI_LOG.cli_log(
                'BODY = ' +
                json.dumps(json.loads(myResponse.content.replace("\'", '"')),
                           sort_keys=True,
                           indent=4))
        except:
            cls.CLI_LOG.cli_log('BODY = ' + myResponse.content)

        return 1, myResponse
Example #54
def is_robot(user_agent):
    if user_agent is None:
        return False
    regex = re.compile(r"{0}".format(cfg.get('robot_regex')), re.I)
    return regex.search(unquote(user_agent)) is not None
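
A hedged illustration, assuming the configured robot_regex is something like "bot|crawler|spider" (the actual pattern lives in cfg):

is_robot("Mozilla/5.0 (compatible; Googlebot/2.1)")    # True under that assumption
is_robot("Mozilla/5.0 (Windows NT 10.0) Chrome/120")   # False
is_robot(None)                                         # False
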
Example #55
import redis
from redis.connection import PythonParser

from config import CONFIG as cfg

# Because of a bug (https://github.com/andymccurdy/redis-py/issues/318) with
# script reloading in `redis-py`, we need to force the `PythonParser` to prevent
# sixpack from crashing if redis restarts (or scripts are flushed).
if cfg.get('redis_sentinels'):
    from redis.sentinel import Sentinel, SentinelConnectionPool
    service_name = cfg.get('redis_sentinel_service_name')
    sentinel = Sentinel(sentinels=cfg.get('redis_sentinels'),
                        password=cfg.get('redis_password', None),
                        socket_timeout=cfg.get('redis_socket_timeout'))
    pool = SentinelConnectionPool(service_name, sentinel,
                                db=cfg.get('redis_db'),
                                max_connections=cfg.get('redis_max_connections'),
                                parser_class=PythonParser)
else:
    from redis.connection import ConnectionPool
    pool = ConnectionPool(host=cfg.get('redis_host'),
                        port=cfg.get('redis_port'),
                        password=cfg.get('redis_password', None),
                        db=cfg.get('redis_db'),
                        max_connections=cfg.get('redis_max_connections'),
                        parser_class=PythonParser)

REDIS = redis.StrictRedis(connection_pool=pool)
DEFAULT_PREFIX = cfg.get('redis_prefix')
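
Either branch yields the same client interface; a usage sketch (key and TTL invented):

REDIS.setex('example-key', 60, 'value')   # StrictRedis argument order: (name, time, value)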

Example #56
def require_args():

    # timestamp
    stt = time.strftime('%Y%m%d-%H%M%S', time.gmtime())
    tt = int(time.time())

    cfg.add_argument('--session',
                     default=stt,
                     type=str,
                     help='session name (default: %s)' % stt)
    cfg.add_argument('--sess-dir',
                     default='sessions',
                     type=str,
                     help='directory to store session. (default: sessions)')
    cfg.add_argument('--print-args',
                     action='store_true',
                     help='do nothing but print all args. (default: False)')
    cfg.add_argument('--seed',
                     default=tt,
                     type=int,
                     help='session random seed. (default: %d)' % tt)
    cfg.add_argument('--brief',
                     action='store_true',
                     help='only print log messages with priority higher '
                     'than debug. (default: False)')
    cfg.add_argument(
        '--debug',
        action='store_true',
        help='if debugging, no log or checkpoint files will be stored. '
        '(default: False)')
    cfg.add_argument('--gpus',
                     default='',
                     type=str,
                     help='available gpu list. (default: \'\')')
    cfg.add_argument('--resume',
                     default=None,
                     type=str,
                     help='path to resume session. (default: None)')
    cfg.add_argument('--restart',
                     action='store_true',
                     help='load session status and start a new one. '
                     '(default: False)')
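A hedged sketch of how such a parser is usually driven; it assumes cfg exposes an argparse-style parse_args(), which the snippet itself does not show:

# assumption: cfg wraps (or behaves like) argparse.ArgumentParser
require_args()
args = cfg.parse_args()
if args.print_args:
    print(vars(args))  # dump all parsed arguments and do nothing else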
Ejemplo n.º 57
class SourceFileScanner:
    """
    通过扫每一个文件,根据后缀找出java、c/c++文件,然后找出JNI接口的定义。
    TODO:
    1. (完成)获取目录下所有目标文件。
    2.初步提取Java和C++ JNI接口。
    """
    FILE_LIST_PATH = CONFIG.local_path("data/file_list.txt")
    TARGET_EXT = [".java", ".h", ".cc", ".c", ".cpp", ".aidl"]
    CPP_EXT = [".h", ".cc", ".cpp", ".c"]
    SCAN_DIRS = ["frameworks"]

    def __init__(self, aosp_path=CONFIG.DEFAULT_AOSP_PATH):
        self.file_list = []
        self.file_ext_mp = {}
        self.__scan_files(aosp_path)
        self.__process_files()

    def __scan_files(self, base_dir):
        if osp.exists(self.FILE_LIST_PATH):
            utils.write_log("Load file list from cache: %s", self.FILE_LIST_PATH)
            with open(self.FILE_LIST_PATH) as fp:
                for line in fp:
                    self.file_list.append(line.rstrip())
            return
        for ch in self.SCAN_DIRS:
            dir_path = osp.join(base_dir, ch)
            self.file_list += self.__walk_dir(dir_path)
        # write the cache once, after every directory has been scanned;
        # reopening the file inside the loop would overwrite earlier results
        with open(self.FILE_LIST_PATH, "wb") as fp:
            for fi in self.file_list:
                fp.write((fi + "\n").encode("utf-8"))

    def __walk_dir(self, pt):
        all_files = []
        files = os.listdir(pt)
        for fp in files:
            cur_path = osp.join(pt, fp)
            if osp.isdir(cur_path):
                all_files += self.__walk_dir(cur_path)
            else:
                if os.path.isfile(cur_path):
                    name, ext = osp.splitext(fp)
                    if ext in self.TARGET_EXT:
                        all_files.append(cur_path)
                # else:
                #     print("is not file", cur_path, os.path.exists(cur_path))
        return all_files

    def __process_files(self):
        processor_map = {
            ".java": self.__process_java_file,
            ".c": self.__process_cpp_file,
            ".cc": self.__process_cpp_file,
            ".cpp": self.__process_cpp_file,
            ".h": self.__process_cpp_file,
            ".aidl": self.__process_aidl
        }
        # dispatch each file to the processor for its extension
        # (the per-language processors below are still stubs)
        for fp in self.file_list:
            ext = osp.splitext(fp)[1]
            if ext in processor_map:
                processor_map[ext](fp)

    def __process_java_file(self, file):
        pass

    def __process_cpp_file(self, file):
        pass

    def __process_aidl(self, file):
        pass

    def get_files(self, ext):
        # materialise the filter into a list so the cached value survives
        # repeated iteration (a bare filter() is single-use on Python 3)
        if ext not in self.file_ext_mp:
            self.file_ext_mp[ext] = [f for f in self.file_list
                                     if f.endswith(ext)]
        return self.file_ext_mp[ext]

    def get_cpp_files(self):
        for ext in self.CPP_EXT:
            for item in self.get_files(ext):
                yield item

    def statistics(self):
        num = len(self.file_list)
        utils.write_log("Total Files: %d", num)
        java_num = len(list(filter(lambda f: f.endswith(".java"), self.file_list)))
        cpp_num = len(list(filter(lambda f: f.endswith(tuple(self.CPP_EXT)),
                                  self.file_list)))
        aidl_num = len(list(filter(lambda f: f.endswith(".aidl"), self.file_list)))
        utils.write_log("java files: %d", java_num)
        utils.write_log("cpp files: %d", cpp_num)
        utils.write_log("aidl files: %d", aidl_num)
Ejemplo n.º 58
import os

from flask import Flask, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_babel import Babel
from config import CONFIG
from flask_cors import CORS

# Define the WSGI application object
APP = Flask(__name__)

# CORS Simple Usage
CORS = CORS(APP)  # TODO: implement dynamic CORS?

# Configurations
__ENV = os.environ.get("FLASK_ENV", "default")
APP.config.from_object(CONFIG.get(__ENV))

with APP.app_context():
    # Read the password schemes from the configuration
    PS = current_app.config['PASSWORD_SCHEMES'] or ['pbkdf2_sha512']
    # Read the items-per-page setting from the configuration
    PP = current_app.config['PER_PAGE'] or 100

# Define the database object which is imported
# by modules and controllers
DB = SQLAlchemy(APP, session_options={"autoflush": False})

# Object serialization and deserialization, lightweight and fluffy
MA = Marshmallow(APP)

# Babel adds i18n and l10n support to any Flask application
BABEL = Babel(APP)
Ejemplo n.º 59
class TRACE():
    TRACE_LOG = None
    trace_l2_cond_list = []
    trace_l3_cond_list = []

    compute_id = ''
    compute_list = {}

    @classmethod
    def set_trace_log(cls, trace_log):
        cls.TRACE_LOG = trace_log

    @classmethod
    def send_trace(cls, ip, condition):
        try:
            # log the trace request
            cls.TRACE_LOG.trace_log('START TRACE | ip = ' + ip +
                                    ', condition = ' + condition)
        except:
            LOG.exception_err_write()

    @classmethod
    def set_cnd_list(cls):
        cls.compute_id = CONFIG.get_trace_cpt_id()
        cpt_list = CONFIG.get_trace_cpt_list()

        for cpt in cpt_list.split(','):
            cpt = cpt.strip()

            tmp = cpt.split(':')

            if len(tmp) == 2:
                cls.compute_list[tmp[0]] = tmp[1]

        cls.trace_l2_cond_list = CONFIG.get_cnd_list('l2')
        cls.trace_l3_cond_list = CONFIG.get_cnd_list('l3')

    @staticmethod
    def valid_IPv4(address):
        try:
            parts = address.split(".")

            if len(parts) != 4:
                return False
            for item in parts:
                if len(item) > 3:
                    return False
                if not 0 <= int(item) <= 255:
                    return False
            return True
        except:
            LOG.exception_err_write()
            return False

    ssh_options = '-o StrictHostKeyChecking=no ' \
                  '-o ConnectTimeout=' + str(CONFIG.get_ssh_timeout())

    @classmethod
    def ssh_exec(cls, username, node, command):
        command = 'ovs-appctl ofproto/trace br-int \'' + command + '\''

        cls.TRACE_LOG.trace_log('START TRACE | username = ' + username +
                                ', ip = ' + node + ', condition = ' + command)

        cmd = 'ssh %s %s@%s %s' % (cls.ssh_options, username, node, command)
        cls.TRACE_LOG.trace_log('Command: ' + cmd)

        try:
            result = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            output, error = result.communicate()

            if result.returncode != 0:
                cls.TRACE_LOG.trace_log("SSH_Cmd Fail, cause => " + error)
                return 'SSH FAIL\nCOMMAND = ' + command + '\nREASON = ' + error
            else:
                cls.TRACE_LOG.trace_log("ssh command execute successful\n" +
                                        output)
                return cls.parsing(output)
        except:
            LOG.exception_err_write()

    @staticmethod
    def parsing(output):
        try:
            result = ''
            result_final = ''
            result_flow = ''
            last_action = ''
            final_flag = False
            lines = output.splitlines()

            for line in lines:
                line = line.strip()

                if final_flag:
                    result_final = result_final + line + '\n'

                    # only the 'Datapath actions:' line decides the verdict;
                    # updating it on every line would let a later line
                    # overwrite a DROP with SUCCESS
                    if line.startswith('Datapath actions:'):
                        if 'drop' in line:
                            result = 'result = [DROP]\n'
                        else:
                            result = 'result = [SUCCESS]\n'

                    continue

                if (line.startswith('Bridge:') or line.startswith('Rule:')):
                    result_flow = result_flow + line + '\n'
                elif (line.startswith('Flow:')
                      or line.startswith('OpenFlow actions=')):
                    result_flow = result_flow + line + '\n\n'

                    if line.startswith('OpenFlow actions='):
                        if 'output' in line:
                            last_action = 'Last OpenFlow action = [' + line[
                                line.find('output'):] + ']\n\n'
                        else:
                            last_action = 'Last ' + line + '\n\n'

                elif line.startswith('Final flow:'):
                    result_final = result_final + line + '\n'
                    final_flag = True

            return result + last_action + result_final + '\n' + result_flow
        except:
            LOG.exception_err_write()
            return 'parsing error\n' + output
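The static validator can be exercised on its own; these calls never touch TRACE_LOG or the SSH path:

print(TRACE.valid_IPv4('10.0.0.1'))   # -> True
print(TRACE.valid_IPv4('256.1.1.1'))  # -> False (octet out of range)
print(TRACE.valid_IPv4('10.0.0'))     # -> False (needs four octets)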
Ejemplo n.º 60
import numpy as np
import os
import librosa
from get_breath_sound import detSinusouds as detSin, ssh, getSSHVUV as sshVUV
from config import CONFIG

config = CONFIG()


def get_unvoiced_intervals(x, fs):
    x = x / (1.01 * np.max(np.abs(x)))
    signal_length = x.size
    detSinusoids, mxLinear, sinPeaksMag, sinPeaksBin = detSin.getSinusoids(
        x, fs)
    H = 0.005 * fs  # 5ms
    N = 1024  # FFT size
    resntFreq, sshVal = ssh.sumSpectHarm(detSinusoids, fs, H, N)
    sshVal = np.power(sshVal, 2)
    sshVal = sshVal / np.max(sshVal)
    begVoic, endVoic = sshVUV.sshVUV(sshVal, H, fs, N)
    timeBegVoicSamp = np.array(begVoic * H, dtype='int32')
    timeEndVoicSamp = np.array(endVoic * H, dtype='int32')
    timeBegUnvoicSamp = np.concatenate([[0], timeEndVoicSamp])
    timeEndUnvoicSamp = np.concatenate([timeBegVoicSamp, [signal_length - 1]])
    length = 0
    for i in range(len(timeBegUnvoicSamp)):
        length += timeEndUnvoicSamp[i] - timeBegUnvoicSamp[i] + 1
    y = np.zeros(length)
    end = 0
    for i in range(len(timeBegUnvoicSamp)):
        interval_length = timeEndUnvoicSamp[i] + 1 - timeBegUnvoicSamp[i]