Example #1
    def __init__(self, classifier, score_columns, experiment,
                 all_test_target_scores, all_test_decoy_scores, merge_results):

        self.classifier = classifier
        self.score_columns = score_columns
        self.mu, self.nu = calculate_params_for_d_score(classifier, experiment)
        self.merge_results = merge_results
        final_score = classifier.score(experiment, True)
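        # Standardize the raw classifier score into a d_score using the
        # location/scale parameters (mu, nu) computed above.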
        experiment["d_score"] = (final_score - self.mu) / self.nu
        lambda_ = CONFIG.get("final_statistics.lambda")

        all_tt_scores = experiment.get_top_target_peaks()["d_score"]

        use_pemp = CONFIG.get("final_statistics.emp_p")
        use_pfdr = CONFIG.get("final_statistics.pfdr")

        self.error_stat, self.target_pvalues = calculate_final_statistics(
            all_tt_scores, all_test_target_scores, all_test_decoy_scores,
            lambda_, use_pemp, use_pfdr)

        self.number_target_pg = len(
            experiment.df[experiment.df.is_decoy.eq(False)])
        self.number_target_peaks = len(experiment.get_top_target_peaks().df)
        self.dvals = experiment.df.loc[(experiment.df.is_decoy.eq(True)),
                                       "d_score"]
        self.target_scores = experiment.get_top_target_peaks().df["d_score"]
        self.decoy_scores = experiment.get_top_decoy_peaks().df["d_score"]
Example #2
    def score(self, table):

        prepared_table, __ = prepare_data_table(
            table, score_columns=self.score_columns)
        texp = Experiment(prepared_table)
        score = self.classifier.score(texp, True)
        texp["d_score"] = (score - self.mu) / self.nu

        s_values, q_values = lookup_s_and_q_values_from_error_table(
            texp["d_score"].values, self.error_stat.df)
        texp["m_score"] = q_values
        texp["s_value"] = s_values
        logging.info("mean m_score = %e, std_dev m_score = %e" %
                     (np.mean(q_values), np.std(q_values, ddof=1)))
        logging.info("mean s_value = %e, std_dev s_value = %e" %
                     (np.mean(s_values), np.std(s_values, ddof=1)))
        texp.add_peak_group_rank()

        df = table.join(texp[["d_score", "m_score", "peak_group_rank"]])

        if CONFIG.get("compute.probabilities"):
            df = self.add_probabilities(df, texp)

        if CONFIG.get("target.compress_results"):
            to_drop = [
                n for n in df.columns
                if n.startswith("var_") or n.startswith("main_")
            ]
            df.drop(to_drop, axis=1, inplace=True)

        return df
Example #3
    def send(self, indicator, host, value):
        """Send an email alert"""
        msg = MIMEText("""
Monitoring alert for %(host)s. Indicator %(indicator)s is %(comparison)s threshold.
Value: %(value).2f%(unit)s, threshold: %(threshold)s%(unit)s.""" % {
            'host': host,
            'indicator': indicator.name.upper(),
            'comparison': 'above' if indicator.comparison == 'gt' else 'below',
            'value': value,
            'threshold': indicator.threshold,
            'unit': indicator.unit
        })
        msg['Subject'] = '[%s] Monitoring alert for %s' % (
            host,
            indicator.name.upper()
        )
        smtp_i = None
        try:
            smtp_i = smtplib.SMTP(self.smtp_host)
            smtp_i.sendmail(
                CONFIG.get('mail', 'MAIL_FROM'),
                str(CONFIG.get('mail', 'MAIL_TO')).split(','),
                msg.as_string()
            )
        except Exception as e:
            click.secho('Mail send error: %s' % e, fg='red')
        finally:
            if smtp_i:
                smtp_i.quit()
Example #4
    def score(self, table):

        prepared_table, __ = prepare_data_table(table, score_columns=self.score_columns)
        texp = Experiment(prepared_table)
        score = self.classifier.score(texp, True)
        texp["d_score"] = (score - self.mu) / self.nu

        s_values, q_values = lookup_s_and_q_values_from_error_table(texp["d_score"].values,
                                                                    self.error_stat.df)
        texp["m_score"] = q_values
        texp["s_value"] = s_values
        logging.info("mean m_score = %e, std_dev m_score = %e" % (np.mean(q_values),
                                                                  np.std(q_values, ddof=1)))
        logging.info("mean s_value = %e, std_dev s_value = %e" % (np.mean(s_values),
                                                                  np.std(s_values, ddof=1)))
        texp.add_peak_group_rank()

        df = table.join(texp[["d_score", "m_score", "peak_group_rank"]])

        if CONFIG.get("compute.probabilities"):
            df = self.add_probabilities(df, texp)

        if CONFIG.get("target.compress_results"):
            to_drop = [n for n in df.columns if n.startswith("var_") or n.startswith("main_")]
            df.drop(to_drop, axis=1, inplace=True)

        return df
Example #5
def load_dataset():
    """加载各主题对话数据
    :return: Dict
    {
        '电影': [],
        '数码产品': [],
        '音乐': [],
        '美食': [],
        '体育': []
    }
    """
    print('开始加载数据集...')
    data = {}
    for topic in os.listdir(CONFIG.get('DATA_DIR')):
        data[topic] = []
        topic_path = os.path.join(CONFIG.get('DATA_DIR'), topic)
        print('Loading data for topic <%s>...' % topic)
        for file in tqdm(os.listdir(topic_path)):
            with open(os.path.join(topic_path, file), 'r',
                      encoding='utf-8') as f:
                # Use the file name as the unique session id
                sess = json.load(f)
                sess['pid'] = file.split('.')[0]
                data[topic].append(sess)

    print('Finished loading dataset.')
    return data
Example #6
def calc_atr(clO):
    '''
    Function to calculate the ATR (average true range).
    This is the average candle variation in pips for the desired
    timeframe. The variation is measured as the absolute difference
    (in pips) between the high and the low of each candle.

    Parameters
    ----------
    clO: CandleList object
         Used for calculation

    Returns
    -------
    float
    '''
    length = 0
    tot_diff_in_pips = 0
    for c in clO.data['candles']:
        high_val = c["high{0}".format(CONFIG.get('general', 'bit'))]
        low_val = c["low{0}".format(CONFIG.get('general', 'bit'))]
        diff = abs(high_val - low_val)
        tot_diff_in_pips = tot_diff_in_pips + float(
            calculate_pips(clO.data['instrument'], diff))
        length += 1

    return round(tot_diff_in_pips / length, 3)
Example #7
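# Assumed decorator: a yield-style makereport hook only takes effect when
# registered as a hookwrapper; without it, outcome.get_result() below has
# nothing to unwrap.
@pytest.hookimpl(hookwrapper=True)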
def pytest_runtest_makereport(item):
    """
    Extends the PyTest Plugin to take and embed screenshot in html report, whenever test fails.
    :param item:
    """
    pytest_html = item.config.pluginmanager.getplugin('html')
    outcome = yield
    report = outcome.get_result()
    extra = getattr(report, 'extra', [])

    if report.when == 'call' or report.when == "setup":
        xfail = hasattr(report, 'wasxfail')
        if (report.skipped and xfail) or (report.failed and not xfail):
            dirpng = CONFIG.get('dir_png', './')
            if not os.path.isdir(dirpng):
                os.mkdir(dirpng)
            file_name = dirpng + time.strftime(
                "%Y-%m-%d-%H_%M_%S", time.localtime(time.time())) + ".png"
            file_name1 = CONFIG.get('dir_png_html', './') + time.strftime(
                "%Y-%m-%d-%H_%M_%S", time.localtime(time.time())) + ".png"
            _capture_screenshot(file_name)
            if file_name:
                html = '<div><img src="%s" alt="screenshot" style="width:304px;height:228px;" ' \
                       'onclick="window.open(this.src)" align="right"/></div>' % file_name1
                extra.append(pytest_html.extras.html(html))
        report.extra = extra
Example #8
    def __init__(self, classifier, score_columns, experiment, all_test_target_scores,
                 all_test_decoy_scores, merge_results):

        self.classifier = classifier
        self.score_columns = score_columns
        self.mu, self.nu = calculate_params_for_d_score(classifier, experiment)
        self.merge_results = merge_results
        final_score = classifier.score(experiment, True)
        experiment["d_score"] = (final_score - self.mu) / self.nu
        lambda_ = CONFIG.get("final_statistics.lambda")

        all_tt_scores = experiment.get_top_target_peaks()["d_score"]

        use_pemp = CONFIG.get("final_statistics.emp_p")
        self.error_stat, self.target_pvalues = calculate_final_statistics(all_tt_scores,
                                                                          all_test_target_scores,
                                                                          all_test_decoy_scores,
                                                                          lambda_,
                                                                          use_pemp,
                                                                          False)

        self.number_target_pg = len(experiment.df[experiment.df.is_decoy.eq(False)])
        self.number_target_peaks = len(experiment.get_top_target_peaks().df)
        self.dvals = experiment.df.loc[(experiment.df.is_decoy.eq(True)), "d_score"]
        self.target_scores = experiment.get_top_target_peaks().df["d_score"]
        self.decoy_scores = experiment.get_top_decoy_peaks().df["d_score"]
Example #9
def stopTomcat():
    putWait("Shutting down Tomcat engine")
    # verify if running
    if not isTomcatRunning():
        putDoneOK("(not running)")
        return
    # ask and wait for shutdown
    filename = CONFIG.get("tomcat", "catalina_home") + "logs/catalina.out"
    sh_touch(filename)
    with open(filename, 'r') as logfile:
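        # seek(0, 2) jumps to the end of the log so only new output is read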
        logfile.seek(0, 2)
        # stop Tomcat
        commandStop = CONFIG.get("tomcat", "catalina_home") + "bin/shutdown.sh"
        if not sh_exec(commandStop):
            putDoneFail()
            return
        # wait for Tomcat’s unloading of the webapps
        while True:
            where = logfile.tell()
            line = logfile.readline()
            if not line:
                time.sleep(1)
                logfile.seek(where)
            else:
                if line.find("destroy") != -1:
                    break
    putDoneOK()
Example #10
def modifyPropertiesFile(propfilename, dic):
    # read existing file
    with open(propfilename, "r") as propfile:
        lines = propfile.readlines()
    # write new version
    with open(propfilename, "w") as propfile:
        # replace old values
        for line in lines:
            words = re.split("[=:]", line)
            # don’t touch lines without = or :
            if len(words) < 2:
                propfile.write(line)
                continue
            # get the key (part before first = or :)
            key = words[0].strip()
            if key in dic:
                val = dic[key]
                if not isinstance(val, basestring):
                    val = CONFIG.get(val[0], val[1])
                # beware =
                val = val.replace("=", "\\=")
                propfile.write(key + " = " + val + "\n")
                del dic[key]
            else:
                propfile.write(line)
        # add new variables
        for key in dic.keys():
            val = dic[key]
            if not isinstance(val, basestring):
                val = CONFIG.get(val[0], val[1])
            # beware =
            val = val.replace("=", "\\=")
            propfile.write(key + " = " + val + "\n")
    putDoneOK()
Example #11
def startTomcat():
    putWait("Starting up Tomcat engine")
    # verify if running
    if isTomcatRunning():
        putDoneOK("(already running)")
        return
    # ask and wait for startup
    filename = CONFIG.get("tomcat", "catalina_home") + "logs/catalina.out"
    sh_touch(filename)
    with open(filename, 'r') as logfile:
        logfile.seek(0, 2)
        # start Tomcat
        commandStart = CONFIG.get("tomcat", "catalina_home") + "bin/startup.sh"
        if not sh_exec(commandStart):
            putDoneFail()
            return
        # wait for Tomcat’s loading of the webapps
        while True:
            where = logfile.tell()
            line = logfile.readline()
            if not line:
                time.sleep(1)
                logfile.seek(where)
            else:
                if line.find("Server startup in") != -1:
                    break
    putDoneOK()
Example #12
def overlay_full_legend(metric, df):
    tqdm_bar_format = CONFIG.get('default', 'tqdm_bar_format')
    date_df = df[df[metric] > 0]
    date_df = date_df.groupby('date')[metric].size().reset_index(name='count')
    max_df = date_df.loc[date_df['count'].idxmax()]
    max_date = max_df['date']

    sel_coordinates = (int(CONFIG.get('default', 'legend_x1')),
                       int(CONFIG.get('default', 'legend_y1')),
                       int(CONFIG.get('default', 'legend_x2')),
                       int(CONFIG.get('default', 'legend_y2')))

    max_file = 'images/%s_%s.png' % (metric, max_date)
    max_img = Image.open(max_file).convert('RGBA')
    selection = max_img.crop(sel_coordinates)

    file_list = sorted(glob.glob('images/%s_20*.png' % metric))
    with tqdm(total=len(file_list) - 1, bar_format=tqdm_bar_format) as pbar:
        for idx, file in enumerate(file_list):
            if file != max_file:
                pbar.set_description(
                    datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' - ' +
                    'updating map image legends')
                pbar.update(1)
                this_file = Image.open(file).convert('RGBA')
                this_file.paste(selection, sel_coordinates)
                this_file.save(file)
Example #13
def configurePropertiesWebApp(pretty_name, filename, section_name, dic):
    putMessage("Configuring " + pretty_name + " (" + filename + ".properties) ...")
    webapp = CONFIG.get(section_name, "name")
    propfilename = (CONFIG.get("tomcat", "catalina_home") + "webapps/" + webapp +
                    "/WEB-INF/classes/" + filename + ".properties")
    waitForFile(propfilename)
    putWait("Modifying file " + filename + ".properties")
    modifyPropertiesFile(propfilename, dic)
Example #14
def execLDAP(msg, ldiffile):
    putWait(msg)
    if sh_exec("ldapadd -x -D\"cn=" + CONFIG.get("ldap", "login") + "," + CONFIG.get("ldap", "base_dn") + "\" " +
               "-w \"" + CONFIG.get("ldap", "password") + "\" -f " + ldiffile):
        putDoneOK()
        return True
    putDoneFail()
    return False
Example #15
def isTomcatRunning():
    try:
        cnx = urllib.urlopen( "http://" + CONFIG.get("global", "host") +
                              ":" + CONFIG.get("tomcat", "http_port") + "/" )
        cnx.close()
        return True
    except IOError:
        return False
Example #16
 def __init__(self):
     self.client = InfluxDBClient(
         CONFIG.get('influxdb', 'INFLUX_HOST'),
         CONFIG.get('influxdb', 'INFLUX_PORT'),
         CONFIG.get('influxdb', 'INFLUX_USER'),
         CONFIG.get('influxdb', 'INFLUX_PASSWORD'),
         CONFIG.get('influxdb', 'INFLUX_DATABASE'),
     )
Example #17
 def __init__(self):
     setup_logging(CONFIG.get('log_file'))
     self.redis = Redis()
     self.controller = Controller()
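     # The sensor and decision implementations are chosen by name in the
     # config and instantiated via a lookup in the module globals.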
     self.sensor = globals()[CONFIG.get('sensor')]()
     self.decision = globals()[CONFIG.get('decision')]()
     self.event = Event()
     signal.signal(signal.SIGTERM, self.cleanup)
Example #18
def data():
    try:
        global driver
        # Create the browser
        chrome_ops = Options()
        driver = webdriver.Chrome(options=chrome_ops)
        # Implicit wait
        driver.implicitly_wait(CONFIG.get('implicitly_wait', 5))
        driver.maximize_window()
        # Open the login page
        driver.get(CONFIG.get('url', ''))
    # If the browser fails to start, skip so the run can be retried
    except Exception as e:
        driver.close()
        pytest.skip(msg='Failed to create browser: ' + getError(), allow_module_level=True)

    # Verify that login succeeded
    try:
        # Enter the username
        input_name = waitFind(
            driver, By.XPATH,
            '''//*[@id="app"]/div/form/div[2]/div/div/input''')
        input_name.clear()
        input_name.send_keys(CONFIG.get('username', ''))
        # Enter the password
        input_passwork = waitFind(
            driver, By.XPATH,
            '''//*[@id="app"]/div/form/div[3]/div/div/input''')
        input_passwork.clear()
        input_passwork.send_keys(CONFIG.get('passwork', ''))
        # Click the login button
        log_in = waitFind(
            driver, By.XPATH,
            '''//button[@class='el-button el-button--primary']''')
        log_in.click()
        ass = waitFind(
            driver, By.XPATH,
            '''//*[@id="tags-view-container"]/div/div[1]/div/span''')
        # Check whether login succeeded ('个人中心' is the "Personal Center" page)
        if ass.text.strip() != '个人中心':
            driver.close()
            raise Exception('Failed to log in to the system')

        # Get the token from the cookies
        cookies = driver.get_cookies()
        for cookie in cookies:
            if cookie['name'] == 'vue_admin_template_token':
                CONFIG['token'] = cookie['value']
                break

    # Exception handling
    except Exception as e:
        driver.close()
        # On failure, skip all test cases
        pytest.skip(msg='Failed to log in to the system: ' + getError(), allow_module_level=True)

    return driver, CONFIG
Example #19
def gen_colorscale():
    large_color = Color(CONFIG.get('default', 'large_color'))
    small_color = Color(CONFIG.get('default', 'small_color'))
    colors = list(large_color.range_to(small_color, 7))
    colors_converted = []
    for color in colors:
        colors_converted.append(color.hex)
    colors_converted.append('#fff')
    return colors_converted[::-1]
Example #20
def prepare_trade(tb_obj, type, SL, ic, harea_sel, delta, add_pips):
    '''
    Prepare a Trade object
    and check if it is taken

    Parameters
    ----------
    tb_obj : TradeBot object
    type : str
        Type of trade: 'short' or 'long'
    SL : float
        Adjusted (by '__get_trade_type') SL price
    ic : Candle object
        Indecision candle for this trade
    harea_sel : HArea object
        HArea for this trade
    delta : Timedelta object
        Time that needs to be added to the indecision candle's time
    add_pips : int or None
        Number of pips above/below SL and entry price to consider for
        recalculating the SL and entry. Default: None

    Returns
    -------
    Trade object
    '''
    startO = ic.time + delta
    if type == 'short':
        # entry price will be the low of IC
        entry_p = getattr(ic, "low{0}".format(CONFIG.get('general', 'bit')))
        if add_pips is not None:
            SL = round(add_pips2price(tb_obj.pair,
                                      SL, add_pips), 4)
            entry_p = round(substract_pips2price(tb_obj.pair,
                                                 entry_p, add_pips), 4)
    elif type == 'long':
        # entry price will be the high of IC
        entry_p = getattr(ic, "high{0}".format(CONFIG.get('general', 'bit')))
        if add_pips is not None:
            entry_p = add_pips2price(tb_obj.pair,
                                     entry_p, add_pips)
            SL = substract_pips2price(tb_obj.pair,
                                      SL, add_pips)

    t = Trade(
        id='{0}.bot'.format(tb_obj.pair),
        start=startO.strftime('%Y-%m-%d %H:%M:%S'),
        pair=tb_obj.pair,
        timeframe=tb_obj.timeframe,
        type=type,
        entry=entry_p,
        SR=harea_sel.price,
        SL=SL,
        RR=CONFIG.getfloat('trade_bot', 'RR'),
        strat='counter')

    return t
Example #21
    def iter_semi_supervised_learning(self, train):
        fdr = CONFIG.get("semi_supervised_learner.iteration_fdr")
        lambda_ = CONFIG.get("semi_supervised_learner.iteration_lambda")
        td_peaks, bt_peaks = self.select_train_peaks(train, "classifier_score", fdr, lambda_)

        model = self.inner_learner.learn(td_peaks, bt_peaks, True)
        w = model.get_parameters()
        clf_scores = model.score(train, True)
        return w, clf_scores
Example #22
 def start_semi_supervised_learning(self, train):
     fdr = CONFIG.get("semi_supervised_learner.initial_fdr")
     lambda_ = CONFIG.get("semi_supervised_learner.initial_lambda")
     td_peaks, bt_peaks = self.select_train_peaks(train, "main_score", fdr, lambda_)
     model = self.inner_learner.learn(td_peaks, bt_peaks, False)
     w = model.get_parameters()
     clf_scores = model.score(train, False)
     clf_scores -= np.mean(clf_scores)
     return w, clf_scores
Example #23
    def apply_classifier(self, final_classifier, experiment, all_test_target_scores,
                         all_test_decoy_scores, table, p_score=False):

        lambda_ = CONFIG.get("final_statistics.lambda")

        mu, nu, final_score = self.calculate_params_for_d_score(final_classifier, experiment)
        experiment["d_score"] = (final_score - mu) / nu

        if CONFIG.get("final_statistics.fdr_all_pg"):
            all_tt_scores = experiment.get_target_peaks()["d_score"]
        else:
            all_tt_scores = experiment.get_top_target_peaks()["d_score"]

        df_raw_stat, num_null, num_total = calculate_final_statistics(
            all_tt_scores, all_test_target_scores, all_test_decoy_scores, lambda_)

        scored_table = self.enrich_table_with_results(table, experiment, df_raw_stat)

        if CONFIG.get("compute.probabilities"):
            logging.info( "" )
            logging.info( "Posterior Probability estimation:" )
            logging.info( "Estimated number of null %0.2f out of a total of %s. " % (num_null, num_total) )

            # Note that num_null and num_total are the sum of the
            # cross-validated statistics computed before, therefore the total
            # number of data points selected will be 
            #   len(data) /  xeval.fraction * xeval.num_iter
            # 
            prior_chrom_null = num_null * 1.0 / num_total
            number_true_chromatograms = (1.0-prior_chrom_null) * len(experiment.get_top_target_peaks().df)
            number_target_pg = len( Experiment(experiment.df[(experiment.df.is_decoy == False) ]).df )
            prior_peakgroup_true = number_true_chromatograms / number_target_pg

            logging.info( "Prior for a peakgroup: %s" % (number_true_chromatograms / number_target_pg))
            logging.info( "Prior for a chromatogram: %s" % str(1-prior_chrom_null) )
            logging.info( "Estimated number of true chromatograms: %s out of %s" % (number_true_chromatograms, len(experiment.get_top_target_peaks().df)) )
            logging.info( "Number of target data: %s" % len( Experiment(experiment.df[(experiment.df.is_decoy == False) ]).df ) )

            # pg_score = posterior probability for each peakgroup
            # h_score = posterior probability for the hypothesis that this peakgroup is true (and all other false)
            # h0_score = posterior probability for the hypothesis that no peakgroup is true

            pp_pg_pvalues = posterior_pg_prob(experiment, prior_peakgroup_true, lambda_=lambda_)
            experiment.df[ "pg_score"]  = pp_pg_pvalues
            scored_table = scored_table.join(experiment[["pg_score"]])

            allhypothesis, h0 = posterior_chromatogram_hypotheses_fast(experiment, prior_chrom_null)
            experiment.df[ "h_score"]  = allhypothesis
            experiment.df[ "h0_score"]  = h0
            scored_table = scored_table.join(experiment[["h_score", "h0_score"]])

        final_statistics = final_err_table(df_raw_stat)
        summary_statistics = summary_err_table(df_raw_stat)

        needed_to_persist = (final_classifier, mu, nu,
                             df_raw_stat.loc[:, ["svalue", "qvalue", "cutoff"]], num_null, num_total)
        return (summary_statistics, final_statistics, scored_table), needed_to_persist
Example #25
    def __init__(self):
        token = oauth.Token(
            key=CONFIG.get("auth_token_key"),
            secret=CONFIG.get("auth_token_secret")
        )
        consumer = oauth.Consumer(
            key=CONFIG.get("consumer_key"),
            secret=CONFIG.get("consumer_secret")
        )
        self.client = oauth.Client(consumer, token)
Example #26
    def _init_player(self, player):
        self.player_obj = player
        self.go_controls = {
            keys_dict[CONFIG.get('PlayerMovementControls', 'forward')]: 'forward',
            keys_dict[CONFIG.get('PlayerMovementControls', 'backward')]: 'backward',
            keys_dict[CONFIG.get('PlayerMovementControls', 'left')]: 'left',
            keys_dict[CONFIG.get('PlayerMovementControls', 'right')]: 'right',
        }
Example #28
 def __init__(self):
     AbstractLearner.__init__(self)
     c_size = int(CONFIG.get("classifier.cache_size", "500"))
     if CONFIG.get("classifier.weight_classes"):
         logging.info("===> doing weighted polySVM")
         self.classifier = sklearn.svm.SVC(cache_size=c_size, kernel="poly", class_weight="auto")
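         # NOTE: class_weight="auto" was renamed to "balanced" in newer
         # scikit-learn releases.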
     else:
         logging.info("===> doing non-weighted polySVM")
         self.classifier = sklearn.svm.SVC(cache_size=c_size, kernel="poly")
     self.scaler = sklearn.preprocessing.StandardScaler()
Example #29
    async def on_trainModel(self, request):
        res_data = await request.json()
        # export model to rasa folder
        result = await ExportProject.main(res_data['sessionId'], res_data['projectObjectId'], 'DEPLOY')

        from rasa.train import train_async
        import aiohttp

        print(result)

        base_path = CONFIG.get('api_gateway', 'DEPLOY_MODEL_PATH')
        config = "config.yml"
        training_files = "data/"
        domain = "domain.yml"
        output = "models/"

        base_path = base_path + res_data['projectObjectId'] + "/"

        config = base_path + config
        training_files = base_path + training_files
        domain = base_path + domain
        output = base_path + output

        model_path = await train_async(domain, config, [training_files], output)

        # Upload model to Rasa Server

        if model_path is not None:

            model_name = os.path.basename(model_path)
            load_model_path = "/app/models/"+res_data['projectObjectId']+"/models/"+model_name
            print(load_model_path)

            async with aiohttp.ClientSession() as session:
                async with session.put(CONFIG.get('api_gateway', 'RASA_URL'),
                                       data=json.dumps({'model_file': str(load_model_path)}),
                                       headers={'content-type': 'application/json'}
                                       ) as resp:
                    json_resp = await resp.json()
                    print("Response from Rasa {}".format(resp.status))

            result = await ProjectsModel.update_project_model({"object_id": str(res_data['projectObjectId']),
                                                               "model_name": model_name,
                                                               "state": "Published"})

            return web.json_response({"status": "Success", "message": "Model Published successfully"})
            #await sio.emit('publishMessage', {"status": "Success", "message": "Model Published successfully"}, namespace='/modelpublish')
            #result = await ProjectsModel.get_projects()
            #return web.json_response(result)
            #await sio.emit('respModelPublish', result, namespace='/modelpublish')
        else:
            await sio.emit('publishMessage', {"status": "Error", "message": "Error while training model"}, namespace='/modelpublish')
Example #30
def run_page(topargus, mbot):
    slog.debug("run_page alive")
    subject = 'TOPARGUS routine scheduled monitoring'
    contents = [
        'TOPARGUS_host: {0}'.format(topargus.default_index()),
        'TOPARGUS_name: {0}'.format(topargus.get_topargus_name()),
        'TOPARGUS_info: {0}'.format(topargus.get_topargus_net_info()),
    ]

    # Grab a screenshot from each dashboard page and inline it into the mail.
    pages = [
        (topargus.home, '[Home]'),
        (topargus.alarm, '[Alarms]'),
        (topargus.packet, '[Packet stats]'),
        (topargus.network, '[P2P network]'),
    ]
    for get_page, title in pages:
        ret = list(get_page())
        if len(ret) == 2 and ret[1] is not None and ret[1].endswith('png'):  # picture
            pic = mbot.make_pic_inline(ret[1])
            contents.append(title)
            contents.append(pic)
            contents.append("\n\n\n")

    contents.append("MAIL END")
    ret = mbot.send_mail(CONFIG.get('target_email_adr'), subject, contents)
    if ret:
        slog.info('send alarm_api mail to {0} ok'.format(
            json.dumps(CONFIG.get('target_email_adr'))))
        return True
    else:
        slog.warning('send alarm_api mail to {0} error'.format(
            json.dumps(CONFIG.get('target_email_adr'))))
        return False
Example #31
def get_df_slice(begin_date=None, end_date=None):
    data_file = os.path.join(CONFIG.get('default', 'data_dir'),
                             CONFIG.get('default', 'data_file'))
    df = pd.read_csv(data_file, dtype={'fips': str})
    if begin_date is None:
        df = df[df['date'] >= CONFIG.get('default', 'begin_date')]
    else:
        df = df[df['date'] >= begin_date]
    if end_date is not None:
        df = df[df['date'] <= end_date]
    return df
Example #32
def get_reddit():
    client_id = CONFIG.get('Reddit', 'client_id')
    client_secret = CONFIG.get('Reddit', 'client_secret')
    username = CONFIG.get('Reddit', 'username')
    password = CONFIG.get('Reddit', 'password')
    reddit = praw.Reddit(client_id=client_id,
                         client_secret=client_secret,
                         username=username,
                         password=password,
                         user_agent='toy bot v1.0 by /u/spoosman')
    return reddit
Example #33
    def _apply_scorer_out_of_core(self, pathes, delim, scorer):

        merge_results = CONFIG.get("multiple_files.merge_results")
        # TODO: merge_results has nothing to do with scorer, we need an extra
        # class for writing results, maybe lazy...:
        scorer.merge_results = merge_results
        delim_in = CONFIG.get("delim.in")
        scored_tables_lazy = scorer.score_many_lazy(pathes, delim_in)
        final_statistics, summary_statistics = scorer.get_error_stats()
        weights = scorer.classifier.get_parameters()
        return Result(None, None, scored_tables_lazy), None, weights
Example #35
def deployWar(pretty_name, section_name):
    webapp_name = CONFIG.get(section_name, "name")
    if CONFIG.isTrue("tomcat", "use_manager"):
        putWait("Deploying " + pretty_name)
        manageTomcat("deploy?path=/" + webapp_name + "&update=true&war=file://" + 
                     sh_pwd() + "/" + CONFIG.get(section_name, "repo"))
    else:
        putMessage("Deploying " + pretty_name + " ...")
        stopTomcat()
        copyWar(CONFIG.get(section_name, "repo"), webapp_name)
        startTomcat()
Example #36
def main_loop():
    bleMQTT()
    global scanner
    if CONFIG.get('bleDevice') == 1:
        scanner = BeaconReceiver(callback, CONFIG.get('serialPort'),
                                 CONFIG.get('baudrate'), CONFIG.get('timeout'))
    else:
        scanner = BeaconScanner(callback)
    scanner.start()
    heartbeatMQTT()
    heartbeat()
Example #37
    def __init__(self, inputlogger=Logger.getLogger(__name__, level=CONFIG.get('logging').get('level')),
                 environment=os.environ.get('ENV', CONFIG.get('default_env'))):
        """
        init sets up logger and credentials for api
        :param inputlogger: custom logger
        """
        self.logger = inputlogger

        credentials = CONFIG.get('aws').get(environment)
        self.aws_region = credentials.get("region")
        self.aws_buckets = credentials.get('buckets')
        self.client = None
Example #38
    async def on_trynow(self, request):
        res_data = await request.json()
        print("----------- Inside Try now --from SID {}--------------".format(res_data['sessionId']))
        result = await ExportProject.main(res_data['sessionId'], res_data['projectObjectId'], 'SESSION')
        print(result)

        if result is not None:
            return web.json_response({"status": "Error", "message": result})

        import rasa.model as model
        from rasa.core.agent import Agent
        from rasa.core.tracker_store import MongoTrackerStore
        from rasa.core.domain import Domain
        from rasa.train import train_async
        from rasa.utils.endpoints import EndpointConfig

        base_path = CONFIG.get('api_gateway', 'SESSION_MODEL_PATH')
        config = "config.yml"
        training_files = "data/"
        domain = "domain.yml"
        output = "models/"

        endpoints = EndpointConfig(url="http://action_server:5055/webhook")

        base_path = base_path + res_data['sessionId'] + "/"

        config = base_path + config
        training_files = base_path + training_files
        domain = base_path + domain
        output = base_path + output
        start_time = time.time()
        try:
            model_path = await train_async(domain, config, [training_files], output, additional_arguments={"augmentation_factor": 10})
            end_time = time.time()
            print("it took this long to run: {}".format(end_time - start_time))
            unpacked = model.get_model(model_path)
            domain = Domain.load(domain)
            _tracker_store = MongoTrackerStore(
                domain=domain,
                host=CONFIG.get('api_gateway', 'MONGODB_URL'),
                db=CONFIG.get('api_gateway', 'MONGODB_NAME'),
                username=None,
                password=None,
                auth_source="admin",
                collection="conversations",
                event_broker=None)
            print("***************  Actions Endpoint as per data ********** {}".format(endpoints.url))
            self.agent = Agent.load(unpacked, tracker_store=_tracker_store, action_endpoint=endpoints)
            return web.json_response({"status": "Success", "message": "Ready to chat"})
            #await sio.emit('chatResponse', {"status": "Success", "message": "Ready to chat"}, namespace='/trynow', room=sid)
        except Exception as e:
            print("Exception while try Now ---  "+str(e))
            #await sio.emit('chatResponse', {"status": "Error", "message": repr(e)}, namespace='/trynow', room=sid)
            return web.json_response({"status": "Error", "message": repr(e)})
Example #39
    def run(self):

        self.prefix = self.check_pathes()
        dirname = self.determine_output_dir_name()
        out_pathes = self.create_out_pathes(dirname)

        extra_writes = dict(self.extra_writes(dirname))

        to_check = list(v for p in out_pathes for v in p.values())
        to_check.extend(extra_writes.values())

        if not CONFIG.get("target.overwrite"):
            error = check_if_any_exists(to_check)
            if error:
                return False

        self.check_cols = ["transition_group_id", "run_id", "decoy"]
        if CONFIG.get("export.mayu"):
            self.check_cols += mayu_cols()
            # m_score is calculated by the learner and should not be in the
            # OpenSwathWorkflow output.
            if 'm_score' in self.check_cols:
                self.check_cols.remove('m_score')

        logging.info("config settings:")
        for k, v in sorted(CONFIG.config.items()):
            logging.info("    %s: %s" % (k, v))

        start_at = time.time()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            (result, scorer, weights) = self.run_algo()

        needed = time.time() - start_at

        set_pandas_print_options()
        self.print_summary(result)
        pvalues = None if scorer is None else scorer.target_pvalues
        self.save_results(result, extra_writes, out_pathes, pvalues)

        self.save_scorer(scorer, extra_writes)
        self.save_weights(weights, extra_writes)

        seconds = int(needed)
        msecs = int(1000 * (needed - seconds))
        minutes = int(needed / 60.0)

        print "NEEDED",
        if minutes:
            print minutes, "minutes and",

        print "%d seconds and %d msecs wall time" % (seconds, msecs)
        print
Example #40
def execDB(msg, db, query):
    putWait(msg)
    if sh_exec("mysql --host=" + CONFIG.get("db", "host") +
                   " --port=" + CONFIG.get("db", "port") +
                   " --user="******"db", "login") +
                   " --password="******"db", "password") +
                   " --database=" + db +
                   " --execute=\"" + query + "\""):
        putDoneOK()
        return True
    putDoneFail()
    return False
Example #41
    def _learn(self, experiment):
        is_test = CONFIG.get("is_test")
        if is_test:  # for reliable results
            experiment.df.sort("tg_id", ascending=True, inplace=True)

        learner = self.semi_supervised_learner
        ws = []

        neval = CONFIG.get("xeval.num_iter")
        num_processes = CONFIG.get("num_processes")
        all_test_target_scores = []
        all_test_decoy_scores = []

        logging.info("learn and apply scorer")
        logging.info("start %d cross evals using %d processes" %
                     (neval, num_processes))

        if num_processes == 1:
            for k in range(neval):
                (ttt_scores, ttd_scores,
                 w) = learner.learn_randomized(experiment)
                all_test_target_scores.extend(ttt_scores)
                all_test_decoy_scores.extend(ttd_scores)
                ws.append(w.flatten())
        else:
            pool = multiprocessing.Pool(processes=num_processes)
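            # Dispatch learn_randomized runs to the worker pool in batches of
            # at most num_processes until all neval iterations are consumed.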
            while neval:
                remaining = max(0, neval - num_processes)
                todo = neval - remaining
                neval -= todo
                args = ((learner, "learn_randomized", (experiment, )), ) * todo
                res = pool.map(unwrap_self_for_multiprocessing, args)
                ttt_scores = [ti for r in res for ti in r[0]]
                ttd_scores = [ti for r in res for ti in r[1]]
                ws.extend([r[2] for r in res])
                all_test_target_scores.extend(ttt_scores)
                all_test_decoy_scores.extend(ttd_scores)
        logging.info("finished cross evals")
        logging.info("")

        # only use scores from last iteration to build statistical model:
        if CONFIG.get("semi_supervised_learner.stat_best"):
            all_test_target_scores = ttt_scores
            all_test_decoy_scores = ttd_scores

        # we only use weights from last iteration if indicated:
        if CONFIG.get("semi_supervised_learner.use_best"):
            ws = [ws[-1]]

        final_classifier = self.semi_supervised_learner.averaged_learner(ws)

        return final_classifier, all_test_target_scores, all_test_decoy_scores
Example #42
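 # Presumably nested inside a WSGI middleware, where start_response and cfg
 # come from the enclosing scope.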
 def add_cors_headers(status, headers, exc_info=None):
     headers = Headers(headers)
     headers.add("Access-Control-Allow-Origin",
                 get_origin(status, headers))
     headers.add("Access-Control-Allow-Headers",
                 cfg.get("cors_headers"))
     headers.add("Access-Control-Allow-Credentials",
                 cfg.get("cors_credentials"))
     headers.add("Access-Control-Allow-Methods",
                 cfg.get("cors_methods"))
     headers.add("Access-Control-Expose-Headers",
                 cfg.get("cors_expose_headers"))
     return start_response(status, headers.to_list(), exc_info)
Example #44
    def build_instance_dict(self, ins_dict, notification):

        ins_dict['tenant_id'] = notification['tenant_id']
        ins_dict['nova_instance_id'] = notification[constants.nova_instance_id]
        ins_dict['name'] = notification['display_name']
        ins_dict['user'] = CONFIG.get("DEFAULT", "vadx_username")
        ins_dict['password'] = CONFIG.get("DEFAULT", "vadx_password")
        ins_dict['status'] = notification['state']
        ins_dict['communication_type'] = CONFIG.get("DEFAULT", "vadx_communication_type")
        #ins_dict['created_time']=datetime.strptime(notification['created_at'],'%Y-%m-%d %H:%M:%S')
        ins_dict['created_time'] = self._format_date(notification['created_at'])
        ins_dict['status_description'] = notification['state_description']
        return ins_dict
Example #45
def http_retriable_request(verb, url, headers={}, authenticate=False, params={}):
    """
    Sends an HTTP request, with automatic retrying in case of HTTP Errors 500 or ConnectionErrors
    _http_retriable_request('POST', 'http://cc.cloudcomplab.ch:8888/app/', headers={'Content-Type': 'text/occi', [...]}
                            , authenticate=True)
    :param verb: [POST|PUT|GET|DELETE] HTTP keyword
    :param url: The URL to use.
    :param headers: Headers of the request
    :param authenticate: When True, send basic-auth credentials (used for
                         requests requiring authentication, e.g. CC requests)
    :param params: Query parameters of the request
    :return: result of the request
    """
    LOG.debug(verb + ' on ' + url + ' with headers ' + headers.__repr__())

    auth = ()
    if authenticate:
        cfg_user = CONFIG.get('cloud_controller', 'user')
        user = os.environ.get('CC_USER', cfg_user)
        cfg_pwd = CONFIG.get('cloud_controller', 'pwd')
        pwd = os.environ.get('CC_PASSWORD', cfg_pwd)

        auth = (user, pwd)

    if verb in ['POST', 'DELETE', 'GET', 'PUT']:
        try:
            r = None
            if verb == 'POST':
                if authenticate:
                    r = requests.post(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.post(url, headers=headers, params=params)
            elif verb == 'DELETE':
                if authenticate:
                    r = requests.delete(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.delete(url, headers=headers, params=params)
            elif verb == 'GET':
                if authenticate:
                    r = requests.get(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.get(url, headers=headers, params=params)
            elif verb == 'PUT':
                if authenticate:
                    r = requests.put(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.put(url, headers=headers, params=params)
            r.raise_for_status()
            return r
        except requests.HTTPError as err:
            LOG.error('HTTP Error: should do something more here!' + err.message)
            raise err
Example #46
class ServerSettingsForm(Form):
    inputs = {
        'key_path':
        FormTextInput(label_text='Key Path',
                      default=CONFIG.get('server', 'key_path')),
        'server_dns':
        FormTextInput(label_text='Server DNS',
                      default=CONFIG.get('server', 'server_dns')),
    }

    def submit(self):
        CONFIG.set('server', 'key_path', self.key_path.get())
        CONFIG.set('server', 'server_dns', self.server_dns.get())
        save_config()
Example #47
 def __init__(self,
              webdriver,
              env=ENV,
              application="Enterprise Applications",
              inputlogger=Logger.getLogger(
                  name=__name__, level=CONFIG.get('logging').get('level'))):
     self.awsapi = AwsAPI(environment=ENV)
     self.logger = inputlogger
     self.application_name = application
     self.webdriver = webdriver
     self.testenv = env
     self.config = CONFIG.get('aws').get(ENV)
     self.perf_metric_sqs_url = self.config.get('perf_metric_sqs_url')
     self.perf_metric_s3 = self.config.get('perf_metric_s3')
Example #48
def manageTomcat(query):
    try:
        url = ("http://" + CONFIG.get("tomcat", "login") + ":" + CONFIG.get("tomcat", "password") +
               "@" + CONFIG.get("global", "host") + ":" + CONFIG.get("tomcat", "http_port") +
               "/" + CONFIG.get("tomcat", "manager_path") + "/")
        cnx = urllib.urlopen(url + query)
        ret = cnx.readline()
        cnx.close()
        if ret[0:2] == "OK":
            putDoneOK()
        else:
            putDoneFail(error=ret)
    except IOError as e:
        putDoneFail(error=e)
Example #49
def setup(config):
    read_config(config)

    hosts = str(CONFIG.get('general', 'HOSTS')).split(',')

    indicator_module = import_module('indicators')
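    # Indicator and alerter class names are comma-separated lists in the
    # config; each class is instantiated dynamically via getattr.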
    indicator_classes = str(CONFIG.get('general', 'indicators')).split(',')
    indicators = [getattr(indicator_module, c_name)() for c_name in indicator_classes]

    alerter_module = import_module('alerters')
    alerter_classes = str(CONFIG.get('general', 'ALERTERS')).split(',')
    alerters = [getattr(alerter_module, c_name)() for c_name in alerter_classes]

    return hosts, indicators, alerters
Example #50
    def _learn(self, experiment):
        is_test = CONFIG.get("is_test")
        if is_test:  # for reliable results
            experiment.df.sort("tg_id", ascending=True, inplace=True)

        learner = self.semi_supervised_learner
        ws = []

        neval = CONFIG.get("xeval.num_iter")
        num_processes = CONFIG.get("num_processes")
        all_test_target_scores = []
        all_test_decoy_scores = []

        logging.info("learn and apply scorer")
        logging.info("start %d cross evals using %d processes" % (neval, num_processes))

        if num_processes == 1:
            for k in range(neval):
                (ttt_scores, ttd_scores, w) = learner.learn_randomized(experiment)
                all_test_target_scores.extend(ttt_scores)
                all_test_decoy_scores.extend(ttd_scores)
                ws.append(w.flatten())
        else:
            pool = multiprocessing.Pool(processes=num_processes)
            while neval:
                remaining = max(0, neval - num_processes)
                todo = neval - remaining
                neval -= todo
                args = ((learner, "learn_randomized", (experiment, )), ) * todo
                res = pool.map(unwrap_self_for_multiprocessing, args)
                ttt_scores = [ti for r in res for ti in r[0]]
                ttd_scores = [ti for r in res for ti in r[1]]
                ws.extend([r[2] for r in res])
                all_test_target_scores.extend(ttt_scores)
                all_test_decoy_scores.extend(ttd_scores)
        logging.info("finished cross evals")
        logging.info("")

        # only use scores from last iteration to build statistical model:
        if CONFIG.get("semi_supervised_learner.stat_best"):
            all_test_target_scores = ttt_scores
            all_test_decoy_scores = ttd_scores

        # we only use weights from last iteration if indicated:
        if CONFIG.get("semi_supervised_learner.use_best"):
            ws = [ws[-1]]

        final_classifier = self.semi_supervised_learner.averaged_learner(ws)

        return final_classifier, all_test_target_scores, all_test_decoy_scores
Example #51
 def postConfigure(self):
     self.setSecuredURL()
     self.cset("db_jndi", "ETADB")
     url = self.cget("url")
     # configure database connection for callbacks
     murl = "jdbc:mysql://" + CONFIG.get("db", "host") + ":" + CONFIG.get("db", "port") + "/" + self.cget("db_name") + "?autoReconnect=true"
     self.cset("callback_db_url", murl)
     epcis_url = CONFIG.get("epcis", "url")
     if not epcis_url.endswith("/"):
         epcis_url += "/"
     CONFIG.set("epcis", "query_url", epcis_url + "query")
     CONFIG.set("epcis", "capture_url", epcis_url + "capture")
     CONFIG.set("epcilon", "subscription_url", url + "ided_query")
     CONFIG.set("epcilon", "iota_ided", "True")
Example #52
    def run(self):

        self.prefix = self.check_pathes()
        dirname = self.determine_output_dir_name()
        out_pathes = self.create_out_pathes(dirname)

        extra_writes = dict(self.extra_writes(dirname))

        to_check = list(v for p in out_pathes for v in p.values())
        to_check.extend(extra_writes.values())

        if not CONFIG.get("target.overwrite"):
            error = check_if_any_exists(to_check)
            if error:
                return False

        self.check_cols = ["transition_group_id", "run_id", "decoy"]
        if CONFIG.get("export.mayu"):
            self.check_cols += mayu_cols()

        logging.info("config settings:")
        for k, v in sorted(CONFIG.config.items()):
            logging.info("    %s: %s" % (k, v))

        start_at = time.time()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            (result, scorer, weights) = self.run_algo()

        compress = CONFIG.get("target.compress_results")
        needed = time.time() - start_at

        set_pandas_print_options()
        self.print_summary(result)
        self.save_results(result, extra_writes, out_pathes)

        self.save_scorer(scorer, extra_writes)
        self.save_weights(weights, extra_writes)

        seconds = int(needed)
        msecs = int(1000 * (needed - seconds))
        minutes = int(needed / 60.0)

        print "NEEDED",
        if minutes:
            print minutes, "minutes and",

        print "%d seconds and %d msecs wall time" % (seconds, msecs)
        print
Example #53
def http_retriable_request(verb, url, headers={}, authenticate=False, params={}):
    """
    Sends an HTTP request, with automatic retrying in case of HTTP Errors 500 or ConnectionErrors
    _http_retriable_request('POST', 'http://cc.cloudcomplab.ch:8888/app/', headers={'Content-Type': 'text/occi', [...]}
                            , authenticate=True)
    :param verb: [POST|PUT|GET|DELETE] HTTP keyword
    :param url: The URL to use.
    :param headers: Headers of the request
    :param authenticate: When True, send basic-auth credentials (used for
                         requests requiring authentication, e.g. CC requests)
    :param params: Query parameters of the request
    :return: result of the request
    """
    LOG.debug(verb + ' on ' + url + ' with headers ' + headers.__repr__())

    auth = ()
    if authenticate:
        user = CONFIG.get('cloud_controller', 'user')
        pwd = CONFIG.get('cloud_controller', 'pwd')
        auth = (user, pwd)

    if verb in ['POST', 'DELETE', 'GET', 'PUT']:
        try:
            r = None
            if verb == 'POST':
                if authenticate:
                    r = requests.post(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.post(url, headers=headers, params=params)
            elif verb == 'DELETE':
                if authenticate:
                    r = requests.delete(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.delete(url, headers=headers, params=params)
            elif verb == 'GET':
                if authenticate:
                    r = requests.get(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.get(url, headers=headers, params=params)
            elif verb == 'PUT':
                if authenticate:
                    r = requests.put(url, headers=headers, auth=auth, params=params)
                else:
                    r = requests.put(url, headers=headers, params=params)
            r.raise_for_status()
            return r
        except requests.HTTPError as err:
            LOG.error('HTTP Error: should do something more here!' + err.message)
            raise err
Example #54
def create_app(config_name):
    """Creates the app object with the appropriate configuration settings.

    Args:
        {string} config_name: a string representing a dictionary key matched to
        specific configuration options applied to the initialized app.

    Returns:
        the initialized app object
    """

    # Reference global objects for clarity.
    global bootstrap
    global db

    # Create a blank Flask app.
    app = Flask(__name__)

    # Add configuration options to the app.
    config_settings = CONFIG.get(config_name)
    if config_settings:
        app.config.from_object(config_settings)

    # Initialize the extension objects using the newly configured Flask app.
    bootstrap.init_app(app)
    db.init_app(app)

    # Attach URL routes and custom error page handlers to the app.
    from createblueprint import bp_main
    app.register_blueprint(bp_main)

    # Return a fully configured app instance with routes and error handling.
    return app
Example #55
    def createLdifs(self):
        utils.writeFile("Creating the schema as a ldif file (user.ldif)", "user.ldif", """
dn: cn=user,cn=schema,cn=config
objectClass: olcSchemaConfig
cn: user
olcAttributeTypes: ( 1.1.2.1.1 NAME '%(owner)s' DESC 'Owner ID' SUP name )
olcAttributeTypes: ( 1.1.2.1.2 NAME '%(alias)s' DESC 'Alias DN' SUP name )
olcObjectClasses: ( 1.1.2.2.1 NAME 'user' DESC 'Define user' SUP top STRUCTURAL MUST ( %(uid)s $ %(owner)s ) MAY ( %(alias)s ) )
""" % {"uid": self.cget("user_id"), "owner": self.cget("attribute_owner"), "alias": self.cget("attribute_alias")})
        group_value = self.cget("user_group").split("=")[-1]
        utils.writeFile("Creating the user group as a ldif file (usergroup.ldif)", "usergroup.ldif", """
dn: %(group)s,%(dn)s
objectclass: top
objectclass: organizationalUnit
ou: %(group_val)s
description: users
""" % {"group": self.cget("user_group"), "group_val": group_value, "dn": self.cget("base_dn")} )
        utils.writeFile("Creating the user 'superadmin' as a ldif file (superadmin.ldif)", "superadmin.ldif", """
dn: %(uid)s=superadmin,%(group)s,%(dn)s
objectclass: top
objectclass: user
%(uid)s: superadmin
%(owner)s: superadmin
""" % {"uid": self.cget("user_id"), "group": self.cget("user_group"), "dn": self.cget("base_dn"), "owner": self.cget("attribute_owner")} )
        utils.writeFile("Creating the user '%(anonymous)s' as ldif file (anonymous.ldif)", "anonymous.ldif", """
dn: %(uid)s=%(anonymous)s,%(group)s,%(dn)s
objectclass: top
objectclass: user
%(uid)s: %(anonymous)s
%(owner)s: anonymous
""" % {"anonymous": CONFIG.get("global", "anonymous_user"), "uid": self.cget("user_id"), "group": self.cget("user_group"), "dn": self.cget("base_dn"), "owner": self.cget("attribute_owner")} )
Example #56
def participate(experiment, alternatives, client_id,
    force=None,
    record_force=False,
    traffic_fraction=None,
    prefetch=False,
    datetime=None,
    redis=None):

    exp = Experiment.find_or_create(experiment, alternatives, traffic_fraction=traffic_fraction, redis=redis)

    alt = None
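    # Precedence: an explicit force wins, then the control when the library
    # is disabled, then a recorded winner, then normal client assignment.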
    if force and force in alternatives:
        alt = Alternative(force, exp, redis=redis)

        if record_force:
            client = Client(client_id, redis=redis)
            alt.record_participation(client, datetime)

    elif not cfg.get('enabled', True):
        alt = exp.control
    elif exp.winner is not None:
        alt = exp.winner
    else:
        client = Client(client_id, redis=redis)
        alt = exp.get_alternative(client, dt=datetime, prefetch=prefetch)

    return alt
Example #57
def isActiveMQRunning():
    try:
        cnx = urllib.urlopen(CONFIG.get("activemq", "admin_url"))
        cnx.close()
        return True
    except IOError:
        return False
Example #58
    def postUnpack(self):
        if self.cget("use_gamma"):
            lambda_path = CONFIG.get("tomcat", "catalina_home") + "webapps/" + self.cget("name")
            gamma_path = self.cget("gamma_path")
            utils.sh_mkdir_p(gamma_path)
            detar_command = "tar -C " + gamma_path + " -xaf " + self.cget("gamma_repo")
            if utils.sh_exec(detar_command):
                gamma_path = gamma_path + "/GaMMa"
                utils.sh_cp(gamma_path+"/src/scripts/gamma.js", lambda_path+"/scripts")
                utils.sh_cp(gamma_path+"/src/styles/gamma-style.css", lambda_path+"/styles")
                openlayers_repo = gamma_path + "/src/OpenLayers-2.12.tar.gz"
                detar_command_openlayers = "tar -C " + lambda_path + " -xaf " + openlayers_repo
                if utils.sh_exec(detar_command_openlayers):
                    jsp_queryepcis = lambda_path + "/jsp/pages/queryepcis.jsp"
                    jsp_trace = lambda_path + "/jsp/pages/trace.jsp"
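                    # The sed script injects the OpenLayers/GaMMa CSS and JS
                    # includes before </head>, and a map <div> plus init call
                    # before </body>, in each JSP page.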
                    cmd = """sed -i '
/<\/head>/i\\
    <link rel="stylesheet" type="text/css" href="OpenLayers-2.12/theme/default/style.css">\\
    <link rel="stylesheet" type="text/css" href="styles/gamma-style.css">\\
    <script type="text/javascript" src="./OpenLayers-2.12/OpenLayers.js"></script>\\
    <script type="text/javascript" src="scripts/gamma.js"></script>' %(file)s
sed -i '
/<\/body>/i\\
<div id="map" class="smallmap"></div>\\
<script type="text/javascript">\\
    gamma_init(); /* OpenLayers and div map init */\\
    initShowOnMap("eventItems");\\
</script>' %(file)s """
                    utils.sh_exec(cmd % dict(file=jsp_queryepcis))
                    utils.sh_exec(cmd % dict(file=jsp_trace))
Example #59
def toggle_experiment_archive(experiment_name):
    experiment = find_or_404(experiment_name)
    if experiment.is_archived():
        experiment.unarchive()
    else:
        experiment.archive()

    return redirect(cfg.get("sixpack_ui_domain", '') + url_for('details', experiment_name=experiment.name))
Example #60
def is_ignored_ip(ip_address):
    # Ignore invalid/local IP addresses
    try:
        inet_aton(unquote(ip_address))
    except:
        return False  # TODO Same as above not sure of default

    return unquote(ip_address) in cfg.get('ignored_ip_addresses')