Example #1
def testing_b2(df, min_max_scaler_all, model, weight):
    col = list(df.columns.values)
    X = df[col[1:-1]].to_numpy()
    Y = df[col[-1]].to_numpy()
    # Evaluate only when both classes appear in the labels.
    if Y.tolist().count(1) != 0 and Y.tolist().count(0) != 0:
        X = min_max_scaler_all.transform(X)
        Y_pred_proba = model.predict_proba(X)[:, 1]
        # Threshold the positive-class probabilities at `weight`; copy
        # first so the original probabilities are not overwritten in place.
        Y_pred = Y_pred_proba.copy()
        Y_pred[Y_pred >= weight] = 1
        Y_pred[Y_pred < weight] = 0
        tp, fp, fn, tn, error = check(Y, Y_pred)
        mcc, acc, pre, rec, spe, f1m, auc = metrics.main(
            tp, fp, fn, tn, Y, Y_pred)
        print('\t'.join(str(v) for v in (
            mcc, acc, pre, rec, spe, f1m, auc,
            Y.tolist().count(1), Y.tolist().count(0))))
    else:
        print()
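A usage sketch for testing_b2 (the CSV file names, scaler, and classifier below are hypothetical stand-ins; the original project's training code is not shown on this page):

import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

# Hypothetical layout: first column is an ID, middle columns are
# features, and the last column is the 0/1 label.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

scaler = MinMaxScaler().fit(train.iloc[:, 1:-1])
model = LogisticRegression().fit(
    scaler.transform(train.iloc[:, 1:-1]), train.iloc[:, -1])

# Classify as positive when P(y=1) >= 0.4 rather than the default 0.5.
testing_b2(test, scaler, model, weight=0.4)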
Example #2
def main(config_file=None):
	if config_file is None:
		logging.error('must specify config file as first parameter, exiting')
		exit()
	logging.info('main started')
	logging.info('pulling data started')
	pulldata.main(config_file=config_file)
	logging.info('pulling data finished')
	logging.info('starting metrics')
	metrics.main(config_file=config_file)
	logging.info('finished metrics')
	logging.info('getting recommended portfolio')
	recommended_portfolios.main(config_file=config_file)
	logging.info('finished getting recommended portfolio')
	logging.info('transferring to robinhood')
	robinhoodtransfer.main(config_file=config_file)
	logging.info('finished transferring to robinhood')
	logging.info('buying and selling stocks')
	traderobinhood.main(config_file=config_file)
	logging.info('finished buying and selling')
	logging.info('updating spreadsheets')
	performance.main(config_file=config_file)
	logging.info('finished updating spreadsheets')
	logging.info('main finished')
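A minimal entry point for this pipeline might look like the following (a sketch; it assumes the config file path arrives as the script's first command-line argument, which is not shown in the original source):

import sys

if __name__ == '__main__':
	main(config_file=sys.argv[1] if len(sys.argv) > 1 else None)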
Example #3
def testing(df, min_max_scaler_all, model):
    col = list(df.columns.values)
    X = df[col[1:-1]].to_numpy()
    Y = df[col[-1]].to_numpy()
    # Evaluate only when both classes appear in the labels.
    if Y.tolist().count(1) != 0 and Y.tolist().count(0) != 0:
        X = min_max_scaler_all.transform(X)
        # Predict once and reuse the result for both scoring calls.
        Y_pred = model.predict(X)
        tp, fp, fn, tn, error = check(Y, Y_pred)
        mcc, acc, pre, rec, spe, f1m, auc = metrics.main(
            tp, fp, fn, tn, Y, Y_pred)
        print('\t'.join(str(v) for v in (
            mcc, acc, pre, rec, spe, f1m, auc,
            Y.tolist().count(1), Y.tolist().count(0))))
    else:
        print()
Example #4
    base_path = config.project_root
    logging.info(base_path)
    model_path = config.weights_root
    logging.info(model_path)

    logging.warning('EVALUATING ON TEST SET')
    input_path = config.test_data_root
    output_path = os.path.join(model_path, 'predictions')

    path_pred = os.path.join(output_path, 'prediction')
    utils.makefolder(path_pred)
    path_eval = os.path.join(output_path, 'eval')
        
    # True if ground-truth images exist for the test set; when True,
    # an evaluation step (eval) is run after prediction.
    gt_exists = config.gt_exists

    init_iteration = score_data(input_path,
                                output_path,
                                model_path,
                                config=config,
                                do_postprocessing=True,
                                gt_exists=gt_exists)


    if gt_exists:
        logging.info('Evaluation of the test images')
        path_gt = os.path.join(config.test_data_root, 'mask')
        metrics.main(path_gt, path_pred, path_eval)
Example #5
def getReport():
    return jsonify({
        'reported': True,
        'report': metrics.main(True, session['username'])
    })
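getReport reads like a Flask view: jsonify and session come from Flask, and metrics.main(True, username) is assumed to return a JSON-serializable report for the logged-in user. A hypothetical registration (the route path and app object are illustrative, not from the original project):

import metrics  # the project-local module this page documents

from flask import Flask, jsonify, session

app = Flask(__name__)
app.secret_key = 'change-me'  # sessions require a secret key

@app.route('/api/report')  # hypothetical route path
def getReport():
    return jsonify({
        'reported': True,
        'report': metrics.main(True, session['username'])
    })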
Example #6
def main():
    base = parse('2014-09-02')
    numdays = 58
    date_list = [base + timedelta(days=x) for x in range(0, numdays)]
    for date in date_list:
        metrics.main(date.strftime("%Y-%m-%d"))
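The same schedule can be written with pandas if it is available in the project (a sketch; metrics is the project-local module used throughout this page):

import pandas as pd

# Equivalent loop over the 58 days starting 2014-09-02.
for date in pd.date_range('2014-09-02', periods=58):
    metrics.main(date.strftime('%Y-%m-%d'))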
Example #7
def process(preview):
    """Convert a video file to music.

    This first reads the paths of all helper tools,
    as well as the track-specific settings influencing MIDI generation.
    The video file must be already selected by the user.
    If preview is True, only the MIDI file will be created and played back.
    Otherwise a copy of the original video with the MIDI track will be made.
    """
    modified = False
    # Prompting the user for helper executables
    if window.config["old_pythonpath"] == "":
        target = filedialog.askopenfilename(
            title="Please locate the Python 2.7 interpreter",
            initialdir=os.getcwd(),
            filetypes=(("Python executable", "*.exe"), ))
        window.config["old_pythonpath"] = target
        modified = True
    if window.config["ffmpegpath"] == "":
        target = filedialog.askopenfilename(
            title="Please locate the ffmpeg binary",
            initialdir=os.getcwd(),
            filetypes=(("ffmpeg executable", "*.exe"), ))
        window.config["ffmpegpath"] = target
        modified = True
    if window.config["vlcpath"] == "":
        target = filedialog.askopenfilename(
            title="Please locate the VLC media player",
            initialdir=os.getcwd(),
            filetypes=(("VLC executable", "*.exe"), ))
        window.config["vlcpath"] = target
        modified = True
    # Update the config file, if needed
    if modified:
        with open("config.txt", "w") as file:
            for key in window.config:
                file.write(key + "=" + window.config[key] + "\n")

    settings = []
    instr = [
        "piano", "guitar", "cello", "flute", "vibraphone", "marimba",
        "strings", "drums"
    ]
    for i in range(0, window.index, window.con_count):
        temp = {}
        temp["muted"] = window.vars[i + 1].get()
        if temp["muted"] == 1:
            continue  # Skipping muted tracks
        temp["feature"] = window.vars[i].get()
        temp["instrument"] = str(instr.index(window.vars[i + 2].get()))
        temp["controls"] = window.vars[i + 3].get()
        temp["scale"] = window.vars[i + 4].get()
        settings.append(dict(temp))
    # Launch the processing sequence
    metrics.main(window, settings, preview)
    if not preview:
        filename = filedialog.asksaveasfilename(title="Save file...",
                                                initialdir=os.getcwd(),
                                                filetypes=(("MP4 videos",
                                                            "*.mp4"), ))
        if filename:
            if ".mp4" not in filename:
                filename += ".mp4"
            os.replace(window.scriptpath + "\\test.mp4", filename)
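A matching reader for the config.txt format written above might look like this (a sketch assuming one key=value pair per line; load_config is a hypothetical helper, not part of the original source):

def load_config(path="config.txt"):
    # Parse one key=value pair per line into a dict.
    config = {}
    with open(path) as file:
        for line in file:
            line = line.strip()
            if line and "=" in line:
                key, value = line.split("=", 1)
                config[key] = value
    return config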
Example #8
def record_metrics(scope):
    # Partial report stored in update.
    update = metrics.main(False, 'metrics-tracker')

    # Load a list of present charts in DB.
    charts = [chart[0] for chart in to_sql('SELECT name FROM sqlite_master WHERE type = "table";', "r", "chart_data.db")['details']]

    # Used to convert hours, days, and weeks to hours.
    hours_to_other = {
        'hours': 1,
        'days': 24,
        'weeks': 168
    }

    """
        Write data to DB. table_prefix and scope determine which table.
        table_prefix should look like "section_optionalSubSection_". Ex. "cpu_" or " mem_Swap_".
        table_prefix and data are both strings (data is CSV).
    """
    def record(table_prefix, data, keys):
        # Determine table to write to.
        table = table_prefix + "_" + scope

        # If table doesn't exist it is created.
        if table not in charts:
            # Build table creation command and then run it.
            create_table = "CREATE TABLE '" + table + "' ('time' DATETIME"
            for key in keys:
                create_table += ", '" + key + "' NUMERIC"
            create_table += ")"
            to_sql(create_table, 'w', 'chart_data.db')

        # Insert data into table.
        to_sql("INSERT INTO '" + table + "' VALUES (CURRENT_TIMESTAMP" + data + ");" , 'w', 'chart_data.db')

        # Clean old entries
        oldest_time = datetime.now() - timedelta(hours=int(record_age_limits[scope]) * hours_to_other[scope])
        oldest_time_str = oldest_time.strftime('%Y-%m-%d %H:%M:%S')
        to_sql('DELETE from "' + table + '" where time < strftime("%Y-%m-%d %H:%M:%S", "' + oldest_time_str + '")', 'w', 'chart_data.db')

    """
        Prepares data (a dictionary) to be entered into an SQL command.
        Returns (String of CSV as entered in SQL, List of keys to correspond with the CSV).
    """
    def dict_to_sql_part(data):
        # Initiate return values.
        sql_part = ""
        keys = []

        # Iterate through data building return values.
        for key in data:
            sql_part += ', ' + data[key]
            keys.append(key)

        # Return items ready for SQL entry.
        return (sql_part, keys)

    # Process and record info for each monitored section.

    # Sensors section.
    for device in update["sensors"]:
        (sens_update, keys) = dict_to_sql_part(update['sensors'][device]['values'])
        record('sens_' + update["sensors"][device]["name0"], sens_update, keys)

    # Mem section.
    for mem_group in update["memory"]:
        (mem_update, keys) = dict_to_sql_part(update['memory'][mem_group])
        record('mem_' + mem_group, mem_update, keys)

    # CPU section.
    (cpu_update, keys) = dict_to_sql_part(update['cpu'])
    record('cpu', cpu_update, keys)

    # Storage section.
    processed_sto = {}
    for ld in update['logical_volumes']:
        processed_sto[ld['mount_point'].replace('/', '_')] = ld['use_percent'][:-1]
    (sto_update, keys) = dict_to_sql_part(processed_sto)
    record('sto', sto_update, keys)
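record builds its SQL by string concatenation, which works for this trusted local data; if any values ever come from outside, sqlite3 parameter substitution is the safer pattern. A minimal sketch (insert_row is a hypothetical helper; parameter markers cover values only, so identifiers such as the table name still need separate validation):

import sqlite3

def insert_row(table, values, db_path="chart_data.db"):
    # `values` is a sequence of numbers; `table` must be validated by
    # the caller, since "?" markers cannot stand in for identifiers.
    placeholders = ", ".join("?" for _ in values)
    with sqlite3.connect(db_path) as conn:
        conn.execute(
            'INSERT INTO "%s" VALUES (CURRENT_TIMESTAMP, %s)'
            % (table, placeholders),
            list(values),
        )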