Code Example #1
File: app.py Project: Amine-Zitoun/hardpick
def res():
    comp = request.args.get('comp', None)
    budget = request.args.get('budget', None)
    print(comp, budget)
    # Searches for mega pc
    mega = search_megapc(comp, budget)
    mega_prd = mega['prd'].encode('ascii', 'ignore').decode('unicode_escape')

    # Searches for sbs
    sbs = search_sbs(comp, budget)
    sbs_prd = sbs['prd'].encode('ascii', 'ignore').decode('unicode_escape')

    # Searches for extreme
    extreme = search_extreme(comp, budget)
    extreme_prd = extreme['prd'].encode('ascii',
                                        'ignore').decode('unicode_escape')

    # Searches for tunisianet
    tn = search_tunisia(comp, budget)
    tn_prd = tn['prd'].encode('ascii', 'ignore').decode('unicode_escape')

    win1 = benchmark(sbs, tn, comp)
    win2 = benchmark(mega, extreme, comp)

    final = benchmark(win1, win2, comp)
    result = {'data': [mega, sbs, extreme, tn], 'win': final}
    return jsonify(result)
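
This view relies on Flask's request and jsonify, which the snippet does not import, and on the search_* and benchmark helpers defined elsewhere in the project. A minimal sketch of the wiring it assumes (the route path is a guess, not taken from the source):

# Hypothetical wiring; the route path '/res' is an assumption.
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/res')
def res():
    comp = request.args.get('comp', None)
    budget = request.args.get('budget', None)
    ...  # body as in the example above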
Code Example #2
File: manyrecurrent.py Project: asley/schooltool
def main():
    print "ZCML took %.3f seconds." % measure(load_ftesting_zcml)
    print "Setup took %.3f seconds." % measure(setup_benchmark)
    benchmark("Daily calendar view with many recurrent events", daily_view)
    benchmark("Weekly calendar view with many recurrent events", weekly_view)
    benchmark("Monthly calendar view with many recurrent events", monthly_view)
    benchmark("Yearly calendar view with many recurrent events", yearly_view)
Code Example #3
File: nonrecurrent.py Project: l1ph0x/schooltool-2
def main():
    print "ZCML took %.3f seconds." % measure(load_ftesting_zcml)
    print "Setup took %.3f seconds." % measure(setup_benchmark)
    benchmark("Daily calendar view with many simple events", daily_view)
    benchmark("Weekly calendar view with many simple events", weekly_view)
    benchmark("Monthly calendar view with many simple events", monthly_view)
    benchmark("Yearly calendar view with many simple events", yearly_view)
Code Example #4
def train(training_data, model_path=DEFAULT_DATA_PATH, test_data=None):
    X_train, y_train = read_train_data(training_data)

    logging.info("Training on {} examples for {} labels".format(len(X_train), len(set(y_train))))
    logging.info("Starting the training")
    prediction_pipeline.fit(preprocess_pipeline.fit_transform(X_train, y_train), y_train)

    if test_data is not None:
        X_test, y_test = read_test_data(test_data, y_train)
        logging.info("Evaluating the model")
        X_test = preprocess_pipeline.transform(X_test)
        benchmark(prediction_pipeline, X_train, y_train, X_test, y_test, verbose=2)

    logging.info("Storing the model to {}".format(model_path))
    joblib.dump(prediction_pipeline, model_path)
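
A hypothetical invocation of train (the file names are placeholders, not taken from the source), together with how the persisted pipeline can be restored later:

import joblib

# Train on a hypothetical dataset and evaluate on held-out data.
train('data/train.csv', model_path='model.joblib', test_data='data/test.csv')

# The stored pipeline can be loaded back with joblib:
pipeline = joblib.load('model.joblib')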
Code Example #5
def main():
    print "ZCML took %.3f seconds." % measure(load_ftesting_zcml)
    print "Setup took %.3f seconds." % measure(setup_benchmark)
    benchmark("Daily calendar view on the start date.", daily_view_start_date)
    benchmark("Daily calendar view a year after the start date.",
              daily_view_in_a_year)
    benchmark("Daily calendar view ten years after the start date.",
              daily_view_in_ten_years)
Code Example #6
File: recurrent.py Project: asley/schooltool
def main():
    print "ZCML took %.3f seconds." % measure(load_ftesting_zcml)
    print "Setup took %.3f seconds." % measure(setup_benchmark)
    benchmark("Daily calendar view on the start date.",
              daily_view_start_date)
    benchmark("Daily calendar view a year after the start date.",
              daily_view_in_a_year)
    benchmark("Daily calendar view ten years after the start date.",
              daily_view_in_ten_years)
Code Example #7
File: main.py Project: yult0821/norse
def run_benchmark(function, label):
    config = BenchmarkConfig(
        batch_sizes=[2**i for i in range(FLAGS.batches)],
        device=FLAGS.device,
        dt=FLAGS.dt,
        label=label,
        runs=FLAGS.runs,
        sequence_length=FLAGS.sequence_length,
        start=FLAGS.start,
        stop=FLAGS.stop,
        step=FLAGS.step,
    )

    collector = partial(collect, label=label)
    results = benchmark(function, collector, config)

    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    filename = f"{timestamp}-{label}.csv"
    pd.DataFrame(results).to_csv(filename)
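
Each run lands in its own timestamped CSV, one file per label. Reading a run back for comparison is straightforward (the file name below is illustrative, not from the source):

import pandas as pd

# Load one benchmark run back into a DataFrame; the file name is illustrative.
df = pd.read_csv('2024-01-01-00-00-00-lif.csv')
print(df.head())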
Code Example #8
import argparse


# Signature reconstructed from the call at the bottom of this example.
def benchmark(dataset_file, output_file, time, model,
              dataset_test_file=None, config=None):
    model_to_bench = {
        'autosklearn-v': AutoSklearnVanillaBenchmark,
        'autosklearn-m': AutoSklearnMetaBenchmark,
        'autosklearn-e': AutoSklearnEnsBenchmark,
        'tpot': TPOTBenchmark,
        'recipe': RecipeBenchmark
    }
    if model in model_to_bench:
        model_to_bench[model]().benchmark(dataset_file,
                                          output_file,
                                          time_limit=time,
                                          dataset_test_file=dataset_test_file,
                                          config=config)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', help='Dataset file')
    parser.add_argument('output_file', help='Benchmark result file')
    parser.add_argument('-t', '--time', type=int, help='Time budget')
    parser.add_argument('-m', '--model', help='AutoML Model')
    parser.add_argument('-te', '--test_file', help='Dataset test file')
    parser.add_argument('-c', '--config', nargs='*')
    args = parser.parse_args()

    benchmark(dataset_file=args.input_file,
              output_file=args.output_file,
              time=args.time,
              model=args.model,
              dataset_test_file=args.test_file,
              config=args.config)
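
Because --config is declared with nargs='*', argparse collects any number of trailing tokens into a list. A hypothetical parse showing how the flags map onto the benchmark() call above (all values are placeholders):

# Hypothetical invocation; dataset names and options are placeholders.
args = parser.parse_args(['data/iris.csv', 'results.csv',
                          '-t', '3600', '-m', 'tpot',
                          '-c', 'seed=1', 'folds=5'])
assert args.time == 3600                      # -t is converted to int
assert args.config == ['seed=1', 'folds=5']   # nargs='*' yields a list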