Example #1
0
def test_speed_parse_and_convert():
    """Time string-parse + unit conversion in Pint vs. UnitConvert.

    Two cases: a simple length conversion and a conversion to a compound
    derived unit.  Results and a speedup ratio are printed.
    """
    print()

    # q/pq are created but never read afterwards -- presumably a registry
    # warm-up so setup cost stays out of the timings (TODO confirm).
    ureg = uc.getGlobalUnitRegistry()
    q = ureg.makeQuantity('100 W')

    pureg = pint.UnitRegistry()
    pq = pureg.Quantity(100, 'W')

    timer = Benchmark()

    # Case 1: simple length conversion, parsed from a string each run.
    timer.run(lambda: pureg.Quantity("100 m").to("cm"))
    pint_time = timer.measurement

    timer.run(lambda: ureg.makeQuantity("100 m").to("cm"))
    uc_time = timer.measurement

    print("parse and convert: 100 m -> cm")
    print("Pint", pint_time)
    print("UnitConvert", uc_time)
    print("Speedup", Speedup(pint_time, uc_time))

    print()
    print()

    # Case 2: conversion to a compound derived unit.
    timer = Benchmark()
    timer.run(lambda: pureg.Quantity("100 W").to("g cm mm / hour / min / ms"))
    pint_time = timer.measurement

    timer.run(lambda: ureg.makeQuantity("100 W").to("g cm mm / hour / min / ms"))
    uc_time = timer.measurement

    print("parse and convert: 100 W -> g cm mm / hr / min / ms")
    print("Pint", pint_time)
    print("UnitConvert", uc_time)
    print("Speedup", Speedup(pint_time, uc_time))
Example #2
0
def run_experiment(experiment_var, random_seed):
    """Run one optimization problem per dimension and benchmark each run.

    Parameters
    ----------
    experiment_var : iterable of int
        Problem dimensions to run, one experiment per entry.
    random_seed : int
        Seed forwarded to ``ex.run_problem`` for reproducibility.

    Returns
    -------
    pd.DataFrame
        One column per experiment (keyed by its position in
        ``experiment_var``) holding the benchmarker's evaluations-at-target
        results.

    Notes
    -----
    Relies on the module-level names ``sample_size``, ``num_targets``,
    ``num_subintervals``, ``cost_function``, ``balance``, ``noise``,
    ``parms``, ``ex`` and ``bm``.
    """
    evals_at_targets_df = pd.DataFrame()

    # BUG FIX: the original did ``parms.max_iterations *= dim`` inside the
    # loop, which compounds the budget across iterations (dim_1 * dim_2 * ...)
    # instead of scaling each run independently, and permanently mutates the
    # shared ``parms`` object.  Scale from a fixed baseline and restore it.
    base_max_iterations = parms.max_iterations
    try:
        for i, dim in enumerate(experiment_var):
            a = 1
            b = -1
            # random initial solution with elements between -1 and 1
            theta0 = (b - a) * np.random.rand(dim + 1, 1) + a

            # allow more iterations in higher dimensions
            parms.max_iterations = base_max_iterations * dim

            error_list, sample_evals = ex.run_problem(
                dim, sample_size, num_targets, num_subintervals, cost_function,
                theta0, balance, noise, parms, random_seed)

            # benchmark optimization run against targets derived from the
            # recorded error trace
            target_values = ex.create_targets(error_list, num_targets)

            benchmarker = bm.Benchmark(sample_evals, target_values, error_list)

            evals_at_targets_df[i] = benchmarker.benchmark()
    finally:
        # Leave the shared configuration object as we found it.
        parms.max_iterations = base_max_iterations

    return evals_at_targets_df
Example #3
0
def addDcmbmkBenchmarks(gem5FusionRoot, suites, benchmarks):
    """Register the dcmbmk suite and append one Benchmark per kernel.

    Mutates *suites* and *benchmarks* in place; returns nothing.
    """
    suites.append('dcmbmk')

    seBinDir = os.path.join(gem5FusionRoot, 'benchmarks/dcmbmk-image/bin')
    seInpDir = os.path.join(gem5FusionRoot, 'benchmarks/dcmbmk-image/inputs')
    fsBinDir = os.path.join('dcmbmk/bin')
    fsInpDir = os.path.join('dcmbmk/inputs')
    # Note: this can/should be a symlink and/or get passed in
    rcSDir = os.path.join(gem5FusionRoot, 'full_system_files/runscripts')
    # One dict shared by every Benchmark in the suite.
    cmdLines = {}

    kernelNames = (
        'cmem', 'diverge', 'global', 'icache1', 'icache2', 'icache3',
        'icache4', 'shared', 'sync', 'texture2', 'texture4',
    )

    benchmarks.extend(
        Benchmark(suite='dcmbmk',
                  name=kernel,
                  executable='gem5_fusion_%s' % kernel,
                  seBinDir=seBinDir,
                  seInpDir=os.path.join(seInpDir, kernel),
                  fsBinDir=fsBinDir,
                  fsInpDir=os.path.join(fsInpDir, kernel),
                  rcSDir=rcSDir,
                  simSizes=['default'],
                  cmdLines=cmdLines)
        for kernel in kernelNames)
    def testBenchmarkScript(self):
        """Launch the HardLimit component and run one benchmark pass."""
        # Component under test.
        test_comp = sb.launch('HardLimit')

        # Benchmark parameters.
        interval_s = 5       # time for benchmark to recalculate confidence interval in seconds
        packet_count = 100   # num of packets in BenchmarkGen component to calculate output rate
        packet_size = 1000   # size of packets for BenchmarkGen component to generate
        samples_away = 10    # samples away from the first
        plot_results = 0     # plot results?
        show_debug = 1       # show debug information

        # Benchmark start
        runner = Benchmark.Benchmark(test_comp, interval_s, packet_count,
                                     packet_size, samples_away, plot_results,
                                     show_debug)
        runner.run()
Example #5
0
 def __init__(self,
              Universe='Stoxx 50',
              Dates=None,
              Frequency=1,
              Histo_Length=6,
              Wght_Const=None):
     """Initialize the strategy configuration and empty result holders.

     Parameters
     ----------
     Universe : str
         Name of the investment universe.
     Dates : list of str, optional
         ``[start, end]`` dates; defaults to ``['01-01-2015', '01-01-2017']``.
     Frequency : int
         Rebalancing frequency (units not shown here -- TODO confirm).
     Histo_Length : int
         History length used for estimation (units not shown -- TODO confirm).
     Wght_Const : list of float, optional
         ``[min, max]`` weight constraint; defaults to ``[0.0, 10.0]``.
     """
     # BUG FIX: the original used mutable list defaults, which are created
     # once and shared by every call -- instances would alias the same
     # list objects.  Use the None-sentinel idiom instead; behavior for
     # callers is unchanged.
     if Dates is None:
         Dates = ['01-01-2015', '01-01-2017']
     if Wght_Const is None:
         Wght_Const = [0.0, 10.0]

     self._Universe = Universe
     self._Benchmark = bench.Benchmark()
     self._Dates = Dates
     self._Frequency = Frequency
     self._Histo_Length = Histo_Length
     self._Wght_Constraint = Wght_Const
     self._Wght_Histo = pd.DataFrame()   # weight history, filled later
     self._Perf = pd.Series()            # performance series, filled later
     self._Weights = None
     self._Opt_Strat = 'EW'              # default optimization strategy
Example #6
0
def test_speed_simple():
    """Time a pre-parsed unit conversion in Pint vs. UnitConvert."""
    ureg = uc.getGlobalUnitRegistry()
    q = ureg.makeQuantity('100 mile/hour')

    pureg = pint.UnitRegistry()
    pq = pureg.Quantity(100, 'mile/hour')

    timer = Benchmark()

    # Quantities are built once above, so only .to() is being measured.
    timer.run(lambda: q.to("km/s"))
    uc_time = timer.measurement

    timer.run(lambda: pq.to("km/s"))
    pint_time = timer.measurement

    print("convert: 100 mph -> km/s")
    print("Pint", pint_time)
    print("UnitConvert", uc_time)
    print("Speedup", Speedup(pint_time, uc_time))
Example #7
0
def test_pint_calc_with_uc_conversion():
    """Convert a Pint-computed area natively vs. round-tripping through UnitConvert."""
    print()
    ureg = uc.getGlobalUnitRegistry()
    pureg = pint.UnitRegistry()

    length = pureg.Quantity(100, 'in')
    width = pureg.Quantity(100, 'cm')
    area = length * width

    timer = Benchmark()

    # Native Pint conversion of the computed quantity.
    timer.run(lambda: area.to("acre"))
    pint_time = timer.measurement

    # Round trip: stringify the Pint quantity, convert with UnitConvert,
    # then rebuild a Pint quantity from the numeric result.
    timer.run(lambda: pureg.Quantity(
        ureg.makeQuantity(str(area)).to("acre").value(), "acre"))
    uc_time = timer.measurement

    print("pint calc with uc conversion")
    print("Pint", pint_time)
    print("UnitConvert", uc_time)
    print("Speedup", Speedup(pint_time, uc_time))
#!/usr/bin/env python
#
#	Script to use Benchmark.py class
#
#
#

from ossie.utils import sb
import Benchmark

## Components to test (all three are launched; only one is benchmarked below)
upzero_comp = sb.launch('UpZero')
tunefilter_comp = sb.launch('TuneFilterDecimate')
fastfilter_comp = sb.launch('fastfilter')

# Benchmark Parameters
test_comp = upzero_comp  # this is the component you want to test
t = 5  # time for benchmark to recalculate confidence interval in seconds
packets = 100  # num of packets in BenchmarkGen component to calculate output rate
size = 1000  # size of packets for BenchmarkGen component to generate
samples_away = 10  # samples away from the first
plotFlag = 0  # plot results?
debugFlag = 1  # show debug information

# Benchmark start
bench1 = Benchmark.Benchmark(test_comp, t, packets, size, samples_away,
                             plotFlag, debugFlag)
bench1.run()

# BUG FIX: the original used the Python 2 print *statement*
# (``print 'script done'``), a SyntaxError under Python 3.  The function
# call form below behaves identically on both Python 2 and 3.
print('script done')
Example #9
0
# Fit the model: gradient descent returns the final parameter vector plus
# the error trace and sample-evaluation counts used for benchmarking below.
theta, error_list, sample_evals = solver.gradient_descent(cost_function, 
                                                          mini_batcher, 
                                                          X_train, 
                                                          y_train, 
                                                          theta0)
# theta[0] is the intercept term; the remaining entries are the feature
# coefficients (theta0 was presumably built with a leading bias row --
# TODO confirm against the setup code above this chunk).
print("Bias = ", theta[0])
print("Coefficients = ", theta[1:]) 

############# Create benchmarker #############

# Derive num_targets target error levels from the recorded error trace and
# wrap the run's statistics in a Benchmark object.
num_targets = 20
target_values = ex.create_targets(error_list, num_targets)

benchmarker = bm.Benchmark(sample_evals, 
                 target_values, 
                 error_list)

############# predicting output for test set #############

# Evaluate the fitted parameters on the held-out set; also reports
# majority/minority-class error rates (class semantics defined in
# bm.Benchmark -- not visible here).
y_pred, err, majority_err, minority_err = benchmarker.test_error(cost_function, 
                                                                 X_test, 
                                                                 y_test, 
                                                                 theta, 
                                                                 verbose = True)
############# Plots #############

# plot data
num_points = 200
# plt.figure()
# mp.plot_data(cost_function, num_points, 8, dataset.data)