Example #1
def run_experiment(experiment_var, random_seed):

    evals_at_targets_df = pd.DataFrame()

    for i, dim in enumerate(experiment_var):

        a = 1
        b = -1
        # random initial solution with elements between -1 and 1
        theta0 = (b - a) * np.random.rand(dim + 1, 1) + a

        # allow more iterations in higher dimensions
        # (note: parms is reused, so max_iterations compounds across loop iterations)
        parms.max_iterations = parms.max_iterations * dim

        error_list, sample_evals = ex.run_problem(
            dim, sample_size, num_targets, num_subintervals, cost_function,
            theta0, balance, noise, parms, random_seed)

        ############# benchmark optimization run

        target_values = ex.create_targets(error_list, num_targets)

        benchmarker = bm.Benchmark(sample_evals, target_values, error_list)

        evals_at_targets = benchmarker.benchmark()
        evals_at_targets_df[i] = evals_at_targets

    return evals_at_targets_df
Example #2
def addDcmbmkBenchmarks(gem5FusionRoot, suites, benchmarks):
    suites.append('dcmbmk')
    dcmbmkSEBinDir = os.path.join(gem5FusionRoot,
                                  'benchmarks/dcmbmk-image/bin')
    dcmbmkSEInpDir = os.path.join(gem5FusionRoot,
                                  'benchmarks/dcmbmk-image/inputs')
    dcmbmkFSBinDir = os.path.join('dcmbmk/bin')
    dcmbmkFSInpDir = os.path.join('dcmbmk/inputs')
    # Note: this can/should be a symlink and/or get passed in
    dcmbmkRcSDir = os.path.join(gem5FusionRoot, 'full_system_files/runscripts')
    dcmbmkCmdLines = {}

    benchNames = [
        'cmem', 'diverge', 'global', 'icache1', 'icache2', 'icache3',
        'icache4', 'shared', 'sync', 'texture2', 'texture4'
    ]

    for benchName in benchNames:
        bench = Benchmark(suite='dcmbmk',
                          name=benchName,
                          executable='gem5_fusion_%s' % benchName,
                          seBinDir=dcmbmkSEBinDir,
                          seInpDir=os.path.join(dcmbmkSEInpDir, benchName),
                          fsBinDir=dcmbmkFSBinDir,
                          fsInpDir=os.path.join(dcmbmkFSInpDir, benchName),
                          rcSDir=dcmbmkRcSDir,
                          simSizes=['default'],
                          cmdLines=dcmbmkCmdLines)
        benchmarks.append(bench)
Example #3
def main():
    # Asks user and gets all input
    Company, period_to_look, period_of_change, percent_change = Ask_dates.Ask_dates(
    )

    # Whether to reset the companies' benchmark files
    Reset = False

    # Create new file to store Data
    Create_files.Creation(Company, Reset)

    # Convert period to timeframes for API
    timeframe = Period_to_timeframe.Period_to_timeframe(period_to_look)

    # Call Google_data's Trending_google function with our variables.
    # If not enough data is available, it returns 0 (int).
    Trend_data = Google_data.Trending_google(Company, timeframe)

    if (Trend_data != 0):
        # Get our benchmarks
        Benchmark.Calc(Trend_data,
                       Company[0])  # Needs to be fixed for list of companies

        # Alarm!
        alarm.Num(Trend_data, Company[0], percent_change, period_of_change)

    else:
        print "\n \nERROR MESSAGE: No Trending data! \n"
        print "Trend data:" + str((Trend_data))
        return 1
Example #4
    def estimatedRender (self, idWork, idZone):
        """Realiza el render tama�o sello y escribe los datos obtenidos en la pizarra"""

        blenderFile = Util.getFileName(self.getCurrentFiles(), 'blend')
        ficheroBlender = os.path.join(self.getWorkDir(), blenderFile)
        
        salida = os.path.join(self.getWorkDir(), str(idWork) + '_' + str(idZone))
        zone = self.getZone(idZone)
        x1, y1 = zone.x1, zone.y1
        x2, y2 = zone.x2, zone.y2

        # Record the start time in order to update the entry on the blackboard.
        begin = time.time()
        # Render using the EstimatedRender.py script.
        os.system('blender -b ' + ficheroBlender + ' -P ./EstimatedRender.py -f 1 ' +
                  salida + ' ' + str(x1) + ' ' + str(y1) + ' ' +
                  str(x2) + ' ' + str(y2) + ' ' + str(RES))
        end = time.time()

        os.system('mv ' + salida + '0001.png ' + salida + EXT)

        # Update the blackboard with the time estimated from the stamp-size render.
        self.getBlackboard().incrementEstimatedRenderTime(int(end - begin))
        
        if len(self.getHistoric()) == 0:
            self.setCoefficient(Benchmark.getValue() / self.getBenchmarkValue())
        finalTime = int(RES * (end - begin) / self.getCoefficient())
        self.getBlackboard().update(idWork, idZone, finalTime)
Example #5
    def testBenchmarkScript(self):

        test_comp = sb.launch('HardLimit')  # this is the component you want to test
        t = 5  # time for benchmark to recalculate confidence interval, in seconds
        packets = 100  # number of packets in the BenchmarkGen component used to calculate output rate
        size = 1000  # size of packets for the BenchmarkGen component to generate
        samples_away = 10  # samples away from the first
        plotFlag = 0  # plot results?
        debugFlag = 1  # show debug information

        # Benchmark start
        bench1 = Benchmark.Benchmark(test_comp, t, packets, size, samples_away,
                                     plotFlag, debugFlag)
        bench1.run()
Example #6
 def __init__(self,
              Universe='Stoxx 50',
              Dates=['01-01-2015', '01-01-2017'],
              Frequency=1,
              Histo_Length=6,
              Wght_Const=[0.0, 10.0]):
     self._Universe = Universe
     self._Benchmark = bench.Benchmark()
     self._Dates = Dates
     self._Frequency = Frequency
     self._Histo_Length = Histo_Length
     self._Wght_Constraint = Wght_Const
     self._Wght_Histo = pd.DataFrame()
     self._Perf = pd.Series()
     self._Weights = None
     self._Opt_Strat = 'EW'
Example #7
def call(funcao, list_entradas):
    """Dispatch an objective-function index to the corresponding benchmark function."""
    if funcao == 0:
        return ben.rastrigin(list_entradas)[0]
    elif funcao == 1:
        return ben.ackley(list_entradas)[0]
    elif funcao == 2:
        return ben.keane(list_entradas)[0]
    elif funcao == 3:
        return ben.shubert(list_entradas)[0]
    elif funcao == 4:
        return ben.shubert3(list_entradas)[0]
    elif funcao == 5:
        return ben.schaffer(list_entradas)[0]
    elif funcao == 6:
        return ben.zdt1(list_entradas)
    elif funcao == 7:
        return ben.zdt2(list_entradas)
    else:
        return None
Example #8
def test_speed_simple():
    ureg = uc.getGlobalUnitRegistry()
    q = ureg.makeQuantity('100 mile/hour')

    pureg = pint.UnitRegistry()
    pq = pureg.Quantity(100, 'mile/hour')

    bm = Benchmark()

    bm.run(lambda: q.to("km/s"))
    uc_runtime = bm.measurement

    bm.run(lambda: pq.to("km/s"))
    pint_runtime = bm.measurement

    print("convert: 100 mph -> km/s")
    print("Pint", pint_runtime)
    print("UnitConvert", uc_runtime)
    print("Speedup", Speedup(pint_runtime, uc_runtime))
Example #9
def test_speed_medium():
    print()
    ureg = uc.getGlobalUnitRegistry()
    q = ureg.makeQuantity('100 W')

    pureg = pint.UnitRegistry()
    pq = pureg.Quantity(100, 'W')

    bm = Benchmark()
    bm.run(lambda: pq.to("g cm mm / hour / min / ms"))
    pint_runtime = bm.measurement

    bm.run(lambda: q.to("g cm mm / hour / min / ms"))
    uc_runtime = bm.measurement

    print("convert: 100 W -> g cm mm / hr / min / ms")
    print("Pint", pint_runtime)
    print("UnitConvert", uc_runtime)
    print("Speedup", Speedup(pint_runtime, uc_runtime))
Example #10
def test_pint_calc_with_uc_conversion():
    print()
    ureg = uc.getGlobalUnitRegistry()
    pureg = pint.UnitRegistry()

    l = pureg.Quantity(100, 'in')
    w = pureg.Quantity(100, 'cm')

    A = l * w

    bm = Benchmark()
    bm.run(lambda: A.to("acre"))
    pint_runtime = bm.measurement

    bm.run(lambda: pureg.Quantity(
        ureg.makeQuantity(str(A)).to("acre").value(), "acre"))
    uc_runtime = bm.measurement

    print("pint calc with uc conversion")
    print("Pint", pint_runtime)
    print("UnitConvert", uc_runtime)
    print("Speedup", Speedup(pint_runtime, uc_runtime))
Example #11
    def __init__(self):
        print("LogisticRegression initializing")
        # Initial loading of the (email) data
        csvValuesColumnNumber = 57
        csvFilePath = "spambase/spambase.data"
        mailDataset = pd.read_csv(csvFilePath, header=None)  # names=names,
        # Intended to drop columns 26 and 27 ("george" / "650" contextual false positives);
        # note that drop() returns a new DataFrame, so without reassignment this has no effect.
        mailDataset.drop(columns=[26, 27])
        # Split the columns in two: the feature values (dataFieldsValues) and the label of each
        # email (dataLabels), which indicates whether it is spam (1) or not
        dataFieldsValues = mailDataset.iloc[:, :-1].values
        # ':' means "everything" -> ':-1' means "all columns except the last"
        dataLabels = mailDataset.iloc[:, csvValuesColumnNumber].values
        # train_test_split(counts, df['label'], test_size=0.1, random_state=69)

        # Split the spambase rows and shuffle to get a random sample
        # X_train: training values
        # y_train: training labels (associated with each value)
        # X_test: test values
        # y_test: labels used to check the test predictions
        iterationNumber = 2
        # Allows identical test sets to be reused for each iteration
        a2_X_train = []
        a2_X_test = []
        a2_y_train = []
        a2_y_test = []
        # Scaled test sets
        a2_X_train_scaled = []
        a2_X_test_scaled = []

        from sklearn.preprocessing import StandardScaler

        for i in range(0, iterationNumber):
            X_train, X_test, y_train, y_test = train_test_split(
                dataFieldsValues, dataLabels, test_size=0.2,
                shuffle=True)  # test_size = 1 - train_size
            a2_X_train.append(X_train)
            a2_X_test.append(X_test)
            a2_y_train.append(y_train)
            a2_y_test.append(y_test)

            scaler = StandardScaler()
            scaler.fit(X_train)

            X_train_scaled = scaler.transform(X_train)
            X_test_scaled = scaler.transform(X_test)
            a2_X_train_scaled.append(X_train_scaled)
            a2_X_test_scaled.append(X_test_scaled)

        predictionArrayErrorRatio = []  # predictions, values to compare against y_test
        predictionArrayName = []
        predictionArrayTimeTookMs = []

        iterationNumber = len(a2_X_train)

        if iterationNumber <= 0:
            return

        for iIteration in range(0, iterationNumber):

            errorOccured = False
            # Modulo the maximum value of an unsigned 32-bit int: 2^32 - 1
            randSeed = int(time.time() * 10000000000) % 4294967295

            print("predictWith randSeed = " + str(randSeed))
            np.random.seed(randSeed)

            startTimeMs = int(time.time() * 1000)

            print("predictWith  " + "LR")
            from sklearn.linear_model import LogisticRegression

            classifier = LogisticRegression(random_state=0,
                                            solver='lbfgs',
                                            multi_class='ovr',
                                            max_iter=100000)
            classifier.fit(a2_X_train[iIteration],
                           a2_y_train[iIteration])  # X_train_scaled
            y_predict = classifier.predict(
                a2_X_test[iIteration])  # X_test_scaled

            if not errorOccured:
                print(classification_report(a2_y_test[iIteration], y_predict))

            elapsedTimeMs = int(time.time() * 1000) - startTimeMs

            localPredictErrorRatio = np.mean(
                y_predict != a2_y_test[iIteration])

            predictionArrayErrorRatio.append(localPredictErrorRatio)
            predictionArrayName.append("LR")
            predictionArrayTimeTookMs.append(elapsedTimeMs)

        predictionArrayErrorRatioScaled = []  # predictions, values to compare against y_test
        predictionArrayNameScaled = []
        predictionArrayTimeTookMsScaled = []

        iterationNumber = len(a2_X_train_scaled)

        if iterationNumber <= 0:
            return

        for iIteration in range(0, iterationNumber):
            errorOccured = False
            # Modulo the maximum value of an unsigned 32-bit int: 2^32 - 1
            randSeed = int(time.time() * 10000000000) % 4294967295

            print("predictWith randSeed = " + str(randSeed))
            np.random.seed(randSeed)

            startTimeMs = int(time.time() * 1000)

            print("predictWith  " + "LR")
            from sklearn.linear_model import LogisticRegression

            classifier = LogisticRegression(random_state=0,
                                            solver='lbfgs',
                                            multi_class='ovr',
                                            max_iter=100000)
            classifier.fit(a2_X_train_scaled[iIteration],
                           a2_y_train[iIteration])  # X_train_scaled
            y_predict = classifier.predict(
                a2_X_test_scaled[iIteration])  # X_test_scaled

            if not errorOccured:
                print(classification_report(a2_y_test[iIteration], y_predict))

            elapsedTimeMs = int(time.time() * 1000) - startTimeMs

            localPredictErrorRatio = np.mean(
                y_predict != a2_y_test[iIteration])

            predictionArrayErrorRatioScaled.append(localPredictErrorRatio)
            predictionArrayNameScaled.append("LR")
            predictionArrayTimeTookMsScaled.append(elapsedTimeMs)

        Benchmark.drawBenchmarkForMultipleValues(
            'Non Scalé - Taux d\'erreur en fonction de l\'algo utilisé',
            'Algo utilisé', 'Erreur moyenne', predictionArrayErrorRatio,
            predictionArrayName)
        Benchmark.drawBenchmarkForMultipleValues(
            'Scalé - Taux d\'erreur en fonction de l\'algo utilisé',
            'Algo utilisé', 'Erreur moyenne', predictionArrayErrorRatioScaled,
            predictionArrayNameScaled)
        Benchmark.drawBenchmarkForMultipleValues(
            "Non Scalé - Temps pris par algorithme", "Algo utilisé",
            "Temps pris (ms)", predictionArrayTimeTookMs, predictionArrayName)
        Benchmark.drawBenchmarkForMultipleValues(
            "Scalé - Temps pris par algorithme", "Algo utilisé",
            "Temps pris (ms)", predictionArrayTimeTookMsScaled,
            predictionArrayNameScaled)
Example #12
 def notifyBenchmarkValue (self):
     """Notifica al Master el tiempo empleado en la ejecuci�n del benchmark"""
     self.getMaster().benchmarkValue(int(Benchmark.getValue()))
Example #13
## Higher order benchmarking

The above solves the `CartPole-v0` env. However, we want to benchmark more envs, and a single solve isn't representative, especially given the randomness inherent in many of these environments. A more informative approach is to repeat the benchmarking above a number of times and build a distribution of solve times.

This is easily doable with the `Evolve` class and the `Benchmark.py` functions. Briefly, an `Evolve` object creates an `Agent` class object for a given env, and then `Evolve.evolve()` does RWG to try to solve that env. It returns the solve time (in number of generations).
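
For context, a single run might look roughly like this sketch (the import path, constructor argument, and `N_gen` keyword are assumptions for illustration, not the documented API):

```
from Evolve import Evolve  # assumed import path

# Build an Evolve object for one env, then run RWG until it solves or hits the cap
evolver = Evolve('CartPole-v0')         # constructor signature is an assumption
solve_gen = evolver.evolve(N_gen=1000)  # returns the solve time, in generations
print('solved after {} generations'.format(solve_gen))
```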

`Benchmark.benchmark_envs()` takes a list of envs. For each env, it builds a distribution of solve times by creating a new `Evolve` object and solving the env some specified number of times.

Here's a simple usage, from `scripts/benchmark_example.py`:

```
import path_utils
import Benchmark

Benchmark.benchmark_envs(['CartPole-v0'], N_dist=100, N_gen=1000)
```

This will only benchmark `CartPole-v0`. It will create a distribution of solve times from `N_dist` instances of that env. Each instance gets a maximum number of generations `N_gen` (if it doesn't solve within that budget, it gets recorded as the maximum time; this may be suboptimal because it underestimates these outliers).

This produces:

<p align="center">
  <img width="640" height="480" src="misc/CartPole-v0_solve_gen_dist.png">
</p>

Something curious: even though the distribution has a well-defined Gamma-like (?) shape, there are always some runs at the maximum `N_gen` (meaning they didn't solve). This is surprising, since every iteration of `evolve()` is independent. However, since we only test for `mean_score` > `best_score`, a "lucky" set of weights can score highly over its 3 episode trials without actually solving the env; later weight sets that score lower over 3 episodes, but *would* solve it, then never get tested. This needs to be looked at more.
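
To make that failure mode concrete, here is a minimal, self-contained sketch of the acceptance logic as described above (the helper and the scoring are hypothetical stand-ins, not the actual `Evolve.evolve()` code):

```
import random

def episode_scores(weights, n_episodes=3):
    # Hypothetical stand-in for evaluating one weight sample over n_episodes;
    # real scores would come from the env, not from a noisy draw like this.
    return [weights + random.gauss(0, 1) for _ in range(n_episodes)]

best_score = float('-inf')
for gen in range(1000):
    weights = random.random()                        # RWG: fresh random weights each generation
    mean_score = sum(episode_scores(weights)) / 3.0  # mean over the 3 episode trials
    if mean_score > best_score:                      # the acceptance test discussed above
        best_score = mean_score
        # Only these "new best" samples ever get checked for solving the env, so one
        # lucky 3-episode mean can block later samples that would actually solve it.
```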

In addition, it creates a timestamped directory in the `outputs` directory for the benchmarking run. Within that, it creates:

* For each env benchmarked, a directory with the FF plot for each run
Example #14
#!/opt/local/bin/python3.3

from Benchmark import *

if __name__ == "__main__":
	b = Benchmark()

	print( b.validate_with_knn() )
	print( b.validate_with_rf() )
Example #15
theta, error_list, sample_evals = solver.gradient_descent(cost_function, 
                                                          mini_batcher, 
                                                          X_train, 
                                                          y_train, 
                                                          theta0)
print("Bias = ", theta[0])
print("Coefficients = ", theta[1:]) 

############# Create benchmarker #############

num_targets = 20
target_values = ex.create_targets(error_list, num_targets)

benchmarker = bm.Benchmark(sample_evals,
                           target_values,
                           error_list)

############# predicting output for test set #############

y_pred, err, majority_err, minority_err = benchmarker.test_error(cost_function, 
                                                                 X_test, 
                                                                 y_test, 
                                                                 theta, 
                                                                 verbose = True)
############# Plots #############

# plot data
num_points = 200
# plt.figure()
# mp.plot_data(cost_function, num_points, 8, dataset.data)
Example #16
#!/usr/bin/env python
#
#	Script to use Benchmark.py class
#
#
#

from ossie.utils import sb
import Benchmark

## Component to test
upzero_comp = sb.launch('UpZero')
tunefilter_comp = sb.launch('TuneFilterDecimate')
fastfilter_comp = sb.launch('fastfilter')

# Benchmark Parameters
test_comp = upzero_comp  # this is the component you want to test
t = 5  # time for benchmark to recalculate confidence interval, in seconds
packets = 100  # number of packets in the BenchmarkGen component used to calculate output rate
size = 1000  # size of packets for the BenchmarkGen component to generate
samples_away = 10  # samples away from the first
plotFlag = 0  # plot results?
debugFlag = 1  # show debug information

# Benchmark start
bench1 = Benchmark.Benchmark(test_comp, t, packets, size, samples_away,
                             plotFlag, debugFlag)
bench1.run()

print 'script done'
Example #17
import path_utils
import Benchmark
import os
'''
For testing various benchmarking examples.

'''

#['CartPole-v0', 'MountainCar-v0', 'MountainCarContinuous-v0', 'Pendulum-v0']
Benchmark.benchmark_envs([
    'CartPole-v0', 'MountainCar-v0', 'MountainCarContinuous-v0', 'Pendulum-v0'
],
                         N_gen=2000,
                         N_dist=25,
                         NN='FFNN_multilayer',
                         N_hidden_layers=0,
                         N_hidden_units=0,
                         act_fn='linear')

exit()

Benchmark.benchmark_vary_params(
    {
        'env_name': 'MountainCar-v0',
        'NN': 'FFNN_multilayer'
    }, {
        'N_hidden_units': [2, 4],
        'N_hidden_layers': [0, 1],
        'act_fn': ['tanh', 'relu']
    },
    N_gen=5,