Example #1
    def get(self, configuration):
        experiment = Experiment()
        experiment.model_trainer = self.model_trainer_factory.get(
            configuration)
        experiment.model_tester = self.model_tester_factory.get(configuration)
        experiment.model = self.model_factory.get(configuration)

        return experiment
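
This is the assembly half of a dependency-injection factory: each collaborator comes from its own sub-factory and is attached to a fresh Experiment. A minimal self-contained sketch of the pattern (every name except Experiment and get() is a hypothetical fill-in, not from the source):

class Experiment:
    """Plain container; the factory populates its collaborators."""

class ConstFactory:
    # Hypothetical stand-in for the trainer/tester/model sub-factories.
    def __init__(self, value):
        self.value = value

    def get(self, configuration):
        return self.value

class ExperimentFactory:
    def __init__(self):
        self.model_trainer_factory = ConstFactory("trainer")
        self.model_tester_factory = ConstFactory("tester")
        self.model_factory = ConstFactory("model")

    def get(self, configuration):
        experiment = Experiment()
        experiment.model_trainer = self.model_trainer_factory.get(configuration)
        experiment.model_tester = self.model_tester_factory.get(configuration)
        experiment.model = self.model_factory.get(configuration)
        return experiment

exp = ExperimentFactory().get(configuration={})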
Example #2
    def __init__(self, logger_obj, n, i, seed=5):
        folds = n_fold_split(Amigos.SUBJECTS_IDS, n, seed=seed)

        self.test_ids = folds[i]["test"]
        self.val_ids = folds[i]["val"]
        self.train_ids = folds[i]["train"]

        Experiment.__init__(self, "Amigos", logger_obj, SIGNALS_LEN, dataset_name_suffix=f"_{n}fold_{i:02d}")
Example #3
    def test_save(self, tmp_path):
        dataset_path = Path('ml/input/test/sample.csv')
        o = ProjectModel()
        artifacts_handler = ExperimentArtifacts(
            run_tag='test',
            model_name=o.model_id,
            base_path=tmp_path,
        )
        e = Experiment(
            run_tag='test',
            model=o,
            input_dir=dataset_path,
            artifacts_handler=artifacts_handler,
        )
        e.run()
        assert o.model_path.is_file()
Example #4
    def test_load(self, tmp_path):
        dataset_path = Path('ml/input/test/sample.csv')
        o = ProjectModel()
        artifacts_handler = ExperimentArtifacts(
            run_tag='test',
            model_name=o.model_id,
            base_path=tmp_path,
        )
        e = Experiment(
            run_tag='test',
            model=o,
            input_dir=dataset_path,
            artifacts_handler=artifacts_handler,
        )
        e.run()
        o2 = ProjectModel()
        o2.load(tmp_path)
        model = o.model
        model_ = o2.model
        assert type(model_) == type(model)  # must return the same type of object
        assert model_ is not model  # object identity MUST be different
Example #5
    def test_predict(self, tmp_path):
        dataset_path = Path('ml/input/test/sample.csv')
        o = ProjectModel()
        artifacts_handler = ExperimentArtifacts(
            run_tag='test',
            model_name=o.model_id,
            base_path=tmp_path,
        )
        e = Experiment(
            run_tag='test',
            model=o,
            input_dir=dataset_path,
            artifacts_handler=artifacts_handler,
        )
        e.run()
        p = o.predict(
            ids=["1"],
            X=[
                "406,-2.3122265423263,1.95199201064158,-1.60985073229769,3.9979055875468,-0.522187864667764,-1.42654531920595,-2.53738730624579,1.39165724829804,-2.77008927719433,-2.77227214465915,3.20203320709635,-2.89990738849473,-0.595221881324605,-4.28925378244217,0.389724120274487,-1.14074717980657,-2.83005567450437,-0.0168224681808257,0.416955705037907,0.126910559061474,0.517232370861764,-0.0350493686052974,-0.465211076182388,0.320198198514526,0.0445191674731724,0.177839798284401,0.261145002567677,-0.143275874698919,0,1"
                .split(",")[:-1]
            ],  # noqa: E501
        )
        assert "1" in p
        assert p["1"] in [0.0, 1.0]
Example #6
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/mv_nonlinear')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    exp.data_loader = registry.mv_nonlinear()

    exp.model_factory = TfTrainEvalModelFactory(
        Model(name="RNADE_deep_normal"))

    exp.hyper_param_search = GridSearch([
        Categorical([32, 64, 128], name='km'),
        Categorical([64, 128, 512], name='sh'),
        Categorical([1, 2, 3, 4, 5], name='nh'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
        Categorical(['AdamOptimizer'], name='opt'),
        Categorical([1e-4, 1e-3, 1e-2], name='opt_lr'),
    ])
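
The GridSearch blocks in this and the following examples span a grid over skopt Categorical dimensions; a grid search by definition enumerates the Cartesian product of the per-dimension choices. A minimal self-contained sketch of that enumeration (illustrative only, not this repository's GridSearch implementation):

from itertools import product

from skopt.space import Categorical

dims = [
    Categorical([32, 64, 128], name='km'),
    Categorical([1e-4, 1e-3, 1e-2], name='opt_lr'),
]

# Every combination of the categorical choices: 3 * 3 = 9 candidates here.
for combo in product(*(d.categories for d in dims)):
    params = dict(zip((d.name for d in dims), combo))
    print(params)  # e.g. {'km': 32, 'opt_lr': 0.0001}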
Example #7
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/sin_t')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    exp.data_loader = registry.sin_t_noise()

    exp.model_factory = TfTrainEvalModelFactory(Model(name="RNADE_laplace"))

    exp.hyper_param_search = GridSearch([
        Categorical([1, 16, 32, 64, 128], name='km'),
        Categorical([1, 16, 32, 64, 128], name='sh'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
        Categorical(['AdamOptimizer'], name='opt'),
        Categorical([1e-4, 1e-3, 1e-2], name='opt_lr'),
    ])

    exp.early_stopping = EarlyStop(monitor_every_epoch=1, patience=[30])
Example #8
        for i in range(len(pop_list)):
            fitness *= self.cal_pop_fitness(hmm, pop_list[i],
                                            observation_list[i])
        return fitness

    def roulette_wheel_selection(self, population, fitness):
        """Perform roulette wheel selection given the current population"""
        next_population = np.zeros(population.shape, dtype=int)
        normalized_fitness = fitness / sum(fitness)
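        # np.random.choice draws each parent with probability proportional to its fitness.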
        for i in range(0, self.population_size):
            next_population[i, :] = population[
                np.random.choice(population.shape[0], p=normalized_fitness), :]
        return next_population


if __name__ == "__main__":
    experiment = Experiment(trans_file="test_hmm_transition_matrix.csv",
                            emis_file="hmm_observations_emission.csv",
                            n_train_samples=2,
                            n_test_samples=2,
                            sample_length=20,
                            seed=1)
    experiment.initialize()
    possible_states = [
        list(x) for x in list(product(np.arange(4), np.arange(4)))
    ]
    filter = ParticleFilter(experiment)
    print(experiment.y_test)
    filter.run()
    print(filter.y_pred)
Example #9
from experiment.experiment import Experiment
import numpy as np
from itertools import product
import os
from experiment.result_analysis import ResultAnalysis

"""Main file: running the experiments with predetermined settings"""

if __name__ == "__main__":
    DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources'))
    # listing the possible separable states given the hmm
    possible_states = [list(x) for x in list(product(np.arange(4), np.arange(4)))]
    # listing the algorithms investigated in the experiment
    gfilter4 = GeneticFilter("AGF4", possible_states, genetic_operation_resolution=4)
    gfilter5 = GeneticFilter("AGF5", possible_states, genetic_operation_resolution=5)
    gfilter6 = GeneticFilter("AGF6", possible_states, genetic_operation_resolution=6)
    pfilter = ParticleFilter("APF")
    viterbi = Viterbi("VA")
    algorithms = [gfilter4, gfilter5, gfilter6, pfilter, viterbi]
    # creating the experiment
    experiment = Experiment(algorithms=algorithms,
                            trans_file=os.path.join(DATA_DIR, 'test_2_hmm_transition_matrix.csv'),
                            emis_file=os.path.join(DATA_DIR, 'hmm_observations_emission.csv'),
                            n_train_samples=10, n_test_samples=5000, sample_length=100, seed=1)
    # initialize and run the experiment
    experiment.initialize()
    experiment.run()
    # analyzing the results obtained in the experiment
    result = ResultAnalysis(experiment, algorithms)
    result.run()
Example #10
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/inv_sin_t')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    exp.data_loader = registry.inv_sin_t()

    exp.model_factory = TfTrainEvalModelFactory(
        Model(name="MONDE_copula_const_cov"))

    exp.hyper_param_search = GridSearch([
        Categorical([32, 64, 128], name='hxy_sh'),
        Categorical([1, 2, 3], name='hxy_nh'),
        Categorical([32, 64, 128], name='x_sh'),
        Categorical([1, 2, 3], name='x_nh'),
        Categorical([16, 32], name='hxy_x'),
        Categorical([0.05, 0.01], name='clr'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
    ])
Example #11
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/inv_sin_normal')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    exp.data_loader = registry.inv_sin_normal()

    exp.model_factory = TfTrainEvalModelFactory(Model(name="MAF"))

    exp.hyper_param_search = GridSearch([
        Categorical([1, 2, 3, 4, 5], name='nb'),
        Categorical([32, 128, 256], name='sh'),
        Categorical([1, 2, 3], name='nh'),
        Categorical([16, 32, 64], name='shc'),
        Categorical([True, False], name='bn'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
        Categorical(['AdamOptimizer'], name='opt'),
        Categorical([1e-4, 1e-3, 1e-2], name='opt_lr'),
    ])
Example #12
    for t, b in enumerate(beaches):
        print(b)
        task = tasks[t]
        ma = MultiAgent()

        print('Creating agents...')
        for i in range(noAgents):
            learner = QLearner(features, task.env.noActions, i)
            agent = Agent(learner,
                          num_features=features,
                          num_actions=task.env.noActions,
                          num_agents=noAgents,
                          index=i)
            ma.addAgent(agent)

        exp = Experiment(task, ma)

        print('Running experiment...')
        distribution = exp.doTrials(number=trials)
        results = task.globalRewards

        saveToCSV(results, b, '00_' + comment + typeR, 'RND-SGM')

        sigma = np.array([np.std(r) for r in results])
        mean = np.array([np.mean(r) for r in results])

        win = 500
        err = [sigma[i] for i in range(0, len(mean), win)]
        avg = [mean[i] for i in range(0, len(mean), win)]

        l, = plt.plot(mean, color=colors[t], label=b, alpha=0.8)
Example #13
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/mv_nonlinear')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    exp.data_loader = registry.mv_nonlinear()

    exp.model_factory = TfTrainEvalModelFactory(Model(name="PumondePFor"))

    exp.hyper_param_search = GridSearch([
        Categorical([64, 128, 256], name='xs'),
        Categorical([1, 2, 3], name='xn'),
        Categorical([64, 128, 256], name='hxys'),
        Categorical([1, 2, 3], name='hxyn'),
        Categorical([0, 16], name='hxyxs'),
        Categorical([64, 128, 256], name='xycs'),
        Categorical([1, 2, 3], name='xycn'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
    ])
Example #14
controller.initialize(0.)
learner = Q(epsilon=0.5)  # comes by default with explorer = EpsilonGreedyExplorer(epsilon=0.3, decay=0.9999)
agent = LearningAgent(controller, learner)
print('New Q agent created')

environment = SupplyChainEnv(normalized_prod_dataset[0:48],
                             normalized_price_dataset[0:48], discrete_battery,
                             discrete_vessel, discrete_elec_regime,
                             discrete_grid_price, discrete_prod,
                             discrete_buy_sell)

task = SupplyTaskMDP(environment)

experiment = Experiment(task, agent)
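# (PyBrain-style wiring: the Experiment mediates interaction between task and agent)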

#Training
num_epochs = 1000
horizon = 48  # at 24 interactions per day, 48 steps = a 2-day horizon

battery_states = []
h2_vessel_states = []
actions_electrolyzer = []
actions_grid = []
eval_rewards = []
for epoch in range(num_epochs):
    actions_electrolyzer_day = []
    actions_grid_day = []
    battery_states_day = []
    h2_vessel_states_day = []
Example #15
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/sin_normal')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    exp.data_loader = registry.sin_normal_noise()

    exp.model_factory = TfTrainEvalModelFactory(Model(name="MONDE_AR_MADE"))

    exp.hyper_param_search = GridSearch([
        Categorical(['sigm'], name='tr'),
        Categorical([32, 64, 128], name='sh'),
        Categorical([1, 2, 3], name='nh'),
        Categorical([16], name='xs'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
        Categorical(['AdamOptimizer'], name='opt'),
        Categorical([1e-4, 1e-3, 1e-2], name='opt_lr'),
    ])
Example #16
from skopt.space import Categorical

from conf import conf
from data.registry import fx
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.conf import tf_conf
from models.tensorflow.pumonde2 import Pumonde2
from models.tensorflow.tf_simple_train_eval import TfSimpleTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('classification/fx_3')

    conf.max_num_epochs = -1
    conf.num_workers = 6
    conf.visible_device_list = [0, 1]
    conf.shuffle_train_data = True
    conf.precision = "32"
    conf.eval_batch_size = 10000

    tf_conf.eval_throttle_secs = 0
    tf_conf.save_summary_epochs = 1
    tf_conf.save_checkpoints_epochs = 1
    tf_conf.check_nans = True
    tf_conf.start_eval_step = 1
    tf_conf.per_process_gpu_memory_fraction = 0.2

    symbols = ["AUDCAD", "AUDJPY", "AUDNZD", "EURCHF", "NZDCAD", "NZDJPY", "NZDUSD", "USDCHF", "USDJPY",
               "EURUSD", "GBPUSD", "USDCAD"]
Example #17
    @staticmethod
    def modify_date_time():
        """Utility method for experiment results folder saving"""
        result = str(datetime.datetime.today().replace(microsecond=0))
        for char in "-: ":
            result = result.replace(char, "_")
        return result


if __name__ == "__main__":
    DATA_DIR = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'resources'))
    experiment = Experiment(
        trans_file=os.path.join(DATA_DIR, 'test_hmm_transition_matrix.csv'),
        emis_file=os.path.join(DATA_DIR, 'hmm_observations_emission.csv'),
        n_train_samples=2,
        n_test_samples=2,
        sample_length=20,
        seed=1)
    experiment.initialize()
    possible_states = [
        list(x) for x in list(product(np.arange(4), np.arange(4)))
    ]
    filter = GeneticFilter(experiment, "a", possible_states)
    filter.run()
    viterbi = Viterbi(experiment, "v")
    viterbi.run()
    result = ResultAnalysis(experiment, [filter, viterbi])
    result.run()
Example #18
disaggregated_clairvoyant_rewards = [
    optimal_disaggregated_super_arm_value
    for _ in range(timesteps_context_generation)
]

############################################
## Define GPTS prior
############################################

GPTS_prior = lambda x: 3 * x
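# (assumption: prior mean for the GP over budget -> value, linear here)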

############################################
## Perform experiments
############################################

experiment = Experiment(original_environment, budget_discretization_steps,
                        daily_budget, GPTS_prior)

print("------ GPTS stationary ------")
(stationary_rewards, stationary_final_environment,
 stationary_final_subcampaign_algos, regression_errors_max,
 regression_errors_sum) = experiment.perform(timesteps_stationary)

print("------ GPTS context generation ------")
(context_generation_rewards, context_generation_final_environment,
 context_generation_final_subcampaign_algos) = experiment.perform(
     timesteps_context_generation, context_generation_rate)

############################################
## Plot results
############################################
Example #19
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('density/synthetic/uci_large/hepmass')

    conf.num_workers = 2
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 20000, '1': 20000}

    exp.data_loader = registry.hepmass(x_slice=slice(0), y_slice=slice(None))

    exp.model_factory = TfTrainEvalModelFactory(Model(name="MONDE_AR_BLOCK"))

    exp.hyper_param_search = GridSearch([
        Categorical([8, 10], name='nl'),
        Categorical([60, 80], name='nb'),
        Categorical(['tanh'], name='tr'),
        Categorical([128], name='bs'),
        Categorical([1], name='rs'),
        Categorical(['AdamOptimizer'], name='opt'),
        Categorical([1e-3], name='opt_lr'),
    ])
Example #20
import logging
from experiment.experiment import Experiment
from ms_logging.ms_logger_configurator import MsLoggerConfigurator
from scenarios.ac_internal_scenario import AcInternalScenario
from scenarios.dc_current_pulse_scenario import DcCurrentPulseScenario
from scenarios.dc_linear_sweep_scenario import DcLinearSweepScenario
from scenarios.ocv_test_scenario import OcvTestScenario
from scenarios.ten_seconds_discharge import TenSecondsDischargeScenario


if __name__ == '__main__':
    MsLoggerConfigurator().configure_logging(log_level=logging.INFO)
    log = logging.getLogger("test")
    recipes = [
        OcvTestScenario,
        AcInternalScenario,
        DcCurrentPulseScenario,
        DcLinearSweepScenario,
        TenSecondsDischargeScenario
    ]
    e = Experiment(recipes, 'dummy_experiment')
    e.run()
Example #21
skip_params = {'num_input_channels': config['maps_number'],
               'num_channels_down': [8, 16, 24],
               'num_channels_up': [8, 16, 24],
               'num_channels_skip': [4, 4, 4]}

pregrid_params = {'num_input_channels': config['maps_number'],
                  'num_output_channels': 2,
                  'num_channels_down': [8, 16, 24],
                  'num_channels_up': [8, 16, 24],
                  'num_channels_skip': [4, 4, 4]}

config['net_params'] = [skip_params, pregrid_params]

net = spatial.Net(input_depth=config['maps_number'], pic_size=config['input_size'], skip_args_main=skip_params,
                  skip_args_grid=pregrid_params).type(dtype)


video = generateSyntheticData()
target = preprocessTarget(video, config["video_length"], config["output_size"])
loss = SpatialLoss()
spatial_maps_generator = SpatialMapsGenerator(config["maps_number"])
batch_generator = BatchGenerator(target, spatial_maps_generator,
                                 config["input_size"], config["input_size"])

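# Optimize the network weights together with the learned spatial-map variables.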
parameters = list(net.parameters())
for var in spatial_maps_generator.spatial_variables.values():
    parameters.append(var)

experiment = Experiment(config, parameters, batch_generator, net, loss)
experiment.run()
Example #22
        ))

    # ロガーに追加
    # Attach the stream handler to the logger
    logger.addHandler(stream_handler)

    logger.info("Read Hyper Parameter file.")
    hp = HP(name="hartmann6_test",
            file_path="params/sample_param_hartmann6.yaml",
            logger=logger)

    pprint(hp.to_dict())

    # check hp
    logger.info("Check contents of Hyper Param.")
    hp.check_params(Hartmann6Trainer.required_params)

    # prepare and exec the experiment
    logger.info("Exec Experiment.")
    experiment = Experiment(hp, Hartmann6Trainer, logger=logger)
    experiment.run()

    best_result_x, best_result_y = experiment.get_best_result()
    best_result_x = (best_result_x.detach().numpy().tolist()
                     if best_result_x is not None else "No variable exists")
    best_result_y = best_result_y.detach().numpy().tolist()[0]
    logger.info(
        f'The best result of the experiment is '
        f'(x:{hp.get_var_dict_from_tensor(best_result_x)}, y:{best_result_y}).'
    )

Example #23
from skopt.space import Categorical

from conf import conf
from data import registry
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.model import Model
from models.tensorflow.tf_train_eval import TfTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('classification/fx')

    conf.num_workers = 4
    conf.visible_device_list = [0, 1]
    conf.eval_batch_size = {'0': 10000, '1': 10000}

    symbols = [
        "AUDCAD", "AUDJPY", "AUDNZD", "EURCHF", "NZDCAD", "NZDJPY", "NZDUSD",
        "USDCHF", "USDJPY", "EURUSD", "GBPUSD", "USDCAD"
    ]

    exp.data_loader = registry.fx(x_slice=slice(None, -3),
                                  y_slice=slice(-3, None),
                                  ar_terms=1,
                                  start='2018-01-01',
                                  end='2018-03-31',
                                  symbols=symbols,
                                  predicted_idx=None,
                                  resample="1min")
Example #24
import pytest
from experiment.experiment import Experiment

edit_distance_tests = [
    pytest.param([], [], 0, id='trivial'),
    pytest.param(['P', 'O', 'L', 'Y', 'N', 'O', 'M', 'I', 'A', 'L'],
                 ['E', 'X', 'P', 'O', 'N', 'E', 'N', 'T', 'I', 'A', 'L'],
                 6,
                 id='correct number of edits'),
    pytest.param(['P', 'O', 'L', 'Y', 'N', 'O', 'M', 'I', 'A', 'L'], [],
                 10,
                 id='tracks removing all elements'),
    pytest.param([], ['H', 'E', 'L', 'L', 'O'],
                 5,
                 id='works for only adding characters'),
    pytest.param(['H', 'E', 'L', 'L', 'O'], ['J', 'E', 'L', 'L', 'O'],
                 1,
                 id='works when only one replacement is needed'),
    pytest.param(['H', 'E', 'L', 'L', 'O'], ['A', 'B', 'C', 'D', 'E'],
                 5,
                 id='works when all letters need to be replaced')
]

exp = Experiment()


@pytest.mark.parametrize('M, N, result_ed', edit_distance_tests)
def test_edit_distance(M, N, result_ed):
    assert exp.edit_distance(M, N) == result_ed
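
The cases above pin down standard Levenshtein edit distance with unit-cost insert, delete, and substitute. Below is a minimal sketch of an edit_distance that satisfies all six tests; it is the textbook dynamic program, not necessarily this project's Experiment.edit_distance:

def edit_distance(M, N):
    # dp[i][j] = minimum edits to turn M[:i] into N[:j].
    m, n = len(M), len(N)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # delete everything in M[:i]
    for j in range(n + 1):
        dp[0][j] = j  # insert everything in N[:j]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if M[i - 1] == N[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # delete M[i-1]
                           dp[i][j - 1] + 1,         # insert N[j-1]
                           dp[i - 1][j - 1] + cost)  # match or substitute
    return dp[m][n]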
Example #25
from skopt.space import Categorical

from conf import conf
from data.data_utils import PercentileAnyGreaterLabelTransform
from data.registry import fx
from experiment.early_stop import EarlyStop
from experiment.experiment import Experiment
from experiment.hyper_param_opt import GridSearch
from models.tensorflow.conf import tf_conf
from models.tensorflow.nn_classifier import NNClassifier
from models.tensorflow.tf_simple_train_eval import TfSimpleTrainEvalModelFactory

if __name__ == '__main__':
    exp = Experiment('classification/fx_3')

    conf.max_num_epochs = -1
    conf.num_workers = 2
    conf.visible_device_list = [0, 1]
    conf.shuffle_train_data = True
    conf.precision = "32"
    conf.eval_batch_size = 10000

    tf_conf.eval_throttle_secs = 0
    tf_conf.save_summary_epochs = 1
    tf_conf.save_checkpoints_epochs = 1
    tf_conf.check_nans = True
    tf_conf.start_eval_step = 1
    tf_conf.per_process_gpu_memory_fraction = 0.2

    symbols = [
        "AUDCAD", "AUDJPY", "AUDNZD", "EURCHF", "NZDCAD", "NZDJPY", "NZDUSD",
        "USDCHF", "USDJPY", "EURUSD", "GBPUSD", "USDCAD"
    ]
Example #26
#!/usr/bin/env python3

import speech_recognition as sr
from pathlib import Path
import json
from os import path
from experiment.experiment import Experiment
from google_cloud import sample_long_running_recognize

exp = Experiment()

# use the audio file as the audio source
r = sr.Recognizer()

# recognize speech using Sphinx
try:
    # print("Sphinx thinks you said " + r.recognize_sphinx(audio))
    exp.run_experiment('CMUSphinx', r.recognize_sphinx, 'sound_bytes')
except sr.UnknownValueError:
    print("Sphinx could not understand audio")
except sr.RequestError as e:
    print("Sphinx error; {0}".format(e))

exp.run_experiment('GoogleCloud', sample_long_running_recognize, 'uri')