Example #1
    def __init__(self, module_choices):
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.from_demo_app = False
        ex = sacred.Experiment("capreolus")
        self.ex = ex
        ex.path = "capreolus"
        ex.captured_out_filter = sacred.utils.apply_backspaces_and_linefeeds

        self.module2cls = self.get_module_to_class(module_choices)

        # Now that the modules to load have been determined, we pass their configs to sacred and record
        # which module each config key should be associated with (based on the first module to set it).
        # Later modules can override a key's value, but the key remains associated with the initial module.
        # Modules are processed from lowest to highest precedence, since later values override earlier ones in sacred.
        self.parameters_to_module = self.get_parameters_to_module(ex)
        self.parameter_types = self.get_parameter_types(ex)

        self.parameters_to_module, self.parameter_types = self.get_parameters_to_module_for_missing_parameters(ex)
        self.parameters_to_module, self.parameter_types = self.get_parameters_to_module_for_feature_parameters(ex)
        self.module_to_parameters = self.get_module_to_parameters()
        self.check_for_invalid_keys()

        sacred.commands._format_config = functools.partial(
            _format_config_by_module, parameters_to_module=self.parameters_to_module
        )
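The comments in __init__ above depend on sacred applying config functions in the order they are added, with later values overriding earlier ones. A minimal standalone sketch of that precedence behavior, using illustrative names that are not part of capreolus:

import sacred

demo_ex = sacred.Experiment("precedence_demo")


@demo_ex.config
def base_config():
    lr = 0.001  # set by the first config function (lowest precedence)


@demo_ex.config
def override_config():
    lr = 0.01  # a later config function overrides the earlier value


@demo_ex.main
def main(lr):
    print(lr)  # prints 0.01


if __name__ == "__main__":
    demo_ex.run()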
Example #2
def test_get_parameters_to_module():
    pipeline = Pipeline({})
    ex = sacred.Experiment("capreolus")

    parameters_to_module = pipeline.get_parameters_to_module(ex)
    assert parameters_to_module == {
        "collection": "module",
        "index": "module",
        "searcher": "module",
        "benchmark": "module",
        "reranker": "module",
        "expid": "stateless",
        "earlystopping": "stateless",
        "predontrain": "stateless",
        "fold": "stateless",
        "maxdoclen": "pipeline",
        "maxqlen": "pipeline",
        "batch": "pipeline",
        "niters": "pipeline",
        "itersize": "pipeline",
        "gradacc": "pipeline",
        "lr": "pipeline",
        "seed": "pipeline",
        "sample": "pipeline",
        "softmaxloss": "pipeline",
        "dataparallel": "pipeline",
    }
Example #3
    def on_context_enter(self):
        super().on_context_enter()

        self.sacred_experiment = sacred.Experiment(self._prefix)
        self.sacred_experiment.observers.append(self._mongo_observer)

        def experiment_main(_run):
            self.sacred_run = _run
            self.run_object_available.set()
            self.done.wait()
            if self.stored_exception is not None:
                raise self.stored_exception

            return self.stored_result

        self.sacred_experiment.main(experiment_main)

        sacred_compatible_config_dict = {
            str(k).replace(".", "/"): v
            for k, v in configuration.dump_custom_dict().items()
        }

        if len(sacred_compatible_config_dict) > 0:
            self.sacred_experiment.add_config(**sacred_compatible_config_dict)

        self.run_object_available = threading.Event()
        self.done = threading.Event()
        self.sacred_thread = threading.Thread(target=self.sacred_experiment.run)
        self.sacred_thread.start()
        self.run_object_available.wait()

        instrumentation.update_run_id(self.sacred_run._id)
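Example #3 runs sacred on a background thread so the caller can hold the live _run object while work happens elsewhere. A condensed, standalone sketch of the same hand-off pattern; all names here are illustrative:

import threading

import sacred

bridge_ex = sacred.Experiment("bridge_demo")
run_ready = threading.Event()
done = threading.Event()
state = {}


def experiment_main(_run):
    state["run"] = _run  # hand the live Run object to the caller
    run_ready.set()
    done.wait()  # block until the caller signals completion
    return state.get("result")


bridge_ex.main(experiment_main)

if __name__ == "__main__":
    thread = threading.Thread(target=bridge_ex.run)
    thread.start()
    run_ready.wait()
    state["run"].log_scalar("progress", 1.0)  # caller uses the run directly
    state["result"] = "ok"
    done.set()
    thread.join()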
Example #4
def sacred_init(name):
    ex = sacred.Experiment(name)
    ex.path = name
    sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append("CUDA_VISIBLE_DEVICES")
    sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append("USER")
    ex.captured_out_filter = sacred.utils.apply_backspaces_and_linefeeds

    # manually parse args to find model name so we can run the model-specific config function
    pipeline_defaults = ex.config(pipeline_config)()
    mname = pipeline_defaults["model"]
    for arg in sys.argv:
        if arg.startswith("model="):
            mname = arg[len("model=") :]

    model_cls = _load_model(mname)
    model_defaults = ex.config(model_cls.config)()
    defaults = pipeline_defaults.copy()
    for k, v in model_defaults.items():
        assert k not in defaults, "pipeline params overlap with model params: {%s}" % (k)
        defaults[k] = v
    types = {k: forced_types.get(type(v), type(v)) for k, v in defaults.items()}

    ex.config(pipeline_config)
    ex.config(model_cls.config)

    return ex, model_cls, defaults, types
Example #5
def test_check_for_invalid_keys():
    pipeline = Pipeline({})
    ex = sacred.Experiment("capreolus")
    pipeline.check_for_invalid_keys()

    pipeline.parameters_to_module["foo_bar"] = "reranker"

    with pytest.raises(ValueError):
        pipeline.check_for_invalid_keys()
Example #6
import sacred
from sacred import SETTINGS
from sacred.observers import FileStorageObserver, MongoObserver


def get_sacred_experiment(name, observer='mongo', capture_output=True):
    ex = sacred.Experiment(name)
    if observer == 'mongo':
        ex.observers.append(
            MongoObserver(url='mongodb://{{cookiecutter.mongo_user}}:'
                          '{{cookiecutter.mongo_password}}@127.0.0.1:27017',
                          db_name='sacred'))
    else:
        ex.observers.append(FileStorageObserver('data/sacred/'))

    if not capture_output:
        SETTINGS.CAPTURE_MODE = 'no'
    return ex
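A short usage sketch for the factory above; any observer value other than 'mongo' selects the FileStorageObserver branch, avoiding the templated Mongo credentials:

# Hypothetical usage of the get_sacred_experiment helper defined above.
ex = get_sacred_experiment('demo', observer='file', capture_output=False)


@ex.automain
def main():
    print('run artifacts are stored under data/sacred/')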
Example #7
def test_get_parameters_to_module_including_missing_and_extractors():
    """
    Calls Pipeline.__init__(), which in turn calls:
        1. self.get_parameters_to_module
        2. self.get_parameters_to_module_for_missing_parameters
        3. self.get_parameters_to_module_for_feature_parameters
    """
    pipeline = Pipeline({})
    ex = sacred.Experiment("capreolus")

    # parameters_to_module, parameter_types = pipeline.get_parameters_to_module_for_missing_parameters(ex)

    assert pipeline.parameters_to_module == {
        "collection": "module",
        "benchmark": "module",
        "reranker": "module",
        "expid": "stateless",
        "predontrain": "stateless",
        "earlystopping": "stateless",
        "maxdoclen": "pipeline",
        "maxqlen": "pipeline",
        "batch": "pipeline",
        "niters": "pipeline",
        "itersize": "pipeline",
        "gradacc": "pipeline",
        "lr": "pipeline",
        "seed": "pipeline",
        "sample": "pipeline",
        "softmaxloss": "pipeline",
        "dataparallel": "pipeline",
        # AnseriniIndex specific config
        "stemmer": "index",
        "indexstops": "index",
        # BM25Grid specific config
        "index": "module",
        # Robust04Benchmark specific config
        "fold": "stateless",
        "searcher": "module",
        "rundocsonly": "benchmark",
        # PACRR specific config
        "mingram": "reranker",
        "maxgram": "reranker",
        "nfilters": "reranker",
        "idf": "reranker",
        "kmax": "reranker",
        "combine": "reranker",
        "nonlinearity": "reranker",
        # EmbedText specific config
        "embeddings": "extractor",
        "keepstops": "extractor",
    }
Example #8
    def _create_experiment(self, experiment_name, interactive=False):
        """ Create a sacred.Experiment containing config options for the chosen modules (and their dependencies) """

        chosen = self._extract_choices_from_argv(self.rewritten_args)
        self.rewritten_args = self._rewrite_argv_for_ingredients(self.rewritten_args)

        ingredients, ingredient_commands = self._create_module_ingredients(chosen)
        # for ingredient in ingredients:
        #    print_ingredient(ingredient)

        self.ex = sacred.Experiment(experiment_name, ingredients=ingredients, interactive=interactive)

        self.ex.ingredient_lookup = {}

        def _traverse_and_add_ingredients(children):
            for child in children:
                self.ex.ingredient_lookup[child.path] = child
                _traverse_and_add_ingredients(child.ingredients)

        _traverse_and_add_ingredients(self.ex.ingredients)

        # add task commands
        for command_name, command_func in self.task.commands.items():
            partial_func = partial(self._command_wrapper, command_func=command_func)
            partial_func.__name__ = command_name
            captured_func = self.ex.capture(partial_func)
            captured_func.unobserved = False  # TODO check
            self.ex.commands[command_name] = captured_func

        # add ingredient commands, which are subtly different from experiment-level commands (tasks).
        # We capture the function using the experiment config (as before), however,
        # we add the command name to the ingredient rather than the experiment so that sacred parses it correctly.
        for command_name, command_func, path, ingredient in ingredient_commands:
            partial_func = partial(self._ingredient_command_wrapper, command_func=command_func, path=path)
            partial_func.__name__ = command_name
            captured_func = self.ex.capture(partial_func)
            captured_func.unobserved = False  # TODO check
            ingredient.commands[command_name] = captured_func

        return self.ex
Example #9
def test_get_parameter_types(mocker):
    pipeline = Pipeline({})
    ex = sacred.Experiment("capreolus")

    def mock_config(method_that_generates_input_dict):
        input_dict = method_that_generates_input_dict()

        # Just messing with the types to make sure that get_parameter_types does what it should
        input_dict.update({"index": None, "niters": True})
        return lambda: input_dict

    mocker.patch.object(ex, "config", mock_config)
    parameter_types = pipeline.get_parameter_types(ex)
    assert parameter_types == {
        "pipeline": type("string"),  # "pipeline" key is added by the method
        "collection": type("robust04"),
        "earlystopping": forced_types[type(True)],
        "index": forced_types[type(None)],
        "searcher": type("bm25grid"),
        "benchmark": type("robust04.title.wsdm20demo"),
        "reranker": type("PACRR"),
        "expid": type("debug"),
        "predontrain": forced_types[type(True)],
        "fold": type("s1"),
        "maxdoclen": type(800),
        "maxqlen": type(4),
        "batch": type(32),
        "niters": forced_types[type(True)],
        "itersize": type(4096),
        "gradacc": type(1),
        "lr": type(0.001),
        "seed": type(123_456),
        "sample": type("simple"),
        "softmaxloss": forced_types[type(True)],
        "dataparallel": type("none"),
    }
Example #10
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import sacred
import random
import numpy as np
import shutil
import sys
import nexus_pytorch.evaluation.autoencoders.mnist.ingredients as ingredients
import torchvision
from nexus_pytorch.scenarios.standard_dataset.standard_dataset import StandardDataset

ex = sacred.Experiment('mnist_autoencoder_all',
                       ingredients=[
                           ingredients.gpu_ingredient,
                           ingredients.training_ingredient,
                           ingredients.model_debug_ingredient
                       ])


@ex.capture
def log_dir_path(folder, _config):
    return os.path.join('.', f'results/log_{_config["seed"]}', folder)


@ex.capture
def exp_dir_path(folder, _config):
    return os.path.join('.', folder)


def save_checkpoint(state, is_best, folder='./'):
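The @ex.capture functions above receive _config automatically; more generally, any captured argument that is not passed explicitly is filled in from the experiment config. A minimal standalone sketch with illustrative names:

import sacred

capture_ex = sacred.Experiment("capture_demo")


@capture_ex.config
def cfg():
    threshold = 0.5


@capture_ex.capture
def report(message, threshold):
    # `threshold` is injected from the config when not passed explicitly
    print(message, threshold)


@capture_ex.main
def main():
    report("threshold is")  # prints: threshold is 0.5


if __name__ == "__main__":
    capture_ex.run()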
Example #11
from typing import Any, Dict, Iterable, Mapping, Optional

from imitation.util import util
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sacred

from evaluating_rewards import serialize
from evaluating_rewards.analysis import gridworld_rewards, stylesheets, visualize
from evaluating_rewards.analysis.distances import heatmaps, reward_masks
from evaluating_rewards.analysis.reward_figures import gridworld_reward_heatmap
from evaluating_rewards.distances import tabular
from evaluating_rewards.scripts import script_utils

plot_gridworld_heatmap_ex = sacred.Experiment("plot_gridworld_heatmap")


@plot_gridworld_heatmap_ex.config
def default_config():
    """Default configuration values."""
    normalize = False

    # Dataset parameters
    log_root = serialize.get_output_dir()  # where results are read from/written to
    discount = 0.99
    reward_subset = None

    # Figure parameters
    kind = "npec"
Example #12
import sacred
from nara_wpe.utils import stft as _stft, istft as _istft
from pb_bss.extraction import mask_module
from pb_bss.extraction import (
    apply_beamforming_vector,
    get_power_spectral_density_matrix,
    get_single_source_bf_vector,
)
from pb_bss.evaluation.wrapper import OutputMetrics
from pb_bss.distribution import CACGMMTrainer
from pb_bss import initializer
from pb_bss.permutation_alignment import DHTVPermutationAlignment

from sms_wsj.database import SmsWsj, AudioReader
from sms_wsj.io import dump_audio, dump_json

experiment = sacred.Experiment('Ref systems')


@experiment.config
def config():
    dataset = ['cv_dev93', 'test_eval92']  # or 'test_eval92'
    Observation = None

    stft_size = 512
    stft_shift = 128
    stft_window_length = None
    stft_window = 'hann'


@experiment.named_config
def observation():
Example #13
import os
import torch
import sacred
from torch.autograd import Variable
import torch.nn.functional as F
import nexus_pytorch.evaluation.preliminary.dropout.ingredients as ingredients
import nexus_pytorch.evaluation.preliminary.dropout.model.training_utils as utils
from torchvision.utils import save_image
from tqdm import tqdm

ex = sacred.Experiment(
    'dropout_nexus_generation',
    ingredients=[
        ingredients.training_ingredient, ingredients.model_ingredient,
        ingredients.model_debug_ingredient, ingredients.gpu_ingredient,
        ingredients.evaluation_ingredient,
        ingredients.generation_ingredient
    ])


@ex.capture
def log_dir_path(folder, _config):
    return os.path.join('.', f'results/log_{_config["seed"]}/', folder)

@ex.capture
def exp_dir_path(folder, _config):
    return os.path.join('.', folder)



@ex.capture
Example #14
import os
import torch
import sacred
import random
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
import nexus_pytorch.evaluation.preliminary.hierarchy.s_mvae.ingredients as ingredients
from nexus_pytorch.scenarios.multimodal_dataset.multimodal_dataset import MultimodalDataset
import nexus_pytorch.evaluation.preliminary.hierarchy.s_mvae.model.training_utils as t_utils
import nexus_pytorch.evaluation.preliminary.hierarchy.s_mvae.model.evaluation_utils as e_utils
from nexus_pytorch.evaluation.autoencoders.image.train import Image_AE

ex = sacred.Experiment('hierarchy_s_mvae_evaluate_rank',
                       ingredients=[
                           ingredients.training_ingredient,
                           ingredients.model_ingredient,
                           ingredients.model_debug_ingredient,
                           ingredients.gpu_ingredient,
                           ingredients.evaluation_ingredient
                       ])


@ex.capture
def log_dir_path(folder, _config):
    return os.path.join('.', f'results/log_{_config["seed"]}/', folder)


@ex.capture
def exp_dir_path(folder, _config):
    return os.path.join('.', folder)

Example #15
import os
import torch
import sacred
from torch.autograd import Variable
import torch.nn.functional as F
import nexus_pytorch.evaluation.standard.fashion.mvae.ingredients as ingredients
import nexus_pytorch.evaluation.standard.fashion.mvae.model.training_utils as utils
from torchvision.utils import save_image
from tqdm import tqdm

ex = sacred.Experiment(
    'fashion_mvae_generation',
    ingredients=[
        ingredients.training_ingredient, ingredients.model_ingredient,
        ingredients.model_debug_ingredient, ingredients.gpu_ingredient,
        ingredients.evaluation_ingredient, ingredients.generation_ingredient
    ])


@ex.capture
def log_dir_path(folder, _config):
    return os.path.join('.', f'results/log_{_config["seed"]}/', folder)


@ex.capture
def exp_dir_path(folder, _config):
    return os.path.join('.', folder)


@ex.capture
def sample(model, _config, _run):
Example #16
import sys, time, os, itertools, importlib, copy
from common_utils import read_qrel, config_logger
from config import train_test_years, file2name, qrelfdir
from eval_utils import read_run, jud_label, label_jud, year_label_jud, get_epoch_from_val, get_model_param
from year_2_qids import qid_year, year_qids, get_qrelf
import numpy as np, matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pandas import DataFrame as df
import logging, warnings

import sacred
from sacred.utils import apply_backspaces_and_linefeeds

ex = sacred.Experiment('metrics')
ex.path = 'metrics'  # name of the experiment
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('USER')
ex.captured_out_filter = apply_backspaces_and_linefeeds

from config import default_params
default_params = ex.config(default_params)


def create_docpairs(qid_cwid_label, test_qids, qid_year):
    docpairs = {}
    for qid in qid_cwid_label:
        assert qid in test_qids
        year = qid_year[qid]
        docpairs.setdefault(year, {})
Example #17
import logging
import pickle
from typing import Any, Dict, Iterable, Mapping

from imitation.data import rollout
from imitation.util import util as imit_util
import numpy as np
import sacred

from evaluating_rewards import datasets
from evaluating_rewards.analysis import util
from evaluating_rewards.distances import common_config, tabular
from evaluating_rewards.rewards import base
from evaluating_rewards.scripts import script_utils
from evaluating_rewards.scripts.distances import common

erc_distance_ex = sacred.Experiment("erc_distance")
logger = logging.getLogger("evaluating_rewards.scripts.distances.erc")

common.make_config(erc_distance_ex)


@erc_distance_ex.config
def default_config(env_name):
    """Default configuration values."""
    computation_kind = "sample"  # either "sample" or "mesh"
    corr_kind = "pearson"  # either "pearson" or "spearman"
    discount = 0.99  # discount rate when calculating return
    n_episodes = 1024  # number of episodes to compute correlation w.r.t.

    # n_samples and n_mean_samples only applicable for sample approach
    trajectory_factory = datasets.trajectory_factory_from_serialized_policy
Example #18
import sacred
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 10})
import matplotlib.pyplot as plt
from keras.utils import plot_model
import keras.backend as K
import utils.select_doc_pos
from sacred.utils import apply_backspaces_and_linefeeds
from utils.utils import load_test_data, DumpWeight, dump_modelplot, pred_label, trunc_dir
from utils.config import treceval, perlf, rawdoc_mat_dir, file2name, default_params, qrelfdir
from utils.year_2_qids import get_train_qids, get_qrelf
from utils.common_utils import read_qrel, SoftFailure
from utils.ngram_nfilter import get_ngram_nfilter

K.get_session()
ex = sacred.Experiment('predict')
ex.path = 'predict'
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('USER')
ex.captured_out_filter = apply_backspaces_and_linefeeds

from utils.config import default_params
default_params = ex.config(default_params)


def plot_curve(epoch_err_ndcg_loss, outdir, plot_id, p):
    epoches, errs, ndcgs, maps, losses = zip(*epoch_err_ndcg_loss)
    losses = [loss/10000.0 for loss in losses]
    fig, ax = plt.subplots()
    rects1 = ax.plot(epoches, ndcgs, 'b--')
    rects2 = ax.plot(epoches, maps, color='r')
Example #19
    # The id of this experiment is stored in the magical _run object we get from the
    # decorator.
    output_dir = '{}/{}'.format(root, run_id)
    if os.path.exists(output_dir):
        # Directory may already exist if run_id is None (in case of an unobserved
        # test-run)
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)

    # Tell the experiment that this output dir is also used for tensorflow summaries
    experiment.info.setdefault("tensorflow", {}).setdefault("logdirs", [])\
        .append(output_dir)
    return output_dir


ex = sc.Experiment()
# reduce output of progress bars
ex.captured_out_filter = apply_backspaces_and_linefeeds
ex.observers.append(get_observer())


@ex.main
def main(modelname, net_config, gan_config, disc_config, datasetSem,
         datasetGAN, datasetDisc, starting_weights, flag_measure, output_mat,
         flag_entropy, thresholds, start, _run):
    for key in gan_config:
        setattr(a, key, gan_config[key])
    for key in disc_config:
        setattr(b, key, disc_config[key])
    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)
Example #20
"""Script of the VarIDEC model."""

import uuid
from datetime import date
import tensorflow as tf
from tqdm import tqdm
import sacred
from sacred.stflow import LogFileWriter
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn import metrics
from VarIDEC_model import VarIDEC
from utils import cluster_purity

ex = sacred.Experiment("hyperopt")
ex.observers.append(
    sacred.observers.FileStorageObserver.create("../sacred_runs"))
ex.captured_out_filter = sacred.utils.apply_backspaces_and_linefeeds


@ex.config
def ex_config():
    """Sacred configuration for the experiment.
    Params:
        num_epochs (int): Number of training epochs.
        patience (int): Patience for the early stopping.
        batch_size (int): Batch size for the training.
        latent_dim (int): Dimensionality of the VarIDEC's latent space.
        num_clusters (int): Number of clusters.
        learning_rate (float): Learning rate for the optimization.
Example #21
in case of a change in the database location by using
the old sms_wsj.json as intermediate json.
However, this script does not change the speaker
and utterance combination, log weights, etc. which are
specified in the intermediate json.

"""

from sms_wsj.database.write_files import check_files, KEY_MAPPER
from sms_wsj.database.utils import _example_id_to_rng
import json
import sacred
from pathlib import Path
from lazy_dataset.database import JsonDatabase

ex = sacred.Experiment('Write SMS-WSJ json after wav files are written')


def create_json(db_dir, intermediate_json_path, write_all, snr_range=(20, 30)):
    db = JsonDatabase(intermediate_json_path)
    json_dict = dict(datasets=dict())
    database_dict = db.data['datasets']

    if write_all:
        key_mapper = KEY_MAPPER
    else:
        key_mapper = {'observation': 'observation'}

    for dataset_name, dataset in database_dict.items():
        dataset_dict = dict()
        for ex_id, ex in dataset.items():
Example #22
import sacred

from graphrl.agents.random_agent import RandomAgentParams
from graphrl.environments.pacman.pacman_gym import PacmanEnv
from graphrl.environments.wrappers import RenderEnv

ex = sacred.Experiment('test_pacman_random')


@ex.config
def config():
    env = {
        'layout_file': 'assets/pacman/mediumClassic',
        'ghost_type': 'random',
        'render': True
    }


@ex.capture
def build_env(env):
    render = env.pop('render')
    env = PacmanEnv(**env)
    if render:
        env = RenderEnv(env)
    return env


@ex.automain
def main(_run):
    env = build_env()
    agent_params = RandomAgentParams()
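Configs like the nested env dict above can be overridden per run, either from the CLI or programmatically. A small standalone sketch of both forms, with illustrative names:

import sacred

override_ex = sacred.Experiment("override_demo")


@override_ex.config
def config():
    env = {"render": True}


@override_ex.main
def main(env):
    print(env["render"])


if __name__ == "__main__":
    # programmatic equivalent of the CLI form: python demo.py with env.render=False
    override_ex.run(config_updates={"env": {"render": False}})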
Example #23
import os
import torch
import sacred
import random
import numpy as np
from tqdm import tqdm
from nexus_pytorch.evaluation.classifiers.sound.train import Sound_Classifier
import nexus_pytorch.evaluation.multimodal.mmvae.ingredients as ingredients
from nexus_pytorch.scenarios.multimodal_dataset.multimodal_dataset import MultimodalDataset
import nexus_pytorch.evaluation.multimodal.mmvae.model.training_utils as utils

ex = sacred.Experiment('multimodal_mmvae_evaluate_accuracy_sound',
                       ingredients=[
                           ingredients.training_ingredient,
                           ingredients.model_ingredient,
                           ingredients.model_debug_ingredient,
                           ingredients.gpu_ingredient,
                           ingredients.evaluation_ingredient
                       ])


@ex.capture
def log_dir_path(folder, _config):
    return os.path.join('.', f'results/log_{_config["seed"]}/', folder)


@ex.capture
def exp_dir_path(folder, _config):
    return os.path.join('.', folder)

Example #24
import json
import pickle
import random
import time

import numpy as np
import sklearn.metrics
import tensorflow as tf
import keras.backend as K
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.layers import (Dense, Embedding, Input, TimeDistributed, Activation,
                          Masking, Convolution1D, MaxPooling1D, Flatten,
                          AveragePooling1D, GlobalAveragePooling1D)

import sacred
from sacred.utils import apply_backspaces_and_linefeeds
ex = sacred.Experiment('train')
ex.path = 'train'
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('USER')
ex.captured_out_filter = apply_backspaces_and_linefeeds

from redutil import datagen, config, ValMetrics
config = ex.config(config)


def build_model(p):
    """ build a Keras model using the parameters in p """
    max_posts = p['max_posts']
    max_length = p['max_length']
    filters = p['filters']
    filtlen = p['filtlen']
Example #25
import os
import io
import fnmatch
import shutil
import subprocess
from pathlib import Path

import numpy as np
import sacred
import soundfile
import warnings

import dlp_mpi

ex = sacred.Experiment('Write WSJ waves')

kaldi_root = Path(os.environ['KALDI_ROOT'])


def read_nist_wsj(path, expected_sample_rate=16000):
    """
    Converts a nist/sphere file of wsj and reads it with soundfile.

    :param path: file path to the audio file.
    :param expected_sample_rate: sample rate the file is expected to have.
    :return:
    """
    cmd = [
        kaldi_root / 'tools' / 'sph2pipe_v2.5' / 'sph2pipe',
        '-f',
Example #26
import logging
import os.path as osp
import pickle
import re
import tempfile
from typing import Any, Dict

import numpy as np
import pandas as pd
import ray
import sacred
from sacred.observers import FileStorageObserver
from sklearn.manifold import TSNE

from aprl.common import utils

fit_model_ex = sacred.Experiment("tsne_fit_model")
logger = logging.getLogger("aprl.activations.tsne.fit_model")


@fit_model_ex.config
def base_config():
    ray_server = None  # by default will launch a server
    init_kwargs = {}  # passed to ray.init()
    activation_dir = None
    output_root = None
    data_type = "ff_policy"
    num_components = 2
    num_observations = None
    seed = 0
    perplexity = 250
    _ = locals()  # quieten flake8 unused variable warning
Example #27
"""Configuration settings for train_dagger, training DAgger from synthetic demos."""

import os

import sacred
import torch as th

from imitation.scripts.common import common
from imitation.scripts.common import demonstrations as demos_common
from imitation.scripts.common import train

train_imitation_ex = sacred.Experiment(
    "train_imitation",
    ingredients=[
        common.common_ingredient,
        demos_common.demonstrations_ingredient,
        train.train_ingredient,
    ],
)


@train_imitation_ex.config
def config():
    bc_kwargs = dict(
        batch_size=32,
        l2_weight=3e-5,  # L2 regularization weight
        optimizer_cls=th.optim.Adam,
        optimizer_kwargs=dict(lr=4e-4, ),
    )
    bc_train_kwargs = dict(
        n_epochs=None,  # Number of BC epochs per DAgger training round
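Several experiments above pass ingredients=[...]; each ingredient's config is namespaced under the ingredient's name inside the experiment config. A minimal standalone sketch with illustrative names:

import sacred

data_ingredient = sacred.Ingredient("data")


@data_ingredient.config
def data_config():
    path = "/tmp/data"  # hypothetical default path


ingredient_ex = sacred.Experiment("ingredient_demo", ingredients=[data_ingredient])


@ingredient_ex.main
def main(data):
    # the ingredient's config appears under the key "data"
    print(data["path"])


if __name__ == "__main__":
    ingredient_ex.run()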
Example #28
import os
import torch
import sacred
import random
import numpy as np
from tqdm import tqdm
from nexus_pytorch.evaluation.classifiers.trajectory.train import Trajectory_Classifier
import nexus_pytorch.evaluation.multimodal.nexus.ingredients as ingredients
from nexus_pytorch.scenarios.multimodal_dataset.multimodal_dataset import MultimodalDataset
import nexus_pytorch.evaluation.multimodal.nexus.model.training_utils as utils

ex = sacred.Experiment(
    'multimodal_nexus_evaluate_accuracy_trajectory',
    ingredients=[
        ingredients.training_ingredient, ingredients.model_ingredient,
        ingredients.model_debug_ingredient, ingredients.gpu_ingredient,
        ingredients.evaluation_ingredient
    ])


@ex.capture
def log_dir_path(folder, _config):
    return os.path.join('.', f'results/log_{_config["seed"]}/', folder)

@ex.capture
def exp_dir_path(folder, _config):
    return os.path.join('.', folder)

def get_model_by_config(config, cuda):
    model_evaluation_config = config['evaluation']
    model, _ = utils.load_checkpoint(model_evaluation_config['file_local'], cuda)
Example #29
import logging
from typing import Any, Callable, Mapping, Tuple

import pandas as pd
import sacred
import seaborn as sns

from evaluating_rewards import serialize
from evaluating_rewards.analysis import results, stylesheets, visualize
from evaluating_rewards.analysis.distances import aggregated
from evaluating_rewards.distances import common_config
from evaluating_rewards.scripts import script_utils
from evaluating_rewards.scripts.distances import epic, erc, npec, rollout_return

Vals = Mapping[Tuple[str, str], Any]
ValsFiltered = Mapping[str, Mapping[Tuple[str, str], pd.Series]]
DistanceFnMapping = Mapping[str, Callable[..., sacred.run.Run]]

combined_distances_ex = sacred.Experiment("combined_distances")
logger = logging.getLogger(
    "evaluating_rewards.scripts.pipeline.combined_distances")

DISTANCE_EXS = {
    "epic": epic.epic_distance_ex,
    "erc": erc.erc_distance_ex,
    "npec": npec.npec_distance_ex,
    "rl": rollout_return.rollout_distance_ex,
}


@combined_distances_ex.config
def default_config():
    """Default configuration for combined_distances."""
    vals_paths = []
Example #30
import os

import sacred

from imitation.scripts.config.common import DEFAULT_INIT_RL_KWARGS
from imitation.util import util

expert_demos_ex = sacred.Experiment("expert_demos")


@expert_demos_ex.config
def expert_demos_defaults():
    env_name = "CartPole-v1"  # The gym.Env name
    total_timesteps = int(1e6)  # Number of training timesteps in model.learn()
    num_vec = 8  # Number of environments in VecEnv
    parallel = True  # Use SubprocVecEnv (generally faster if num_vec>1)
    normalize = True  # Use VecNormalize
    normalize_kwargs = dict()  # kwargs for `VecNormalize`
    max_episode_steps = None  # Set to positive int to limit episode horizons
    n_episodes_eval = 50  # Num of episodes for final ep reward mean evaluation

    init_rl_kwargs = dict(DEFAULT_INIT_RL_KWARGS)

    # If specified, overrides the ground-truth environment reward
    reward_type = None  # override reward type
    reward_path = None  # override reward path

    rollout_save_interval = -1  # Num updates between saves (<=0 disables)
    rollout_save_final = True  # If True, save after training is finished.
    rollout_save_n_timesteps = None  # Min timesteps saved per file, optional.
    rollout_save_n_episodes = None  # Num episodes saved per file, optional.