Example #1
from sacred import Ingredient
from .init import init_model

model = Ingredient('model')
init_model = model.capture(init_model)
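
Once captured, init_model draws any argument you omit from the 'model'
ingredient's config. A minimal sketch of the wiring, assuming the module above
is importable as model_ingredient and that init_model takes a hypothetical
hidden_size parameter:

from sacred import Experiment

from model_ingredient import model, init_model

ex = Experiment('train', ingredients=[model])


@model.config
def model_config():
    hidden_size = 128  # hypothetical init_model parameter, injected by name


@ex.automain
def main():
    net = init_model()  # hidden_size is filled in from the ingredient config
    print(net)
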
    """

    :param builder_type: str. Defaults to 'energy_builder'. Possible values:
        'energy_builder', 'force_builder', 'cartesian_builder',
        'siamese_builder', 'classifier_builder'.
    :param kwargs: kwargs passed directly to Builder classes
    :return: Builder object specified by 'builder_type'
    """
    kwargs["radial_factory"] = get_radial_factory(
        kwargs.get("radial_factory", "multi_dense"), kwargs.get("radial_kwargs", None)
    )
    if builder_type == "energy_builder":
        return EnergyBuilder(**kwargs)
    elif builder_type == "force_builder":
        return ForceBuilder(**kwargs)
    elif builder_type == "cartesian_builder":
        return CartesianBuilder(**kwargs)
    elif builder_type == "siamese_builder":
        return SiameseBuilder(**kwargs)
    elif builder_type == "classifier_builder":
        return ClassifierBuilder(**kwargs)
    else:
        raise ValueError(
            "arg `builder_type` had value: {} which is not supported. Check "
            "ingredient docs for supported string identifiers".format(builder_type)
        )
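
The chain above is a straight dispatch; the same behaviour can be written as a
lookup table that keeps the supported identifiers and the error message in one
place. A sketch using the builder classes from the original (the
radial_factory preprocessing is omitted for brevity):

BUILDERS = {
    "energy_builder": EnergyBuilder,
    "force_builder": ForceBuilder,
    "cartesian_builder": CartesianBuilder,
    "siamese_builder": SiameseBuilder,
    "classifier_builder": ClassifierBuilder,
}


def get_builder_from_table(builder_type="energy_builder", **kwargs):
    # Unknown identifiers raise ValueError, as in the if/elif version
    if builder_type not in BUILDERS:
        raise ValueError(
            "arg `builder_type` had value: {} which is not supported. "
            "Supported: {}".format(builder_type, sorted(BUILDERS))
        )
    return BUILDERS[builder_type](**kwargs)
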


# ===== Logger Ingredient(s) ===== #
logger_ingredient = Ingredient("metric_logger")
get_logger = logger_ingredient.capture(SacredMetricLogger)
Example #3
bxent_loss = {'name': 'bxent', 'params': {}}
xent_loss = {'name': 'xent', 'params': {}}
accuracy = {'name': 'acc',
            'params': {'output_transform': thresholded_output_transform}}
mse_loss = {'name': 'mse', 'params': {}}
kl_div = {'name': 'kl-div', 'params': {}}


training = Ingredient('training')
training.add_named_config('vae', loss=vae_loss,
                          metrics=[reconstruction_loss, kl_div])
training.add_named_config('bvae', loss=bvae_loss,
                          metrics=[reconstruction_loss, kl_div])
training.add_named_config('capconst', loss=cap_const,
                          metrics=[reconstruction_loss, kl_div])
training.add_named_config('2afc', loss=bxent_loss,
                          metrics=[bxent_loss, accuracy])
training.add_named_config('mafc', loss=xent_loss,
                          metrics=[xent_loss, accuracy])
training.add_named_config('recons_nll', loss=reconstruction_loss,
                          metrics=[reconstruction_loss])


init_optimizer = training.capture(init_optimizer)


@training.capture
def init_metrics(loss, metrics):
    metrics = list(map(dict.copy, metrics))
    return _init_metrics(loss, metrics)
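
Each add_named_config entry becomes selectable when the experiment is
launched. A minimal sketch, assuming the snippet above is importable as
training_ingredient (vae_loss, reconstruction_loss, etc. are defined in the
truncated part of the original):

from sacred import Experiment

from training_ingredient import training, init_metrics

ex = Experiment('train', ingredients=[training])


@ex.automain
def main():
    metrics = init_metrics()  # loss and metrics come from the active config
    print(metrics)

# Select a named config on the command line, e.g.:
#   python train.py with training.vae
#   python train.py with training.recons_nll
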
Example #4

import sys
from sacred import Ingredient

if sys.path[0] != '../src':
    sys.path.insert(0, '../src')

from model.rnn import init_rnn

model = Ingredient('model')
init_rnn = model.capture(init_rnn)


@model.capture
def init_model(device):
    model = init_rnn()
    return model.to(device=device)


@model.command(unobserved=True)
def show():
    model = init_model()
    print(model)
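
Because show is registered with unobserved=True, invoking it never records a
run with any observer. A sketch of exposing it through an experiment, assuming
the module above is importable as model_ingredient:

from sacred import Experiment

from model_ingredient import model

ex = Experiment('main', ingredients=[model])


@ex.main
def main():
    pass


if __name__ == '__main__':
    # `python main.py model.show` prints the model without creating a run
    ex.run_commandline()
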
Example #5
from sacred import Ingredient
from .setup import setup_training, set_seed_and_device
from .engine import run_training

training = Ingredient('training')
set_seed_and_device = training.capture(set_seed_and_device)
setup_training = training.capture(setup_training)
run_training = training.capture(run_training)
Example #6

import sys
from sacred import Ingredient
from ignite.engine import Events

if sys.path[0] != '../src':
    sys.path.insert(0, '../src')

from training.handlers import *
from training.loss import init_metrics
from training.optimizer import init_optimizer
from training.engine import create_rnn_trainer, create_rnn_evaluator

training = Ingredient('training')

init_metrics = training.capture(init_metrics)
init_optimizer = training.capture(init_optimizer)
create_rnn_trainer = training.capture(create_rnn_trainer)
create_rnn_evaluator = training.capture(create_rnn_evaluator)
Example #7
from typing import Callable
from warnings import warn

from sacred import Experiment, Ingredient
from sklearn.base import BaseEstimator
from sklearn.utils import Bunch

# _benchmark_one and _benchmark_partitions are private helpers defined
# elsewhere in the original module.

def experiment(
    dataset: Callable[..., Bunch],
    estimator: Callable[..., BaseEstimator],
    *,
    save_train: bool = False,
) -> Experiment:
    """
    Prepare a Scikit-learn experiment as a Sacred experiment.

    Prepare a Scikit-learn experiment indicating a dataset and an estimator and
    return it as a Sacred experiment.

    Parameters
    ----------
    dataset : function
        Dataset fetch function. Might receive any argument. Must return a
        :external:obj:`Bunch` with ``data``, ``target`` (might be ``None``),
        ``inner_cv`` (might be ``None``) and ``outer_cv``
        (might be ``None``).
    estimator : function
        Estimator initialization function. Might receive any keyword argument.
        Must return an initialized sklearn-compatible estimator.
    save_train : bool, optional
        If ``True``, also record results on the training data. Defaults to
        ``False``.

    Returns
    -------
    experiment : Experiment
        Sacred experiment, ready to be run.

    """
    dataset_ingredient = Ingredient("dataset")
    dataset = dataset_ingredient.capture(dataset)
    estimator_ingredient = Ingredient("estimator")
    estimator = estimator_ingredient.capture(estimator)
    experiment = Experiment(ingredients=(
        dataset_ingredient,
        estimator_ingredient,
    ), )

    @experiment.automain
    def run() -> None:
        """Run the experiment."""
        data = dataset()

        # Metaparameter search
        cv = getattr(data, "inner_cv", None)

        try:
            e = estimator(cv=cv)
        except TypeError as exception:
            warn(f"The estimator does not accept cv: {exception}")
            e = estimator()

        # Model assessment
        if getattr(data, "test_indices", None):
            _benchmark_one(
                experiment=experiment,
                estimator=e,
                data=data,
                save_train=save_train,
            )
        elif getattr(data, "outer_cv", None) is not None:
            _benchmark_partitions(
                experiment=experiment,
                estimator=e,
                data=data,
                save_train=save_train,
            )

    return experiment
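
A usage sketch for this factory; fetch_toy_dataset and make_estimator are
hypothetical stand-ins that satisfy the documented contract, and the factory
is assumed to be defined in the executed script (automain runs it on launch):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.utils import Bunch


def fetch_toy_dataset(n_samples=100, random_state=0):
    rng = np.random.default_rng(random_state)
    X = rng.normal(size=(n_samples, 5))
    y = X @ rng.normal(size=5)
    # No test partition, so assessment uses the outer_cv branch
    return Bunch(data=X, target=y, inner_cv=None, outer_cv=5)


def make_estimator(cv=None, alpha=1.0):
    return Ridge(alpha=alpha)


ex = experiment(fetch_toy_dataset, make_estimator)
# Both ingredients are configurable at launch, e.g.:
#   python run.py with estimator.alpha=0.1 dataset.n_samples=500
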
Example #8
import os
from tempfile import NamedTemporaryFile, mkdtemp
from time import process_time
from warnings import warn

import joblib
import numpy as np
from sacred import Experiment, Ingredient
from sklearn.model_selection import PredefinedSplit, cross_validate

def experiment(dataset, estimator):
    """Prepare a Scikit-learn experiment as a Sacred experiment.

    Prepare a Scikit-learn experiment indicating a dataset and an estimator and
    return it as a Sacred experiment.

    Parameters
    ----------
    dataset : function
        Dataset fetch function. Might receive any argument. Must return a Bunch
        with data, target (might be None), inner_cv (might be None) and outer_cv
        (might be None).
    estimator : function
        Estimator initialization function. Might receive any keyword argument.
        Must return an initialized sklearn-compatible estimator.

    Returns
    -------
    experiment : Experiment
        Sacred experiment, ready to be run.

    """

    _dataset = Ingredient('dataset')
    dataset = _dataset.capture(dataset)
    _estimator = Ingredient('estimator')
    estimator = _estimator.capture(estimator)
    experiment = Experiment(ingredients=(_dataset, _estimator))

    @experiment.automain
    def run():
        """Run the experiment."""
        data = dataset()

        # Metaparameter search
        X = data.data
        y = data.target
        cv = None
        explicit_cv_folds = False
        if hasattr(data, 'inner_cv'):
            cv = data.inner_cv
            explicit_cv_folds = hasattr(data.inner_cv, '__iter__')
            if explicit_cv_folds:
                # Explicit CV folds
                X = np.array([]).reshape((0, *data.inner_cv[0][0].shape[1:]))
                y = np.array([]).reshape((0, *data.inner_cv[0][1].shape[1:]))
                cv = []
                for i, (X_, y_, X_test_, y_test_) in enumerate(data.inner_cv):
                    X = np.concatenate((X, X_, X_test_))
                    y = np.concatenate((y, y_, y_test_))
                    cv = cv + [-1]*len(X_) + [i]*len(X_test_)
                cv = PredefinedSplit(cv)
        try:
            e = estimator(cv=cv)
        except Exception as exception:
            warn(f'The estimator does not accept cv: {exception}')
            e = estimator()
        if explicit_cv_folds:
            e.fit(X, y=y)
            e.fit = e.best_estimator_.fit

        # Model assessment
        if hasattr(data, 'data_test') and (data.data_test is not None):
            # Test partition
            e.fit(X, y=y)
            try:
                with NamedTemporaryFile() as tmpfile:
                    joblib.dump(e, tmpfile.name)
                    experiment.add_artifact(tmpfile.name,
                                            name='estimator.joblib')
            except Exception as exception:
                warn(f'Artifact save failed: {exception}')
            experiment.log_scalar('score_mean', e.score(data.data_test,
                                  y=data.target_test))
            experiment.log_scalar('score_std', 0.0)
            for output in ('transform', 'predict'):
                if hasattr(e, output):
                    with open(os.path.join(mkdtemp(), f'{output}.npy'),
                              'wb+') as tmpfile:
                        np.save(tmpfile, getattr(e, output)(data.data_test))
                        experiment.add_artifact(tmpfile.name)
        elif hasattr(data, 'outer_cv'):
            # Outer CV
            if hasattr(data.outer_cv, '__iter__'):
                # Explicit CV folds
                scores = {'test_score': list(), 'train_score': list(),
                          'fit_time': list(), 'score_time': list(),
                          'estimator': list()}
                outputs = {'transform': list(), 'predict': list()}
                for X, y, X_test, y_test in data.outer_cv:
                    t0 = process_time()
                    e.fit(X, y=y)
                    t1 = process_time()
                    test_score = e.score(X_test, y=y_test)
                    t2 = process_time()
                    scores['test_score'].append(test_score)
                    scores['train_score'].append(e.score(X, y=y))
                    scores['fit_time'].append(t1 - t0)
                    scores['score_time'].append(t2 - t1)
                    scores['estimator'].append(e)
                    for output in ('transform', 'predict'):
                        if hasattr(e, output):
                            outputs[output].append([getattr(e, output)(X_test)])
                for output in ('transform', 'predict'):
                    if outputs[output]:
                        with open(os.path.join(mkdtemp(), f'{output}.npy'),
                                  'wb+') as tmpfile:
                            np.save(tmpfile, np.array(outputs[output]))
                            experiment.add_artifact(tmpfile.name)
            else:
                # Automatic/indexed CV folds
                scores = cross_validate(e, data.data, y=data.target,
                                        cv=data.outer_cv,
                                        return_train_score=True,
                                        return_estimator=True)
            try:
                with NamedTemporaryFile() as tmpfile:
                    joblib.dump(scores, tmpfile.name)
                    experiment.add_artifact(tmpfile.name, name='scores.joblib')
            except Exception as exception:
                warn(f'Artifact save failed: {exception}')
            experiment.log_scalar('score_mean',
                                  np.nanmean(scores['test_score']))
            experiment.log_scalar('score_std', np.nanstd(scores['test_score']))

    return experiment
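
The explicit-folds branch flattens the per-fold arrays into a single dataset
and encodes test membership with PredefinedSplit; a standalone sketch of that
encoding with toy folds:

import numpy as np
from sklearn.model_selection import PredefinedSplit

# Two explicit folds, each given as (X_train, y_train, X_test, y_test)
folds = [
    (np.zeros((3, 2)), np.zeros(3), np.ones((2, 2)), np.ones(2)),
    (np.zeros((4, 2)), np.zeros(4), np.ones((2, 2)), np.ones(2)),
]

test_fold = []
for i, (X_, y_, X_test_, y_test_) in enumerate(folds):
    # -1 marks samples that are never in a test set; i marks fold i's test set
    test_fold += [-1] * len(X_) + [i] * len(X_test_)

cv = PredefinedSplit(test_fold)
for train_idx, test_idx in cv.split():
    print(len(train_idx), len(test_idx))  # 9 2, then 9 2
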
Example #9
import sys

import matplotlib.pyplot as plt
from sacred import Ingredient

if sys.path[0] != '../src':
    sys.path.insert(0, '../src')

# from dataset.tdisc import load_tdata
from dataset.sprites import load_sprites
from dataset.shapes3d import load_shapes3d
from dataset.mpi import load_mpi3d
from dataset.transforms import Triplets

import configs.datasplits as splits

dataset = Ingredient('dataset')
load_sprites = dataset.capture(load_sprites)
load_shapes3d = dataset.capture(load_shapes3d)
load_mpi3d = dataset.capture(load_mpi3d)
load_composition = dataset.capture(Triplets)

dataset.add_config(setting='unsupervised')
dataset.add_named_config('unsupervised', setting='unsupervised')
dataset.add_named_config('supervised', setting='supervised')


@dataset.capture
def get_dataset(dataset):
    if dataset == 'dsprites':
        dataset_loader = load_sprites
    elif dataset == 'shapes3d':
        dataset_loader = load_shapes3d
    elif dataset == 'mpi3d':
        dataset_loader = load_mpi3d
    else:
        raise ValueError('Unsupported dataset: {}'.format(dataset))
    return dataset_loader
Example #10
from sacred import Ingredient
from .dummy import load_data

dataset = Ingredient('dataset')
load_delayed_addition = dataset.capture(load_data)