예제 #1
0
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from sacred import Experiment
from sacred.observers import FileStorageObserver

import os
import datetime

import audio_models
import dataset
from train import train
from test import test

ex = Experiment('UNet_Speech_Separation', interactive=True)
ex.observers.append(FileStorageObserver.create('my_runs'))


@ex.config
def cfg():
    model_config = {
        'model_variant':
        'unet',  # The type of model to use, from ['unet', capsunet', basic_capsnet']
        'data_type':
        'mag',  # From [' mag', 'mag_phase', 'real_imag', 'mag_real_imag']
        'initialisation_test':
        False,  # Whether or not to calculate test metrics before training
        'loading': False,  # Whether to load an existing checkpoint
        'checkpoint_to_load': "136/136-6",  # Checkpoint format: run/run-step
        'saving': True,  # Whether to take checkpoints
        'save_by_epochs':
        True,  # Checkpoints at end of each epoch or every 'save_iters'?
예제 #2
0
from data_set_file import create_huge_data_set, create_encoding_deconding_dict
from model_creation import create_model
from trainning import train_model, load_model_weights, create_scheduler
from test_metrics import calcul_metric_concours

import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader

#Trucs sacred
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(
    FileStorageObserver.create(
        url='https://cloud.mongodb.com/v2/5c9a79f8014b76c39199e239#clusters'))


#Configs
@experiment_sacred.config
def configuration():

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
    path_save_model = "saves_model/model_4_classes.tar"
    path_load_existing_model = None
    # path_load_existing_model = "saves_model/model_4_classes.tar"
    path_model_weights_test = "saves_model/model_4_classes.tar"

    use_gpu = True

    do_training = True
    
    # list configs in active directory
    configs = os.listdir( '../configs/active' )

    # iterate over each config and perform experiment
    for config_file in configs:

        # set config path
        config_path = f'../configs/active/{ config_file }'

        print( f'Running model using configuration located at {config_path}' )

        # load config file
        config = json.load(open(config_path))

        # get experiment path
        experiment_name = config['experiment']['name']
        experiment_path = f'../experiments/{ experiment_name }'

        # initialize experiment
        experiment = Experiment(experiment_name)
        experiment.captured_out_filter = apply_backspaces_and_linefeeds
        experiment.observers.append(FileStorageObserver.create(experiment_path))

        # wrap run function (sacred reasons)
        def wrapper():
            run( config, config_path )

        # run experiment
        experiment.automain( wrapper )
예제 #4
0
import nn_fdk as nn
import time
import pylab
import sys
sys.path.append('../nn_fdk/')
t = time.time()

from sacred.observers import FileStorageObserver
from sacred import Experiment
from os import environ
import h5py
name_exp = 'noisy_FDKs'
ex = Experiment(name_exp, ingredients=[])

FSpath = '/export/scratch2/lagerwer/NNFDK_results/' + name_exp
ex.observers.append(FileStorageObserver.create(FSpath))


# %%
@ex.config
def cfg():
    """Sacred config: experiment index, geometry sizes, training/validation
    dataset counts and the experiment type / phantom selection.

    NOTE(review): local variable names become sacred config keys; do not
    rename them.
    """
    it_i = 0
    pix = 1024
    det_rad = 0
    nTD, nTrain = 10, int(1e6)
    nVD, nVal = 5, int(1e6)
    exp_type = 'noise'

    # Phantom choice depends on the experiment type.
    if exp_type == 'noise':
        phantom = 'Fourshape_test'
        PH = '4S'
예제 #5
0
            "img_cols": img_rows,
            "img_channels": 1,
        },
        "encoder_parameters": {
            "encoder_dropout_rate": 0.2,
            "conv_layer_sizes": [16, 32, 64, 128, 128],
            "conv_kernel_size": (3, 3),
            "final_conv_size": 256,
            "final_conv_kernel": (3, 3)
        },
        "decoder_parameters": {
            "decoder_enc_size": 128,
            "stamp_size": stamp_size,
            "nr_of_stamps": 40,
            "stamps_per_canvas": 3,
            "gumbel_parameters": {
                "tau_init": 7,
                "anneal_rate": 0.01,
                "min_temperature": 0.2,
                "steps_per_epoch": steps_per_epoch
            },
            "coord_tensor_size": img_rows - stamp_size + 1
        },
        "loss": "mse"
    }
}

stamp_network.ex.add_config(config_file)
stamp_network.ex.observers[0] = FileStorageObserver.create(
        basedir=os.path.join('runs', cur_file_name))
stamp_network.ex.run(options={'--name': cur_file_name})
#!/usr/bin/env python3

import sacred
from sacred.observers import FileStorageObserver

experiment = sacred.Experiment('observe')

@experiment.config
def configuration():
    """Sacred config: greeting recipient and the rendered message.

    Both locals are captured as config entries; `message` is evaluated at
    config time from `recipient`.
    """
    recipient = 'observer'
    message = 'hello {0}!'.format(recipient)

@experiment.main
def main(message):
    """Print the configured greeting (`message` is injected by sacred)."""
    print(message)

# Register the file observer before run() so this execution is recorded
# under the 'run' directory.
experiment.observers.append(
    FileStorageObserver.create('run')
)
experiment.run()
예제 #7
0
from sacred import Experiment
from sacred.observers import FileStorageObserver
from data_set_file import create_huge_data_set

#Trucs sacred
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(
    FileStorageObserver.create('my_runs_v_alpha'))


#Configs
@experiment_sacred.config
def configuration():
    """Sacred config: dataset location and per-class row budget.

    NOTE(review): local names become sacred config keys; do not rename.
    """
    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'

    nb_row_per_classe = 400
    # Removed a redundant trailing `pass` — the function body is non-empty,
    # so the statement was dead code.


#Main
@experiment_sacred.automain
def main_program(path_data, nb_row_per_classe):

    size_image_train = 224
    data_train = create_huge_data_set(path_data,
                                      nb_rows=nb_row_per_classe,
                                      size_image=size_image_train)

    data_valid = create_huge_data_set(path_data,
                                      nb_rows=100,
예제 #8
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('train')
ex.observers.append(FileStorageObserver.create('train'))


@ex.config
def config():
    data: str = ''
    # required
    workdir: str = ''
    # required

    data_shape = ()

    batch_size = 128
    optimizer = 'torch.optim.SGD'
    optimizer_kargs = dict(lr=learning_rate, momentum=0.09)

    ### check arguments ###
    assert datadir, f'datadir is required'
    assert workdir, f'workdir is required'

    ### END check arguments ###

    def setup(workdir):
        workdir = Path(workdir)
예제 #9
0
 def setup(workdir):
     """Derive the run's log directory under `workdir` and attach a
     FileStorageObserver for it to the module-level experiment `ex`.

     NOTE(review): this fragment carries an unusual one-space indent in the
     source; preserved as-is.
     """
     workdir = Path(workdir)
     logdir = workdir / 'log'
     ex.observers.append(FileStorageObserver.create(logdir))
예제 #10
0
def main_console():
    """Attach a FileStorageObserver under output/sacred/train, then hand
    control to the training experiment's sacred command line."""
    storage_path = osp.join('output', 'sacred', 'train')
    train_ex.observers.append(FileStorageObserver.create(storage_path))
    train_ex.run_commandline()
import os
import sys
import pandas as pd
from tempfile import NamedTemporaryFile
from collections import defaultdict
from sacred import Experiment
from skopt import gp_minimize
from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args
sys.path.insert(0, '../../src')
from sacred.observers import FileStorageObserver
from run_scripts.run_cnp_tcn_endtoend import ex as mgp_fit_experiment
import tensorflow as tf
ex = Experiment('hyperparameter_search_CNP_TCN_endtoend')
ex.observers.append(
    FileStorageObserver.create('cnp_tcn_hyperparameter_search_runs'))


@ex.config
def cfg():
    hyperparameter_space = {
        'learning_rate': ('Real', 0.0005, 0.005, 'log-uniform'),
        'n_channels': ('Integer', 15, 90),  # = n_filters here
        'levels': ('Integer', 4, 9),  # causal dilation levels
        'kernel_size': ('Integer', 2, 5),
        'batch_size': ('Integer', 10, 300),
        'dropout': ('Real', 0.0, 0.1, 'uniform'),
        'l2_penalty': (
            'Real', 0.01, 100, 'log-uniform'
        ),  # not a standard lambda as loss is normalized by number of weights!
        'encoder_output_size': ('Integer', 100, 512),

from deeplib.training import validate




import torch.optim as optim
import torch.nn as nn

from torch.optim.lr_scheduler import  LambdaLR
from torch.utils.data import DataLoader

#Trucs sacred
experiment_sacred=Experiment("Doodle_Boys")
experiment_sacred.observers.append(FileStorageObserver.create('my_runs_v_alpha'))



#Configs
@experiment_sacred.config
def configuration():
    """Sacred config: dataset path, model checkpoint path and run switches.

    NOTE(review): local names become sacred config keys; do not rename.
    """

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
    path_save_model="saves_model/model_info.tar"
    use_gpu = True




    do_training=False
예제 #13
0
def search_hyperparameter_space(train_module, n_random_starts, n_calls,
                                overrides, evaluation_metric, nan_replacement,
                                load_result, _rnd, _run, _log):
    """Search hyperparameter space of an experiment.

    Runs skopt Gaussian-process minimization over the space returned by
    ``build_search_space()``, evaluating each candidate by executing the
    child sacred experiment ``exp.<train_module>.EXP``.

    Args:
        train_module: Attribute name inside the ``exp`` package whose
            ``EXP`` member is the child sacred experiment to run.
        n_random_starts: Random evaluations before the GP proposes points.
        n_calls: Total number of objective evaluations.
        overrides: Constant config updates applied to every child run.
        evaluation_metric: Key of ``run.result`` to optimize; its value is
            negated, so the metric itself is maximized.
        nan_replacement: Objective value substituted when a run fails or
            yields a non-finite score (gp_minimize cannot handle NaN).
        load_result: Optional path of a pickled skopt result to warm-start
            from (loaded as a sacred resource).
        _rnd, _run, _log: Injected by sacred (RNG, current run, logger).

    Returns:
        Dict with all parameter evaluations, the best (minimized) score,
        and the best parameter assignment.
    """
    import exp
    train_module = getattr(exp, train_module)
    train_experiment = train_module.EXP
    # Add observer to child experiment to store all intermediate results
    if _run.observers:
        run_dir = _run.observers[0].dir
        train_experiment.observers.append(
            FileStorageObserver.create(os.path.join(run_dir, 'model_runs')))

        # Also setup callback to store intermediate hyperparameter search
        # results in a checkpoint file
        callbacks = [
            SkoptCheckpointCallback(
                os.path.join(run_dir, 'result_checkpoint.pck'))
        ]
    else:
        callbacks = []

    # Setup search space
    search_space = build_search_space()

    # Setup objective and logging of all run results
    results = []
    evaluated_parameters = []
    _run.result = {}

    @skopt.utils.use_named_args(search_space)
    def objective(**params):
        for key in params.keys():
            if isinstance(params[key], np.int64):
                # Strangeley, it seems like we dont get 'real' ints here,
                # but a numpy datatypes. So lets cast them.
                params[key] = int(params[key])
        # Need to do this here in order to get rid
        # of leftovers from previous evaluations
        plt.close('all')

        # Update the parameters we go with constant overrides
        params.update(overrides)

        # Transform the search space and overrides into structure of nested
        # dicts
        # This workaround as sacred does not allow '.' in dict keys
        params = {
            key.replace('__', '.'): value for key, value in params.items()
        }

        # Convert to nested dict
        transformed_params = {}
        for key, value in params.items():
            # This function is from sacred and used to convert x.y=z notation
            # into nested dicts: {'x': {'y': z}}
            set_by_dotted_path(transformed_params, key, value)

        _log.debug(f'Running training with parameters: {transformed_params}')
        try:
            # Run the experiment and update config according to overrides
            # to overrides and sampled parameters
            run = train_experiment.run(config_updates=transformed_params)
            result = run.result
            results.append(result)
            print(f'Current Run Result: -----> {result} <----')
            # gp optimize does not handle nan values, thus we need
            # to return something fake if we diverge before the end
            # of the first epoch
            res = - result[evaluation_metric] #negative as we default to auprc which needs to be maximized
            if np.isfinite(res):
                return_value = res
            else:
                return_value = nan_replacement
            result['hyperparam_optimization_objective'] = return_value
        except Exception as e:
            # NOTE(review): broad catch is deliberate — one diverging child
            # run must not abort the whole search; it is scored as a failure.
            _log.error('An exception occured during fitting: {}'.format(e))
            results.append({})
            return_value = nan_replacement
            result = {}

        # Store the results into sacred infrastructure
        # Ensures they can be used even if the experiment is terminated
        params.update(result)
        evaluated_parameters.append(params)
        _run.result['parameter_evaluations'] = evaluated_parameters
        with NamedTemporaryFile(suffix='.csv') as f:
            df = pd.DataFrame(evaluated_parameters)
            df.to_csv(f.name)
            _run.add_artifact(f.name, 'parameter_evaluations.csv')
        return return_value

    # Load previous evaluations if given
    if load_result:
        _log.info('Loading previous evaluations from {}'.format(load_result))
        with _run.open_resource(load_result, 'rb') as f:
            loaded_res = skopt.load(f)
        x0 = loaded_res.x_iters
        y0 = loaded_res.func_vals
        if n_random_starts != 0:
            _log.warning('n_random_starts is {} and not 0, '
                         'will evaluate further random points '
                         'after loading stored result'.format(n_random_starts))
    else:
        x0 = None
        y0 = None

    res_gp = skopt.gp_minimize(
        objective, search_space, x0=x0, y0=y0, n_calls=n_calls,
        n_random_starts=n_random_starts, random_state=_rnd, callback=callbacks
    )

    # Store final optimization results
    with NamedTemporaryFile(suffix='.pck') as f:
        res_without_func = remove_functions_from_skopt_res(res_gp)
        skopt.dump(res_without_func, f.name)
        _run.add_artifact(f.name, 'result.pck')

    best_parameters = {
        variable.name: value
        for variable, value in zip(search_space, res_gp.x)
    }
    return {
        'parameter_evaluations': evaluated_parameters,
        'Best score': res_gp.fun,
        'best_parameters': best_parameters
    }
def cfg(data_config):
    """Sacred config for evaluating a pretrained DORN (no-hints) model.

    Builds the model configuration from the ingredient's ``data_config``,
    derives the output directory, and selects the torch device.

    NOTE(review): this runs side effects at config-evaluation time —
    directory creation, observer registration, and setting
    CUDA_VISIBLE_DEVICES.
    """
    model_config = {  # Load pretrained model for testing
        "model_name": "DORN_nyu_nohints",
        "model_params": {
            "in_channels":
            3,
            "in_height":
            257,
            "in_width":
            353,
            "sid_bins":
            data_config["sid_bins"],
            "offset":
            data_config["offset"],
            "min_depth":
            data_config["min_depth"],
            "max_depth":
            data_config["max_depth"],
            "alpha":
            data_config["alpha"],
            "beta":
            data_config["beta"],
            "frozen":
            True,
            "pretrained":
            True,
            "state_dict_file":
            os.path.join("models", "torch_params_nyuv2_BGR.pth.tar"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None  # Keep as None
    save_outputs = True
    seed = 95290421
    small_run = 0

    # hyperparams = ["sgd_iters", "sinkhorn_iters", "sigma", "lam", "kde_eps", "sinkhorn_eps"]
    # NOTE(review): pdict is bound and deleted immediately — presumably so
    # sacred does not collect the alias as a config entry (cf. the del at the
    # bottom of this function); confirm it serves no other purpose.
    pdict = model_config["model_params"]
    del pdict

    # print(data_config.keys())
    output_dir = os.path.join(
        "results",
        data_config["data_name"],  # e.g. nyu_depth_v2
        "{}_{}".format("test", small_run),
        model_config["model_name"])  # e.g. DORN_nyu_nohints

    safe_makedir(output_dir)
    ex.observers.append(
        FileStorageObserver.create(os.path.join(output_dir, "runs")))

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(
        device, os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
예제 #15
0
from importlib import import_module
from pip import get_installed_distributions

import numpy as np
from tqdm import tqdm
from sacred import Experiment
from sacred.observers import FileStorageObserver
import torchvision.datasets

from benchmark.data import Iterator


ex = Experiment('benchmark')
project_root = Path(__file__).resolve().parent.parent
data_dir = project_root / 'results'
ex.observers.append(FileStorageObserver.create(str(data_dir)))


@ex.config
def config():
    """

    """
    project_root = str(project_root)
    ngpu = 1 # ngpu = 0 corresponds to cpu-mode
    data_type = 'image' # You can choise data-type from this list ['image', 'sequence', 'mnist', 'cifer-10']. 'image' and 'sequence' are dummy data.

    assert data_type in ['image', 'sequence', 'mnist', 'cifer-10'], \
        "Your data_type[{}] is not supported.".format(data_type)

    batch_size = 128    
예제 #16
0
from torch.nn import GRU
from torch.optim import Adam
from torchcontrib.optim import SWA
from upb_audio_tagging_2019.data import (split_dataset, MixUpDataset,
                                         Extractor, Augmenter,
                                         DynamicTimeSeriesBucket,
                                         EventTimeSeriesBucket, Collate,
                                         batch_to_device)
from upb_audio_tagging_2019.model import CRNN, batch_norm_update
from upb_audio_tagging_2019.modules import CNN2d, CNN1d, fully_connected_stack
from upb_audio_tagging_2019.paths import exp_dir, jsons_dir
from upb_audio_tagging_2019.utils import timestamp

ex = Exp('upb_audio_tagging_2019')
storage_dir = exp_dir / timestamp()
observer = FileStorageObserver.create(str(storage_dir))
ex.observers.append(observer)


@ex.config
def config():
    debug = False

    # Data configuration
    use_noisy = True
    split = 0
    fold = None
    curated_reps = 7
    mixup_probs = [1 / 3, 2 / 3]
    extractor = {
        'input_sample_rate': 44100,
import os.path as osp
import random
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver

import configuration
import siamese_model
from utils.misc_utils import auto_select_gpu, mkdir_p, save_cfgs

ex = Experiment(configuration.RUN_NAME)
ex.observers.append(FileStorageObserver.create(osp.join(configuration.LOG_DIR, 'sacred')))


@ex.config
def configurations():
  """Sacred config: model, training and tracking configuration objects
  taken from the `configuration` module."""
  # Add configurations for current script, for more details please see the documentation of `sacred`.
  # REFER: http://sacred.readthedocs.io/en/latest/index.html
  model_config = configuration.MODEL_CONFIG
  train_config = configuration.TRAIN_CONFIG
  track_config = configuration.TRACK_CONFIG


def _configure_learning_rate(train_config, global_step):
  lr_config = train_config['lr_config']

  num_batches_per_epoch = \
예제 #18
0
from sacred import Experiment, Ingredient
from sacred.observers import FileStorageObserver
import numpy as np
from table_logger import TableLogger

gpu = Ingredient('gpu')
proteins = Experiment('Unsupervised Protein',
                      ingredients=[gpu, training_params] +
                      ModelBuilder.hparams + TaskBuilder.params)

folder_name = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
logdir = os.environ.get('PROTEIN_LOGDIR', 'results')
if not os.path.isdir('results'):
    os.mkdir('results')
proteins.observers.append(
    FileStorageObserver.create(os.path.join('results', folder_name)))


def filter_text(text):
    """Strip epoch-progress lines from captured experiment output.

    Any line whose beginning matches ``Epoch <number>:`` is dropped; all
    other lines are kept in their original order.
    """
    epoch_marker = re.compile(r"Epoch\s+\d+:")
    kept_lines = [ln for ln in text.split('\n') if not epoch_marker.match(ln)]
    return '\n'.join(kept_lines)


proteins.captured_out_filter = filter_text


@gpu.config
def gpu_config():
    """Configure the gpu"""
예제 #19
0
        tmp_ex.run("print_config")
    if tmp_ex.current_run is not None and "parent_config" in tmp_ex.current_run.config:
        return fetch_parents(tmp_ex.current_run.config["parent_config"],
                             [current_path] + parents)
    else:
        return [current_path] + parents


configs = fetch_parents(path)
ex = Experiment('treeqn')
for path in configs:
    ex.add_config(path)

ex.logger = logger

ex.observers.append(FileStorageObserver.create('./results'))


@ex.config
def my_config(save_folder, env_id, architecture, label, name):
    pytorch_version = torch.__version__
    # Timestamp experiment directory
    save_folder = get_timestamped_dir(save_folder)

    # Environment switches
    # obs_dtype as str does the job and plays nice with sacred
    obs_dtype, input_mode = 'uint8', "atari"
    if "push" in env_id:
        obs_dtype, input_mode = 'float32', "push"
    if "blocksworld" in env_id:
        obs_dtype, input_mode = 'float32', "blocksworld"
예제 #20
0
try:
    get_ipython()
except:
    IPY = False

from sacred import Experiment
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import ode
from sacred.observers import FileStorageObserver
from visdom_observer.visdom_observer import VisdomObserver
import time
import os

ex = Experiment('rendevouz_control', interactive=IPY)
fobs = FileStorageObserver.create('my_runs')
ex.observers.append(fobs)
ex.observers.append(VisdomObserver())


@ex.config
def system_config():
    """System parameters: 'spring' constants, damping constants, and the
    spatial dimension of the rendezvous control problem."""
    # 'spring' constants
    K1 = 1
    K2 = 1

    # damping constants
    nu1 = -1
    nu2 = -1
    dim = 2
from graspy.utils import binarize, symmetrize
from src.models import fit_a_priori
from src.utils import save_obj
import pandas as pd

from joblib import Parallel, delayed

ex = Experiment("Run LDT")

current_file = basename(__file__)[:-3]

sacred_file_path = Path(f"./maggot_models/models/runs/{current_file}")

slack_obs = SlackObserver.from_config("slack.json")

file_obs = FileStorageObserver.create(sacred_file_path)

ex.observers.append(slack_obs)
ex.observers.append(file_obs)


@ex.config
def config():
    """Sacred config: sweep range for n_components and directedness flag."""
    # Variables defined in config get automatically passed to main
    n_components_range = list(range(1, 55))  # noqa
    directed = True  # noqa: F841


def fit_ldt(left_graph, right_graph, n_components, n_bootstraps=500):
    ldt = LatentDistributionTest(n_components=n_components,
                                 n_bootstraps=n_bootstraps)
예제 #22
0
        else:
            d[k] = v
    return d


if __name__ == '__main__':
    # Entry point: assemble the sacred config from default.yaml plus the
    # env/alg configs named on the command line, then run the experiment.
    params = deepcopy(sys.argv)

    # Get the defaults from default.yaml
    with open(
            os.path.join(os.path.dirname(__file__), "config", "default.yaml"),
            "r") as f:
        try:
            # FIX: pass an explicit Loader — bare yaml.load() is deprecated
            # and, on older PyYAML, constructs arbitrary Python objects from
            # the file. FullLoader keeps full-YAML compatibility while
            # refusing arbitrary object construction.
            config_dict = yaml.load(f, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            # NOTE(review): assert is stripped under `python -O`; raising an
            # explicit exception would be safer for input validation.
            assert False, "default.yaml error: {}".format(exc)
    # Load algorithm and env base configs
    env_config = _get_config(params, "--env-config", "envs")
    alg_config = _get_config(params, "--config", "algs")
    config_dict = recursive_dict_update(config_dict, env_config)
    config_dict = recursive_dict_update(config_dict, alg_config)
    # now add all the config to sacred
    ex.add_config(config_dict)

    # Save to disk by default for sacred
    logger.info("Saving to FileStorageObserver in results/sacred.")
    file_obs_path = os.path.join(results_path, "sacred")
    ex.observers.append(FileStorageObserver.create(file_obs_path))

    ex.run_commandline(params)
예제 #23
0
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver

import configuration
from utils.misc_utils import auto_select_gpu, mkdir_p, save_cfgs

from models.bisenet import BiseNet

ex = Experiment(configuration.RUN_NAME)
ex.observers.append(
    FileStorageObserver.create(osp.join(configuration.LOG_DIR, 'sacred')))

# TODO: num_classes need to fix
num_classes = 7


@ex.config
def configurations():
    """Sacred config: model and training configuration objects taken from
    the `configuration` module."""
    # Add configurations for current script, for more details please see the documentation of `sacred`.
    model_config = configuration.MODEL_CONFIG
    train_config = configuration.TRAIN_CONFIG


def _configure_learning_rate(train_config, global_step):
    lr_config = train_config['lr_config']
예제 #24
0
def main_console():
    """Attach a FileStorageObserver under output/sacred/analyze, then run
    the analysis experiment's sacred command line."""
    storage_path = osp.join("output", "sacred", "analyze")
    analysis_ex.observers.append(FileStorageObserver.create(storage_path))
    analysis_ex.run_commandline()
from sacred.observers import FileStorageObserver
from example_get_results import get_results
from example_main_loop import ex

# Creates a simple observer or loads an existing one. Will generate example_mnist_mlp_runs if the folder does not exist
ex.observers.append(FileStorageObserver.create('example_mnist_mlp_runs'))
# Runs one experiment. Will generate a folder inside example_mnist_mlp_runs with the name equal to the experiment id
ex.run(config_updates={
    "lr": 1e-4,
})
get_results()  # Gets the results after the experiment has finished
# Runs another two experiments
ex.run(config_updates={"neurons_per_layer": (300, 100), "batch_size": 256})
get_results()
ex.run(config_updates={
    "dropout": 0.4,
})
get_results()
from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment()
ex.observers.append(FileStorageObserver.create("my_runs"))

@ex.config
def my_config():
    """Sacred config: the message printed by the main function."""
    message = "hello world"

@ex.automain
def my_main(message):
    """Print the configured message (`message` is injected by sacred)."""
    print(message)
예제 #27
0
import gym, time, os, seaborn
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from memory_profiler import profile

from sacred import Experiment
from sacred.observers import FileStorageObserver
from tensorflow.saved_model import simple_save
from human_aware_rl.directory import CHECKPOINT_DIR

PPO_DATA_DIR = os.path.join(CHECKPOINT_DIR, 'ppo_runs' + os.path.sep)
# PPO_DATA_DIR = 'data/ppo_runs/'

ex = Experiment('PPO')
ex.observers.append(FileStorageObserver.create(PPO_DATA_DIR + 'ppo_exp'))

from overcooked_ai_py.utils import load_pickle, save_pickle
from overcooked_ai_py.agents.agent import RandomAgent, GreedyHumanModel, AgentPair
from overcooked_ai_py.planning.planners import NO_COUNTERS_PARAMS, MediumLevelPlanner
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld

from human_aware_rl.baselines_utils import get_vectorized_gym_env, create_model, update_model, get_agent_from_saved_model
from human_aware_rl.utils import create_dir_if_not_exists, reset_tf, delete_dir_if_exists, set_global_seed
from human_aware_rl.imitation.behavioural_cloning import get_bc_agent_from_saved
from human_aware_rl.experiments.bc_experiments import BEST_BC_MODELS_PATH


# PARAMS
@ex.config
예제 #28
0
            "train_adversarial": train_ex,
        }
        ex = experiments[sacred_ex_name]

        observer = FileStorageObserver.create("sacred")
        ex.observers.append(observer)

        # Apply base configs
        base_named_configs.extend(config.get("named_configs", []))
        base_config_updates.update(config.get("config_updates", {}))
        config["named_configs"] = base_named_configs
        config["config_updates"] = base_config_updates

        run = ex.run(**config, options={"--run": run_name})

        # Ray Tune has a string formatting error if raylet completes without
        # any calls to `reporter`.
        reporter(done=True)

        assert run.status == "COMPLETED"
        return run.result

    return inner


if __name__ == "__main__":
    # Record this parallel run under output/sacred/parallel before launching.
    sacred_dir = os.path.join("output", "sacred", "parallel")
    parallel_ex.observers.append(FileStorageObserver.create(sacred_dir))
    parallel_ex.run_commandline()
예제 #29
0
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state

from modl.utils.system import get_output_dir

# Add examples to known modules
sys.path.append(
    path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from exps.exp_decompose_images import exp as single_exp

exp = Experiment('multi_decompose_images')
basedir = join(get_output_dir(), 'multi_decompose_images')
if not os.path.exists(basedir):
    os.makedirs(basedir)
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    """Sacred config: parallel worker count and seeding for the sweep."""
    n_jobs = 15
    n_seeds = 1
    seed = 1


@single_exp.config
def config():
    """Config overrides for the single decompose-images sub-experiment."""
    batch_size = 200
    learning_rate = 0.92
    reduction = 10
    alpha = 0.1
예제 #30
0
from data_set_file import create_huge_data_set, create_encoding_deconding_dict, generate_random_dataset, create_dict_nb_ligne
from model_creation import create_model, create_ensemble_model, create_ensemble_model_moy
from trainning import train_model, load_model_weights, create_scheduler
from test_metrics import calcul_metric_concours

import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader

#Trucs sacred
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(
    FileStorageObserver.create('runs_sacred/model_ensemble'))


#Configs
@experiment_sacred.config
def configuration():

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'

    path_save_model = "saves_model/model_poids_ensemble.tar"
    #path_load_existing_model = "saves_model/model_poids_random.tar"
    path_load_existing_model = None
    path_model_weights_test = "saves_model/model_poids_ensemble.tar"

    list_path_model_ensemble = [
        "saves_model/model_poids_random.tar",
예제 #31
0
"""
Train a MNIST classifier from a sparse judge combined with a debate.
"""
import time
import numpy as np

from sacred import Experiment
from sacred.observers import FileStorageObserver

from judge import MNISTJudge, FashionJudge
from debate import Debate
from agent import DebateAgent, DebateClassifier

ex = Experiment("train_classifier_via_debate")
ex.observers.append(FileStorageObserver.create("experiments"))


@ex.config
def cfg():
    """Sacred config: judge/debate settings and training hyperparameters
    for the debate-supervised classifier.

    NOTE(review): local names become sacred config keys; do not rename.
    """
    N_to_mask = 4
    judge_path = None
    dataset = None
    rollouts = 1000
    N_epochs = 1
    batch_size = 128
    learning_rate = 1e-4
    learning_rate_decay = False
    classifier_path = None
    cheat_debate = False
    only_update_for_wins = True
    precomputed_debate_results_restricted_first_path = None
예제 #32
0
from sacred.observers import FileStorageObserver
from numpy.random import random

inner_ex = Experiment('inner_ex')


@inner_ex.config
def baseline_config():
    """Default config: exponent applied to each draw and additive offset."""
    exponent = 2
    offset = 10


@inner_ex.named_config
def high_offset():
    """Named config variant selecting a larger additive offset."""
    offset = 50


@inner_ex.main
def my_inner_experiment(exponent, offset):
    """Return the max of 10,000,000 draws of random()**exponent + offset.

    The accumulator starts at 0, so the result is floored at 0 even if
    every draw is negative. The loop is deliberately long-running so the
    outer experiment has measurable work to observe.
    """
    max_rand_val = 0
    for i in range(10000000):
        rand_val = random()
        new_val = rand_val**exponent + offset
        max_rand_val = max(new_val, max_rand_val)
    return max_rand_val


if __name__ == "__main__":
    # Persist runs of the inner experiment under ./inner_results.
    file_observer = FileStorageObserver.create('inner_results')
    inner_ex.observers.append(file_observer)
    inner_ex.run_commandline()
예제 #33
0
"""A standard machine learning task without much sacred magic."""
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn import svm, datasets, model_selection

ex = Experiment("svm")

ex.observers.append(
    FileStorageObserver.create("my_runs")
)
ex.add_config({  # Configuration is explicitly defined as dictionary.
    "C": 1.0,
    "gamma": 0.7,
    "kernel": "rbf",
    "seed": 42
})


def get_model(C, gamma, kernel):
    """Build an SVC classifier from the given hyperparameters."""
    classifier = svm.SVC(C=C, kernel=kernel, gamma=gamma)
    return classifier


@ex.main  # Using main, command-line arguments will not be interpreted in any special way.
def run(_config):
    """Train an SVC on the breast-cancer dataset and return test accuracy.

    Hyperparameters are read explicitly from the sacred config dict.
    """
    X, y = datasets.load_breast_cancer(return_X_y=True)
    split = model_selection.train_test_split(X, y, test_size=0.2)
    X_train, X_test, y_train, y_test = split
    classifier = get_model(_config["C"], _config["gamma"], _config["kernel"])  # Parameters are passed explicitly.
    classifier.fit(X_train, y_train)
    return classifier.score(X_test, y_test)

예제 #34
0
    for i in range(N_LBFGS_STEPS):
        optimizer.step(closure)
    torch.save(
        polished_model.state_dict(),
        str((Path(trial.logdir) / trial._checkpoint.value).parent /
            'polished_model.pth'))
    eye = torch.eye(polished_model.size)
    x = (eye[:, :, None, None] * torch.eye(2)).unsqueeze(-1)
    y = polished_model(x[:, trainable.br_perm])
    loss = nn.functional.mse_loss(y, trainable.target_matrix)
    return loss.item()


ex = Experiment('Ops_factorization')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path(
    'config/slack.json')  # Add webhook_url there for Slack notification
if slack_config_path.exists():
    ex.observers.append(SlackObserver.from_config(str(slack_config_path)))


@ex.config
def fixed_order_config():
    """Sacred config: tuning budget and runtime settings for the
    fixed-order matrix-factorization experiment."""
    size = 8  # Size of matrix to factor, must be power of 2
    ntrials = 20  # Number of trials for hyperparameter tuning
    nsteps = 400  # Number of steps per epoch
    nmaxepochs = 200  # Maximum number of epochs
    result_dir = 'results'  # Directory to store results
    nthreads = 1  # Number of CPU threads per job
    smoke_test = False  # Finish quickly for testing
예제 #35
0
import torch.optim as optim
from tqdm import tqdm

from constants import *
sys.path.append(BASE_DIR)
from goggles.loss import CustomLoss2
from goggles.models.semantic_ae import SemanticAutoencoder
from goggles.opts import DATASET_MAP, DATA_DIR_MAP
from goggles.utils.vis import \
    get_image_from_tensor, save_prototype_patch_visualization


_make_cuda = lambda x: x.cuda() if torch.cuda.is_available() else x

ex = Experiment('goggles-experiment')
ex.observers.append(FileStorageObserver.create(os.path.join(ALL_RUNS_DIR)))


def _provision_run_dir(run_dir):
    """Create the per-run log/image/prototype subdirectories.

    Returns the list of created directory paths, in the same order as the
    name constants.
    """
    subdir_names = [LOGS_DIR_NAME, IMAGES_DIR_NAME, PROTOTYPES_DIR_NAME]
    created = [os.path.join(run_dir, name) for name in subdir_names]
    for path in created:
        os.makedirs(path)
    return created


@ex.config
def default_config():
    """Sacred config: experiment seed, dataset choice and class filter."""
    seed = 42                # RNG seed for the experiment
    dataset = 'cub'            # Dataset to be used (cub/awa2)
    filter_class_ids = None  # Class IDs used for training, uses all classes if None
예제 #36
0
    with suppress_stdout():
        tmp_ex.run("print_config")
    if tmp_ex.current_run is not None and "parent_config" in tmp_ex.current_run.config:
        return fetch_parents(tmp_ex.current_run.config["parent_config"], [current_path] + parents)
    else:
        return [current_path] + parents


configs = fetch_parents(path)
ex = Experiment('treeqn')
for path in configs:
    ex.add_config(path)

ex.logger = logger

ex.observers.append(FileStorageObserver.create('./results'))


@ex.config
def my_config(save_folder, env_id, architecture, label, name):
    pytorch_version = torch.__version__
    # Timestamp experiment directory
    save_folder = get_timestamped_dir(save_folder)

    # Environment switches
    # obs_dtype as str does the job and plays nice with sacred
    obs_dtype, input_mode = 'uint8', "atari"
    if "push" in env_id:
        obs_dtype, input_mode = 'float32', "push"

    if architecture == "dqn":