Example #1
def fetch_parents(current_path, parents=None):
    # Function header and tmp_ex setup reconstructed; the original snippet
    # starts mid-function.
    parents = parents or []
    tmp_ex = Experiment('treeqn')
    tmp_ex.add_config(current_path)
    with suppress_stdout():
        tmp_ex.run("print_config")
    if tmp_ex.current_run is not None and "parent_config" in tmp_ex.current_run.config:
        return fetch_parents(tmp_ex.current_run.config["parent_config"], [current_path] + parents)
    else:
        return [current_path] + parents
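
# Note: suppress_stdout is not defined in this snippet. A minimal sketch of
# such a helper, assuming it only needs to silence output while print_config
# runs:
import contextlib
import io

@contextlib.contextmanager
def suppress_stdout():
    # Redirect stdout into a throwaway buffer for the duration of the block.
    with contextlib.redirect_stdout(io.StringIO()):
        yield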


configs = fetch_parents(path)
ex = Experiment('treeqn')
for path in configs:
    ex.add_config(path)

ex.logger = logger

ex.observers.append(FileStorageObserver.create('./results'))


@ex.config
def my_config(save_folder, env_id, architecture, label, name):
    pytorch_version = torch.__version__
    # Timestamp experiment directory
    save_folder = get_timestamped_dir(save_folder)

    # Environment switches
    # obs_dtype as str does the job and plays nice with sacred
    obs_dtype, input_mode = 'uint8', "atari"
    if "push" in env_id:
        obs_dtype, input_mode = 'float32', "push"

    if architecture == "dqn":
Example #2
def main_console():
    observer = FileStorageObserver(osp.join("output", "sacred",
                                            "expert_demos"))
    expert_demos_ex.observers.append(observer)
    expert_demos_ex.run_commandline()
Example #3
            if "no-mongo" in _v:
                # if "--no-mongo" == _v:
                del params[_i]
                no_mongodb = True
                break

        # If there is no url set for the mongodb, we cannot use it
        if not no_mongodb and "db_url" not in config_dict:
            no_mongodb = True
            logger.error("No 'db_url' to use for Sacred MongoDB")

        if not no_mongodb:
            db_url = config_dict["db_url"]
            db_name = config_dict["db_name"]
            mongo_client = setup_mongodb(db_url, db_name)

        # Save to disk by default for sacred, even if we are using the mongodb
        logger.info("Saving to FileStorageObserver in results/sacred.")
        file_obs_path = os.path.join(results_path, "sacred")
        while True:
            try:
                ex.observers.append(FileStorageObserver.create(file_obs_path))
                break
            except FileExistsError:
                # sometimes we see a race condition
                logger.info(
                    "Creating FileStorageObserver failed. Trying again...")
                time.sleep(1)

    ex.run_commandline(params)
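
# Note: setup_mongodb is not shown in this example. A hypothetical sketch,
# assuming it only needs to attach a MongoObserver and hand back a client:
import pymongo
from sacred.observers import MongoObserver

def setup_mongodb(db_url, db_name):
    # Record runs in MongoDB alongside the FileStorageObserver above.
    ex.observers.append(MongoObserver.create(url=db_url, db_name=db_name))
    return pymongo.MongoClient(db_url)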
Example #4
def main():
    observer = FileStorageObserver.create(osp.join('data', 'sacred', 'score'))
    score_ex.observers.append(observer)
    score_ex.run_commandline()
    score_ex_logger.info("Sacred run completed, files stored at {}".format(observer.dir))
def hook(config, command_name, logger):
    if config['group_dir'] is None:
        raise Exception(f'group_dir is {config["group_dir"]}')
    else:
        util.require_dir(config['group_dir'])
    ex.observers.append(FileStorageObserver(config['group_dir']))
Example #6
        session_ids,
        _dir / 'audio',
        test_run=test_run,
    )
    if mpi.IS_MASTER:
        print('Finished experiment dir:', _dir)


if __name__ == '__main__':

    # Custom parsing of the sacred --file_storage option.
    # This allows giving the option a default value.
    argv = [*sys.argv]
    import argparse
    from pb_chime5 import git_root

    parser = argparse.ArgumentParser()
    parser.add_argument('-F',
                        '--file_storage',
                        default=git_root / 'sacred',
                        help='add a file storage observer')

    parsed, args = parser.parse_known_args()
    argv = argv[:1] + args

    if mpi.IS_MASTER:
        path = Path(parsed.file_storage).expanduser().resolve()
        experiment.observers.append(FileStorageObserver.create(str(path)))

    experiment.run_commandline(argv)
def main():
    observer = FileStorageObserver(osp.join("data", "sacred", "multi_score"))
    multi_score_ex.observers.append(observer)
    multi_score_ex.run_commandline()
from sacred.observers import RunObserver

class SetID(RunObserver):
    # Class header reconstructed; this observer assigns a custom run _id
    # (see the full SetID definition further down).
    def started_event(self, ex_info, command, host_info, start_time, config,
                      meta_info, _id):
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        if config["result_folder"] is not None:
            result_folder = config["result_folder"].strip("/").split("/")[-1]
            custom_id = f"{timestamp}_ablation_waypoints_{result_folder}"
        else:
            custom_id = f"{timestamp}_ablation_waypoints"
        return custom_id  # started_event returns the _run._id


ex = Experiment("mujoco-ablation-waypoints")
ex.observers = [
    SetID(),
    FileStorageObserver.create("results/mujoco/ablation_waypoints"),
]


class LatentSpaceTargetStateRewardWrapper(gym.Wrapper):
    def __init__(self, env, latent_space, target_states):
        super().__init__(env)  # gym.Wrapper stores env as self.env
        self.latent_space = latent_space
        # Normalize each target state to unit length.
        self.target_states = [ts / np.linalg.norm(ts) for ts in target_states]
        self.state = None
        self.timestep = 0

    def reset(self):
        obs = super().reset()
        self.state = self.latent_space.encoder(obs)
Example #9
def cfg(data_config):
    model_config = {  # Load pretrained model for testing
        "model_name": "DORN_median_matching",
        "model_params": {
            "in_channels":
            3,
            "in_height":
            257,
            "in_width":
            353,
            "sid_bins":
            data_config["sid_bins"],
            "offset":
            data_config["offset"],
            "min_depth":
            data_config["min_depth"],
            "max_depth":
            data_config["max_depth"],
            "alpha":
            data_config["alpha"],
            "beta":
            data_config["beta"],
            "frozen":
            True,
            "pretrained":
            True,
            "state_dict_file":
            os.path.join("models", "torch_params_nyuv2_BGR.pth.tar"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None  # Median matching eval
    dataset_type = "val"
    save_outputs = True
    seed = 95290421
    small_run = 0
    entry = None

    # print(data_config.keys())

    output_dir = os.path.join(
        "results",
        data_config["data_name"],  # e.g. nyu_depth_v2
        "{}_{}".format(dataset_type, small_run),
        model_config["model_name"])  # e.g. DORN_nyu_nohints

    safe_makedir(output_dir)
    ex.observers.append(
        FileStorageObserver.create(os.path.join(output_dir, "runs")))
    ##

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(
        device, os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
# Sacred
# Sources:
# https://github.com/gereleth/kaggle-telstra/blob/master/Automatic%20model%20tuning%20with%20Sacred%20and%20Hyperopt.ipynb
# https://github.com/maartjeth/sacred-example-pytorch
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
from sacred.observers import SlackObserver
from sacred.utils import apply_backspaces_and_linefeeds

EXPERIMENT_NAME = 'experiment'
DATABASE_NAME = 'experiments'
URL_NAME = 'mongodb://localhost:27017/'

ex = Experiment()
ex.observers.append(FileStorageObserver.create('results'))
ex.observers.append(FileStorageObserver.create('results-bert-aws'))
ex.observers.append(FileStorageObserver.create('results-bert-google'))
ex.observers.append(FileStorageObserver.create('results-features'))
#ex.observers.append(MongoObserver.create(url=URL_NAME, db_name=DATABASE_NAME))
ex.captured_out_filter = apply_backspaces_and_linefeeds

# Send a message to Slack if the run succeeded or failed
slack_obs = SlackObserver.from_config('slack.json')
ex.observers.append(slack_obs)

#Device
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
import numpy as np
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import datasets
import models

import evaluate_classification
import evaluate_regression

ex = Experiment('uncertainty-quality')
ex.captured_out_filter = apply_backspaces_and_linefeeds
ex.observers.append(FileStorageObserver.create('runs/', template='template_regression.html'))


@ex.config
def cfg():
    seed = 1337
    num_experiments = 10

    dataset_settings = {
        'name': 'protein_structure',
    }

    model_settings = {
        'name': 'mlp',
        'dropout': 0.05,
        'layers': [50, 50],
        'epochs': 300,
        'batch_size': 100,
def main():
    observer = FileStorageObserver.create(
        osp.join('data', 'sacred', 'tsne_activations'))
    generate_activations_ex.observers.append(observer)
    generate_activations_ex.run_commandline()
Example #13
0
def compute_wc_multiple(config_grid):
    ex.observers.append(FileStorageObserver(config_grid.root_path))
    ex.observers.append(SetID('myid'))
    for config in config_grid.configs_from_grid():
        ex.observers[1] = SetID(config.uid)
        ex.run(config_updates={'config': config, 'seed': config.seed})
Example #14
0
import os.path as osp
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver

import configuration
import siamese_model
from utils.misc_utils import auto_select_gpu, mkdir_p, save_cfgs
from utils.train_utils import load_caffenet

ex = Experiment(configuration.RUN_NAME)
ex.observers.append(
    FileStorageObserver.create(osp.join(configuration.LOG_DIR, 'sacred')))


@ex.config
def configurations():
    # Add configurations for current script, for more details please see the documentation of `sacred`.
    # REFER: http://sacred.readthedocs.io/en/latest/index.html
    model_config = configuration.MODEL_CONFIG
    train_config = configuration.TRAIN_CONFIG
    track_config = configuration.TRACK_CONFIG


def _configure_learning_rate(train_config, global_step):
    lr_config = train_config['lr_config']

    num_batches_per_epoch = \
Example #15
0
from importlib import import_module
from pathlib import Path

from pip import get_installed_distributions

import numpy as np
from tqdm import tqdm
from sacred import Experiment
from sacred.observers import FileStorageObserver
import torchvision.datasets

from benchmark.data import Iterator


ex = Experiment('benchmark')
project_root = Path(__file__).resolve().parent.parent
data_dir = project_root / 'results'
ex.observers.append(FileStorageObserver.create(str(data_dir)))


@ex.config
def config():
    """

    """
    project_root = str(project_root)
    ngpu = 1  # ngpu = 0 corresponds to cpu-mode
    data_type = 'image'  # Choose from ['image', 'sequence', 'mnist', 'cifer-10']; 'image' and 'sequence' are dummy data

    assert data_type in ['image', 'sequence', 'mnist', 'cifer-10'], \
        "Your data_type[{}] is not supported.".format(data_type)

    batch_size = 128    
from os.path import basename
from pathlib import Path

from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
from graspy.models import SBMEstimator, DCSBMEstimator
from src.data import load_left, load_right
from graspy.utils import binarize, symmetrize
from src.models import fit_a_priori
from src.utils import save_obj

ex = Experiment("Fit a priori")

current_file = basename(__file__)[:-3]

sacred_file_path = Path(f"./maggot_models/models/runs/{current_file}")

slack_obs = SlackObserver.from_config("slack.json")

file_obs = FileStorageObserver.create(sacred_file_path)

ex.observers.append(slack_obs)
ex.observers.append(file_obs)


@ex.config
def config():
    # Variables defined in config get automatically passed to main

    directed = True  # noqa: F841


def run_fit(seed, directed):
    # run left
    graph, labels = load_left()
import os.path as osp
import random
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver

import configuration
import siamese_model
from utils.misc_utils import auto_select_gpu, mkdir_p, save_cfgs

ex = Experiment(configuration.RUN_NAME)
ex.observers.append(FileStorageObserver.create(osp.join(configuration.LOG_DIR, 'sacred')))


@ex.config
def configurations():
  # Add configurations for current script, for more details please see the documentation of `sacred`.
  # REFER: http://sacred.readthedocs.io/en/latest/index.html
  model_config = configuration.MODEL_CONFIG
  train_config = configuration.TRAIN_CONFIG
  track_config = configuration.TRACK_CONFIG


def _configure_learning_rate(train_config, global_step):
  lr_config = train_config['lr_config']

  num_batches_per_epoch = \
Example #18
def main():
    observer = FileStorageObserver.create(osp.join('data', 'sacred', 'train'))
    train_ex.observers.append(observer)
    train_ex.run_commandline()
monkey_patch_nifti_image()

from sklearn.model_selection import train_test_split

from modl.input_data.fmri.rest import get_raw_rest_data
from modl.decomposition.fmri import fMRIDictFact, rfMRIDictionaryScorer
from modl.plotting.fmri import display_maps
from modl.utils.system import get_output_dir

from os.path import join

from sacred import Experiment
from sacred.observers import FileStorageObserver

import pandas as pd

exp = Experiment('decompose_fmri')
base_artifact_dir = join(get_output_dir(), 'decompose_fmri')
exp.observers.append(FileStorageObserver.create(basedir=base_artifact_dir))


@exp.config
def config():
    n_components = 70
    batch_size = 100
    learning_rate = 0.92
    method = 'dictionary only'
    reduction = 1
    alpha = 1e-4
    n_epochs = 100
    verbose = 30
    n_jobs = 5
    step_size = 1e-5
    source = 'adhd_4'
Example #20
import torch
from torch import optim
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torchvision import transforms
from torchvision.utils import save_image

from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver, MongoObserver
from model import simple_cnn


# sacred setup
ex = Experiment("Fashion Modell MTL")
ex.observers.append(FileStorageObserver.create('reports\\FashionMnistExperiments'))
ex.observers.append(MongoObserver())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
loss_fn = torch.nn.CrossEntropyLoss()
#ex.observers.append()

@ex.config
def config():
    batch_size = 128
    epochs = 15
    lr = 1e-3
    train_split_ratio = 0.8
    path = ""   ##output path for saving images for example
    notes = "Experiment description"
    cfg_dict = {} ## if there are to many hyper_params, collect here and dependy inject the dict
Example #21
import os

from baselines.policy.mlp_policy import MlpPolicy
from baselines.policy.cnn_policy import CnnPolicy
from baselines.pois import pois
from baselines.pois.parallel_sampler import ParallelSampler

# Sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver

# Create experiment
ex = Experiment('POIS')
# Set a File Observer
if os.environ.get('SACRED_RUNS_DIRECTORY') is not None:
    print("Sacred logging at:", os.environ.get('SACRED_RUNS_DIRECTORY'))
    ex.observers.append(
        FileStorageObserver.create(os.environ.get('SACRED_RUNS_DIRECTORY')))
if os.environ.get('SACRED_SLACK_CONFIG') is not None:
    print("Sacred is using slack.")
    ex.observers.append(
        SlackObserver.from_config(os.environ.get('SACRED_SLACK_CONFIG')))


@ex.config
def custom_config():
    seed = 0
    env = 'rllab.cartpole'
    num_episodes = 100
    max_iters = 500
    horizon = 500
    iw_method = 'is'
    iw_norm = 'none'
Example #22
from sacred import Experiment
from sacred.observers import FileStorageObserver

from data_set_file import create_huge_data_set, create_encoding_deconding_dict, generate_random_dataset, create_dict_nb_ligne
from model_creation import create_model
from trainning import train_model, load_model_weights, create_scheduler
from test_metrics import calcul_metric_concours

import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader

# Sacred stuff
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(
    FileStorageObserver.create('runs_sacred/model_data_random'))


# Configs
@experiment_sacred.config
def configuration():

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'

    path_save_model = "saves_model/model_poids_random.tar"
    #path_load_existing_model = "saves_model/model_poids_random.tar"
    path_load_existing_model = None
    path_model_weights_test = "saves_model/model_poids_random.tar"

    use_gpu = True
Example #23
from sacred import Experiment
from sacred.observers import FileStorageObserver

import os.path as osp
import os
import numpy as np
import time
import torch
from torch_geometric.data import DataLoader
import torch.utils.tensorboard as tb

from lib.datasets.config import get_output_dir, get_tb_dir
from lib.datasets.datasets import Datasets
from lib.network.complete_net import completeNet
from lib.network.utils import weighted_binary_cross_entropy, hungarian

ex = Experiment('Training network')

ex.observers.append(FileStorageObserver('../gdrive/MyDrive/KamMOT_trenowanie/logs'))

ex.add_config('config/training.yaml')

@ex.automain
def main(_config, _log, _run, training):

    torch.manual_seed(training['seed'])
    torch.cuda.manual_seed(training['seed'])
    np.random.seed(training['seed'])
    torch.backends.cudnn.deterministic = True

    print(_config)

    output_dir = osp.join(get_output_dir(training['cnn_encoder']) + '_runID_' + _run._id)
    tb_dir = osp.join(get_tb_dir(training['cnn_encoder']) + '_runID_' + _run._id)
Example #24
import numpy as np
import torch

from sacred import Experiment
from sacred.observers import FileStorageObserver

from ingredients.dataset import deladd, load_deladd as load_dataset
from ingredients.model import model, init_model
from ingredients.training import training, init_metrics, init_optimizer, \
                                 create_rnn_trainer, create_rnn_evaluator, \
                                 Tracer, ModelCheckpoint, LRScheduler

# Add configs
training.add_config('configs/dummy-training.yaml')
deladd.add_config('configs/dummy-dataset.yaml')
model.add_config('dummy-lstm.yaml')

# Set up experiment
ex = Experiment(name='deladd', ingredients=[deladd, model, training])
ex.add_config(no_cuda=False, save_folder='../../data/sims/deladd/temp/')
ex.add_package_dependency('torch', torch.__version__)
ex.observers.append(FileStorageObserver.create('../data/sims/test/'))


# Functions
@ex.capture
def set_seed_and_device(seed, no_cuda):
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available() and not no_cuda:
        torch.cuda.manual_seed(seed)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    return device
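
# A hypothetical command using the captured function above: called with no
# arguments inside a run, sacred injects seed and no_cuda from the config.
@ex.automain
def main():
    device = set_seed_and_device()
    print('Training on', device)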
Example #25
import numpy as np
import ddf_fdk as ddf
import nn_fdk as nn
import time
import pylab
t = time.time()

from sacred.observers import FileStorageObserver
from sacred import Experiment
from os import environ
name_exp = 'MSD'
ex = Experiment(name_exp, ingredients=[])

FSpath = '/export/scratch2/lagerwer/NNFDK_results/' + name_exp
ex.observers.append(FileStorageObserver.create(FSpath))


# %%
@ex.config
def cfg():
    phantom = 'Fourshape_test'
    nVD = 1
    nTD = 1
    train = False


# %%


@ex.automain
Example #26
def main():
    observer = FileStorageObserver(osp.join("data", "sacred", "tsne"))
    tsne_ex.observers.append(observer)
    tsne_ex.run_commandline()
from sacred import Experiment
from sacred.observers import FileStorageObserver
from data_set_file import create_huge_data_set, create_encoding_deconding_dict
from model_creation import create_model
from trainning import LRPolicy, train_model

import torch.optim as optim
import torch.nn as nn

from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

# Sacred stuff
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(FileStorageObserver.create('my_runs_v_alpha'))


# Configs
@experiment_sacred.config
def configuration():

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
    path_save_model = "saves_model/model_info.tar"

    nb_row_per_classe = 400

    use_gpu = True
from sacred import Experiment
from sacred.observers import FileStorageObserver

from data_set_file import create_huge_data_set, create_encoding_deconding_dict
from model_creation import create_model
from trainning import train_model, load_model_weights, create_scheduler
from test_metrics import calcul_metric_concours

import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader

# Sacred stuff
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(
    FileStorageObserver.create('my_runs_v_alpha'))


# Configs
@experiment_sacred.config
def configuration():

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
    path_save_model = "saves_model/model_4_classes.tar"
    path_load_existing_model = None
    # path_load_existing_model = "saves_model/model_4_classes.tar"
    path_model_weights_test = "saves_model/model_4_classes.tar"

    use_gpu = True

    do_training = True
Example #29
import datetime
from pathlib import Path

from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.observers import FileStorageObserver
import fastai

import news_utils.fastai
from bpemb import BPEmb

import news_utils.clean.german

EX_PA = Path('/mnt/data/group07/johannes/ompc/pplmexp_short2')

ex = Experiment('shortppompclm2')

# ex.observers.append(MongoObserver.create(db_name='shortppompclm2'))
ex.observers.append(FileStorageObserver.create('my_runs'))


@ex.config
def my_config():
    bs = 128
    epochs_start = 0
    epochs = 10  #math.ceil(random.uniform(1, 5))
    drop_mult = 0.5559  #random.uniform(0.5, 1)

    exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
    model_id = '2019_ 3_27_14_30_09_921754'  # best model after 5 epochs


@ex.main
def my_main(epochs, drop_mult, exp_id, bs, epochs_start, model_id):
# writes the results
# cf. https://github.com/IDSIA/sacred/issues/174
from sacred.observers import RunObserver

class SetID(RunObserver):
    priority = 50  # very high priority to set id

    def started_event(self, ex_info, command, host_info, start_time, config,
                      meta_info, _id):
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        custom_id = "{}_learn_dynamics_{}_{}".format(timestamp,
                                                     config["env_name"],
                                                     config["problem_spec"])
        return custom_id  # started_event returns the _run._id


ex = Experiment("learn_dynamics_model")
ex.observers = [SetID(), FileStorageObserver.create("results")]


def _get_log_folders(checkpoint_base, tensorboard_base, label):
    checkpoint_folder = os.path.join(checkpoint_base, label)
    tensorboard_folder = os.path.join(tensorboard_base, label)
    os.makedirs(checkpoint_folder, exist_ok=True)
    os.makedirs(tensorboard_folder, exist_ok=True)
    return checkpoint_folder, tensorboard_folder


def train_latent_space_model(
    env,
    hidden_layer_size,
    rnn_state_size,
    n_rollouts,
#!/usr/bin/env python3

import sacred
from sacred.observers import FileStorageObserver

experiment = sacred.Experiment('observe')

@experiment.config
def configuration():
    recipient = 'observer'
    message = 'hello {0}!'.format(recipient)

@experiment.main
def main(message):
    print(message)

experiment.observers.append(
    FileStorageObserver.create('run')
)
experiment.run()
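
# After experiment.run(), the FileStorageObserver creates a numbered run
# directory under ./run containing config.json, run.json and cout.txt.
# A minimal sketch for reading the stored config back (assumes at least
# one completed run):
import json
from pathlib import Path

latest = max((p for p in Path('run').iterdir() if p.name.isdigit()),
             key=lambda p: int(p.name))
print(json.loads((latest / 'config.json').read_text()))
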
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session  # assumed source of set_session

config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

import numpy as np
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import datasets
import models
import tqdm
from scipy.stats import entropy
from sklearn.linear_model import LogisticRegression

ex = Experiment('uncertainty-quality')
ex.captured_out_filter = apply_backspaces_and_linefeeds
ex.observers.append(FileStorageObserver.create('runs/'))


@ex.config
def cfg():
    seed = 1337

    dataset_settings = {
        'name': 'cifar10',
    }

    model_settings = {
        'name': 'cnn',
        'epochs': 100,
    }
Example #33
"""A standard machine learning task without much sacred magic."""
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn import svm, datasets, model_selection

ex = Experiment("svm")

ex.observers.append(
    FileStorageObserver.create("my_runs")
)
ex.add_config({  # Configuration is explicitly defined as dictionary.
    "C": 1.0,
    "gamma": 0.7,
    "kernel": "rbf",
    "seed": 42
})


def get_model(C, gamma, kernel):
    return svm.SVC(C=C, kernel=kernel, gamma=gamma)


@ex.main  # Using main, command-line arguments will not be interpreted in any special way.
def run(_config):
    X, y = datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
    clf = get_model(_config["C"], _config["gamma"], _config["kernel"])  # Parameters are passed explicitly.
    clf.fit(X_train, y_train)
    return clf.score(X_test, y_test)
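
# A hypothetical entry point: ex.run() executes run() above, and the returned
# score is stored by the FileStorageObserver as "result" in
# my_runs/<run_id>/run.json.
if __name__ == "__main__":
    print("test accuracy:", ex.run().result)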

import tensorflow as tf
import numpy as np
from os import listdir, path
from enhancersdata import EnhancersData
from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment()
ex.observers.append(FileStorageObserver.create('log'))


@ex.config
def general_config():
    general_cfg = {
        "seq_length": 1000,
        "num_outs": 2,
        "batch_size": 100,
        "num_epochs": 40
    }


@ex.config
def cnn_config():
    conv1_cfg = {"num_filters": 16, "filter_size": [4, 15]}
    conv2_cfg = {"num_filters": 16, "filter_size": [1, 5]}
    conv3_cfg = {"num_filters": 32, "filter_size": [1, 5]}
    pool1_cfg = {"kernel_size": 10, "stride": 10}
    pool2_cfg = {"kernel_size": 3, "stride": 3}
    pool3_cfg = {"kernel_size": 3, "stride": 3}
    dropout_keep_prob = 0.5
Example #35
import os
import sys

import torch
import torch.optim as optim
from tqdm import tqdm

from sacred import Experiment
from sacred.observers import FileStorageObserver

from constants import *
sys.path.append(BASE_DIR)
from goggles.loss import CustomLoss2
from goggles.models.semantic_ae import SemanticAutoencoder
from goggles.opts import DATASET_MAP, DATA_DIR_MAP
from goggles.utils.vis import \
    get_image_from_tensor, save_prototype_patch_visualization


_make_cuda = lambda x: x.cuda() if torch.cuda.is_available() else x

ex = Experiment('goggles-experiment')
ex.observers.append(FileStorageObserver.create(os.path.join(ALL_RUNS_DIR)))


def _provision_run_dir(run_dir):
    new_dirs = [LOGS_DIR_NAME, IMAGES_DIR_NAME, PROTOTYPES_DIR_NAME]
    new_dirs = list(map(lambda d: os.path.join(run_dir, d), new_dirs))
    for new_dir in new_dirs:
        os.makedirs(new_dir)
    return new_dirs


@ex.config
def default_config():
    seed = 42                # RNG seed for the experiment
    dataset = 'cub'            # Dataset to be used (cub/awa2)
    filter_class_ids = None  # Class IDs used for training, uses all classes if None
Example #36
    policy_path: A path to the serialized policy.
    log_dir: The directory to log intermediate output to. (As of 2019-07-19
        this is just episode-by-episode reward from bench.Monitor.)

  Returns:
    Statistics returned by `imitation.util.rollout.rollout_stats`.
  """
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Logging to %s', log_dir)

    venv = util.make_vec_env(env_name,
                             num_vec,
                             seed=_seed,
                             parallel=parallel,
                             log_dir=log_dir)
    if render:
        venv = InteractiveRender(venv)
    # TODO(adam): add support for videos using VideoRecorder?

    policy = serialize.load_policy(policy_type, policy_path, venv)
    stats = rollout.rollout_stats(policy, venv, n_timesteps=timesteps)

    return stats


if __name__ == "__main__":
    observer = FileStorageObserver.create(
        osp.join('output', 'sacred', 'policy_eval'))
    policy_eval_ex.observers.append(observer)
    policy_eval_ex.run_commandline()