import sys
import os.path as pt

from sacred import Experiment
from sacred.observers import file_storage
from trainer import Trainer
from crumpets.presets import IMAGENET_MEAN, IMAGENET_STD


def expname(script=None):
    if script is None:
        script = sys.argv[0]
    return pt.splitext(pt.basename(script))[0]


ROOT = pt.abspath(pt.dirname(__file__))
#ROOT = pt.join(ROOT, '..')
EXP_FOLDER = pt.join(ROOT, '../exp')
exp = Experiment('Experiment: AE with Classifier training')


# Construct the loader that yields batches like a DataLoader.
# This function should also shard the data across processes.
def make_loader(
        file,
        batch_size,
        device,
        world_size,
        rank,
        nworkers,  # important for throughput: more workers use more CPU cores to process images
        size=96,
        image_rng=None,
        image_params=None,
        gpu_augmentation=False,
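
The comment above says the loader should divide the data for each process. A minimal sketch of that per-rank sharding idea, assuming an indexable dataset (the helper name is illustrative, not this project's API):

# Hypothetical helper: give each distributed rank a disjoint, strided
# slice of the sample indices.
def shard_indices(num_samples, world_size, rank):
    return list(range(rank, num_samples, world_size))

# e.g. 10 samples on 4 ranks: rank 1 processes indices [1, 5, 9]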
Example #2
# Copyright (c) 2020 Brno University of Technology
# Copyright (c) 2020 Nippon Telegraph and Telephone corporation (NTT).
# All rights reserved
# By Katerina Zmolikova, November 2020.

from sacred import Experiment
from sacred.observers import FileStorageObserver
from pathlib import Path
from dolphinbin.inference_gmm import run_inference
import json

ex = Experiment()


@ex.config
def config():
    i_split = 0
    n_split = 1
    sset = 'tt'

    dataset = Path(f'../../data/wsj0-mix_spat/{sset}_n1015_mix.json')
    outdir = Path(f'Out/{sset}')
    logdir = Path('./logs')
    modeldir = Path('../../exp/gmm/models')
    model = modeldir / 'GMM.h5'
    ex.observers.append(
        FileStorageObserver.create(logdir / f'{sset}_{i_split}_{n_split}'))

    use_gpu = False

    nfft = 512
import os.path as osp
import random
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver

import configuration
from utils.misc_utils import auto_select_gpu, mkdir_p, save_cfgs

from models.bisenet import BiseNet

ex = Experiment(configuration.RUN_NAME)
ex.observers.append(FileStorageObserver.create(osp.join(configuration.LOG_DIR, 'sacred')))

# TODO: num_classes needs to be fixed
num_classes = 7


@ex.config
def configurations():
  # Add configuration for the current script; for more details, see the `sacred` documentation.
  model_config = configuration.MODEL_CONFIG
  train_config = configuration.TRAIN_CONFIG


def _configure_learning_rate(train_config, global_step):
  lr_config = train_config['lr_config']
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from sacred import Experiment
from sacred.observers import FileStorageObserver

import os
import datetime

import audio_models
import dataset
from train import train
from test import test

ex = Experiment('UNet_Speech_Separation', interactive=True)
ex.observers.append(FileStorageObserver.create('my_runs'))


@ex.config
def cfg():
    model_config = {
        'model_variant':
        'unet',  # The type of model to use, from ['unet', 'capsunet', 'basic_capsnet']
        'data_type':
        'mag',  # From ['mag', 'mag_phase', 'real_imag', 'mag_real_imag']
        'phase_weight':
        0.00001,  # For models that learn to estimate phase, sets how much weight
        # the phase loss is given relative to the magnitude loss
        'initialisation_test':
        False,  # Whether or not to calculate test metrics before training
        'loading': False,  # Whether to load an existing checkpoint
        'checkpoint_to_load': "136/136-6",  # Checkpoint format: run/run-step
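
The phase_weight comment describes a weighted sum of the two loss terms. A hedged sketch of that combination (the function and names are illustrative, not this repository's code):

# Hypothetical combined objective: the phase term is scaled down so the
# magnitude loss dominates, as the config's default of 1e-5 suggests.
def combined_loss(magnitude_loss, phase_loss, phase_weight=1e-5):
    return magnitude_loss + phase_weight * phase_loss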
Example #5
File: main.py, Project: yyf17/MAVEN
import os
from os.path import dirname, abspath
from copy import deepcopy
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import numpy as np
import torch as th
from utils.logging import get_logger
import yaml

from run import run

SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
logger = get_logger()

ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds

results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")


@ex.main
def my_main(_run, _config, _log, env_args):
    # Setting the random seed throughout the modules
    np.random.seed(_config["seed"])
    th.manual_seed(_config["seed"])
    env_args['seed'] = _config["seed"]

    # run the framework
    run(_run, _config, _log)
from os.path import basename
from pathlib import Path
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
from graspy.datasets import load_drosophila_left
from graspy.utils import binarize, symmetrize, is_fully_connected
from graspy.plot import heatmap
from src.utils import select_sbm, select_rdpg
import matplotlib.pyplot as plt

ex = Experiment("Drosophila model selection 1")

current_file = basename(__file__)[:-3]

sacred_file_path = Path(f"./maggot_models/models/runs/{current_file}")

slack_obs = SlackObserver.from_config("slack.json")

file_obs = FileStorageObserver.create(sacred_file_path)

ex.observers.append(slack_obs)
ex.observers.append(file_obs)


@ex.config
def config():
    """Variables defined in config get automatically passed to main"""
    n_block_try_range = list(range(1, 11))  # noqa: F841
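
As the docstring notes, variables defined in a config scope are injected into captured functions by parameter name. A minimal self-contained illustration of that mechanism (a sketch, not part of this project):

from sacred import Experiment

demo = Experiment("config_injection_demo")

@demo.config
def demo_config():
    n_block_try_range = list(range(1, 11))

@demo.automain
def main(n_block_try_range):
    # Sacred fills this argument because its name matches a config entry
    print(n_block_try_range)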
Example #7
import numpy as np
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
from sklearn.cluster import SpectralClustering

from graph_utils import (
    delete_isolated_ccs,
    normalize_weights_array,
    np_layer_array_to_graph_weights_array,
    weights_to_graph,
)
from utils import load_masked_weights_numpy, load_model_weights_numpy

clust_exp = Experiment('cluster_model')
clust_exp.captured_out_filter = apply_backspaces_and_linefeeds
clust_exp.observers.append(FileStorageObserver('clustering_runs'))


@clust_exp.config
def basic_config():
    num_clusters = 4
    weights_path = "./models/mlp_kmnist.pth"
    mask_path = None
    net_type = 'mlp'
    normalize_weights = True
    epsilon = 1e-9
    eigen_solver = 'arpack'
    _ = locals()  # touch the locals so linters don't flag the config entries as unused
    del _
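
A sketch of how these settings would typically feed sklearn's SpectralClustering; the captured main isn't shown above, so this is an assumed usage, not the project's actual code:

def cluster_adjacency(adj_matrix, num_clusters, eigen_solver):
    # 'precomputed' affinity treats adj_matrix as the similarity graph
    sc = SpectralClustering(n_clusters=num_clusters,
                            eigen_solver=eigen_solver,
                            affinity='precomputed')
    return sc.fit_predict(adj_matrix)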
Example #8
import json

from sacred import Experiment
from sacred.observers import MongoObserver

from wireless.agents.random_agent import RandomAgent
from wireless.agents.round_robin_agent import *
from wireless.agents.proportional_fair import *

# Load agent parameters
with open('config/config_agent.json') as f:
    ac = json.load(f)

# Configure experiment
with open('config/config_sacred.json') as f:
    sc = json.load(f)  # Sacred Configuration
    ns = sc["sacred"][
        "n_metrics_points"]  # Number of points per episode to log in Sacred
    ex = Experiment(ac["agent"]["agent_type"])
    ex.add_config(sc)
    ex.add_config(ac)
mongo_db_url = f'mongodb://{sc["sacred"]["sacred_user"]}:{sc["sacred"]["sacred_pwd"]}@' +\
               f'{sc["sacred"]["sacred_host"]}:{sc["sacred"]["sacred_port"]}/{sc["sacred"]["sacred_db"]}'
# ex.observers.append(MongoObserver(url=mongo_db_url, db_name=sc["sacred"]["sacred_db"]))  # Uncomment to save to DB

# Load environment parameters
with open('config/config_environment.json') as f:
    ec = json.load(f)
    ex.add_config(ec)


@ex.automain
def main(_run):
    n_eps = _run.config["agent"]["n_episodes"]
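
`n_metrics_points` bounds how many metric points are logged to Sacred per episode. A hedged sketch of what such throttled logging usually looks like with `_run.log_scalar` (the reward series is illustrative):

# Hypothetical: log at most `ns` evenly spaced reward samples per episode
def log_episode_rewards(_run, rewards, ns):
    stride = max(1, len(rewards) // ns)
    for t in range(0, len(rewards), stride):
        _run.log_scalar("reward", rewards[t], t)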
Example #9
from shutil import rmtree

from sacred import Experiment

from schnetpack.sacred.calculator_ingredients import (calculator_ingradient,
                                                      build_calculator)
from schnetpack.sacred.simulator_ingredients import (simulator_ingredient,
                                                     build_simulator)
from schnetpack.sacred.integrator_ingredients import (integrator_ingredient,
                                                      build_integrator)
from schnetpack.sacred.system_ingredients import (system_ingredient,
                                                  build_system)
from schnetpack.sacred.thermostat_ingredients import thermostat_ingredient, \
    build_thermostat

md = Experiment('md',
                ingredients=[
                    simulator_ingredient, calculator_ingradient,
                    integrator_ingredient, system_ingredient,
                    thermostat_ingredient
                ])

SETUP_STRING_WIDTH = 30
SETUP_STRING = "\n\n{:s}\n{:s}\n{:s}".format(SETUP_STRING_WIDTH * "=",
                                             f'{{:^{SETUP_STRING_WIDTH}s}}',
                                             SETUP_STRING_WIDTH * '=')


@md.config
def config():
    """configuration for the simulation experiment"""
    simulation_dir = 'experiment'
    simulation_steps = 1000
    device = 'cpu'
Example #10
import tensorflow as tf


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')  # stride of 1

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 2x2 max pooling, stride of 2

def max_pool_3x3(x):
    return tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')

from sacred import Experiment
# from sacred.observers import MongoObserver
ex = Experiment('DSB CONVNET EXPERIMENT')
# ex.observers.append(MongoObserver.create())

@ex.config
def config():
    RUN_NAME = 'CRPS-MODEL-3.0'
    DATA_DIR = 'netdata'
    ITERS = 100000
    START_ITER = 0
    MODEL_LOAD_PATH = None
    PIC_WIDTH = 32
    ### Architectural Hyperparameters
    NUM_INPUTS = 9       # Number of input channels
    NUM_OUTPUTS = 600    # Number of output classes in the softmax layer
    NUM_REPS = 144
import sys

from models.models import Net, BasicBlock, ObliqueGeneralizedBlock, StiefelGeneralizedBlock, LinearBlock
import torch
import torch.nn as nn
import ops.optim as optim
from tqdm import trange
import random
from random_words import RandomWords
import numpy as np
from sacred import Experiment
from sacred.observers import MongoObserver
from ops.computeParams import optimize_sigmaw_sigmab
from sacred.settings import SETTINGS
# SETTINGS.CAPTURE_MODE = 'sys'

rw = RandomWords()
ex = Experiment(name=rw.random_word() + str(random.randint(0, 100)))
# ex.observers.append(MongoObserver.create(db_name='isonetry-hyperparams2'))
ex.observers.append(
    MongoObserver.create(
        url='mongodb://***:***@***.***.com/admin?authMechanism=SCRAM-SHA-1',
        db_name='isontery-hyperparams2'))


@ex.pre_run_hook
def set_logger_stream(_run):
    _run.root_logger.handlers[0].stream = sys.stderr


@ex.named_config
def stiefel_penalized():
    manifold = 'stiefel_penalized'
Example #12
import pytest

from sacred import Experiment
from sacred.utils import MissingConfigError


def test_missing_config_raises():
    ex = Experiment('exp')
    ex.main(lambda a: None)
    with pytest.raises(MissingConfigError):
        ex.run()
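
For contrast, a sketch of the passing case, where the config supplies the captured parameter (an assumed companion test, not from the source):

def test_config_present_runs():
    ex = Experiment('exp')
    ex.add_config(a=1)  # now `a` can be injected into main
    ex.main(lambda a: None)
    ex.run()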
Example #13

But because we are using a ``ConfigScope`` that constructs the message from a
recipient we can also just modify that::

  $ ./03_hello_config_scope.py with recipient='Bob'
  INFO - hello_config_scope - Running command 'main'
  WARNING - hello_config_scope - No observers have been added to this run
  INFO - hello_config_scope - Started
  Hello Bob!
  INFO - hello_config_scope - Completed after 0:00:00
"""
from __future__ import division, print_function, unicode_literals
from sacred import Experiment

ex = Experiment('hello_config_scope')


# A ConfigScope is a function like this decorated with @ex.config
# All local variables of this function will be put into the configuration
@ex.config
def cfg():
    recipient = "world"
    message = "Hello {}!".format(recipient)


# again we can access the message here by taking it as an argument
@ex.automain
def main(message):
    print(message)
Example #14
from sacred import Experiment
from sacred.observers import FileStorageObserver
from data_set_file import create_huge_data_set, create_encoding_deconding_dict, generate_random_dataset
from model_creation import create_model
from trainning import train_model, load_model_weights, create_scheduler
from test_metrics import calcul_metric_concours

import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader

# Sacred setup
experiment_sacred = Experiment("Doodle_Boys")
experiment_sacred.observers.append(
    FileStorageObserver.create('runs_sacred/model_data_random'))


# Configs
@experiment_sacred.config
def configuration():

    path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'

    path_save_model = "saves_model/model_poids_random.tar"
    path_load_existing_model = "saves_model/model_poids_random.tar"
    path_model_weights_test = "saves_model/model_poids_random.tar"

    use_gpu = True
Example #15
import logging
import tempfile
import warnings

from sacred import Experiment
from sacred.observers import FileStorageObserver

from aprl.envs.multi_agent import make_dummy_vec_multi_env, make_subproc_vec_multi_env
from modelfree.common import utils
from modelfree.envs.gym_compete import GymCompeteToOurs, game_outcome
from modelfree.envs.observation_masking import make_mask_agent_wrappers
from modelfree.envs.wrappers import TrajectoryRecorder, VideoWrapper, make_env, simulate
from modelfree.policies.loader import load_policy
from modelfree.policies.wrappers import NoisyAgentWrapper
from modelfree.visualize.annotated_gym_compete import AnnotatedGymCompete

score_ex = Experiment('score')
score_ex_logger = logging.getLogger('score_agent')


def announce_winner(sim_stream):
    """This function determines the winner of a match in one of the gym_compete environments.
    :param sim_stream: a stream of obs, rewards, dones, infos from one of the gym_compete envs.
    :return: the index of the winning player, or None if it was a tie."""
    for _, _, dones, infos in sim_stream:
        for done, info in zip(dones, infos):
            if done:
                yield game_outcome(info)


@score_ex.capture
def get_empirical_score(venv, agents, episodes, timesteps, render, record_traj, _run):
Example #16
from __future__ import division
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
import sys
sys.path = [''] + sys.path 
from src.allstate_data import AllStateData
from sacred import Experiment
from sacred.observers import MongoObserver
import colored_traceback.always
from sklearn.linear_model import Ridge as algo
ex = Experiment('Ridge')
ex.observers.append(MongoObserver.create(db_name="allstate"))


@ex.config
def my_config():
    output_path = "Ridge"
    datadir = 'input'
    n_folds = 10
    clfparams = {}
    include = []
    exclude = []
    featureparams = {'shift':200.0}
    save_oob_predictions  = True
    save_test_predictions = True
    skip_cross_validation = False

@ex.main
def experiment(output_path,
import os
import pandas as pd
import numpy as np
import xgboost as xgb
import datetime
from sklearn.cross_validation import StratifiedKFold
from sacred import Experiment
from src.telstra_data import TelstraData, multiclass_log_loss
from sacred.observers import MongoObserver

ex = Experiment('gradient_boosting')
ex.observers.append(MongoObserver.create(db_name="telstra"))


@ex.config
def my_config():
    series = "XGB"
    n_folds = 10
    num_trees = 2000
    early_stopping_rounds = 50
    verbose_eval = 0
    featureparams = {
        "location_min_count": 0,
        "n_common_events": 20,
        "n_common_log_features": 60,
        "n_common_resources": 10,
        "n_label_encoded_log_features": 4
    }
    aggregateparams = {"loc_agg_prior_weight": 3.0}
    include = []
    exclude = []
from sacred import Experiment
from sacred.observers import MongoObserver
from numpy.random import permutation
from sklearn import svm, datasets

ex = Experiment('iris_rbf_svm')

# MongoDB settings

DATABASE_URL = "172.18.65.219:27017"
DATABASE_NAME = "demo_db"

m_observer = MongoObserver.create(url=DATABASE_URL, db_name=DATABASE_NAME)
ex.observers.append(m_observer)


# Hyperparameters
@ex.config
def my_config():
    C = 1.0
    gamma = 0.7


@ex.automain
def run(C, gamma, _run):
    # load a dataset
    iris = datasets.load_iris()

    # permute the dataset
    per = permutation(iris.target.size)
    iris.data = iris.data[per]
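
The example is cut off here. This mirrors Sacred's well-known quickstart, which continues the captured function roughly as follows (a hedged reconstruction in that quickstart's style, not necessarily this file's exact code):

    iris.target = iris.target[per]

    # fit an RBF SVM with the injected C and gamma, score on a held-out tail
    clf = svm.SVC(C=C, kernel='rbf', gamma=gamma)
    clf.fit(iris.data[:90], iris.target[:90])
    return clf.score(iris.data[90:], iris.target[90:])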
Example #19
  - run the experiment with a fixed seed and set the reverse parameter to true.
    Notice how the results are the same, but in slightly different order.
    This shows that calls to different functions do not interfere with one
    another::

      $ ./06_randomness.py with seed=12345 reverse=True numbers=3 -l WARNING
      695891797
      [57, 79, 86]
      [28, 90, 92]
      [82, 9, 3]

"""

from sacred import Experiment

ex = Experiment('randomness')


@ex.config
def cfg():
    reverse = False
    numbers = 1


@ex.capture
def do_random_stuff(numbers, _rnd):
    print([_rnd.randint(1, 100) for _ in range(numbers)])


@ex.capture
def do_more_random_stuff(_seed):
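
The capture above is cut off. As a hedged sketch of what a `_seed`-consuming capture generally looks like (Sacred derives a fresh deterministic seed for every call; the body below is illustrative, not the original):

import random

@ex.capture
def do_more_random_stuff_sketch(_seed):
    # seeding a local RNG with the injected seed keeps results reproducible
    rng = random.Random(_seed)
    print(rng.randint(1, 100))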
Example #20
import pytest

from sacred import Experiment


@pytest.fixture  # assumption: used as a pytest fixture in the original test module
def ex():
    return Experiment("tensorflow_tests")
from transformer_rankers.eval import results_analyses_tools

from transformers import BertTokenizer, BertForSequenceClassification
from sacred.observers import FileStorageObserver
from sacred import Experiment
from IPython import embed

import torch
import pandas as pd
import argparse
import logging
import sys
import json
import os

ex = Experiment('BERT-ranker experiment')

logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s [%(levelname)s] %(message)s",
                    handlers=[logging.StreamHandler(sys.stdout)])


@ex.main
def run_experiment(args):
    args.run_id = str(ex.current_run._id)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    tokenizer = BertTokenizer.from_pretrained(args.transformer_model)
    #Load datasets
Example #22
File: main.py, Project: pitcany/hitachi
import os
import sys

from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds

from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.vec_env.vec_normalize import VecNormalize

# Own imports
from envs import make_env
from storage import RolloutStorage
import utils
import tensorflow as tf

# Create Sacred Experiment
ex = Experiment("POMRL")
ex.captured_out_filter = apply_backspaces_and_linefeeds
# np.seterr(all='raise')

# Get name of environment yaml file.
# Should be specified in command line using
# 'python main.py with environment.config_file=name_of_env_config_file.yaml'
environment_yaml = utils.get_environment_yaml(ex)

# Add default.yaml and the <environment_name>.yaml file to the sacred configuration
DIR = os.path.dirname(sys.argv[0])
DIR = '.' if DIR == '' else DIR
ex.add_config(DIR + '/conf/default.yaml')
ex.add_config(DIR + '/conf/' + environment_yaml)

from sacred.observers import FileStorageObserver
Example #23
$ python rhn_train.py

"""
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import time
import os

import numpy as np
import tensorflow as tf

from sacred import Experiment
from .rhn import Model
from .data.reader import data_iterator

ex = Experiment('rhn_prediction')
logging = tf.logging


class Config:
    pass


C = Config()


@ex.config
def hyperparameters():
    data_path = 'data'
    dataset = 'ptb'
    init_scale = 0.04
Example #24
#!/usr/bin/env python
# coding=utf-8
"""
This example shows how to apply a filter function to the captured output
of a run. This is often useful when using progress bars or similar in the text
UI and you don't want to store formatting characters like backspaces and
linefeeds in the database.
"""

import sys
import time

from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds

ex = Experiment('progress')

# try commenting out the line below to see the difference in captured output
ex.captured_out_filter = apply_backspaces_and_linefeeds


def write_and_flush(*args):
    for arg in args:
        sys.stdout.write(arg)
    sys.stdout.flush()


class ProgressMonitor:
    def __init__(self, count):
        self.count, self.progress = count, 0
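
The filter can be exercised directly to see its effect; a small illustration (the expected output is my reading of the filter's carriage-return handling, not taken from the source):

raw = "progress: 1\rprogress: 2\rprogress: 3"
print(apply_backspaces_and_linefeeds(raw))  # expected: "progress: 3"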
Example #25
from ica_benchmark.scoring import SCORING_FN_DICT, apply_pairwise_parallel
from ica_benchmark.processing.ica import get_all_methods, get_ica_instance

from ica_benchmark.io.load import BCI_IV_Comp_Dataset

from sacred.observers import MongoObserver, FileStorageObserver
from sacred import Experiment

import json
from pathlib import Path

from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator

from mne.time_frequency import psd_multitaper

ex = Experiment("experiment")
ex.observers.append(FileStorageObserver('my_runs'))
ex.observers.append(
    MongoObserver(
        url='mongodb://*****:*****@localhost:27017',
        db_name='sacred',
    ))

train_filepaths = list(
    Path("/home/paulo/Documents/datasets/BCI_Comp_IV_2a/gdf").glob(
        "*0[1,3,4,7,8]T.gdf"))
test_filepaths = list(
    Path("/home/paulo/Documents/datasets/BCI_Comp_IV_2a/gdf").glob(
        "*0[2,9,6,5]T.gdf"))

ICA_N_COMPONENTS = None
import numpy as np
from sacred import Experiment

from lib.datasets import ds
from lib.datasets import SequentialHdF5Dataset
from lib.model import net
from lib.model import WorldModel, VRNN
from lib.geco import GECO
from lib.utils import create_video
from tqdm import tqdm
from pathlib import Path
import shutil
import random
random.seed(0)

ex = Experiment('VIDEOS', ingredients=[ds, net])


@ex.config
def cfg():
    test = {
        'batch_size': 16,
        'output_size': [3, 64, 64],
        'max_num_videos': 8,
        'mode': 'test',
        'rollouts': 5,
        'rollouts_to_keep': 64,
        'store_slots': False,
        'model': 'OP3',
        'context_len': 1,
        'seq_len': 1,
import sacred
from sacred import Experiment
from sacred.observers.file_storage import FileStorageObserver
import torch.nn as nn
import numpy as np
import torchvision
from zeiss_umbrella.integrated_gradient.utils import calculate_outputs_and_gradients
from zeiss_umbrella.integrated_gradient.integrated_gradients import random_baseline_integrated_gradients
from zeiss_umbrella.integrated_gradient.visualization import visualize
from zeiss_umbrella.fundus.setting_parser import get_baseline, get_optimizer, get_loss
from zeiss_umbrella.fundus.train import *
from zeiss_umbrella.fundus.adversarial import fgsm_k_image, fgsm_image, pgd
from zeiss_umbrella.fundus.data import get_fundus_train
from datetime import datetime as dt

ex = Experiment('integrated gradients')
template = ""
ex.observers.append(
    FileStorageObserver(FILE_OBSERVER_BASE_PATH, FILE_OBSERVER_RESOURCE_PATH,
                        FILE_OBSERVER_SOURCE_PATH, template))

# uncomment if you use progress bars
# from sacred.utils import apply_backspaces_and_linefeeds
# ex.captured_out_filter = apply_backspaces_and_linefeeds
# for more info see https://sacred.readthedocs.io/en/latest/collected_information.html#live-information


@ex.config
def my_config():
    adv_training_config = {'type': 'fgsm'}
    experiments_path = '/home/jiwu/interpretable-fundus/fundus_experiments'
Example #28
File: run.py, Project: inkyusa/BuilT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import pprint
import torch

from easydict import EasyDict as edict
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds

from src.models.mnist import Mnist
from src.trainer import Trainer

ex = Experiment('orsum')
ex.captured_out_filter = apply_backspaces_and_linefeeds


@ex.config
def cfg():
    description = ''


@ex.main
def main(_run, _config):
    config = edict(_config)
    pprint.PrettyPrinter(indent=2).pprint(config)


@ex.command
Example #29
import os

from pathlib import Path

from sacred import Experiment
from sacred.utils import InvalidConfigError, MissingConfigError

import padertorch as pt
import padertorch.contrib.examples.tasnet.tasnet
import paderbox as pb
import numpy as np

from sacred.observers.file_storage import FileStorageObserver
from lazy_dataset.database import JsonDatabase

from padertorch.contrib.neumann.chunking import RandomChunkSingle
from padertorch.contrib.ldrude.utils import get_new_folder

nickname = "dprnn"
ex = Experiment(nickname)


def get_storage_dir():
    # Sacred should not add path_template to the config,
    # so these few lines are moved into a function
    path_template = Path(os.environ["STORAGE"]) / 'pth_models' / nickname
    path_template.mkdir(exist_ok=True, parents=True)
    return get_new_folder(path_template, mkdir=False)


@ex.config
def config():
    debug = False
    batch_size = 4  # Runs on 4GB GPU mem. Can safely be set to 12 on 12 GB (e.g., GTX1080)
    chunk_size = 32000  # 4s chunks @8kHz

# list configs in the active directory
configs = os.listdir('../configs/active')

# iterate over each config and run an experiment for it
for config_file in configs:

    # set config path
    config_path = f'../configs/active/{config_file}'

    print(f'Running model using configuration located at {config_path}')

    # load config file
    with open(config_path) as f:
        config = json.load(f)

    # get experiment path
    experiment_name = config['experiment']['name']
    experiment_path = f'../experiments/{experiment_name}'

    # initialize experiment
    experiment = Experiment(experiment_name)
    experiment.captured_out_filter = apply_backspaces_and_linefeeds
    experiment.observers.append(
        FileStorageObserver.create(experiment_path))

    # wrap the run function (Sacred registers a function object)
    def wrapper():
        run(config, config_path)

    # register and launch; `main` + `run` is used here because `automain`
    # is meant for a single experiment at the bottom of a module
    experiment.main(wrapper)
    experiment.run()