Example #1
def init():
    log_id = str(int(time.time()*10)%(60*60*24*365*10))+str(os.getpid())
    global logger
    logger = logging.getLogger(str(log_id))

    logger.setLevel(logging.DEBUG)  
      
    # write to file 
    fh = logging.FileHandler('ex.log')  
    fh.setLevel(logging.DEBUG)
      
    # write to console
    ch = logging.StreamHandler()  
    ch.setLevel(logging.DEBUG)  
      
    # Handler format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')  
    fh.setFormatter(formatter)  
    ch.setFormatter(formatter)  
      
    logger.addHandler(fh)
    logger.addHandler(ch)
      
    global ex
    ex = Experiment('TrajMap')
    ex.logger = logger
    ex.observers.append(MongoObserver.create(url='10.60.43.110:27017', db_name='TurnHMM'))
    #ex.observers.append(MongoObserver.create(url='127.0.0.1:27017', db_name='nTrajMap'))
    return ex, logger
Example #2
def objective(args_):

    # arguments to pass as config_updates dict
    global args
    # result to pass to hyperopt
    global result
    # command-line arguments 
    global parse_args

    try:
        ex = Experiment('Hyperopt')
        logger.debug("Adding observer for {}, DB {}".format(parse_args.mongo_db_address,parse_args.mongo_db_name))
        ex.observers.append(MongoObserver.create(url=parse_args.mongo_db_address, db_name=parse_args.mongo_db_name))
        
        pythia_args = make_args_for_pythia(args_)
        args = mp.get_args(**pythia_args) 
        ex.main(run_with_global_args)
        r = ex.run(config_updates=pythia_args)
        logger.debug("Experiment result: {}\n"
                     "Report to hyperopt: {}".format(r.result, result))

        return result

    except:
        # NOTE: this bare `raise` re-raises immediately, so the fallback below never runs;
        # drop it if the intent is to continue without Sacred when MongoDB is unreachable.
        raise
        # If we somehow cannot get to the MongoDB server, then continue with the experiment
        logger.warning("Running without Sacred")
        run_with_global_args()
Example #3
def test_named_config_not_found_raises():
    ex = Experiment('exp')
    ex.main(lambda: None)
    with pytest.raises(NamedConfigNotFoundError,
                       match='Named config not found: "not_there". '
                             'Available config values are:'):
        ex.run(named_configs=('not_there',))
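
For contrast with the failure case above, a minimal sketch of a named config that ex.run can resolve (the name `variant` and the values are illustrative, not taken from the test):

from sacred import Experiment

ex = Experiment('exp')


@ex.config
def base():
    a = 1


@ex.named_config
def variant():
    a = 10


@ex.main
def main(a):
    return a


# ex.run(named_configs=('variant',)).result == 10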
Example #4
def fetch_parents(current_path, parents=[]):
    tmp_ex = Experiment('treeqn')
    tmp_ex.add_config(current_path)
    with suppress_stdout():
        tmp_ex.run("print_config")
    if tmp_ex.current_run is not None and "parent_config" in tmp_ex.current_run.config:
        return fetch_parents(tmp_ex.current_run.config["parent_config"], [current_path] + parents)
    else:
        return [current_path] + parents
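
The recursion follows each file's `parent_config` entry upward and returns the chain with the root-most parent first, so the configs can be applied in order (paths below are illustrative; Example #18 shows the same pattern in context):

# ./conf/child.yaml contains e.g.  parent_config: ./conf/base.yaml
# ./conf/base.yaml has no parent_config entry
configs = fetch_parents("./conf/child.yaml")
# -> ["./conf/base.yaml", "./conf/child.yaml"], root-most parent first
ex = Experiment('treeqn')
for path in configs:
    ex.add_config(path)   # parents applied first, so children override them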
Example #5
def test_config_added_raises():
    ex = Experiment('exp')
    ex.main(lambda: None)

    with pytest.raises(
            ConfigAddedError,
            match=r'Added new config entry that is not used anywhere.*\n'
                  r'\s*Conflicting configuration values:\n'
                  r'\s*a=42'):
        ex.run(config_updates={'a': 42})
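
The error above disappears once the entry is actually used somewhere; a sketch (hypothetical config) where the same config_updates call succeeds because `a` is declared and consumed by the main function:

from sacred import Experiment

ex = Experiment('exp')


@ex.config
def cfg():
    a = 1   # declared default, so updating it is legal


@ex.main
def main(a):
    return a


# ex.run(config_updates={'a': 42}) now overrides an existing entry instead of raising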
Example #6
def test_circular_dependency_raises():
    # create experiment with circular dependency
    ing = Ingredient('ing')
    ex = Experiment('exp', ingredients=[ing])
    ex.main(lambda: None)
    ing.ingredients.append(ex)

    # run and see if it raises
    with pytest.raises(CircularDependencyError, match='exp->ing->exp'):
        ex.run()
Example #7
def objective(args_):

    global args
    global result
    global parse_args
    args=args_

    try:
        ex = Experiment('Hyperopt')
        ex.observers.append(MongoObserver.create(url=parse_args.mongo_db_address, db_name=parse_args.mongo_db_name))
        ex.main(lambda: run_with_global_args())
        r = ex.run(config_updates=args)
        print(r)

        return result

    except:
        # NOTE: this bare `raise` re-raises immediately, so the fallback below never runs;
        # drop it if the intent is to continue without Sacred when MongoDB is unreachable.
        raise
        # If we somehow cannot get to the MongoDB server, then continue with the experiment
        print("Running without Sacred")
        run_with_global_args()
Example #8
class Job(metaclass=ABCMeta):
    def __init__(
        self,
        exp_config: dict = None,
        add_defaults: bool = True,
        mongo_hostnames: list = None,
    ):
        exp_config = exp_config or dict()
        if add_defaults:
            self.exp_config = self.add_config_defaults(exp_config)
        else:
            self.exp_config = exp_config
        if mongo_hostnames is None:
            mongo_hostnames = ["tater"]
        self.mongo_hostnames = mongo_hostnames

        self._experiment = None
        self._observers = []

    @property
    def default_observers(self):
        observers = []
        if socket.gethostname() in self.mongo_hostnames:
            observers.append(
                MongoObserver(
                    url=
                    f"mongodb://*****:*****@localhost:27017/?authMechanism=SCRAM-SHA-1",
                    db_name="db",
                ))
        observers.append(
            FileStorageObserver(
                self.exp_config.get("storage_dir", "./sacred_storage")))
        return observers

    @property
    def experiment(self):
        """
        Experiment object required for Sacred.

        :return: sacred.Experiment object.
        """
        if self._experiment is None:
            self._experiment = Experiment(
                name=self.exp_config.get("name"),
                ingredients=self.exp_config.get("ingredients"),
            )
            observers = self._observers or self.default_observers
            self._experiment.observers.extend(observers)
            self._experiment.add_config(self.exp_config)
            if not self.exp_config["run_config"]["capture_output"]:
                self._experiment.captured_out_filter = (
                    lambda *args, **kwargs: "Output capturing turned off.")
        return self._experiment

    @staticmethod
    def set_config_defaults(d: dict, values: dict):
        for k, v in values.items():
            d.setdefault(k, v)

    def add_config_defaults(self, ec: dict):
        for name, conf in self.config_defaults.items():
            if name in ec:
                self.set_config_defaults(ec[name], conf)
            else:
                ec.setdefault(name, conf)
        return ec

    def update_observers(self, o: List[RunObserver]):
        """
        ONLY USE BEFORE CALLING `self.experiment` AS OBSERVERS CANNOT BE SET AFTER THE EXPERIMENT
        IS CREATED.

        :param o: List of sacred RunObservers to update Job observers.
        """
        self._observers.extend(o)

    def override_observers(self, o: List[RunObserver]):
        """
        ONLY USE BEFORE CALLING `self.experiment`. Replace defaults with new list of
        RunObserver objects.
        :param o: List of new sacred RunObservers
        """
        self._observers = o

    @abstractmethod
    def _main(self, run, seed, fitable, fitable_config, loader_config):
        """
        Private method containing the actual work completed by the job. Implements a default
        workflow for a basic keras/kerastuner-type job.

        :param run: sacred.Run object. See sacred documentation for more details on utility.
        :param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object.
            Model-like which contains a fit method.
        :param fitable_config: Optional dict. Contains data which can be used to create a new
            fitable instance.
        :param loader_config: Optional dict. Contains data which can be used to create a new
            DataLoader instance.
        """
        pass

    def run(
        self,
        fitable: Model = None,
        fitable_config: dict = None,
        loader_config: dict = None,
    ):
        """
        Exposed method of the particular job. Runs whatever work is entailed by the job based on
        the content provided in `self.exp_config`.
        """
        @self.experiment.main
        def main(_run, _seed):
            self.exp_config["run_config"]["root_dir"] = Path(
                _run.observers[0].dir).absolute()
            self._main(_run, _seed, fitable, fitable_config, loader_config)

        self.experiment.run()

    @abstractmethod
    def _load_data(self, config):
        """
        Obtains a loader using ingredients.get_loader and self.exp_config['loader_config']

        :return: Loader object and the data returned by that Loader's get_data method.
        """
        pass

    @abstractmethod
    def _load_fitable(self, loader, fitable_config):
        """
        Defines and compiles a fitable (keras.model or keras_tuner.tuner) which implements
        a 'fit' method. This method calls either get_builder, or get_hyper_factory, depending on
        which type of fitable is being loaded.

        :return: Model or Tuner object.
        """
        pass

    @abstractmethod
    def _fit(self, run, fitable, data, callbacks):
        """

        :param run: sacred.Run object. See sacred documentation for details on utility.
        :param fitable: tensorflow.keras.Model object.
        :param data: tuple. train and validation data in the form (train, val), where train is
            the tuple (x_train, y_train).
        :param callbacks: Optional list. List of tensorflow.keras.Callback objects to pass to
            fitable.fit method.
        :return: tensorflow.keras.Model object.
        """
        pass

    @abstractmethod
    def _test_fitable(self, run, fitable, test_data):
        """
        :param fitable: tensorflow.keras.Model object.
        :param test_data: tuple. contains (x_test, y_test).
        :return: float. Scalar test_loss value.
        """
        pass

    @abstractmethod
    def _save_fitable(self, run, fitable):
        """
        :param run: sacred.Run object. see sacred documentation for more details on utility.
        :param fitable: tensorflow.keras.Model object.
        """
        pass

    @abstractmethod
    def _new_model_path(self, i):
        pass

    @property
    def config_defaults(self):
        """
        Defines default values for the various config dictionaries required for the Job.

        :return: dict. Experiment dictionary containing necessary config(s) for the Job.
        """
        return {
            "ingredients": [data_ingredient, builder_ingredient],
            "run_config": copy(cd.run_config),
            "loader_config": copy(cd.loader_config),
            "builder_config": copy(cd.builder_config),
            "tb_config": copy(cd.tb_config),
            "lr_config": copy(cd.lr_config),
        }
Example #9
from __future__ import print_function
import logging
import os
import os.path as osp
import random
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver
import configuration
import siamesefc_model
from utils.misc_utils import auto_select_gpu, mkdir_p, save_cfgs

ex = Experiment(configuration.RUN_NAME)
ex.observers.append(
    FileStorageObserver.create(osp.join(configuration.LOG_DIR, 'sacred')))


@ex.config
def configurations():
    # Add configurations for the current script; for more details, see the documentation of `sacred`.
    # REFER: http://sacred.readthedocs.io/en/latest/index.html
    model_config = configuration.MODEL_CONFIG
    train_config = configuration.TRAIN_CONFIG
    track_config = configuration.TRACK_CONFIG


def _configure_learning_rate(train_config, global_step):
    lr_config = train_config['lr_config']
Example #10
def prepare():
    global ex
    ex = Experiment('SeqIDLoc')
    ex.observers.append(MongoObserver.create(url='localhost:27017', db_name='SeqIDLoc'))
Example #11
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef

from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule

logger = logging.getLogger(__name__)
from sacred import Experiment

bert_ex = Experiment('bert_multimodal_transformer')
from sacred.observers import MongoObserver
from global_configs import *
url_database = conf_url_database
mongo_database_name = conf_mongo_database_name
bert_ex.observers.append(
    MongoObserver.create(url=url_database, db_name=mongo_database_name))


class InputExample(object):
    """A single training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
Example #12
import numpy as np
import torch
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import mean_squared_error
from transformer.Optim import ScheduledOptim
from sacred import Experiment

from bots import TransformerBot
from dataset import read_dataset
from io_utils import export_validation, export_test

logging.basicConfig(level=logging.WARNING)

ex = Experiment('Transformer')
ex.add_source_file("preprocess.py")
ex.add_source_file("prepare_seq_data.py")


@ex.named_config
def no_tf_2l():
    batch_size = 128
    model_details = {
        "odrop": 0.25,
        "edrop": 0.25,
        "hdrop": 0.1,
        "d_model": 128,
        "d_inner_hid": 256,
        "n_layers": 2,
        "n_head": 4,
Example #13
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.dirname(__file__))
from get_data import get_data
from models.active_model import ActiveLearningExperiment
from models.strategy import random_query
from models.utils import ObstructedY
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
import copy
import traceback
from sacred import Experiment
from misc.config import *
from kaggle_ninja import *
from utils import ExperimentResults, binary_metrics
from experiment_runner import fit_AL_on_folds
ex = Experiment("fit_active_learning")
from sklearn.metrics import auc
from sklearn.linear_model import SGDClassifier
from models.balanced_models import *

@ex.config
def my_config():
    experiment_detailed_name = "active_uncertanity_sampling"
    batch_size = 10
    seed = -1
    timeout = -1
    id_folds = -1
    warm_start_percentage = 0
    force_reload = False

    # Required args. could comment out
Example #14
"""A standard machine learning task without much sacred magic."""
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn import svm, datasets, model_selection

ex = Experiment("svm")

ex.observers.append(
    FileStorageObserver.create("my_runs")
)
ex.add_config({  # Configuration is explicitly defined as dictionary.
    "C": 1.0,
    "gamma": 0.7,
    "kernel": "rbf",
    "seed": 42
})


def get_model(C, gamma, kernel):
    return svm.SVC(C=C, kernel=kernel, gamma=gamma)


@ex.main  # Using main, command-line arguments will not be interpreted in any special way.
def run(_config):
    X, y = datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
    clf = get_model(_config["C"], _config["gamma"], _config["kernel"])  # Parameters are passed explicitly.
    clf.fit(X_train, y_train)
    return clf.score(X_test, y_test)
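
Because run() above is registered with ex.main rather than ex.automain, the script still needs an explicit entry point; a minimal sketch of one way to launch it:

if __name__ == "__main__":
    r = ex.run()                       # uses the config dict added above
    print("test accuracy:", r.result)  # .result holds the value returned by run()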

Example #15
    loadFilters, PreprocessorList, \
    SummaryDescriptionPreprocessor, SABDEncoderPreprocessor, SABDBoWPreprocessor
from example_generator.offline_pair_geneneration import NonNegativeRandomGenerator, RandomGenerator, KRandomGenerator, \
    MiscNonZeroRandomGen, PreSelectedGenerator, MiscOfflineGenerator, PositivePreSelectedGenerator, \
    ProductComponentRandomGen
from metrics.metric import AverageLoss, MeanScoreDistance, ConfusionMatrix, PredictionCache, cmAccuracy, cmPrecision, \
    cmRecall, Accuracy, AccuracyWrapper, PrecisionWrapper, RecallWrapper, LossWrapper
from metrics.ranking import PreselectListRanking, DeshmukhRanking, GeneralScorer, SunRanking, generateRecommendationList
from model.compare_aggregate import SABD
from model.loss import TripletLoss
from util.jsontools import JsonLogFormatter
from util.siamese_util import processCategoricalParam
from util.torch_util import thresholded_output_transform
from util.training_loop_util import logMetrics, logRankingResult, logConfusionMatrix

ex = Experiment("filter_model")

logger = logging.getLogger()
logger.setLevel(logging.INFO)
logHandler = logging.StreamHandler()
formatter = JsonLogFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)

ex.logger = logger


@ex.config
def cfg():
    # Set here all possible parameters; You have to change some parameters values.
    bug_database = None
Example #16
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Two layer linear regression')
    parser.add_argument("image_feature_file_train",
                        type=str,
                        help="Image Feature file for the training set")
    parser.add_argument("text_feature_file_train",
                        type=str,
                        help="Text Feature file for the training set")
    parser.add_argument("image_feature_file_test",
                        type=str,
                        help="Image Feature file for the test set")
    parser.add_argument("text_feature_file_test",
                        type=str,
                        help="Text Feature file for the test set")
    parser.add_argument("word_vector_file",
                        type=str,
                        help="Text file containing the word vectors")

    # Optional Args
    parser.add_argument("--learning_rate",
                        type=float,
                        default=.001,
                        help="Learning Rate")
    parser.add_argument("--epochs",
                        type=int,
                        default=200,
                        help="Number of epochs to run for")
    parser.add_argument("--batch_size",
                        type=int,
                        default=128,
                        help="Batch size to use for training")
    parser.add_argument("--network",
                        type=str,
                        default="200,200",
                        help="Define a neural network as comma separated layer sizes")
    parser.add_argument("--model_type",
                        type=str,
                        default="mse",
                        choices=['mse', 'negsampling', 'fast0tag'],
                        help="Loss function to use for training")
    parser.add_argument("--in_memory",
                        action='store_true',
                        default="store_false",
                        help="Load training image features into memory for faster training")
    parser.add_argument("--model_input_path",
                        type=str,
                        default=None,
                        help="Model input path (to continue training)")
    parser.add_argument("--model_output_path",
                        type=str,
                        default=None,
                        help="Model output path (to save training)")
    parser.add_argument("--max_pos",
                        type=int,
                        default=5,
                        help="Max number of positive examples")
    parser.add_argument("--max_neg",
                        type=int,
                        default=10,
                        help="Max number of negative examples")

    global args
    args = parser.parse_args()

    try:
        # Sacred Imports
        from sacred import Experiment
        from sacred.observers import MongoObserver

        from sacred.initialize import Scaffold

        # Monkey patch to avoid having to declare all our variables
        def noop(item):
            pass
        Scaffold._warn_about_suspicious_changes = noop

        ex = Experiment('Regress2sum')
        ex.observers.append(MongoObserver.create(url=os.environ['MONGO_DB_URI'],
                                             db_name='attalos_experiment'))
        ex.main(lambda: convert_args_and_call_model())
        ex.run(config_updates=args.__dict__)
    except ImportError:
        # We don't have sacred, just run the script
        convert_args_and_call_model()
Example #17
def test_missing_config_raises():
    ex = Experiment('exp')
    ex.main(lambda a: None)
    with pytest.raises(MissingConfigError):
        ex.run()
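
The error is raised because `a` never receives a value; a sketch (the value is illustrative) of the two usual ways to supply it:

ex = Experiment('exp')
ex.main(lambda a: None)
ex.add_config(a=1)   # give `a` a default so ex.run() no longer raises
ex.run()
# alternatively, pass it per run: ex.run(config_updates={'a': 1})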
Example #18
    path = "./conf/default.yaml"


def fetch_parents(current_path, parents=[]):
    tmp_ex = Experiment('treeqn')
    tmp_ex.add_config(current_path)
    with suppress_stdout():
        tmp_ex.run("print_config")
    if tmp_ex.current_run is not None and "parent_config" in tmp_ex.current_run.config:
        return fetch_parents(tmp_ex.current_run.config["parent_config"], [current_path] + parents)
    else:
        return [current_path] + parents


configs = fetch_parents(path)
ex = Experiment('treeqn')
for path in configs:
    ex.add_config(path)

ex.logger = logger

ex.observers.append(FileStorageObserver.create('./results'))


@ex.config
def my_config(save_folder, env_id, architecture, label, name):
    pytorch_version = torch.__version__
    # Timestamp experiment directory
    save_folder = get_timestamped_dir(save_folder)

    # Environment switches
Example #19
python -m padertorch.contrib.examples.wavenet.train
"""
import os
from pathlib import Path

from lazy_dataset.database import JsonDatabase
from padertorch.contrib.examples.audio_synthesis.wavenet.data import \
    prepare_dataset
from padertorch.contrib.examples.audio_synthesis.wavenet.model import WaveNet
from padertorch.io import get_new_storage_dir
from padertorch.train.optimizer import Adam
from padertorch.train.trainer import Trainer
from sacred import Experiment, commands
from sacred.observers import FileStorageObserver

ex = Experiment('wavenet')


@ex.config
def config():
    database_json = (str((Path(os.environ['NT_DATABASE_JSONS_DIR']) /
                          'librispeech.json').expanduser())
                     if 'NT_DATABASE_JSONS_DIR' in os.environ else None)
    assert database_json is not None, (
        'database_json cannot be None.\n'
        'Either start the training with "python -m padertorch.contrib.examples.'
        'audio_synthesis.wavenet.train with database_json=</path/to/json>" '
        'or make sure there is an environment variable "NT_DATABASE_JSONS_DIR"'
        'pointing to a directory with a "librispeech.json" in it (see README '
        'for the JSON format).')
    training_sets = ['train_clean_100', 'train_clean_360', 'train_other_500']
Example #20
def run_for_class(clas, it=1):
    print('work on ' + clas)
    torch.cuda.empty_cache()
    data_clas = setup_data(clas)
    encoder_name = 'encoder_' + best_lm_exp_id
    drop_mult = 1

    #learn = text_classifier_learner(data_clas, drop_mult=drop_mult, embed_prevent_first=0)
    #learn.load_encoder(encoder_name)

    all_lrs = []
    #for _ in range(3):
    #    all_lrs.append(news_utils.fastai.get_optimal_lr(learn, runs=1))
    #optim_lr = max(all_lrs)

    ex = Experiment(db_name + '_' + clas)
    ex.observers.append(MongoObserver.create(db_name=db_name + '_' + clas))

    @ex.config
    def my_config():
        exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
        factor = 2.6
        wd = 0.01
        moms = (0.8, 0.7)
        full_epochs = 200
        bs = 64
        embed_prevent = 0
        mod = 'simle_fit'
        input_p = 0.3
        lr = 0.001
        embed_p = 0.1

    @ex.main
    def run_exp(exp_id, drop_mult, input_p, embed_p, lr, moms, wd, factor,
                full_epochs):

        lrs = [lr / (factor**(4 - x)) for x in range(4)] + [lr]

        learn = text_classifier_learner(data_clas,
                                        drop_mult=drop_mult,
                                        embed_prevent_first=0)
        learn.load_encoder(encoder_name)

        learn.metrics += [
            KappaScore(),
            news_utils.fastai.F1Macro(),
            news_utils.fastai.F1Weighted(),
            news_utils.fastai.PrecisionMacro(),
            news_utils.fastai.RecallMacro()
        ]

        learn.callbacks += [
            SaveModelCallback(learn, name=exp_id, monitor='kappa_score'),
            EarlyStoppingCallback(learn,
                                  monitor='kappa_score',
                                  patience=20,
                                  mode='max'),
            news_utils.fastai.SacredLogger(learn, ex),
        ]

        for i in range(1, 5):
            epochs = 1
            if i in [1, 2, 3]:
                learn.freeze_to(-i)
            else:
                learn.unfreeze()
                epochs = full_epochs
            learn.fit(epochs, np.array(lrs))

    for _ in range(it):
        ex.run(config_updates={"drop_mult": drop_mult})
Example #21
File: main.py  Project: Lab41/attalos
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Two layer linear regression')
    parser.add_argument("image_feature_file_train",
                        type=str,
                        help="Image Feature file for the training set")
    parser.add_argument("text_feature_file_train",
                        type=str,
                        help="Text Feature file for the training set")
    parser.add_argument("image_feature_file_test",
                        type=str,
                        help="Image Feature file for the test set")
    parser.add_argument("text_feature_file_test",
                        type=str,
                        help="Text Feature file for the test set")
    parser.add_argument("word_vector_file",
                        type=str,
                        help="Text file containing the word vectors")

    # Optional Args
    parser.add_argument("--learning_rate",
                        type=float,
                        default=0.001,
                        help="Learning Rate")
    parser.add_argument("--num_epochs",
                        type=int,
                        default=200,
                        help="Number of epochs to run for")
    parser.add_argument("--batch_size",
                        type=int,
                        default=128,
                        help="Batch size to use for training")
    parser.add_argument("--model_type",
                        type=str,
                        default="multihot",
                        choices=['multihot', 'naivesum', 'wdv', 'negsampling', 'fast0tag'],
                        help="Loss function to use for training")
    parser.add_argument("--in_memory",
                        action='store_true',
                        default="store_false",
                        help="Load training image features into memory for faster training")
    parser.add_argument("--model_input_path",
                        type=str,
                        default=None,
                        help="Model input path (to continue training)")
    parser.add_argument("--model_output_path",
                        type=str,
                        default=None,
                        help="Model output path (to save training)")

    # new args
    parser.add_argument("--hidden_units",
                        type=str,
                        default="200",
                        help="Define a neural network as comma separated layer sizes. If log-reg, then set to '0'.")
    parser.add_argument("--cross_eval",
                        action="store_true",
                        default=False,
                        help="Use if test dataset is different from training dataset")
    parser.add_argument("--word_vector_type",
                        type=str,
                        choices=[t.name for t in WordVectorTypes],
                        help="Format of word_vector_file")
    parser.add_argument("--epoch_verbosity",
                        type=int,
                        default=10,
                        help="Epoch verbosity rate")
    parser.add_argument("--verbose_eval",
                        action="store_true",
                        default=False,
                        help="Use to run evaluation against test data every epoch_verbosity")
    parser.add_argument("--optim_words",
                        action="store_true",
                        default=False,
                        help="If using negsampling model_type, use to jointly optimize words")
    parser.add_argument("--ignore_posbatch",
                        action="store_true",
                        default=False,
                        help="Sample, ignoring from positive batch instead of examples. This should be taken out in future iters.")
    parser.add_argument("--joint_factor",
                        type=float,
                        default=1.0,
                        help="Multiplier for learning rate in updating joint optimization")
    parser.add_argument("--use_batch_norm",
                        action="store_true",
                        default=False,
                        help="Do we want to use batch normalization? Default is False")
    parser.add_argument("--opt_type",
                        type=str,
                        default="adam",
                        help="What type of optimizer would you like? Choices are (adam,sgd)")
    parser.add_argument("--weight_decay",
                        type=float,
                        default=0.0,
                        help="Weight decay to manually decay every 10 epochs. Default=0 for no decay.")
    parser.add_argument("--scale_words",
                        type=float,
                        default=1.0,
                        help="Scale the word vectors. If set to zero, scale by L2-norm. Otherwise, wordvec=scale x wordvec")
    parser.add_argument("--scale_images",
                        type=float,
                        default=1.0,
                        help="Scale the word vectors. If set to zero, scale by L2-norm. Otherwise, imvec=scale x imvec. ")
    parser.add_argument("--fast_sample",
                        action="store_true",
                        default=False,
                        help="Fast sample based on distribution, only use in large dictionaries")


    args = parser.parse_args()

    try:
        # Sacred Imports
        from sacred import Experiment
        from sacred.observers import MongoObserver

        from sacred.initialize import Scaffold

        # Monkey patch to avoid having to declare all our variables
        def noop(item):
            pass

        Scaffold._warn_about_suspicious_changes = noop

        ex = Experiment('Attalos')
        ex.observers.append(MongoObserver.create(url=os.environ['MONGO_DB_URI'],
                                                 db_name='attalos_experiment'))
        ex.main(lambda: convert_args_and_call_model(args))
        ex.run(config_updates=args.__dict__)
    except ImportError:
        # We don't have sacred, just run the script
        logger.warn('Not using Sacred. Your results will not be saved')
        convert_args_and_call_model(args)
Example #22
    FlattenSingletonVecEnv,
    MergeAgentVecEnv,
    make_dummy_vec_multi_env,
    make_subproc_vec_multi_env,
)
from aprl.envs.observation_masking import make_mask_agent_wrappers
import aprl.envs.wrappers
from aprl.policies.loader import load_backward_compatible_model, load_policy, mpi_unavailable_error
from aprl.policies.wrappers import MultiPolicyWrapper
from aprl.training.embedded_agents import CurryVecEnv, TransparentCurryVecEnv
from aprl.training.logger import setup_logger
from aprl.training.lookback import DebugVenv, LookbackRewardVecWrapper, OldMujocoResettableWrapper
from aprl.training.scheduling import ConstantAnnealer, Scheduler
from aprl.training.shaping_wrappers import apply_embedded_agent_wrapper, apply_reward_wrapper

train_ex = Experiment("train")
pylog = logging.getLogger("aprl.train")


def _save(model, root_dir, save_callbacks):
    os.makedirs(root_dir, exist_ok=True)
    model_path = osp.join(root_dir, "model.pkl")
    model.save(model_path)
    for f in save_callbacks:
        f(root_dir)


@train_ex.capture
def old_ppo2(
    _seed,
    env,
Example #23
This allows me to configure experiment by JSON file.

Author: Yuhuang Hu
Email : [email protected]
"""

from sacred import Experiment

import os
import cPickle as pickle
import numpy as np
import cv2

from spikefuel import tools, gui, helpers

exp = Experiment("DVS Recording - Lipreading")

exp.add_config({
    "lipreading_dir": "",
    "lipreading_stats_path": "",
    "recording_save_path": "",
    "viewer_id": 1,
    "screen_height": 0,
    "screen_width": 0,
    "work_win_scale": 0.9,
    "bg_color": [255, 0, 0],
    "fps": 0
})


@exp.automain
Example #24
from pathlib import Path
from pprint import pprint
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import InvalidConfigError, MissingConfigError
from tqdm import tqdm

import padertorch as pt
from padertorch.contrib.neumann.evaluation import compute_means
from .train import prepare_dataset

# Unfortunately need to disable this since conda scipy needs update
warnings.simplefilter(action='ignore', category=FutureWarning)

experiment_name = 'tasnet'
ex = Experiment(experiment_name)

JSON_BASE = os.environ.get('NT_DATABASE_JSONS_DIR', None)


@ex.config
def config():
    debug = False
    dump_audio = False

    # Model config
    model_path = ''
    assert len(model_path) > 0, 'Set the model path on the command line.'
    checkpoint_name = 'ckpt_best_loss.pth'
    experiment_dir = None
    if experiment_dir is None:
Example #25
from os.path import basename
from pathlib import Path

import numpy as np
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
from graspy.models import SBMEstimator, DCSBMEstimator
from src.data import load_left, load_right
from graspy.utils import binarize, symmetrize
from src.models import fit_a_priori
from src.utils import save_obj
import pandas as pd

from joblib import Parallel, delayed

ex = Experiment("Run LDT")

current_file = basename(__file__)[:-3]

sacred_file_path = Path(f"./maggot_models/models/runs/{current_file}")

slack_obs = SlackObserver.from_config("slack.json")

file_obs = FileStorageObserver.create(sacred_file_path)

ex.observers.append(slack_obs)
ex.observers.append(file_obs)


@ex.config
def config():
Example #26
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers.embeddings import Embedding
from keras.layers.wrappers import TimeDistributed
from keras.layers.pooling import GlobalAveragePooling1D
from keras.callbacks import Callback, ModelCheckpoint
from keras.optimizers import Adam
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from heise_online_dataset import heise_online_ingredient, load_data, get_word_count
from train_lstm import c_score


ex = Experiment('Dense_Average_Classification', ingredients=[heise_online_ingredient])
ex.observers.append(MongoObserver.create())
ex.captured_out_filter = apply_backspaces_and_linefeeds


@ex.capture
def log_performance(_run, logs):
    _run.add_artifact("weights.hdf5")
    _run.log_scalar("loss", float(logs.get('loss')))
    _run.log_scalar("binary_accuracy", float(logs.get('binary_accuracy')))
    _run.log_scalar("c_score", float(logs.get('c_score')))
    _run.log_scalar("val_loss", float(logs.get('val_loss')))
    _run.log_scalar("val_binary_accuracy", float(logs.get('val_binary_accuracy')))
    _run.log_scalar("val_c_score", float(logs.get('val_c_score')))
    _run.result = float(logs.get('val_c_score'))
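
log_performance is a captured function, so Sacred injects _run when it is called inside a run; a hedged sketch (the callback class name is an assumption, not part of this snippet) of invoking it from the Callback base class imported above at the end of every epoch:

class SacredLoggingCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        log_performance(logs=logs)  # _run is filled in by @ex.capture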
Example #27
import csv
import sys
import pandas as pd
import numpy as np
from sacred import Experiment
from sacred.observers import MongoObserver

from agents.agent import DDPG_Agent
from agents.agent_simple import DDPG as DDPG_Agent_Simple
from agents.policy_search import PolicySearch_Agent
from agents.random_binary_agent import Random_Binary_Agent
from collections import deque
from tasks import TakeOff_Task
import visuals as vs

ex = Experiment()
ex.observers.append(MongoObserver.create(db_name='sacred'))

@ex.config
def config():

    # Noise process
    exploration_mu = 0.
    exploration_theta = 0.7
    exploration_sigma = 0.2

    # Replay memory
    buffer_size = 100000
    batch_size = 256

    # Algorithm parameters
Example #28
import numpy as np
import tensorflow as tf
import random
import time

import model
import sample
import encoder
from tqdm import tqdm
from sacred import Experiment
from sacred.observers import MongoObserver

CHECKPOINT_DIR = 'checkpoint'
SAMPLE_DIR = 'samples'

ex = Experiment('gpt-2-finetune-tf')

ex.observers.append(
    MongoObserver.create(url='localhost:27017', db_name='experiments'))


def maketree(path):
    try:
        os.makedirs(path)
    except:
        pass


def load_dataset(enc, path):
    paths = []
    if os.path.isfile(path):
Example #29
from sacred.observers import MongoObserver, FileStorageObserver
from scipy.special import expit
from sacred import Experiment
import tensorflow as tf
import collections
import tflearn
import extend_recurrent
import numpy as np
import json
import sys
import lrp
import os
import heatmap
import pickle

ex = Experiment('IMDBMovieReview-LSTM')


@ex.config
def config():

    db = ""
    if db == "mongo":
        print("Using mongodb for logging")
        ex.observers.append(MongoObserver.create())
    elif db == "file":
        print("Using local file storage for logging")
        ex.observers.append(FileStorageObserver.create('SacredRunLog'))

    net_arch_layers = ['lstm', 'fc', 'output']
    tensorboard_verbose = 3
Example #30
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') #Stride of 1

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #2x2 max pooling, stride = 2


from sacred import Experiment
#from sacred.observers import MongoObserver
ex = Experiment('DSB GATE EXPERIMENT')
#ex.observers.append(MongoObserver.create())

@ex.config
def config():
    RUN_NAME = 'CRPS-MODEL-3.0'
    DATA_DIR = 'netdata'
    ITERS = 100000
    START_ITER = 0
    MODEL_LOAD_PATH = None
    PIC_WIDTH = 32
    ### Architectural Hyperparameters
    DEPTH_1 = 20         # The output depth of the first convolutional layer
    DEPTH_2 = 40         # The output depth of the second convolutional layer
    DEPTH_3 = 80         # The output depth of the second convolutional layer
    DEPTH_4 = 150        # The output depth of the second convolutional layer
Example #31
from os.path import basename
from pathlib import Path
import numpy as np
import pandas as pd
from joblib import Parallel, delayed, wrap_non_picklable_objects
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver

from src.utils import gen_B, gen_sbm, save_obj
from src.models import select_sbm

ex = Experiment("SBM model selection")

current_file = basename(__file__)[:-3]

pickle_path = Path("./maggot_models/simulations/outs/")
sacred_file_path = Path(f"./maggot_models/simulations/runs/{current_file}")

slack_obs = SlackObserver.from_config("slack.json")

file_obs = FileStorageObserver.create(sacred_file_path)

ex.observers.append(slack_obs)
ex.observers.append(file_obs)


@ex.config
def config():
    """Variables defined in config get automatically passed to main"""

    n_sims = 2  # noqa: F841
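
To illustrate the docstring above (a sketch; the project's real main is not shown in this snippet), any captured function simply names the config entry it needs and Sacred passes it in:

@ex.automain
def main(n_sims):
    # n_sims arrives from the config scope above without being passed explicitly
    print(f"running {n_sims} simulations")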
Example #32
File: train.py  Project: anslt/ADL4CV
from mot_neural_solver.utils.misc import make_deterministic, get_run_str_and_save_dir, ModelCheckpoint

from mot_neural_solver.path_cfg import OUTPUT_PATH
import os.path as osp

from mot_neural_solver.pl_module.pl_module import MOTNeuralSolver

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
#from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint

from sacred import Experiment, SETTINGS

SETTINGS.CONFIG.READ_ONLY_CONFIG = False

ex = Experiment()
ex.add_config('configs/tracking_cfg.yaml')
ex.add_config({
    'run_id': 'train_w_default_config',
    'add_date': True,
    'cross_val_split': None
})


@ex.config
def cfg(cross_val_split, eval_params, dataset_params, graph_model_params,
        data_splits):

    # Training requires the use of precomputed embeddings
    assert dataset_params[
        'precomputed_embeddings'], "Training without precomp. embeddings is not supported"
Example #33
import sys, os
import torch
from torchvision import datasets, transforms
import torchsm
from sacred import Experiment
from sacred.observers import MongoObserver

device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
    "cpu")

ex = Experiment('icpram2019')

from utils import MovingAverage


class Net(torchsm.BaseLayer):
    def __init__(self, input, output, **kwargs):
        torchsm.BaseLayer.__init__(self, input, output)

        self.hidden_layers = kwargs[
            "hidden_layers"] if "hidden_layers" in kwargs else 1
        self.hidden_dim = kwargs["hidden_dim"] if "hidden_dim" in kwargs else 10
        self.stig_dim = kwargs["stig_dim"]

        self.n_inputs = input
        self.n_outputs = output

        self.stigmergic_memory = torchsm.RecurrentStigmergicMemoryLayer(
            self.n_inputs,
            self.stig_dim,
            hidden_dim=kwargs["stig_hidden_dim"],
Example #34
"""Experiment Configuration"""
import os
import re
import glob
import itertools

import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
sacred.SETTINGS.CAPTURE_MODE = 'no'

ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds

source_folders = [
    '.', './dataloaders', './models', './util', './dataloaders_medical'
]
sources_to_save = list(
    itertools.chain.from_iterable(
        [glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
    ex.add_source_file(source_file)

    # "Organs" : ["background",
    #           "spleen",
    #           "right kidney",
    #           "left kidney",
    #           "gallbladder",
Example #35
import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver
from tensorflow.python.keras.backend import set_session
from tensorflow.python.keras.callbacks import ReduceLROnPlateau, TensorBoard
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

from resnet import make_resnet
from utils import tf_disable_warnings, tf_disable_deprecation_warnings

tf_disable_warnings()
tf_disable_deprecation_warnings()

ex = Experiment()
observer = FileStorageObserver.create('runs')
ex.observers.append(observer)

ex.add_config({
    'net': 'resnet20'
})


@ex.automain
def main(net):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)
Example #36
        self.register_buffer(
            'mean', torch.FloatTensor(mean).view(1, len(mean), 1, 1)
        )
        self.register_buffer(
            'std', torch.FloatTensor(std).view(1, len(std), 1, 1)
        )

    def forward(self, x):
        x = x.float()  # implicitly convert to float
        x = x.sub(self.mean).div(self.std)
        return self.module(x)


ROOT = pt.abspath(pt.dirname(__file__))

exp = Experiment('Experiment: Jigsaw encoder on Imagenet')
# Add a FileObserver if one hasn't been attached already
EXP_FOLDER = '../exp/'
log_location = pt.join(EXP_FOLDER, pt.basename(sys.argv[0])[:-3])
if len(exp.observers) == 0:
    print('Adding a file observer in %s' % log_location)
    exp.observers.append(file_storage.FileStorageObserver.create(log_location))


def get_checkpoint(exp, exp_variant):
	# currently three experiments exist, vanilla_AE, AE_with_tiles, Jigsaw (two variants)
	# have namedtuple for experiment and checkpoints directory

	if exp is None:
		raise RuntimeError('Experiment can not be None!')
Example #37
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.enable_eager_execution()

import numpy as np
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import MongoObserver

import common
from convnet_data import load_train_val_datasets, load_test_dataset
from models import get_model_wrapper


EXPERIMENT = Experiment()
EXPERIMENT.captured_out_filter = apply_backspaces_and_linefeeds
EXPERIMENT.observers.append(MongoObserver.create())


@EXPERIMENT.config
def config():
    seed = 7198021
    train_examples_per_class = 112640
    val_examples_per_class = 768
    batch_size = 1024
    train_augmentation = True
    test_augmentation = 10
    model = 'convnet'
    image_size = 128
    base_filters = 64
Example #38
from __future__ import absolute_import, division, print_function

from multiprocessing import Process
from copy import deepcopy
import time

import mat4py as m4p
from utils import *
from sacred import Experiment

from rhn_stocks import Model

ex = Experiment('rhn_prediction_stocks')
logging = tf.logging

import h5py
import stocks_black_box as bb


class Config():

    # architecture

    weight_decay = 1e-07
    max_grad_norm = 0.8
    drop_i = 0.05
    drop_e = 0.5
    drop_h = 0.3
    drop_o = 0.75
    hidden_size = 200
    mask = [0.6, 0.95, 0.05]  # should be a list
Example #39
    from utils.typing_alias import *

# Train helpers
from utils.train_helper import (
    get_optimisers,
    reduce_loss_dict,
    load_models,
    save_weights,
    ExpLoss_with_dict,
    AvgLoss_with_dict,
    pprint_args,
)
from utils.tupperware import tupperware

# Experiment, add any observers by command line
ex = Experiment("Train")
ex = initialise(ex)

# local rank 0: for logging, saving ckpts
if "LOCAL_RANK" in os.environ:
    is_local_rank_0 = int(os.environ["LOCAL_RANK"]) == 0
else:
    is_local_rank_0 = True
if not is_local_rank_0:
    sys.stdout = open(os.devnull, "w")

# To prevent "RuntimeError: received 0 items of ancdata"
torch.multiprocessing.set_sharing_strategy("file_system")
torch.autograd.set_detect_anomaly(True)

Example #40
from zeiss_umbrella.config import FILE_OBSERVER_BASE_PATH, FILE_OBSERVER_RESOURCE_PATH, FILE_OBSERVER_SOURCE_PATH
import sacred
from sacred import Experiment
from sacred.observers.file_storage import FileStorageObserver
import torch.nn as nn
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from zeiss_umbrella.fundus.setting_parser import get_baseline, get_optimizer, get_loss
from zeiss_umbrella.fundus.train import *
from zeiss_umbrella.fundus import data
import pandas as pd
import os

ex = Experiment('incremental balancing')
template = ""
ex.observers.append(
    FileStorageObserver(FILE_OBSERVER_BASE_PATH, FILE_OBSERVER_RESOURCE_PATH,
                        FILE_OBSERVER_SOURCE_PATH, template))

# uncomment if you use progress bars
# from sacred.utils import apply_backspaces_and_linefeeds
# ex.captured_out_filter = apply_backspaces_and_linefeeds
# for more info see https://sacred.readthedocs.io/en/latest/collected_information.html#live-information


@ex.config
def my_config():
    """
    preprocessing: dictionary, 'type':'centerCrop','default' , 'cropSize':list, 'size':tuple
    adv_training_config: 'type':'baseline', 'fgsm', 'fgsm_k_image', 'pgd', 'boundary_attack'
Example #41
from config import PHONETIC_EMB_SIZE
from config import SEMEVAL_HUMOR_TRAIN_DIR
from config import SEMEVAL_HUMOR_TRIAL_DIR
from config import TWEET_SIZE
from tf_tools import GPU_OPTIONS
from tf_tools import HUMOR_DROPOUT
from tf_tools import create_dense_layer
from tf_tools import create_tensorboard_visualization
from tf_tools import predict_on_hashtag
from tf_tools import build_humor_model
from tools import extract_tweet_pair_from_hashtag_datas
from tools import get_hashtag_file_names
from tools import load_hashtag_data
from tools import load_hashtag_data_and_vocabulary

ex = Experiment('humor_model')
ex.observers.append(MongoObserver.create(db_name='humor_runs'))

EMBEDDING_HUMOR_MODEL_LEARNING_RATE = .00001
N_TRAIN_EPOCHS = 2


@ex.config
def my_config():
    learning_rate = .000005  # np.random.uniform(.00005, .0000005)
    num_epochs = 5  # int(np.random.uniform(1.0, 4.0))
    dropout = 1  # np.random.uniform(.5, 1.0)
    hidden_dim_size = 800  # int(np.random.uniform(200, 3200))
    use_emb_model = True
    use_char_model = True
    model_save_dir = EMB_CHAR_HUMOR_MODEL_DIR
Example #42
import tensorflow as tf
import numpy as np
import os
import librosa

import Datasets
import Utils
import Models.UnetSpectrogramSeparator
import Models.UnetAudioSeparator
import Test
import Evaluate

import functools
from tensorflow.contrib.signal.python.ops import window_ops

ex = Experiment('Waveunet Training', ingredients=[config_ingredient])


@ex.config
# Executed for training; sets the seed value in the Sacred config so that Sacred fixes the Python and NumPy RNGs to the same state every time.
def set_seed():
    seed = 1337


@config_ingredient.capture
def train(model_config, experiment_id, load_model=None):
    # Determine input and output shapes
    disc_input_shape = [
        model_config["batch_size"], model_config["num_frames"], 0
    ]  # Shape of input
    if model_config["network"] == "unet":
Example #43
#!/usr/bin/env python
# coding=utf-8
""" An example showcasing the logging system of Sacred."""
from __future__ import division, print_function, unicode_literals
import logging
from sacred import Experiment

ex = Experiment('log_example')

# set up a custom logger
logger = logging.getLogger('mylogger')
logger.handlers = []
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname).1s] %(name)s >> "%(message)s"')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel('INFO')

# attach it to the experiment
ex.logger = logger


@ex.config
def cfg():
    number = 2
    got_gizmo = False


@ex.capture
def transmogrify(got_gizmo, number, _log):
    if got_gizmo:
Example #44
import multidimensional
import multidimensional.common
import multidimensional.mds
import multidimensional.point_filters
import multidimensional.radius_updates
import multidimensional.datagen.shapes as datagen
import multidimensional.smacof

import config


EXPERIMENT_NAME = 'toroid_helix_noise_8e-2'

KEEP_HISTORY = True

ex = Experiment(EXPERIMENT_NAME)
ex.observers.append(MongoObserver.create(
    url=config.SACRED_MONGO_URL,
    db_name=config.SACRED_DB
    # db_name='test'
))


RESULT_IMAGE = EXPERIMENT_NAME + '.png'

@ex.config
def cfg():
    data_type = 'toroid-helix'
    # {
    #     'sphere': Sphere,
    #     'cut-sphere': CutSphere,
Example #45
This allows me to configure experiment by JSON file.

Author: Yuhuang Hu
Email : [email protected]
"""

from sacred import Experiment

import os
import cPickle as pickle
import numpy as np
import cv2

from spikefuel import tools, gui, helpers

exp = Experiment("DVS Recording - Caltech-256")

exp.add_config({
    "caltech256_dir": "",
    "caltech256_stats_path": "",
    "recording_save_path": "",
    "viewer_id": 1,
    "screen_height": 0,
    "screen_width": 0,
    "saccade_size": 0,
    "work_win_scale": 0.9,
    "bg_color": [255, 0, 0],
    "fps": 0,
    "start_class": 0
})
Example #46
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from sacred import Experiment
from sacred.observers import FileStorageObserver

import os
import datetime

import audio_models
import dataset
from train import train
from test import test

ex = Experiment('UNet_Speech_Separation', interactive=True)
ex.observers.append(FileStorageObserver.create('my_runs'))


@ex.config
def cfg():
    model_config = {
        'model_variant':
        'unet',  # The type of model to use, from ['unet', 'capsunet', 'basic_capsnet', 'basic_convnet']
        'data_type':
        'mag_phase',  # From ['mag', 'mag_phase', 'real_imag', 'mag_real_imag']
        'phase_weight':
        0.005,  # When using a model which learns to estimate phase, defines how much
        # weight phase loss should be given against magnitude loss
        'initialisation_test':
        False,  # Whether or not to calculate test metrics before training
        'loading': False,  # Whether to load an existing checkpoint
        'checkpoint_to_load': "169/169-6",  # Checkpoint format: run/run-step
Example #47
from torch.utils.data import DataLoader
import torch.optim as optim
from tqdm import tqdm

from constants import *
sys.path.append(BASE_DIR)
from goggles.loss import CustomLoss2
from goggles.models.semantic_ae import SemanticAutoencoder
from goggles.opts import DATASET_MAP, DATA_DIR_MAP
from goggles.utils.vis import \
    get_image_from_tensor, save_prototype_patch_visualization


_make_cuda = lambda x: x.cuda() if torch.cuda.is_available() else x

ex = Experiment('goggles-experiment')
ex.observers.append(FileStorageObserver.create(os.path.join(ALL_RUNS_DIR)))


def _provision_run_dir(run_dir):
    new_dirs = [LOGS_DIR_NAME, IMAGES_DIR_NAME, PROTOTYPES_DIR_NAME]
    new_dirs = list(map(lambda d: os.path.join(run_dir, d), new_dirs))
    for new_dir in new_dirs:
        os.makedirs(new_dir)
    return new_dirs


@ex.config
def default_config():
    seed = 42                # RNG seed for the experiment
    dataset = 'cub'            # Dataset to be used (cub/awa2)
Example #48
文件: main.py 项目: conglu1997/MAVEN
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import torch as th
from utils.logging import get_logger
import yaml
import pymongo

from run import run

SETTINGS['CAPTURE_MODE'] = "fd"  # set to "no" if you want to see stdout/stderr in console
logger = get_logger()

ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds

# results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")
# Append results to data instead
results_path = os.path.join("/data", str(os.environ.get('STORAGE_HOSTNAME')), "conlu", "results")

mongo_client = None


def setup_mongodb():
    # The central mongodb for our deepmarl experiments
    # You need to set up local port forwarding to ensure this local port maps to the server
    # if conf_str == "":
    # db_host = "localhost"
Example #49
  Hello world!
  INFO - hello_config_dict - Completed after 0:00:00

The message can also easily be changed using the ``with`` command-line
argument::

  $ ./02_hello_config_dict.py with message='Ciao world!'
  INFO - hello_config_dict - Running command 'main'
  INFO - hello_config_dict - Started
  Ciao world!
  INFO - hello_config_dict - Completed after 0:00:00
"""
from __future__ import division, print_function, unicode_literals
from sacred import Experiment

ex = Experiment('hello_config_dict')

# We add message to the configuration of the experiment here
ex.add_config({
    "message": "Hello world!"
})
# Equivalent:
# ex.add_config(
#     message="Hello world!"
# )


# notice how we can access the message here by taking it as an argument
@ex.automain
def main(message):
    print(message)
Example #50
File: biagioni.py  Project: renj/TrajMap
fh.setLevel(logging.DEBUG)
  
# write to console
ch = logging.StreamHandler()  
ch.setLevel(logging.DEBUG)  
  
# Handler format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')  
fh.setFormatter(formatter)  
ch.setFormatter(formatter)  
  
logger.addHandler(fh)
logger.addHandler(ch)
  

ex = Experiment('Biagioni')
ex.logger = logger
ex.observers.append(MongoObserver.create(url='10.60.43.110:27017', db_name='Biagioni'))

@ex.config
def cfg():
    # data_file = "../../Data/Shanghai/minsh_1000_biagioni"
    data_file = "../Data/Chicago/all_trips"


@ex.automain
def main(data_file, side, k, percent, width, alpha, _log, _run):
    _log.info('data_file: %s' % (data_file))


    _run.info['time'] = {}