Example #1
import logging
import os
import time

from sacred import Experiment
from sacred.observers import MongoObserver


def init():
    log_id = str(int(time.time() * 10) %
                 (60 * 60 * 24 * 365 * 10)) + str(os.getpid())
    global logger
    logger = logging.getLogger(str(log_id))

    logger.setLevel(logging.DEBUG)

    # write to file
    fh = logging.FileHandler('ex.log')
    fh.setLevel(logging.DEBUG)

    # write to console
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    # Handler format
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    global ex
    ex = Experiment('DNLP')
    ex.logger = logger
    ex.observers.append(
        MongoObserver.create(url='10.60.43.110:27017', db_name='DNLP'))
    #ex.observers.append(MongoObserver.create(url='127.0.0.1:27017', db_name='nTrajMap'))
    return ex, logger
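A minimal usage sketch (my addition, not part of the original project): the `ex` and `logger` returned by `init()` feed a Sacred main function, and `run_commandline()` starts the experiment:

ex, logger = init()

@ex.main
def main(_log):
    # _log is a child logger derived from the one attached via ex.logger
    _log.info('experiment started')

if __name__ == '__main__':
    ex.run_commandline()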
Example #2
def init():
    log_id = str(int(time.time()*10)%(60*60*24*365*10))+str(os.getpid())
    global logger
    logger = logging.getLogger(str(log_id))

    logger.setLevel(logging.DEBUG)

    # write to file
    fh = logging.FileHandler('ex.log')
    fh.setLevel(logging.DEBUG)

    # write to console
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    # Handler format
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    global ex
    ex = Experiment('TrajMap')
    ex.logger = logger
    ex.observers.append(MongoObserver.create(url='10.60.43.110:27017', db_name='TurnHMM'))
    #ex.observers.append(MongoObserver.create(url='127.0.0.1:27017', db_name='nTrajMap'))
    return ex, logger
Example #3
def init():
    log_id = str(int(time.time() * 10) %
                 (60 * 60 * 24 * 365 * 10)) + str(os.getpid())
    global logger
    logger = logging.getLogger(str(log_id))

    logger.setLevel(logging.DEBUG)

    # write to file
    fh = logging.FileHandler('ex.log')
    fh.setLevel(logging.DEBUG)

    # write to console
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    # Handler format
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    global ex
    ex = Experiment('DNLP')
    ex.logger = logger
    return ex, logger
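Examples #1 through #3 name the logger with a value derived from the wall clock (tenths of a second, wrapped modulo ten years) concatenated with the process id, so each run and process gets its own logger rather than sharing handler state. The scheme in isolation (my illustration):

import os
import time

def unique_log_id():
    # Tenths of a second since the epoch, wrapped at ten years, plus the PID.
    return str(int(time.time() * 10) % (60 * 60 * 24 * 365 * 10)) + str(os.getpid())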
Example #4
import logging

from sacred import Experiment


def setup_logger(ex: Experiment):
    # set up the logger
    logger = logging.getLogger()
    logger.handlers = []
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        fmt='%(asctime)s (%(levelname)s): %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.setLevel('INFO')
    ex.logger = logger
Example #5
from sacred import Experiment
from sacred.observers import MongoObserver


def get_experiment(name, db_host, db_port, db_name, ingredients=None, log_verbose=True):
    if ingredients is None:
        ex = Experiment(name)
    else:
        ex = Experiment(name, ingredients=ingredients)

    ex.observers.append(MongoObserver.create(
        url=f"mongodb://{db_host}:{db_port}",
        db_name=db_name)
    )
    ex.logger = _get_logger(log_verbose)
    return ex
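`_get_logger` is defined elsewhere in that project and not shown. A plausible stand-in, assuming `log_verbose` toggles DEBUG versus INFO on a console logger (hypothetical, for illustration only):

import logging

def _get_logger(log_verbose):
    # Hypothetical helper: the original implementation is not shown above.
    logger = logging.getLogger('experiment')
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        logger.addHandler(ch)
    logger.setLevel(logging.DEBUG if log_verbose else logging.INFO)
    return logger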
Example #6
    def _build_sacred_experiment(self) -> Experiment:
        logger = CustomConsoleLogger(f"instance-{self.idx}")

        ex = Experiment(f"instance-{self.idx}")
        ex.logger = logger
        ex.captured_out_filter = apply_backspaces_and_linefeeds
        ex.add_config(self.experiment_config)

        # Save to disk by default for sacred
        logger.info("Saving to FileStorageObserver in results/sacred.")
        results_path = os.path.join(self._instance_log_dir)
        file_obs_path = os.path.join(results_path, "sacred")
        ex.observers.append(FileStorageObserver(file_obs_path))
        return ex
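`CustomConsoleLogger` comes from that project's own code; Sacred only requires that `ex.logger` be a standard `logging.Logger`. A hypothetical minimal equivalent:

import logging

def custom_console_logger(name):
    # Hypothetical stand-in for the project's CustomConsoleLogger.
    logger = logging.getLogger(name)
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter('[%(levelname)s %(name)s] %(message)s'))
        logger.addHandler(ch)
    logger.setLevel(logging.INFO)
    return logger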
Example #7
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pathlib import Path

from sacred import Experiment
from sacred.observers import FileStorageObserver

from frxpy.utils.mylogger import MyLogger
from frxpy.data.preprocess import csv2hdf5
from frxpy.data.preprocess import csv2json

ex = Experiment('preprocess')
ex.logger = MyLogger('preprocess').get_logger()

@ex.config
def config():
    workdir = ''
    datadir = '' # datadir contains csv files.
    
    output_data_type = 'hdf5'
    data_shape = ''

    whitening = True

    ### check arguments ###
    assert output_data_type in ['hdf5'], \
        "output_data_type in ['hdf5', 'json', 'leveldb', 'protobuf']"
    assert workdir, 'workdir is required: ' \
        '"[prog] with workdir=/path/to/your/dir"'
    assert datadir, 'datadir is required: '
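The assert messages double as usage hints: workdir and datadir must be supplied at run time via Sacred's `with` syntax, e.g. `[prog] with workdir=/path/to/your/dir`. The same can be done from Python (the paths here are placeholders):

ex.run(config_updates={'workdir': '/path/to/workdir',
                       'datadir': '/path/to/csv/dir'})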
Example #8
File: main.py  Project: a2821952/pymarl
from copy import deepcopy
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import os
from os.path import abspath, dirname
import sys

import numpy as np
import torch as th
from utils.logging import get_logger
import yaml

from run import run

SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
logger = get_logger()

ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds

results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")


@ex.main
def my_main(_run, _config, _log):
    # Setting the random seed throughout the modules
    config = config_copy(_config)
    np.random.seed(config["seed"])
    th.manual_seed(config["seed"])
    config['env_args']['seed'] = config["seed"]

    # run the framework
    run(_run, config, _log)
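`config_copy` is defined further down in pymarl's main.py and is omitted from this excerpt. A sketch of the usual recursive deep-copy helper, relying on the `deepcopy` imported above (verify against the project before relying on it):

def config_copy(config):
    # Recursively copy nested dicts and lists; deepcopy the leaves.
    if isinstance(config, dict):
        return {k: config_copy(v) for k, v in config.items()}
    if isinstance(config, list):
        return [config_copy(v) for v in config]
    return deepcopy(config)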
Example #9
import logging
from pathlib import Path

from sacred import Experiment
from tabulate import tabulate

from dfp.doom_sim_env import DoomSimulatorEnv
from dfp.env_wrapper import TargetMeasEnvWrapper
from dfp.network import model, make_model
from dfp.agent import Agent
from dfp.replay_buffer import DFPReplay
from dfp.policies import EpsilonGreedyPolicy, DFPPolicy, LinearSchedule
from dfp.preprocessing import ObservationPreprocessor, BatchObservationPreprocessor
from dfp.evaluate import evaluator, evaluate_policy
from dfp.utils import get_logger

MAPS_DIR = Path(__file__).parent / 'maps'

ex = Experiment(name='DFP', ingredients=[model, evaluator])
ex.logger = get_logger(__name__, level=logging.INFO)


@ex.config
def cfg():
    # env
    scenario = 'D1_basic'
    image_shape = [84, 84]
    frame_skip = 4
    maps = ['MAP01']
    switch_maps = False

    # training loop
    n_train_steps = 800_000
    n_eval_episodes = 100
    train_freq = 64  # env steps
Example #10
    TrajectoryStdevUtilityMeasure, PredictionErrorUtilityMeasure
from normalizer import TransitionNormalizer
from imagination import Imagination

from sac import SAC

import gym
import envs
from wrappers import BoundedActionsEnv, RecordedEnv, NoisyEnv

from sacred import Experiment

from logger import get_logger

ex = Experiment()
ex.logger = get_logger('max')


# noinspection PyUnusedLocal
@ex.config
def config():
    max_exploration = False
    random_exploration = False
    exploitation = False


# noinspection PyUnusedLocal
@ex.config
def env_config():
    env_name = 'MagellanHalfCheetah-v2'  # environment out of the defined magellan environments with `Magellan` prefix
    n_eval_episodes = 3  # number of episodes evaluated for each task
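Several of these projects (pymarl, DFP, max) import a local `get_logger` helper whose implementation is not shown in the excerpts. A hypothetical minimal version compatible with both call styles seen above, `get_logger('max')` and `get_logger(__name__, level=logging.INFO)`:

import logging

def get_logger(name, level=logging.INFO):
    # Hypothetical helper: console logger with a timestamped format.
    logger = logging.getLogger(name)
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))
        logger.addHandler(ch)
    logger.setLevel(level)
    return logger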
Example #11
from typing import Optional

import torch
from sacred import Experiment

from experiments.Experiment_utils import idx_maps, add_tensor_board_writers, run_training, create_logger
from experiments.data_loading import data_loading_ingredient
from src.LSTMLID import LSTMLIDModel

LSTM_exp = Experiment('LSTM_experiment', ingredients=[data_loading_ingredient])

# Attach the logger to the experiment
LSTM_exp.logger = create_logger()


@LSTM_exp.config
def config():
    pretrained_model = None
    epochs = 1
    log_to_tensorboard: bool = True
    seed = 42
    hidden_dim = 100
    embedding_dim = 75
    num_lstm_layers = 2
    optimizer = 'SGD'
    lr = 0.1
    weight_decay = 0.00001
    batch_size = 64


@LSTM_exp.capture
Example #12
import logging

from sacred import Experiment

from experiments.data_loading import data_loading_ingredient

fasttext_exp = Experiment('Fasttext_experiment',
                          ingredients=[data_loading_ingredient])

# set up a custom logger
logger = logging.getLogger()
logger.handlers = []
ch = logging.StreamHandler()
ch.setFormatter(
    logging.Formatter(fmt='%(asctime)s %(levelname)s %(name)s: "%(message)s"',
                      datefmt='%m/%d/%Y %H:%M:%S'))
logger.addHandler(ch)
logger.setLevel(logging.INFO)

# Attach the logger to the experiment
fasttext_exp.logger = logger


@fasttext_exp.config
def config():
    model_path = "../models/fasttext_lid_small.ftz"
    num_lang_preds = 300
    to_train = False
    to_quant = True


@fasttext_exp.capture
def test_model(data_set, model, num_lang_preds, lang_to_idx):
    langs = lang_to_idx.keys()
    pred_prob = []
    pred_prob10 = []
Example #13
import logging
import os
import sys

from numpy.random import rand
from sacred import Experiment
from sacred.observers import MongoObserver
from pprint import pprint

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
python_version = sys.version_info.major

ex = Experiment()
mongo_observer = MongoObserver.create()
ex.observers.append(mongo_observer)
ex.add_config('optimizer_config.yaml')

# Configure your logger here
logger = logging.getLogger('hyper_opt')
ex.logger = logger

class Optimizer(object):
    """Neural Network Hyperparameter Optimizer Class.
    """

    def __init__(self, config):
        """Optimize class object initialization

        Args:
            config: Epoch parameters and hyperparameters for training
        Returns:
            None
        """
        self.C = config  # save sacred config dict
        self.epoch_config = {}  # initialize epoch config dict
Example #14
import logging
from typing import List, Optional, Sequence
from langid.langid import LanguageIdentifier
import numpy as np
from sacred import Experiment
from tqdm import tqdm

from torch.utils.data import DataLoader

from experiments.Experiment_utils import create_logger
from experiments.data_loading import data_loading_ingredient, load_test_folds, save_probs, save_lang_to_idx

ex = Experiment('LangID_experiment', ingredients=[data_loading_ingredient])
# Attach the logger to the experiment
ex.logger = create_logger()


@ex.capture
def test_model(
    data_set=None,
    langider=None,
    lang_to_idx=None,
) -> np.ndarray:
    """
    Tests a given langid.py model on the given data set.
    :param data_set: data set to test on
    :param langider: model to test
    :param lang_to_idx: mapping of languages to ids
    """
    langs = data_set.get_tag_set()
Example #15
import os

import torch
from sacred import Experiment
from sacred.observers import MongoObserver
from transformers import BertTokenizer, get_linear_schedule_with_warmup
from collections import defaultdict
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, balanced_accuracy_score
import joblib
from data import CATEGORY_IDS
from data import GraphDataset, TextGraphDataset, GloVeTokenizer
import models
import utils

OUT_PATH = 'output/'
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

ex = Experiment()
ex.logger = utils.get_logger()
# Set up database logs
uri = os.environ.get('DB_URI')
database = os.environ.get('DB_NAME')
if all([uri, database]):
    ex.observers.append(MongoObserver(uri, database))


@ex.config
def config():
    dataset = 'entities'
    inductive = True
    dim = 128
    model = 'blp'
    rel_model = 'complex'
    loss_fn = 'margin'
Example #16
import gym
import envs
from wrappers import BoundedActionsEnv, NoisyEnv
from envs.half_cheetah import MagellanHalfCheetahRunningForwardRewardFunction, MagellanHalfCheetahFlippingForwardRewardFunction

import os

import torch

from sacred import Experiment
from logger import get_logger

from sac import *

ex = Experiment()
ex.logger = get_logger('bare_metal_sac')


class BareMetalSAC(SAC):
    def setup_reward_func(self, reward_func):
        self.reward_func = reward_func

    def episode(self, env, warm_up=False, train=True):
        ep_return, ep_length = 0, 0
        done = False
        state = env.reset()
        while not done:
            if warm_up:
                action = env.action_space.sample()
            else:
                action = self(
                    torch.from_numpy(state).unsqueeze(0).float().to(
                        self.device))
Example #17
import logging
from sacred import Experiment

ex = Experiment('log_example')

# set up a custom logger
logger = logging.getLogger('mylogger')
logger.handlers = []
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname).1s] %(name)s >> "%(message)s"')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel('INFO')

# attach it to the experiment
ex.logger = logger


@ex.config
def cfg():
    number = 2
    got_gizmo = False


@ex.capture
def transmogrify(got_gizmo, number, _log):
    if got_gizmo:
        _log.debug("Got gizmo. Performing transmogrification...")
        return number * 42
    else:
        _log.warning("No gizmo. Can't transmogrify!")
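In the Sacred documentation this example is driven by a main function; a minimal sketch of the missing piece, using only names from the snippet above:

@ex.automain
def main(number, _log):
    _log.info('Attempting to transmogrify %d...', number)
    result = transmogrify()
    _log.info('Transmogrification result: %s', result)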
Example #18
import logging
from os import path

from emmental.scorer import Scorer
from emmental.task import EmmentalTask
import torch
import torch.nn as nn
from torchvision import transforms
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

from cow_tus.data.transforms import training_ingredient as transforms_ingredient
from cow_tus.util.util import unpickle, ce_loss, output
from cow_tus.models.modules import zoo as modules

EXPERIMENT_NAME = 'trainer'
ex = Experiment(EXPERIMENT_NAME, ingredients=[transforms_ingredient])
ex.logger = logging.getLogger(__name__)
ex.captured_out_filter = apply_backspaces_and_linefeeds


@ex.config
def config(transforms):
    """
    Configuration for training harness.
    """
    hypothesis_conditions = ['single-instance-learning', 'baseline']
    exp_dir = path.join('experiments', *hypothesis_conditions)

    meta_config = {'device': 'cpu'}

    logging_config = {'evaluation_freq': 40, 'checkpointing': False}
Example #19
	if (args.output_path is not None and not os.path.exists(args.output_path)):
		os.makedirs(args.output_path)

	if (args.output_path_2 is not None and args.output_path_2 != 'none' and not os.path.exists(args.output_path_2)):
		os.makedirs(args.output_path_2)

	if (args.output_path_3 is not None and not os.path.exists(args.output_path_3)):
		os.makedirs(args.output_path_3)

	if (args.output_path_4 is not None and not os.path.exists(args.output_path_4)):
		os.makedirs(args.output_path_4)

	for obs in args.sacred_observers:
		ex.observers.append(OBSERVERS[obs])
	ex.logger = root_logger

	# Load experiment id file
	with open(os.path.join(PACKAGE_PATH, 'resources', args.experiment_file), 'r') as csv_file:
		csv_reader = csv.reader(csv_file)
		experiments = []

		for line in csv_reader:
			experiments.append(line)

		if (args.experiment_id > 0):
			experiments = [experiments[args.experiment_id - 1]]

	for experiment_id, (vector_file, vector_model_type, gen_hidden_size, disc_hidden_size, gen_activations,
						disc_activations, gen_dropout_ratios,  disc_dropout_ratios, num_epochs, optimiser_class,
						learning_rate, beta_1, beta_2, batch_size, output_file, random_seed, noun_file,