Example #1
    def __init__(self, f_main, f_config, f_capture, cfg,
                 mongo_url='127.0.0.1:27017', disable_logging=False):

        self.cfg = cfg

        curr_db_name = self.sacred_db_name()
        ex_name = self.sacred_ex_name()
        ex = Experiment(ex_name)
        ex.captured_out_filter = apply_backspaces_and_linefeeds

        if not disable_logging:
            print(f'Connecting to MongoDB at {mongo_url}:{curr_db_name}')
            ex.observers.append(MongoObserver.create(url=mongo_url, db_name=curr_db_name))

        # init the experiment configuration (params)
        ex.config(f_config)

        # init the experiment logging (capture) method
        f_ex_capture = ex.capture(f_capture)

        # init the experiment main
        @ex.main
        def ex_main(_run):
            return main_wrapper(f_main, ex, f_ex_capture, self.sacred_db_name(), _run)

        self.ex = ex
Example #2
File: config.py  Project: lizeyan/Imp
def init_sacred(name: str):
    experiment = Experiment(name)
    experiment.observers.append(
        MongoObserver.create(
            "mongodb://*****:*****@data1,data2,data3/admin?replicaSet=rs0",
            db_name="lizytalk",
        ))
    # capture all contents written into sys.stdout and sys.stderr
    SETTINGS["CAPTURE_MODE"] = "sys"
    experiment.captured_out_filter = apply_backspaces_and_linefeeds
    return experiment
Example #3
def train(build_model, dataset, hparams, logdir, expname, observer):

    # Location to save trained models
    output_dir = path.join(logdir, "test")

    # Create the actual train function to run
    def train(_run):
        model = build_model(hparams, **dataset.preprocessing.kwargs)

        # Make optimizer
        optimizer = tf.keras.optimizers.Adam(hparams.lr,
                                             beta_1=hparams.beta_1,
                                             beta_2=hparams.beta_2)

        model.compile(optimizer=optimizer,
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"])

        # model.summary()

        train_log = model.fit(
            dataset.train_data(hparams.batch_size),
            epochs=hparams.epochs,
            steps_per_epoch=dataset.train_examples // hparams.batch_size,
            validation_data=dataset.validation_data(hparams.batch_size),
            validation_steps=dataset.validation_examples // hparams.batch_size)

        # Log the performance values to sacred
        for (metric, values) in train_log.history.items():
            for (idx, value) in enumerate(values):
                _run.log_scalar(metric, value, idx)

        # TODO: Save model

    # Build config
    config = {}
    for (k, v) in hparams.items():
        config[k] = v
    config["model"] = build_model.__name__
    config["dataset"] = dataset.dataset_name

    # Setup sacred experiment
    ex = Experiment(expname)
    # Disable output capturing by replacing it with a fixed message
    ex.captured_out_filter = lambda captured_output: "Output capturing turned off."
    ex.main(train)
    ex.add_config(config)

    # build argv for sacred -- hacky way!
    _argv = f"{ex.default_command} --{observer}"
    ex.run_commandline(argv=_argv)
Example #4
    def _build_sacred_experiment(self) -> Experiment:
        logger = CustomConsoleLogger(f"instance-{self.idx}")

        ex = Experiment(f"instance-{self.idx}")
        ex.logger = logger
        ex.captured_out_filter = apply_backspaces_and_linefeeds
        ex.add_config(self.experiment_config)

        # Save to disk by default for sacred
        logger.info("Saving to FileStorageObserver in results/sacred.")
        results_path = os.path.join(self._instance_log_dir)
        file_obs_path = os.path.join(results_path, "sacred")
        ex.observers.append(FileStorageObserver(file_obs_path))
        return ex
Example #5
def start_sacred_experiment(lm_trainer, params, sacred_mongo):
    ex = Experiment('MatsuLM')
    parameters = flatten(params, reducer='path')
    ex.add_config(parameters)
    if sacred_mongo == 'docker':
        ex.observers.append(
            MongoObserver.create(
                url=
                f'mongodb://*****:*****@localhost:27017/?authMechanism=SCRAM-SHA-1',
                db_name='db'))
    else:
        ex.observers.append(MongoObserver.create(url=sacred_mongo))

    ex.captured_out_filter = apply_backspaces_and_linefeeds

    @ex.main
    def run():
        lm_trainer.train_model(ex=ex)

    r = ex.run()
Example #6
import zipfile
import imgaug
import numpy as np
import torch
from sacred import Experiment
from sacred.observers import MongoObserver, SlackObserver
from sacred.settings import SETTINGS

import hibashi.framework.factory as factory
from hibashi.commands import add_commands
from hibashi.framework.utils import PrefixLineFilter, get_username, get_meta_dir, OverwritingFileStorageObserver

SETTINGS.DISCOVER_SOURCES = 'dir'

ex = Experiment('Embers')
ex.captured_out_filter = PrefixLineFilter('*')
add_commands(ex)


# The following function sets up the model configuration. Model init parameters and factory objects
# are added to the model config parameter by joining them into one dictionary. These are retrieved
# dynamically based on the selected model_name.
@ex.config
def cfg(_log):
    _log.info('cfg triggered')
    devices = [
        0
    ]  # Device ids to use. -1 for cpu. 0-n for GPU ids. Multiple like [0, 1]
    user = get_username()  # add user for omniboard

    # # Model parameters. Should be set first.
Example #7
from experiments.exp_utils import get_config_var, LoggerForSacred, Args
from init_frcnn_utils import init_dataloaders_1s_1t, init_val_dataloaders_mt, init_val_dataloaders_1t, \
    init_htcn_model_optimizer

from sacred import Experiment
ex = Experiment()
from sacred.observers import MongoObserver
enable_mongo_observer = False
if enable_mongo_observer:
    vars = get_config_var()
    ex.observers.append(
        MongoObserver(
            url='mongodb://{}:{}@{}/admin?authMechanism=SCRAM-SHA-1'.format(
                vars["SACRED_USER"], vars["SACRED_PWD"], vars["SACRED_URL"]),
            db_name=vars["SACRED_DB"]))
    ex.captured_out_filter = lambda text: 'Output capturing turned off.'

from dataclasses import dataclass

import numpy as np

import torch
import torch.nn as nn

from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.net_utils import adjust_learning_rate, save_checkpoint, FocalLoss, EFocalLoss

from model.utils.parser_func import set_dataset_args

import traineval_net_HTCN
Example #8
from typing import Any
import torch.utils.data as torch_data
from torchvision import transforms
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

from cow_tus.data.transforms import training_ingredient as transforms_ingredient
from cow_tus.data.dataloaders import get_sample_weights
from cow_tus.util.util import unpickle, ce_loss, output
from cow_tus.models.modules import zoo as modules
import cow_tus.data.datasets as all_datasets

EXPERIMENT_NAME = 'trainer'
ex = Experiment(EXPERIMENT_NAME, ingredients=[transforms_ingredient])
ex.logger = logging.getLogger(__name__)
ex.captured_out_filter = lambda captured_output: "Output capturing turned off."


@ex.config
def config(transforms):
    """
    Configuration for training harness.
    """
    task_str = None
    assert task_str, f'task {task_str} must have a value'

    tasks = task_str.split('&')
    for task in tasks:
        if task not in {'primary', 'primary_multiclass', '2normal_binary'}:
            raise ValueError(f'task {task} not recognized')
Example #9
folder_name = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
logdir = os.environ.get('PROTEIN_LOGDIR', 'results')
if not os.path.isdir(logdir):
    os.mkdir(logdir)
proteins.observers.append(
    FileStorageObserver.create(os.path.join(logdir, folder_name)))


def filter_text(text):
    pattern = re.compile(r"Epoch\s+\d+:")
    text = '\n'.join(
        filter(lambda line: not pattern.match(line), text.split('\n')))
    return text


proteins.captured_out_filter = filter_text


@gpu.config
def gpu_config():
    """Configure the gpu"""
    device = 0  # noqa: F841
    allow_growth = False  # noqa: F841


@proteins.config
def config():
    tasks = []  # noqa: F841
    model = ''  # noqa: F841
    num_epochs = 100  # noqa: F841
    load_from = None  # noqa: F841
Example #10
def set_up_experiment(model_cls, prepare_trainer):
    ex = Experiment('train and evaluate')
    ex.captured_out_filter = apply_backspaces_and_linefeeds

    @ex.config
    def config():
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        data_dir = None
        trained_model_dir = None
        output_dir = None

        train = True
        max_epochs = 100
        batch_size = 32
        transform_y = "relative_to_max_clique"

        model = None
        trainer_hparams = dict()
        model_hparams = dict()

        evaluate_on_full = False
        absolute_metrics = False

        embeddings = False
        predictions = False

        tag = None

    @ex.main
    def train_and_evaluate(device, data_dir, trained_model_dir, output_dir,
                           train, max_epochs, batch_size, transform_y,
                           trainer_hparams, model_hparams, evaluate_on_full,
                           absolute_metrics, embeddings, predictions, _run,
                           _seed):
        torch.manual_seed(_seed)
        # Prepare train (optional) and test data loaders.
        transform_y = transform_y_dict[transform_y]
        root = os.path.join(output_dir, "data")
        if train:
            train_data = GraphDataset(root=root,
                                      source_data_dir=data_dir,
                                      train=True,
                                      transform=transform_y)
            ex.add_resource(train_data.processed_data_path)
            train_loader = DataLoader(train_data,
                                      batch_size=batch_size,
                                      shuffle=True)
        test_data = GraphDataset(root=root,
                                 source_data_dir=data_dir,
                                 train=False,
                                 full=evaluate_on_full,
                                 transform=transform_y)
        ex.add_resource(test_data.processed_data_path)
        test_loader = DataLoader(test_data,
                                 batch_size=batch_size,
                                 shuffle=False)

        # Set up model and evaluator.
        model = model_cls(**model_hparams).to(device)
        if trained_model_dir is not None:
            path = model.load(trained_model_dir)
            ex.add_resource(path)

        def output_transform(inference_output):
            y_pred, y, data = inference_output
            if absolute_metrics:
                y_pred, y = transform_y.reverse(data,
                                                y_pred), transform_y.reverse(
                                                    data, y)
            return y_pred, y, data

        evaluator = create_supervised_graph_evaluator(
            model,
            metrics=OrderedDict(
                mse=Loss(F.mse_loss,
                         output_transform=lambda x: output_transform(x)[:2]),
                mae=Loss(F.l1_loss,
                         output_transform=lambda x: output_transform(x)[:2]),
                top1=PrecisionAtN(1, output_transform=output_transform),
                top5=PrecisionAtN(5, output_transform=output_transform),
                frac_mc=FractionOfMaxCliqueNodesFound(
                    output_transform=output_transform),
                avep=AveragePrecision(output_transform=output_transform),
            ),
            device=device,
            prepare_batch=prepare_batch,
        )

        # Train (optional) and evaluate.
        if train:
            trainer = prepare_trainer(model, evaluator, device,
                                      **trainer_hparams)
            run_and_log_training(trainer, evaluator, train_loader, test_loader,
                                 max_epochs, _run)
            new_trained_model_dir = os.path.join(output_dir, "model")
            path = model.save(new_trained_model_dir)
            ex.add_artifact(path)
        else:
            evaluator.run(test_loader)
            for name, value in evaluator.state.metrics.items():
                _run.log_scalar("val." + name, value, 0)

        evaluator.run(test_loader)
        for name, value in evaluator.state.metrics.items():
            print("{}: {:.3f}".format(name, value))

        if embeddings:
            path = compute_embeddings(model, data_dir, output_dir, device)
            ex.add_artifact(path)

        if predictions:
            path = compute_predictions(model, test_loader, transform_y,
                                       output_dir, device)
            ex.add_artifact(path)

    return ex
Example #11
import os

import numpy as np
import tensorflow as tf
from PIL import ImageFile
from keras import optimizers, backend as K
from keras.callbacks import TerminateOnNaN, EarlyStopping, ReduceLROnPlateau, TensorBoard, ModelCheckpoint
from sacred import Experiment, utils as sacred_utils

from connoisseur.datasets import load_pickle_data
from connoisseur.datasets.painter_by_numbers import load_multiple_outputs
from connoisseur.models import build_meta_limb

ex = Experiment('train-meta-network-multiple-predictions')

ex.captured_out_filter = sacred_utils.apply_backspaces_and_linefeeds
tf.logging.set_verbosity(tf.logging.ERROR)
tf_config = tf.ConfigProto(allow_soft_placement=True)
tf_config.gpu_options.allow_growth = True
s = tf.Session(config=tf_config)
K.set_session(s)


@ex.config
def config():
    data_dir = "/datasets/pbn/random_299/"
    batch_size = 4096
    shape = [1536]
    device = "/gpu:0"
    train_info = '/datasets/pbn/train_info.csv'
Example #12
def main():
    args = configs()

    args.restore_path = None
    if args.training_instance:
        if ".ckpt" in args.training_instance:
            training_dir, _ = os.path.splitext(args.training_instance)
            args.restore_path = args.training_instance
        else:
            args.restore_path = tf.train.latest_checkpoint(
                args.training_instance)
            training_dir = args.training_instance
        print("Restoring checkpoint:", args.restore_path)

        args.load_path = os.path.join(args.load_path, training_dir)
        args.summary_path = os.path.join(args.summary_path, training_dir)
    else:
        args.load_path = os.path.join(
            args.load_path,
            "evflownet_{}_{}".format(datetime.now().strftime("%m%d_%H%M%S"),
                                     args.exp_name))
        args.summary_path = os.path.join(
            args.summary_path,
            "evflownet_{}_{}".format(datetime.now().strftime("%m%d_%H%M%S"),
                                     args.exp_name))

        os.makedirs(args.load_path)
        dump_to_yaml(args, os.path.join(args.load_path, "args.yaml"))

    if args.sacred:
        sacred_exp = Experiment(args.exp_name)
        sacred_exp.captured_out_filter = apply_backspaces_and_linefeeds
        conf = vars(args)
        conf.update({'log_dir': args.load_path})
        conf.update({'summary_path': args.summary_path})
        sacred_exp.add_config(mongo_compatible(conf))

        if not args.mongodb_disable:
            url = "{0.mongodb_url}:{0.mongodb_port}".format(args)
            db_name = args.mongodb_name

            overwrite = None
            if args.restore_path is not None:
                client = pymongo.MongoClient(url)
                database = client[db_name]
                runs = database["runs"]
                matches = runs.find({"config.log_dir": args.load_path})
                if matches.count() > 1:
                    raise ValueError(
                        "Multiple MongoDB entries found with the specified path!"
                    )
                elif matches.count() == 0:
                    raise ValueError(
                        "No MongoDB entriy found with the specified path!")
                else:
                    overwrite = matches[0]['_id']

            print(
                colored('Connect to MongoDB@{}:{}'.format(url, db_name),
                        "green"))
            sacred_exp.observers.append(
                MongoObserver.create(url=url,
                                     db_name=db_name,
                                     overwrite=overwrite))

    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)
    if not os.path.exists(args.summary_path):
        os.makedirs(args.summary_path)

    # Fix the random seed for reproducibility.
    # Remove this if you are using this code for something else!
    tf.set_random_seed(12345)

    if args.do_aug_rewind:
        if args.no_aug_rot is False:
            raise ValueError(
                "no_aug_rot = False Not supported when do_aug_rewind = True")

        print("Using Event Loader for rewind augmentation!")
        loader_vals = get_loader_events(
            args.data_path,
            args.batch_size,
            args.image_width,
            args.image_height,
            split='train',
            shuffle=True,
            sequence=args.sequences,
            rotation=not args.no_aug_rot,
            rewind=args.do_aug_rewind,
            flip_updown=args.do_aug_flip_updown,
            nskips=args.loader_n_skips,
            binarize_polarity=args.loader_binarize_polarity)
        (events_loader, lengths_loader, event_img_loader, prev_img_loader,
         next_img_loader, _, rot_angle, crop_bbox, n_ima) = loader_vals
    else:
        event_img_loader, prev_img_loader, next_img_loader, _, n_ima = get_loader(
            args.data_path,
            args.batch_size,
            args.image_width,
            args.image_height,
            split='train',
            shuffle=True,
            sequence=args.sequences,
            rotation=not args.no_aug_rot,
            flip_updown=args.do_aug_flip_updown,
            nskips=args.loader_n_skips,
            gzip=args.gzip)
    print("Number of images: {}".format(n_ima))

    trainer = EVFlowNet(args,
                        event_img_loader,
                        prev_img_loader,
                        next_img_loader,
                        n_ima,
                        is_training=True)

    if args.sacred:

        @sacred_exp.main
        def train_wrapped():
            return trainer.train()

        sacred_exp.run()
    else:
        trainer.train()
Example #13
from engine import sample_dataset, run_epoch
from utils.interpretation_utils import convert_onehot, plot_maps, compute_auc_multiclass, compute_iou_interpretation, better_hparams, plot_misclassifications_grid

ex = Experiment('SemanticSupport')
logs_dir = 'storage'
#logs_dir = 'tmp'

ex.observers.append(FileStorageObserver.create(logs_dir))


def remove_progress(captured_out):
    lines = (line for line in captured_out.splitlines() if 'it/s]' not in line)
    return '\n'.join(lines)


ex.captured_out_filter = remove_progress


@ex.config
def config():
    model = 'UNetMirc'  # choose between: 'ResNet', 'UNetMirc', 'RecUNetMirc', 'RecUNetMircTD', 'RecUNetMircMulti'
    loss = 'WeightedCrossEnt'  # loss function for training the model; can also be a list of [interp_loss, class_loss]
    dataset = 'HorseHead'  # the name of the train/test sets
    neg_set = 1  # set of negative examples in train: (0) Small (usually saved for val), (1) Medium, (2) Large
    batch_size = 64  # batch size
    num_workers = 32  # number of cpu cores (dataloader workers)
    epochs = 100  # number of epochs to train
    optimizer = 'ADAM'  # optimizer (default: ADAM)
    learningrate = 1e-4  # learning rate
    loss_ratio = [1.0, 1.0]  # [interp, class]
    subset = 10000  # the subset size of negative examples added to each epoch. 'None' means adding all negative examples
Example #14
def new_exp(uri=mongo_uri, db=db_name, interactive=True):
    ex = Experiment('jupyter_ex', interactive=interactive)
    ex.captured_out_filter = apply_backspaces_and_linefeeds
    if uri is not None and db is not None:
        ex.observers.append(MongoObserver(url=uri, db_name=db))

    @ex.config
    def my_config():
        lambda_range = [1, 10, 100, 500, 1000]
        lambda_array = None
        min_freq = 0.1
        k = 100
        freq_cba = 0.1
        conf_cba = 0.3

    @ex.main
    def my_main(_run, ver, it, dataset, k, nsample, lambda_mode, quality,
                sample_mode, freq_cba, conf_cba, min_freq, lambda_range,
                lambda_array):
        from logger import log
        if uri is not None and db is not None:
            log = _run.log_scalar

        fn = 'dataset/{}-bin5.pkl'.format(dataset)
        rn = np.random.randint(10000)
        #rn = 25
        Xtr_, Xt, Ytr_, Yt, Xtr, Xv, Ytr, Yv, lb = load_data(fn,
                                                             rn=rn,
                                                             log=log)

        Xtr__dds, Xt_dds, Ytr__dds, Yt_dds, Xtr_dds, Xv_dds, Ytr_dds, Yv_dds, lb_dds = load_data(
            fn, rn=rn, log=log, onehot=True)

        k = 100 if k is None else k

        # CART
        run_cart(Xtr_, Ytr_, Xt, Yt, lb, k=k, log=log)
        print()

        # CBA
        run_cba(Xtr_,
                Ytr_,
                Xt,
                Yt,
                lb,
                support=freq_cba,
                confidence=conf_cba,
                k=k,
                log=log)
        print()

        # CN2
        run_cn2(Xtr_, Ytr_, Xt, Yt, lb, k=k, log=log)

        # IDS
        if min_freq is not None:
            if lambda_array is None:
                best_la = []
                for ith in range(7):
                    best_sc = -1
                    best_lamb = None
                    for lamb in lambda_range:
                        print('=== tuning IDS: ', ith, lamb)
                        la = best_la + [0.5] * (7 - len(best_la))
                        la[ith] = lamb

                        Y_pred = run_ids(Xtr,
                                         Ytr,
                                         Xv,
                                         Yv,
                                         lb,
                                         min_freq,
                                         la,
                                         log=None)
                        auc = roc_auc_score(lb.transform(Yv.values),
                                            lb.transform(Y_pred))
                        if auc > best_sc:
                            best_sc = auc
                            best_lamb = lamb

                    best_la.append(best_lamb)

                print('best lambs: ', best_la)
                [log('ids-lambda', lamb, i) for i, lamb in enumerate(best_la)]
            else:
                best_la = lambda_array

            run_ids(Xtr_, Ytr_, Xt, Yt, lb, min_freq, best_la, log=log)
        print()

        # DDS
        run_ours(Xtr__dds,
                 Ytr__dds,
                 Xt_dds,
                 Yt_dds,
                 lb_dds,
                 nsample,
                 lambda_mode,
                 q=quality,
                 sample_mode=sample_mode,
                 k=k,
                 log=log)
        run_ours(Xtr__dds,
                 Ytr__dds,
                 Xt_dds,
                 Yt_dds,
                 lb_dds,
                 nsample,
                 lambda_mode,
                 q=quality,
                 sample_mode=sample_mode,
                 k=k,
                 rerun=False,
                 log=log)

    return ex
Example #15
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

from graph_utils import np_layer_array_to_graph_weights_array
from networks import cnn_dict, mlp_dict
from train_model import csordas_loss, eval_net, load_datasets
from utils import get_weight_tensors_from_state_dict, weights_to_layer_widths

# Warning: don't apply to network while pruning is happening.

# TODOS:
# - refactor compare_masks_clusters to share functions with this file
#   (probably by adding cluster_utils file)

ablation_acc_test = Experiment('ablation_acc_test')
ablation_acc_test.captured_out_filter = apply_backspaces_and_linefeeds
ablation_acc_test.observers.append(FileStorageObserver('ablation_acc_runs'))


@ablation_acc_test.config
def basic_config():
    training_dir = './training_runs/105/'
    shuffle_cluster_dir = './shuffle_clust_runs/82/'
    pre_mask_path = None
    is_pruned = False
    _ = locals()
    del _


def mask_from_cluster(cluster, cluster_labels, isolation_indicator,
                      layer_widths, net_type):
Example #16
def deepdictify(config):
    ret = dict()
    for item in config:
        value = config[item]
        if isinstance(value, dict):
            ret[item] = deepdictify(value)
        elif isinstance(value, list):
            ret[item] = list(value)
        else:
            ret[item] = copy(value)
    return ret


concept_experiments = Experiment('concepts_experiments')
concept_experiments.captured_out_filter = apply_backspaces_and_linefeeds


def draw_lrtg_bbox(bbox, **kwargs):
    rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], **kwargs)
    return rect


def draw_texted_bbox(ax, bbox, text, **kwargs):
    ax.add_patch(draw_lrtg_bbox(bbox, **kwargs))
    cx, cy = lrtb_center(bbox)
    ax.annotate(text, (cx, cy), color='w', weight='bold',
                fontsize=6, ha='center', va='center')


def lrtb_center(bbox):
Example #17
    def __init__(self,
                 f_main,
                 f_config,
                 f_capture,
                 observer_type='file',
                 mongo_url='mongodb://localhost:27017',
                 verbose=False):
        """
        :param f_main: function
            The main function for the experiment
        :param f_config: function, str or dict
            The function where all the sacred parameters are initialized,
            or a config file / dict containing the config parameters
        :param f_capture: function
            The function that implements the metrics logging API with sacred
            (meant to be used with a Keras Lambda callback, but it currently has issues and can be ignored)
        :param observer_type: str
            Either 'mongodb' or 'file'; selects which sacred observer is attached
        :param mongo_url: str
            The url for MongoDB
        :param verbose: bool
            If True logging is enabled
        """

        self.sacred_db_name()

        ex = Experiment(self.sacred_ex_name())
        ex.captured_out_filter = apply_backspaces_and_linefeeds

        if observer_type == 'mongodb':
            print('Connecting to MongoDB at {}:{}'.format(
                mongo_url, self.sacred_db_name()))
            ex.observers.append(
                MongoObserver.create(url=mongo_url,
                                     db_name=self.sacred_db_name()))
        elif observer_type == 'file':
            basedir = os.path.join(config['logs'], 'sacred')
            ex.observers.append(FileStorageObserver.create(basedir))
        else:
            raise ValueError(
                '{} is not a valid type for a SACRED observer.'.format(
                    observer_type))

        if hasattr(f_config, '__call__'):
            # init the experiment configuration using a function
            ex.config(f_config)
        elif isinstance(f_config, str):
            # init the experiment configuration using a file
            ex.add_config(f_config)
        elif isinstance(f_config, dict):
            # init the experiment configuration using a dict
            ex.add_config(f_config)
        else:
            raise ValueError(
                'You should provide either a function, a config file or a dict for setting up an experiment. '
                'The given parameter has type {} which is not valid'.format(
                    type(f_config)))

        # init the experiment logging (capture) method
        f_ex_capture = ex.capture(f_capture)

        # init the experiment main
        @ex.main
        def ex_main(_run):
            if observer_type == 'mongodb':
                return main_wrapper(f_main, ex, f_ex_capture,
                                    self.sacred_db_name(), _run)
            else:
                f_main(ex, _run, f_ex_capture)

        self.ex = ex
Example #18
import numpy as np
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

from spectral_cluster_model import layer_array_to_clustering_and_quality
from utils import (
    compute_percentile,
    get_random_int_time,
    load_masked_weights_numpy,
    load_model_weights_numpy,
)

shuffle_and_clust = Experiment('shuffle_and_clust')
shuffle_and_clust.captured_out_filter = apply_backspaces_and_linefeeds
shuffle_and_clust.observers.append(FileStorageObserver('shuffle_clust_runs'))


@shuffle_and_clust.config
def basic_config():
    num_clusters = 4
    weights_path = "./models/mlp_kmnist.pth"
    mask_path = None
    net_type = 'mlp'
    shuffle_method = "all"
    normalize_weights = True
    epsilon = 1e-9
    num_samples = 100
    eigen_solver = 'arpack'
    _ = locals()
Example #19
def new_exp(db_name='sacred'):
    ex = Experiment('jupyter_ex', interactive=True)
    ex.captured_out_filter = apply_backspaces_and_linefeeds
    ex.observers.append(MongoObserver(url=mongo_uri, db_name=db_name))

    @ex.config
    def my_config():
        which = 'random'
        whichalgo = 'GE|GELS|GV|LSI|LSG|MC|RN'
        dataset = ''
        alphas = [0.95]
        nvec, ncls, ncls_per_vec, nsel, dim = 0, 0, 0, 0, 0
        noise = 0
        docname, passes, topic_threshold = '', 20, 0.25
        sz_clique, lambd = 0, 1

    @ex.main
    def my_main(_run, which, whichalgo, dataset, alphas, nvec, ncls,
                ncls_per_vec, nsel, dim, noise, docname, passes,
                topic_threshold, sz_clique, lambd):
        # _run.log_scalar(metric_name, value[, step])

        # Datasets
        if which == 'random':
            vecs, clss, vec2cls = gen_random(nvec=nvec,
                                             ncls=ncls,
                                             ncls_per_vec=ncls_per_vec,
                                             dim=dim)
            intra_ = intra.Intra(vecs,
                                 vec2cls,
                                 len(clss),
                                 nsel,
                                 metric='euclidean',
                                 eps=1)
        elif which == 'proto':
            proto, vecs, vec2cls = gen_clusters(nvec=nvec,
                                                ncls=ncls,
                                                dim=dim,
                                                noise=noise)
            intra_ = intra.Intra(vecs,
                                 vec2cls,
                                 ncls,
                                 nsel,
                                 metric='euclidean',
                                 eps=1)
        elif which == 'topic':
            dv, doc2cls, ntopic, _ = gen_topics(docname, ncls, passes,
                                                topic_threshold)
            intra_ = intra.Intra(dv,
                                 doc2cls,
                                 ntopic,
                                 nsel,
                                 metric='cosine',
                                 eps=1)
            nvec = len(dv)
        elif which == 'scholar':
            kws, i2c, coms, i2v, vs, c2i = gen_scholar_net(sz_clique)
            intra_ = intra.Intra(np.arange(len(kws)),
                                 i2c,
                                 len(coms),
                                 nsel,
                                 cls2vec=c2i,
                                 dist=partial(intra.dist_jaccard, i2v=i2v),
                                 quality=intra.Quality(i2v),
                                 tradeoff=lambd,
                                 eps=1)
            nvec = len(kws)
            ncls = len(coms)
        elif which == 'movielens':
            vecs, tag2t, t2tag, t2c, cls2c, t2v, v2c = gen_movielens()
            intra_ = intra.Intra(vecs, [t2c[i] for i in range(len(vecs))],
                                 len(cls2c),
                                 nsel,
                                 metric='cosine',
                                 quality=intra.Quality(t2v),
                                 tradeoff=lambd,
                                 eps=1)
            nvec = len(vecs)
            ncls = len(cls2c)
        else:
            raise ValueError('Which data? {}'.format(which))
        print('nvec, ncls:', nvec, ncls)

        # Algorithms
        whichalgo = whichalgo.lower()
        if 'gv' in whichalgo:
            run_permutation(1, 'gv', intra_.greedy_vertex, intra_, ncls, _run)

        if 'lsi' in whichalgo:
            f = partial(intra_.local_search, intra=True, print_=False)
            run_permutation(1, 'lsi', f, intra_, ncls, _run)

        if 'lsg' in whichalgo:
            f = partial(intra_.local_search, intra=False, print_=False)
            run_once('lsg', f, intra_, ncls, _run)

        if 'rn' in whichalgo:
            f = partial(intra_.random)
            run_once('rn', f, intra_, ncls, _run)

        for alpha in alphas:
            if 'ge' in whichalgo:
                alpha = np.round(alpha, 2)
                f = partial(intra_.greedy_edge, exact=False, alpha=alpha)
                sel = run_once('ge{}'.format(int(alpha * 100)), f, intra_,
                               ncls, _run)

                if 'gels' in whichalgo:
                    f = partial(intra_.local_search,
                                init=sel,
                                intra=True,
                                print_=False)
                    run_once('gels{}'.format(int(alpha * 100)), f, intra_,
                             ncls, _run)

        if 'mc' in whichalgo:
            if intra_.quality is not None:
                run_permutation(1, 'mc', intra_.max_coverage, intra_, ncls,
                                _run)

    return ex
Example #20
from emmental.task import EmmentalTask
import torch
import torch.nn as nn
from torchvision import transforms
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

from cow_tus.data.transforms import training_ingredient as transforms_ingredient
from cow_tus.util.util import unpickle, ce_loss, output
from cow_tus.models.modules import zoo as modules

EXPERIMENT_NAME = 'trainer'
ex = Experiment(EXPERIMENT_NAME, ingredients=[transforms_ingredient])
ex.logger = logging.getLogger(__name__)
ex.captured_out_filter = apply_backspaces_and_linefeeds


@ex.config
def config(transforms):
    """
    Configuration for training harness.
    """
    hypothesis_conditions = ['single-instance-learning', 'baseline']
    exp_dir = path.join('experiments', *hypothesis_conditions)

    meta_config = {'device': 'cpu'}

    logging_config = {'evaluation_freq': 40, 'checkpointing': False}

    dataset_class = 'TUSDataset'
Example #21
import os
from os.path import dirname, abspath

import numpy as np
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import torch as th
from loggings import get_logger
import yaml

from run import run

# Configure how stdout/stderr are captured: 'no', 'sys' or 'fd'
# (default: "fd" on Linux/macOS, "sys" on Windows). Set to "no" to see stdout/stderr in the console.
SETTINGS['CAPTURE_MODE'] = "fd"
logger = get_logger()

ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds  # filter function applied to the run's captured output

results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")


@ex.main  # command-line entry point for running this experiment
def my_main(_run, _config, _log):
    # Setting the random seed throughout the modules
    config = config_copy(_config)
    np.random.seed(config["seed"])
    th.manual_seed(config["seed"])
    config['env_args']['seed'] = config["seed"]

    # run the framework
    run(_run, config, _log)
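All of the snippets above share the same skeleton: create an Experiment, attach an observer, set captured_out_filter, register a main function, and run it. The sketch below distills that pattern into a minimal, self-contained script; the experiment name, observer directory, config values and helper logic are illustrative placeholders, not taken from any of the projects above.

from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds

ex = Experiment('minimal_example')                        # illustrative experiment name
ex.captured_out_filter = apply_backspaces_and_linefeeds   # strip \b and \r progress-bar noise from captured stdout
ex.observers.append(FileStorageObserver('sacred_runs'))   # illustrative output directory


@ex.config
def config():
    epochs = 10  # overridable from the command line: `python script.py with epochs=20`
    lr = 1e-3


@ex.main
def main(_run, epochs, lr):
    # dummy "training loop": values logged here end up in the observer's metrics store
    for epoch in range(epochs):
        dummy_loss = 1.0 / (epoch + 1)
        _run.log_scalar('train.loss', dummy_loss, epoch)
    _run.log_scalar('lr', lr, 0)
    return 'done'


if __name__ == '__main__':
    ex.run_commandline()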