Code example #1
File: arg_parser.py  Project: jmrinaldi/sacred
def get_observers(args):
    observers = []
    if args['--mongo_db']:
        url, db_name, prefix = _parse_mongo_db_arg(args['--mongo_db'])
        if prefix:
            mongo = MongoObserver.create(db_name=db_name, url=url,
                                         prefix=prefix)
        else:
            mongo = MongoObserver.create(db_name=db_name, url=url)

        observers.append(mongo)

    return observers
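For context, a minimal self-contained sketch of the pattern this helper feeds into. The URL and database name are placeholders; MongoObserver.create is the pre-1.0 Sacred API (newer releases also allow constructing MongoObserver(...) directly), and the prefix argument used above is only available in older versions.

from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment('demo')
# Placeholder connection details.
ex.observers.append(MongoObserver.create(url='localhost:27017', db_name='sacred'))

@ex.automain
def run():
    return 0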
Code example #2
File: hyperopt_experiments.py  Project: Lab41/pythia
def objective(args_):

    # arguments to pass as config_updates dict
    global args
    # result to pass to hyperopt
    global result
    # command-line arguments 
    global parse_args

    try:
        ex = Experiment('Hyperopt')
        logger.debug("Adding observer for {}, DB {}".format(parse_args.mongo_db_address,parse_args.mongo_db_name))
        ex.observers.append(MongoObserver.create(url=parse_args.mongo_db_address, db_name=parse_args.mongo_db_name))
        
        pythia_args = make_args_for_pythia(args_)
        args = mp.get_args(**pythia_args) 
        ex.main(run_with_global_args)
        r = ex.run(config_updates=pythia_args)
        logger.debug("Experiment result: {}\n"
                     "Report to hyperopt: {}".format(r.result, result))

        return result

    except Exception:
        # If we somehow cannot get to the MongoDB server, then continue with the experiment
        logger.warning("Running without Sacred")
        run_with_global_args()
        return result  # run_with_global_args() is expected to set the global result
Code example #3
File: sacred_turn_hmm.py  Project: renj/TrajMap
def init():
    log_id = str(int(time.time()*10)%(60*60*24*365*10))+str(os.getpid())
    global logger
    logger = logging.getLogger(str(log_id))

    logger.setLevel(logging.DEBUG)  
      
    # write to file 
    fh = logging.FileHandler('ex.log')  
    fh.setLevel(logging.DEBUG)
      
    # write to console
    ch = logging.StreamHandler()  
    ch.setLevel(logging.DEBUG)  
      
    # Handler format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')  
    fh.setFormatter(formatter)  
    ch.setFormatter(formatter)  
      
    logger.addHandler(fh)
    logger.addHandler(ch)
      
    global ex
    ex = Experiment('TrajMap')
    ex.logger = logger
    ex.observers.append(MongoObserver.create(url='10.60.43.110:27017', db_name='TurnHMM'))
    #ex.observers.append(MongoObserver.create(url='127.0.0.1:27017', db_name='nTrajMap'))
    return ex, logger
Code example #4
File: experiments.py  Project: aflower15/pythia
def set_up_xp():
    # Check that MongoDB config is set
    try:
        mongo_uri=os.environ['PYTHIA_MONGO_DB_URI']
    except KeyError as e:
        print("Must define location of MongoDB in PYTHIA_MONGO_DB_URI for observer output",file=sys.stderr)
        raise

    ex = Experiment(ex_name)
    ex.observers.append(MongoObserver.create(url=mongo_uri,
                                         db_name=db_name))
    return ex
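A variant sketch of the same setup: instead of raising when PYTHIA_MONGO_DB_URI is unset, fall back to a file-based observer. The fallback is an assumption for illustration, not pythia's actual behavior, and the helper name and default arguments are placeholders.

import os
from sacred import Experiment
from sacred.observers import FileStorageObserver, MongoObserver

def set_up_xp_with_fallback(ex_name='pythia', db_name='pythia'):
    ex = Experiment(ex_name)
    mongo_uri = os.environ.get('PYTHIA_MONGO_DB_URI')
    if mongo_uri:
        ex.observers.append(MongoObserver.create(url=mongo_uri, db_name=db_name))
    else:
        # No MongoDB configured; keep run data on local disk instead.
        ex.observers.append(FileStorageObserver.create('runs'))
    return ex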
Code example #5
def my_config():
    learning_rate=0.13
    n_epochs = 1000
    data_path = "/home/drosen/repos/DeepLearningTutorials/data"
    datasetname = 'mnist.small.pkl.gz'
    #datasetname = 'mnist.pkl.gz'
    dataset = os.path.join(data_path, datasetname)
    batch_size=600
    theano_flags = "mode=FAST_RUN,device=gpu,floatX=float32"
    os.environ["THEANO_FLAGS"] = theano_flags
    db_name = "MY_DB"
    ex.observers.append(MongoObserver.create(db_name=db_name))
Code example #6
def my_config():
    learning_rate = 0.01
    n_epochs = 25
    data_path = "/home/drosen/repos/DeepLearningTutorials/data"
    datasetname = 'mnist.small.pkl.gz'
    #datasetname = 'mnist.pkl.gz'
    dataset = os.path.join(data_path, datasetname)
    batch_size = 100
    n_hidden = 500
    theano_flags = "mode=FAST_RUN,device=gpu,floatX=float32"
    os.environ["THEANO_FLAGS"] = theano_flags
    db_name = "MY_DB"
    ex.observers.append(MongoObserver.create(db_name=db_name))
    random_seed = 1234
    rng = numpy.random.RandomState(random_seed)
    activation = "tanh"
Code example #7
def objective(args_):

    global args
    global result
    global parse_args
    args=args_

    try:
        ex = Experiment('Hyperopt')
        ex.observers.append(MongoObserver.create(url=parse_args.mongo_db_address, db_name=parse_args.mongo_db_name))
        ex.main(lambda: run_with_global_args())
        r = ex.run(config_updates=args)
        print(r)

        return result

    except Exception:
        # If we somehow cannot get to the MongoDB server, then continue with the experiment
        print("Running without Sacred")
        run_with_global_args()
        return result  # run_with_global_args() is expected to set the global result
Code example #8
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, f1_score

from sacred import Experiment
from tqdm import tqdm

ex = Experiment('mental_face_transfer')
from sacred.observers import MongoObserver

# We must change the url to the bluehive node on which the mongo server is running
url_database = conf_url_database
mongo_database_name = conf_mongo_database_name

ex.observers.append(
    MongoObserver.create(url=url_database, db_name=mongo_database_name))

my_logger = logging.getLogger()
my_logger.disabled = True


@ex.config
def cfg():
    node_index = 0
    epoch = 50  #paul did 50
    shuffle = True
    num_workers = 2
    best_model_path = "/scratch/mhasan8/saved_models_from_projects/mental_face_transfer/" + str(
        node_index) + "_best_model.chkpt"
    experiment_config_index = 0
Code example #9
def run_for_class(clas, it=1):
    print('work on ' + clas)
    torch.cuda.empty_cache()
    data_clas = setup_data(clas)
    encoder_name = 'encoder_' + best_lm_exp_id
    drop_mult = args.do

    #learn = text_classifier_learner(data_clas, drop_mult=drop_mult, embed_prevent_first=0)
    #learn.load_encoder(encoder_name)

    all_lrs = []
    #for _ in range(3):
    #    all_lrs.append(news_utils.fastai.get_optimal_lr(learn, runs=1))
    #optim_lr = max(all_lrs)

    ex = Experiment(db_name + '_' + clas)
    ex.observers.append(MongoObserver.create(db_name=db_name + '_' + clas))

    @ex.config
    def my_config():
        exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
        factor = 2.6
        wd = 0.01
        moms = (0.8, 0.7)
        full_epochs = 200
        bs = 64
        embed_prevent = 0
        input_p = 0.3
        mod = 'simple_fit'
        lr = 0.001
        embed_p = 0.1

    @ex.main
    def run_exp(exp_id, drop_mult, input_p, embed_p, lr, moms, wd, factor,
                full_epochs):

        lrs = [lr / (factor**(4 - x)) for x in range(4)] + [lr]

        learn = text_classifier_learner(data_clas,
                                        drop_mult=drop_mult,
                                        embed_prevent_first=0)
        learn.load_encoder(encoder_name)

        learn.metrics += [
            KappaScore(),
            news_utils.fastai.F1Macro(),
            news_utils.fastai.F1Weighted(),
            news_utils.fastai.PrecisionMacro(),
            news_utils.fastai.RecallMacro(),
            news_utils.fastai.RecallMicro(),
            news_utils.fastai.PrecisionMicro()
        ]

        learn.callbacks += [
            SaveModelCallback(learn, name=exp_id, monitor='kappa_score'),
            EarlyStoppingCallback(learn,
                                  monitor='kappa_score',
                                  patience=20,
                                  mode='max'),
            news_utils.fastai.SacredLogger(learn, ex),
        ]

        for i in range(1, 5):
            epochs = 1
            if i in [1, 2, 3]:
                learn.freeze_to(-i)
            else:
                learn.unfreeze()
                epochs = full_epochs
            learn.fit(epochs, np.array(lrs))

    for _ in range(it):
        ex.run(config_updates={"drop_mult": drop_mult})
Code example #10
# create experiment:
ex = Experiment('PAN_CANCER_regressor_DS')

# add file observer
observer_path = '../runs/DEBUG' if DEBUG else '../runs'
ex.observers.append(
    FileStorageObserver.create(basedir=os.path.join(observer_path, ex.path)))

# only save to the mongodb when not in debug mode
if not DEBUG:
    # add mongo observer
    with open('../tools/.mongo', 'r') as f:
        auth_url = f.read()
        ex.observers.append(
            MongoObserver.create(url=auth_url, db_name='graduation'))


@ex.config
def my_config():
    dataset_name = 'TCGA'
    datasubset_name = '_coded'
    data_fname = 'exp_{}{}_normalized.hdf5'.format(dataset_name,
                                                   datasubset_name)
    data_path = os.path.join('../data/normalized/', data_fname)

    label_path = '../data/{}/exp_{}{}_labels_add.csv'.format(
        dataset_name, dataset_name, datasubset_name)

    genes_to_select = None
Code example #11
from keras import callbacks, utils, models, layers, optimizers
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from sklearn.model_selection import train_test_split

from custom_objects.callbacks import ObserveMetrics
from nn_blocks import conv
from utils.data_utils import DataSequence, prepare_data_df

_MODEL_ARC = 'ensemble'
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"

ex = Experiment(name=_MODEL_ARC)
ex.observers.append(
    MongoObserver.create(url='127.0.0.1:27017', db_name='severstal_sdd'))
ex.captured_out_filter = apply_backspaces_and_linefeeds


def train_model(model, train_seq, val_seq, training_callbacks):
    model.summary()
    history = model.fit_generator(train_seq,
                                  epochs=50,
                                  verbose=2,
                                  callbacks=training_callbacks,
                                  validation_data=val_seq,
                                  max_queue_size=4,
                                  workers=4,
                                  use_multiprocessing=True)
    return history
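ObserveMetrics above is project-specific and its implementation is not shown; as an assumption about what such a callback might do, here is a minimal Keras callback that forwards epoch metrics to the Sacred run via _run.log_scalar.

from keras.callbacks import Callback

class SacredMetrics(Callback):
    """Log every Keras epoch metric to the active Sacred run (sketch only)."""

    def __init__(self, run):
        super(SacredMetrics, self).__init__()
        self._run = run

    def on_epoch_end(self, epoch, logs=None):
        # `logs` holds loss/metric values for this epoch; forward each to Sacred.
        for name, value in (logs or {}).items():
            self._run.log_scalar(name, float(value), epoch)

Inside a captured function, an instance built from _run could then be passed in training_callbacks to train_model.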
Code example #12
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

ex = Experiment("mnist_cnn")
ex.observers.append(
    MongoObserver.create(url='10.69.46.232:27017', db_name='sacred'))
ex.captured_out_filter = apply_backspaces_and_linefeeds


# 超参数设置
@ex.config
def myconfig():
    # Device configuration
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Hyper parameters
    num_epochs = 5
    num_classes = 10
    batch_size = 100
    learning_rate = 0.1


# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
Code example #13
from addict import Dict
from sklearn.pipeline import Pipeline
import clinical_text_analysis as cta
import pandas as pd
import numpy as np
import random
from os import path
import tensorflow as tf
import data_reader as read
import util_funcs
import string

from addict import Dict
import sacred
from sacred.observers import MongoObserver
ex = sacred.Experiment(name="seizure_long_term")
ex.observers.append(MongoObserver.create(client=util_funcs.get_mongo_client()))
import preprocessingV2.preprocessingV2 as ppv2
from keras_models.metrics import f1, sensitivity, specificity, auroc
from sklearn.metrics import f1_score, roc_auc_score, classification_report


# @ex.capture
# def get_read_tfrecord(multilabel=False):
def read_tfrecord(example):
    features = {'original_index': tf.io.FixedLenFeature([1], tf.int64, ),\
               'data':  tf.io.FixedLenFeature([9*21*1000], tf.float32,),\
               'label':  tf.io.FixedLenFeature([10], tf.int64, [0 for i in range(10)]),\
               'subtypeLabel':  tf.io.FixedLenFeature([10], tf.int64, [0 for i in range(10)]),\
               'patient':  tf.io.FixedLenFeature([1], tf.int64,), \
               'session':  tf.io.FixedLenFeature([1], tf.int64,),
                       }
Code example #14
        _run.info['trainHistory'] = alg.evals_result_['validation_0']['mae']
        _run.info['validHistory'] = alg.evals_result_['validation_1']['mae']
    # Optionally save oob predictions
        if save_oob_predictions:
            filename = '{}_oob_pred.csv'.format(time)
            pd.DataFrame(data.inverseScaleTransform(pred.values),
                         index=data.trainids,columns=['loss']).to_csv(
                             os.path.join(output_path, filename),
                         index_label='id')
    # Optionally generate test predictions
    if save_test_predictions:
        filename = '{}_test_pred.csv'.format(time)
        Xtr, ytr, Xte, _ = data.get_train_test_features() 
        print(alg.get_params())
        alg.fit(Xtr,
                ytr)
        predtest = pd.DataFrame(
            data.inverseScaleTransform(alg.predict(Xte)),
            index = data.testids, columns = ['loss'])
        predtest.to_csv(os.path.join(output_path, filename), index_label='id')
    return maeScore

if __name__ == '__main__':
    print(sys.argv)
    if len(sys.argv) > 1 and sys.argv[1] == 'stam':
        ex.observers.append(MongoObserver.create(url='login1:27017',db_name = "allstate"))
    else:
        ex.observers.append(MongoObserver.create(db_name = "allstate"))
    run = ex.run()  

Code example #15
File: 2train.py  Project: jfilter/masters-thesis
from fastai.imports import nn, torch
from fastai.callbacks import *

import random
import math
import datetime
from sacred import Experiment

from sacred.observers import MongoObserver
import fastai

import news_utils.fastai

ex = Experiment('germanlm_raw')

ex.observers.append(MongoObserver.create(db_name='germanlm'))

EX_PA = Path('/mnt/data/group07/johannes/germanlm/exp_1')


@ex.config
def my_config():
    lemma = "no"
    vocab = "25k"
    drop_mult = 0
    # lr = 1e-2
    epochs = 2
    bs = 128
    exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")

Code example #16
import json
import logging
import os
import random
import tempfile

import pymongo
from sacred import Experiment
from sacred.observers import MongoObserver

import analyse
import config
import scenario as scenario_module  # "scenario" name already used

ex = Experiment('wf_open_world')
ex.observers.append(MongoObserver.create())


# pylint: disable=unused-variable
@ex.config
def my_config():
    scenario = random.choice(scenario_module.list_all()).path
    or_level = None
    remove_small = None
    remove_timeout = None
    auc_bound = None
    background_size = 'auto'  # 'auto', number, None
    binarize = True
    exclude_sites = []
    current_sites = False  # fix e.g. google duplicates in bg set
# pylint: enable=unused-variable
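Config values declared this way can be overridden per run without editing the file; a small usage sketch (the override values are illustrative):

# Programmatic override for a single run:
ex.run(config_updates={'binarize': False, 'remove_small': True})

# Equivalent command-line override (Sacred's `with` syntax):
#   python <script>.py with binarize=False remove_small=True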
Code example #17
File: main.py  Project: Lab41/attalos
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Two layer linear regression')
    parser.add_argument("image_feature_file_train",
                        type=str,
                        help="Image Feature file for the training set")
    parser.add_argument("text_feature_file_train",
                        type=str,
                        help="Text Feature file for the training set")
    parser.add_argument("image_feature_file_test",
                        type=str,
                        help="Image Feature file for the test set")
    parser.add_argument("text_feature_file_test",
                        type=str,
                        help="Text Feature file for the test set")
    parser.add_argument("word_vector_file",
                        type=str,
                        help="Text file containing the word vectors")

    # Optional Args
    parser.add_argument("--learning_rate",
                        type=float,
                        default=0.001,
                        help="Learning Rate")
    parser.add_argument("--num_epochs",
                        type=int,
                        default=200,
                        help="Number of epochs to run for")
    parser.add_argument("--batch_size",
                        type=int,
                        default=128,
                        help="Batch size to use for training")
    parser.add_argument("--model_type",
                        type=str,
                        default="multihot",
                        choices=['multihot', 'naivesum', 'wdv', 'negsampling', 'fast0tag'],
                        help="Loss function to use for training")
    parser.add_argument("--in_memory",
                        action='store_true',
                        default="store_false",
                        help="Load training image features into memory for faster training")
    parser.add_argument("--model_input_path",
                        type=str,
                        default=None,
                        help="Model input path (to continue training)")
    parser.add_argument("--model_output_path",
                        type=str,
                        default=None,
                        help="Model output path (to save training)")

    # new args
    parser.add_argument("--hidden_units",
                        type=str,
                        default="200",
                        help="Define a neural network as comma separated layer sizes. If log-reg, then set to '0'.")
    parser.add_argument("--cross_eval",
                        action="store_true",
                        default=False,
                        help="Use if test dataset is different from training dataset")
    parser.add_argument("--word_vector_type",
                        type=str,
                        choices=[t.name for t in WordVectorTypes],
                        help="Format of word_vector_file")
    parser.add_argument("--epoch_verbosity",
                        type=int,
                        default=10,
                        help="Epoch verbosity rate")
    parser.add_argument("--verbose_eval",
                        action="store_true",
                        default=False,
                        help="Use to run evaluation against test data every epoch_verbosity")
    parser.add_argument("--optim_words",
                        action="store_true",
                        default=False,
                        help="If using negsampling model_type, use to jointly optimize words")
    parser.add_argument("--ignore_posbatch",
                        action="store_true",
                        default=False,
                        help="Sample, ignoring from positive batch instead of examples. This should be taken out in future iters.")
    parser.add_argument("--joint_factor",
                        type=float,
                        default=1.0,
                        help="Multiplier for learning rate in updating joint optimization")
    parser.add_argument("--use_batch_norm",
                        action="store_true",
                        default=False,
                        help="Do we want to use batch normalization? Default is False")
    parser.add_argument("--opt_type",
                        type=str,
                        default="adam",
                        help="What type of optimizer would you like? Choices are (adam,sgd)")
    parser.add_argument("--weight_decay",
                        type=float,
                        default=0.0,
                        help="Weight decay to manually decay every 10 epochs. Default=0 for no decay.")
    parser.add_argument("--scale_words",
                        type=float,
                        default=1.0,
                        help="Scale the word vectors. If set to zero, scale by L2-norm. Otherwise, wordvec=scale x wordvec")
    parser.add_argument("--scale_images",
                        type=float,
                        default=1.0,
                        help="Scale the word vectors. If set to zero, scale by L2-norm. Otherwise, imvec=scale x imvec. ")
    parser.add_argument("--fast_sample",
                        action="store_true",
                        default=False,
                        help="Fast sample based on distribution, only use in large dictionaries")


    args = parser.parse_args()

    try:
        # Sacred Imports
        from sacred import Experiment
        from sacred.observers import MongoObserver

        from sacred.initialize import Scaffold

        # Monkey patch to avoid having to declare all our variables
        def noop(item):
            pass

        Scaffold._warn_about_suspicious_changes = noop

        ex = Experiment('Attalos')
        ex.observers.append(MongoObserver.create(url=os.environ['MONGO_DB_URI'],
                                                 db_name='attalos_experiment'))
        ex.main(lambda: convert_args_and_call_model(args))
        ex.run(config_updates=args.__dict__)
    except ImportError:
        # We don't have sacred, just run the script
        logger.warn('Not using Sacred. Your results will not be saved')
        convert_args_and_call_model(args)
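The try/except ImportError guard above can be factored into a small helper so any script can opt into Sacred when it is installed; a sketch under the assumption that MONGO_DB_URI is set as in the original (the helper name is illustrative):

import os

def run_with_optional_sacred(experiment_name, main_fn, config):
    """Run main_fn through Sacred if it is importable, otherwise call it directly."""
    try:
        from sacred import Experiment
        from sacred.observers import MongoObserver

        ex = Experiment(experiment_name)
        ex.observers.append(MongoObserver.create(url=os.environ['MONGO_DB_URI'],
                                                 db_name='attalos_experiment'))
        ex.main(lambda: main_fn(config))
        return ex.run(config_updates=config)
    except ImportError:
        # Sacred is not available; run the model without tracking.
        return main_fn(config)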
Code example #18
from config import SEMEVAL_HUMOR_TRAIN_DIR
from config import SEMEVAL_HUMOR_TRIAL_DIR
from config import TWEET_SIZE
from tf_tools import GPU_OPTIONS
from tf_tools import HUMOR_DROPOUT
from tf_tools import create_dense_layer
from tf_tools import create_tensorboard_visualization
from tf_tools import predict_on_hashtag
from tf_tools import build_humor_model
from tools import extract_tweet_pair_from_hashtag_datas
from tools import get_hashtag_file_names
from tools import load_hashtag_data
from tools import load_hashtag_data_and_vocabulary

ex = Experiment('humor_model')
ex.observers.append(MongoObserver.create(db_name='humor_runs'))

EMBEDDING_HUMOR_MODEL_LEARNING_RATE = .00001
N_TRAIN_EPOCHS = 2


@ex.config
def my_config():
    learning_rate = .000005  # np.random.uniform(.00005, .0000005)
    num_epochs = 5  # int(np.random.uniform(1.0, 4.0))
    dropout = 1  # np.random.uniform(.5, 1.0)
    hidden_dim_size = 800  # int(np.random.uniform(200, 3200))
    use_emb_model = True
    use_char_model = True
    model_save_dir = EMB_CHAR_HUMOR_MODEL_DIR
    if '-emb-only' in sys.argv:
Code example #19
File: regress2sum.py  Project: Andrew62/attalos
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Two layer linear regression')
    parser.add_argument("image_feature_file_train",
                        type=str,
                        help="Image Feature file for the training set")
    parser.add_argument("text_feature_file_train",
                        type=str,
                        help="Text Feature file for the training set")
    parser.add_argument("image_feature_file_test",
                        type=str,
                        help="Image Feature file for the test set")
    parser.add_argument("text_feature_file_test",
                        type=str,
                        help="Text Feature file for the test set")
    parser.add_argument("word_vector_file",
                        type=str,
                        help="Text file containing the word vectors")

    # Optional Args
    parser.add_argument("--learning_rate",
                        type=float,
                        default=.001,
                        help="Learning Rate")
    parser.add_argument("--epochs",
                        type=int,
                        default=200,
                        help="Number of epochs to run for")
    parser.add_argument("--batch_size",
                        type=int,
                        default=128,
                        help="Batch size to use for training")
    parser.add_argument("--network",
                        type=str,
                        default="200,200",
                        help="Define a neural network as comma separated layer sizes")
    parser.add_argument("--model_type",
                        type=str,
                        default="mse",
                        choices=['mse', 'negsampling', 'fast0tag'],
                        help="Loss function to use for training")
    parser.add_argument("--in_memory",
                        action='store_true',
                        default="store_false",
                        help="Load training image features into memory for faster training")
    parser.add_argument("--model_input_path",
                        type=str,
                        default=None,
                        help="Model input path (to continue training)")
    parser.add_argument("--model_output_path",
                        type=str,
                        default=None,
                        help="Model output path (to save training)")
    parser.add_argument("--max_pos",
                        type=int,
                        default=5,
                        help="Max number of positive examples")
    parser.add_argument("--max_neg",
                        type=int,
                        default=10,
                        help="Max number of negative examples")

    global args
    args = parser.parse_args()

    try:
        # Sacred Imports
        from sacred import Experiment
        from sacred.observers import MongoObserver

        from sacred.initialize import Scaffold

        # Monkey patch to avoid having to declare all our variables
        def noop(item):
            pass
        Scaffold._warn_about_suspicious_changes = noop

        ex = Experiment('Regress2sum')
        ex.observers.append(MongoObserver.create(url=os.environ['MONGO_DB_URI'],
                                             db_name='attalos_experiment'))
        ex.main(lambda: convert_args_and_call_model())
        ex.run(config_updates=args.__dict__)
    except ImportError:
        # We don't have sacred, just run the script
        convert_args_and_call_model()
Code example #20
import numpy as np
from datasets.mnist import mnist
import os
from torchvision.utils import make_grid

from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment()
# Set up database logs
uri = os.environ.get('MLAB_URI')
database = os.environ.get('MLAB_DB')
if all([uri, database]):
    print(uri)
    print(database)
    ex.observers.append(MongoObserver.create(uri, database))

IMG_WIDTH = 28
IMG_HEIGHT = 28
IMG_PIXELS = IMG_WIDTH * IMG_HEIGHT

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def log_prior(x):
    """
    Compute the elementwise log probability of a standard Gaussian, i.e.
    N(x | mu=0, sigma=1).
    """
    logp = -0.5 * ((x**2) + np.log(2.0 * np.pi))
Code example #21
from sacred import Experiment
from sacred.observers import MongoObserver
from main_transboost_v2 import main
from transboost.callbacks.sacred_callbacks import SacredMetricsCallback
from transboost.datasets import MNISTDataset, CIFAR10Dataset

ex = Experiment()

ex.observers.append(
    MongoObserver.create(
        url='mongodb://*****:*****@127.0.0.1:27017',
        db_name='sacred'))


@ex.config
def my_config():
    m = 60000
    val = 6000
    dataset = 'mnist'
    center = True
    reduce = True
    encodings = 'onehot'
    wl = 'ridge'
    fs = 5
    fsh = 0
    n_layers = 1
    n_filters_per_layer = [200]
    bank_ratio = .05
    fn = 'c'
    loc = 3
    rot = 4
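The observer URL above embeds credentials (redacted here). One alternative, shown as an assumption rather than this project's code, is to assemble the URL from environment variables so credentials never appear in the source (the variable names MONGO_USER and MONGO_PASSWORD are hypothetical):

import os
from sacred import Experiment
from sacred.observers import MongoObserver

user = os.environ['MONGO_USER']          # hypothetical variable name
password = os.environ['MONGO_PASSWORD']  # hypothetical variable name
url = 'mongodb://{}:{}@127.0.0.1:27017'.format(user, password)

ex = Experiment()
ex.observers.append(MongoObserver.create(url=url, db_name='sacred'))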
Code example #22
import os

import tensorflow as tf

from sacred import Experiment
from sacred.stflow import LogFileWriter
from sacred.observers import MongoObserver

from reslab.constants import DATA_PATH
from reslab.model import model_fn
from reslab.data import get_input_fn

reslab_exp = Experiment('reslab')

reslab_exp.observers.append(
    MongoObserver.create(url='mongo:27017', db_name='reslab'))


@reslab_exp.config
def cfg():
    max_steps_train = 3000
    batch_size = 256
    batch_norm = True
    layers = [16, 16, 32, 64]
    lr_values = [1e-1, 1e-2, 1e-3]
    lr_boundaries = [max_steps_train // 3, max_steps_train // 3 * 2]
    l2_scale = 1e-4
    bn_momentum = 0.99
    momentum = 0.9

Code example #23
import news_utils.fastai

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--exp")
parser.add_argument("--device", type=int)
args = parser.parse_args()

torch.cuda.set_device(args.device)

EX_PA = Path('/mnt/data/group07/johannes/ynacc_proc/replicate/' + args.exp)

ex = Experiment(EX_PA.stem)

ex.observers.append(MongoObserver.create(db_name=EX_PA.stem))

print('f')

@ex.config
def my_config():
    bs=64
    epochs_start = math.ceil(random.uniform(0, 2))
    epochs=math.ceil(random.uniform(5, 10))
    drop_mult=random.uniform(0.4, 0.8)
    layer_factor=random.uniform(2, 3)
    exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
    backwards = False

@ex.main
def my_main(epochs, drop_mult, exp_id, bs, layer_factor, epochs_start):    
Code example #24
def mongo_observer():
    observer = MongoObserver.create(url=None, db_name="incense_delete_test")
    return observer
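A short usage sketch for a fixture-style factory like the one above; how the incense test suite actually consumes it may differ.

from sacred import Experiment

ex = Experiment('incense_delete_test_demo')
ex.observers.append(mongo_observer())

@ex.main
def main():
    return 0

# ex.run() would then record this run in the "incense_delete_test" database
# on the default localhost MongoDB (url=None).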
Code example #25
import numpy as np
from sacred import Experiment
from sacred.observers import MongoObserver, FileStorageObserver
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

from utils import DatasetLoader, ResultStorage

ex = Experiment("tfid_clf")
ex.observers.append(MongoObserver.create(
    url="mongodb://localhost:27017",
    db_name="lyrics_classification"
))

ex.add_config("configs/preprocess_config.json")
@ex.config
def model_config():
    classifier_params = {
        "C": 0.5,
        "kernel": "linear"
    }


@ex.automain
def main(preprocess_params, classifier_params):
    dataset_loader = DatasetLoader()
    X_train, y_train = dataset_loader.load_train()
    X_test, y_test = dataset_loader.load_test()
Code example #26
from autodiscern import DataManager, model
from autodiscern.experiment import PartitionedExperiment
from autodiscern.experiments.DocExperiment import DocLevelModelRun
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
from sacred import Experiment
from sklearn.ensemble import RandomForestClassifier
from sacred.observers import MongoObserver

# run with `python doc_experiment.py`
# or `python doc_experiment.py with "test_mode=True"`

ex = Experiment()
ex.observers.append(MongoObserver.create(
    url='mongodb://*****:*****@127.0.0.1:27017/?authMechanism=SCRAM-SHA-1',
    db_name='sacred'))


@ex.capture
def get_exp_id(_run):
    return _run._id


@ex.config
def my_config():

    test_mode = False
    if test_mode:
        run_hyperparam_search = False
        num_partitions_to_run = 1
Code example #27
from downstream_task.models import CharRNN, SimpleLSTMTagger
from comick import TheFinalComick, TheFinalComickBoS, BoS, Mimick, MimickV2

from utils import *

exp_name = os.getenv('EXP_NAME', default='ud_tagging_model_multiple_tags')
db_url = os.getenv('DB_URL', default='localhost')
db_name = os.getenv('DB_NAME', default='chalet')

# Logging thangs
base_config_file = './configs/base.json'

# Experiment
experiment = Experiment(exp_name)
experiment.add_config(base_config_file)
experiment.observers.append(MongoObserver.create(url=db_url, db_name=db_name))

client = MongoClient(db_url)
database = client[db_name]
collection = database['logs']

languages = {
    'kk': ('kk', 'UD_Kazakh', 'kk-ud'),
    'ta': ('ta', 'UD_Tamil', 'ta-ud'),
    'lv': ('lv', 'UD_Latvian', 'lv-ud'),
    'vi': ('vi', 'UD_Vietnamese', 'vi-ud'),
    'hu': ('hu', 'UD_Hungarian', 'hu-ud'),
    'tr': ('tr', 'UD_Turkish', 'tr-ud'),
    'el': ('el', 'UD_Greek', 'el-ud'),
    'bg': ('bg', 'UD_Bulgarian', 'bg-ud'),
    'sv': ('sv', 'UD_Swedish', 'sv-ud'),
Code example #28
File: train_model.py  Project: halilbilgin/dta_pred
    FLAGS.output_path = os.path.join(FLAGS.output_path,
                                     FLAGS.experiment_name + "_" + time_str)
    FLAGS.log_path = os.path.join(FLAGS.output_path, "logs")
    FLAGS.checkpoints_path = os.path.join(FLAGS.output_path, "checkpoints")
    ex = Experiment(FLAGS.experiment_name)

    if not os.path.exists(FLAGS.output_path):
        makedirs(FLAGS.output_path)
        makedirs(os.path.join(FLAGS.log_path))
        makedirs(os.path.join(FLAGS.checkpoints_path))

    mongo_conf = FLAGS.mongodb
    if mongo_conf is not None:
        mongo_conf = FLAGS.mongodb.split(":")
        ex.observers.append(
            MongoObserver.create(url=":".join(mongo_conf[:-1]),
                                 db_name=mongo_conf[-1]))

    logging(str(FLAGS), FLAGS.log_path)

    ex.main(run_experiment)
    cfg = vars(FLAGS)
    cfg["FLAGS"] = FLAGS
    ex.add_config(cfg)

    r = ex.run()

# KD-GIP and KP-GS-domain
# kernels, followed closely by KD-GIP and KP-SW+ k
Code example #29
File: example.py  Project: slp-ntua/keras-sacred
import numpy as np
from keras.models import Sequential
from keras.layers import InputLayer, Dense, BatchNormalization, Activation, Dropout

from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment('My_Experiment')
my_url = '127.0.0.1:27017'  # Or <server-static-ip>:<port> if running on server
ex.observers.append(MongoObserver.create(url=my_url, db_name='my_database'))


@ex.config
def dnn_config():
    input_dim = 100
    output_dim = 20
    neurons = 64
    activation = 'relu'
    dropout = 0.4


@ex.automain
def dnn_main(input_dim, output_dim, neurons, activation, dropout,
             _run):  # Include _run in input for tracking metrics
    # Dummy data
    x_train = np.random.randn(1000, input_dim)
    y_train = np.random.randn(1000, output_dim)
    x_valid = np.random.randn(1000, input_dim)
    y_valid = np.random.randn(1000, output_dim)

    # Model architecture
Code example #30
File: EEGNAS_experiment.py  Project: erap129/EEGNAS
            if index+1 < global_vars.get('start_exp_idx'):
                continue
            if global_vars.get('exp_id'):
                exp_id = global_vars.get('exp_id')
            configuration['DEFAULT']['exp_id'] = exp_id
            if FIRST_RUN:
                FIRST_DATASET = global_vars.get('dataset')
                if global_vars.get('include_params_folder_name'):
                    multiple_values.extend(global_vars.get('include_params_folder_name'))
                FIRST_RUN = False
            exp_name = f"{exp_id}_{index+1}_{experiment}"
            exp_name = add_params_to_name(exp_name, multiple_values)
            ex.config = {}
            ex.add_config({**configuration, **{'tags': [exp_id]}})
            if len(ex.observers) == 0 and not args.debug_mode:
                ex.observers.append(MongoObserver.create(url=f'mongodb://{global_vars.get("mongodb_server")}'
                                                             f'/{global_vars.get("mongodb_name")}',
                                                     db_name=global_vars.get("mongodb_name")))
            global_vars.set('sacred_ex', ex)
            try:
                run = ex.run(options={'--name': exp_name})
                if not args.debug_mode:
                    exp_line = add_exp(exp_name, index+1, all_exps, run)
                    pd.DataFrame(all_exps).to_csv(f'reports/{exp_id}.csv', index=False)
                    if global_vars.get('upload_exp_results'):
                        upload_exp_results_to_gdrive(exp_line, 'University/Masters/Experiment Results/EEGNAS_results.xlsx')
            except Exception as e:
                print(f'failed experiment {exp_id}_{index+1}, continuing...')
        if args.drive == 't':
            upload_exp_to_gdrive(FOLDER_NAMES, FIRST_DATASET)
Code example #31
File: finetune_CL.py  Project: jfilter/masters-thesis
def run_for_class(clas, it=1):
    print('work on ' + clas)
    torch.cuda.empty_cache()
    data_clas = setup_data(clas)
    encoder_name = 'encoder_' + best_lm_exp_id
    drop_mult = 1

    learn = text_classifier_learner(data_clas,
                                    drop_mult=drop_mult,
                                    embed_prevent_first=0)
    learn.load_encoder(encoder_name)

    optim_lr = news_utils.fastai.get_optimal_lr(learn, runs=3)

    ex = Experiment(db_name + '_' + clas)
    ex.observers.append(MongoObserver.create(db_name=db_name + '_' + clas))

    @ex.config
    def my_config():
        exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
        factor = 2.6
        wd = 1e-7
        moms = (0.8, 0.7)
        full_epochs = 50
        bs = 64
        embed_prevent = 0
        lm_model_type = 'trained_0_embed_prevent'

    @ex.main
    def run_exp(exp_id, drop_mult, lr, moms, wd, factor, full_epochs):

        lrs = [lr / (factor**(4 - x)) for x in range(4)] + [lr]

        learn = text_classifier_learner(data_clas,
                                        drop_mult=drop_mult,
                                        embed_prevent_first=0)
        learn.load_encoder(encoder_name)

        learn.metrics += [
            KappaScore(),
            news_utils.fastai.F1Macro(),
            news_utils.fastai.F1Weighted(),
            news_utils.fastai.PrecisionMacro(),
            news_utils.fastai.RecallMacro()
        ]

        learn.callbacks += [
            SaveModelCallback(learn, name=exp_id, monitor='kappa_score'),
            news_utils.fastai.SacredLogger(learn, ex),
        ]

        for i in range(1, 4):
            epochs = 1
            if i in [1, 2]:
                learn.freeze_to(-i)
            else:
                learn.unfreeze()
                epochs = full_epochs
            learn.fit_one_cycle(epochs, np.array(lrs), wd=wd, moms=moms)

    for _ in range(it):
        ex.run(config_updates={"lr": optim_lr, "drop_mult": drop_mult})
Code example #32
import argparse
import torch
import yaml
from torchvision import datasets, transforms
import nn_modules
from nn_utils import Args, train, test, digitize_input
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import MongoObserver
import time

ex = Experiment()
ex.observers.append(MongoObserver.create(db_name='MemristorNN'))
ex.captured_out_filter = apply_backspaces_and_linefeeds


# Training settings
@ex.config
def my_config():
    args = Args('Parameters.yaml')
    net = "manhattan_net"
    digitizeInput = False
    lower = 0
    upper = 1
    args.lower = lower
    args.upper = upper


@ex.automain
def main(args,digitizeInput,net,_run):
Code example #33
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--exp")
#parser.add_argument("--device", type=int)
args = parser.parse_args()

#torch.cuda.set_device(args.device)

EX_PA = Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/' +
             args.exp)

exp_name = str(EX_PA.stem) + 'lm'

ex = Experiment(exp_name)
ex.observers.append(MongoObserver.create(db_name=exp_name))


@ex.config
def my_config():
    bs = 128
    #epochs_start = math.ceil(random.uniform(0, 2))
    epochs_start = 1
    epochs = 1  #math.ceil(random.uniform(1, 3))
    drop_mult = 2  #random.uniform(1, 2)
    layer_factor = 4
    exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
    backwards = False
    dont_emb = 0

Code example #34
def run_for_class(it=1):
    train_df = pd.read_pickle(
        Path('/mnt/data/group07/johannes/ompc/data_ann_pp_short4') / cat /
        fold / 'train.pkl')
    test_df = pd.read_pickle(
        Path('/mnt/data/group07/johannes/ompc/data_ann_pp_short4') / cat /
        fold / 'test.pkl')
    print(train_df.shape, test_df.shape)

    if cat == 'SentimentNeutral':
        train_df['Value'] = train_df['Value'].apply(lambda x: 1
                                                    if x == 0 else 0)
        test_df['Value'] = test_df['Value'].apply(lambda x: 1 if x == 0 else 0)
        print('fixing sentimal neutral')

    data = TextClasDataBunch.from_ids(pad_idx=25000,
                                      bs=64,
                                      path=exp_path,
                                      vocab=data_lm_ft.vocab,
                                      classes=[0, 1],
                                      train_lbls=train_df['Value'],
                                      valid_lbls=test_df['Value'],
                                      train_ids=train_df['res'],
                                      valid_ids=test_df['res'])

    drop_mult = 1
    exp_name = '4pp_' + cat + '_' + fold + '_' + str(drop_mult).replace(
        '.', '_')

    ex = Experiment(exp_name)
    ex.observers.append(MongoObserver.create(db_name=exp_name))

    @ex.config
    def my_config():
        exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
        factor = 2.6
        full_epochs = 10
        bs = 64
        mod = 'simple_fit'
        lr = 0.001

    @ex.main
    def run_exp(exp_id, drop_mult, lr, factor, full_epochs):

        lrs = [lr / (factor**(4 - x)) for x in range(4)] + [lr]

        learn = text_classifier_learner(data, drop_mult=drop_mult)

        learn.load_encoder('enc5')
        learn.metrics += [
            KappaScore(),
            news_utils.fastai.F1Bin(),
            news_utils.fastai.PrecBin(),
            news_utils.fastai.RecaBin()
        ]
        learn.loss_func = torch.nn.CrossEntropyLoss(
            torch.FloatTensor(
                sklearn.utils.class_weight.compute_class_weight(
                    'balanced', [0, 1], train_df['Value'])).cuda())

        learn.callbacks += [
            SaveModelCallback(learn, name=exp_id, monitor='F1_bin'),
            EarlyStoppingCallback(learn,
                                  monitor='F1_bin',
                                  patience=80,
                                  mode='max'),
            news_utils.fastai.SacredLogger(learn, ex),
        ]

        for i in range(1, 0):
            epochs = 1
            if i in [1, 2, 3]:
                learn.freeze_to(-i)
                #if i == 2:
                #epochs = full_epochs
            else:
                learn.unfreeze()
                epochs = full_epochs
            learn.fit_one_cycle(epochs, np.array(lrs), moms=(0.8, 0.7))
        ep_fac = 3
        learn.fit_one_cycle(ep_fac, 2e-2, moms=(0.8, 0.7))
        learn.freeze_to(-2)
        learn.fit_one_cycle(ep_fac,
                            slice(1e-2 / (2.6**4), 1e-2),
                            moms=(0.8, 0.7))
        learn.freeze_to(-3)
        learn.fit_one_cycle(ep_fac,
                            slice(5e-3 / (2.6**4), 5e-3),
                            moms=(0.8, 0.7))
        learn.unfreeze()
        learn.fit_one_cycle(2 * ep_fac,
                            slice(1e-3 / (2.6**4), 1e-3),
                            moms=(0.8, 0.7))

    for _ in range(it):
        ex.run(config_updates={"drop_mult": drop_mult})
Code example #35
# pylint: disable=unused-variable,no-value-for-parameter
import logging
import os

import joblib
from sacred import Experiment
from sacred.observers import MongoObserver

import peptidebinding.helper.models as models

experiment_name = "random_forest"

ex = Experiment(experiment_name)
ex.observers.append(
    MongoObserver.create(
        url=f"mongodb+srv://{os.environ['MOORHEN_USERNAME']}:"
        f"{os.environ['MOORHEN_PASSWORD']}@moorhen-5migi.mongodb.net/",
        db_name="MY_DB"))


@ex.config  # Configuration is defined through local variables.
def cfg():
    """Config definitions for sacred"""
    representation = "bag_of_words"
    dataset = "beta/rand"
    seed = 4213
    rf_params = {
        'n_estimators': [10, 50, 100, 200, 400,
                         600],  # Number of trees in the forest
        'max_features': [0.1, 0.3, 'sqrt',
                         'log2'],  # Methods to choose number of features
        'max_depth': [2, 5, 10, 20, 30, 50, 60]  # Maximum depth of trees
Code example #36
import seaborn as sns
import spacy
import torch
import torch.optim as optim
import torchnet as tnt

from models import FeedforwardTagger, MemorizationTagger
from utils import CorpusReader, SacredAwarePycrfsuiteTrainer as Trainer

ex = Experiment(name='id-pos-tagging')

# Setup Mongo observer
mongo_url = os.getenv('SACRED_MONGO_URL')
db_name = os.getenv('SACRED_DB_NAME')
if mongo_url is not None and db_name is not None:
    ex.observers.append(MongoObserver.create(url=mongo_url, db_name=db_name))

SACRED_OBSERVE_FILES = os.getenv('SACRED_OBSERVE_FILES',
                                 'false').lower() == 'true'


class ModelName(enum.Enum):
    # Majority vote baseline
    MAJOR = 'majority'
    # Memorization baseline
    MEMO = 'memo'
    # Conditional random field (Pisceldo et al., 2009)
    CRF = 'crf'
    # Feedforward neural network (Abka, 2016)
    FF = 'feedforward'
Code example #37
from fastai.callbacks import *

import random
import math
import datetime
from sacred import Experiment

from sacred.observers import MongoObserver
import fastai
import news_utils.fastai

import news_utils

ex = Experiment('sec lm')

ex.observers.append(MongoObserver.create(db_name='seclm'))

EX_PA = Path('/mnt/data/group07/johannes/ynacc_proc/replicate/lmmodels2')


@ex.config
def my_config():
    bs = 64
    epochs = math.ceil(random.uniform(5, 20))
    drop_mult = random.uniform(0.1, 0.5)
    lr = random.uniform(1e-3, 1e-4)
    exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
    backwards = False
    frozen = 'last'

Code example #38
File: run_random_search.py  Project: Qwlouse/Binding
from sacred.observers import MongoObserver
from dae import ex

nr_runs_per_dataset = 100
datasets = {
    'bars': 12, 
    'corners': 5,
    'shapes': 3,
    'multi_mnist': 3,
    'mnist_shape': 2,
    'simple_superpos':2
}
db_name = 'binding_via_rc'

# Random search
ex.observers = [MongoObserver.create(db_name=db_name, prefix='random_search')]
for ds, k in datasets.items():
    for i in range(nr_runs_per_dataset):
        ex.run(config_updates={'dataset.name': ds, 'verbose': False, 'em.k': k},
               named_configs=['random_search'])


# Multi-Train Runs
ex.observers = [MongoObserver.create(db_name=db_name, prefix='train_multi')]
for ds, k in datasets.items():
    if ds == "simple_superpos": continue
    for i in range(nr_runs_per_dataset):
        ex.run(config_updates={
            'dataset.name': ds, 
            'dataset.train_set': 'train_multi',
            'em.k': k,
Code example #39
File: optimizer.py  Project: carlespoles/GalvanizeU
import psutil
import time
import select
import pickle
import sys
import numpy as np
from numpy.random import rand
from sacred import Experiment
from sacred.observers import MongoObserver
from pprint import pprint

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
python_version = sys.version_info.major

ex = Experiment()
mongo_observer = MongoObserver.create()
ex.observers.append(mongo_observer)
ex.add_config('optimizer_config.yaml')

# Configure your logger here
logging = logger.getLogger('hyper_opt')
ex.logger = logging

class Optimizer(object):
    """Neural Network Hyperparameter Optimizer Class.
    """

    def __init__(self, config):
        """Optimize class object initialization

        Args:
Code example #40
from torch.utils.data import DataLoader
from torchvision import transforms

import mnist_loss
from mnist_loss import MultitaskMnistLoss
from mnist_model import MultitaskMnistModel

ex = sacred.Experiment()

config_updates, _ = get_config_updates(sys.argv)

# Disable saving to mongo using "with save_to_db=False"
if ("save_to_db" not in config_updates) or config_updates["save_to_db"]:
    # Server disabled, credentials useless.
    mongo_observer = MongoObserver.create(
        url='mongodb://*****:*****@134.209.21.201/admin?retryWrites=true',
        db_name='multitask-learning')
    ex.observers.append(mongo_observer)
else:
    ex.observers.append(FileStorageObserver.create('multitask_results'))


@ex.config
def config():
    """Default config values."""
    # Allows us to filter to mnist results only in sacredboard.
    mnist = 1
    # Whether to use the standard MNIST or FashionMNIST dataset, and so what type of classifcation task to perform.
    # See mnist_loss._labels_to_1()
    mnist_type = 'numbers'
    max_epochs = 100
Code example #41
import math
import datetime
from sacred import Experiment

from sacred.observers import MongoObserver
import fastai

import news_utils.fastai
from bpemb import BPEmb

import news_utils.clean.german

EX_PA = Path('/mnt/data/group07/johannes/ompc/pplmexp_short4')

ex = Experiment('shortppompclm4')
ex.observers.append(MongoObserver.create(db_name='shortppompclm4'))


@ex.config
def my_config():
    bs = 128
    epochs_start = 0
    epochs = 5  #math.ceil(random.uniform(1, 3))
    drop_mult = 0.5  #random.uniform(1, 2)
    exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
    model_id = '2019_ 3_27_14_30_09_921754'  # best model after 5 epochs


@ex.main
def my_main(epochs, drop_mult, exp_id, bs, epochs_start, model_id):
    ex.info['path'] = EX_PA
Code example #42
File: RRFexp.py  Project: JaredChung/kaggle-telstra
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.ensemble import RandomForestClassifier as RF
from src.refined_rf import RefinedRandomForest as RRF
from sklearn.cross_validation import StratifiedKFold
from src.telstra_data import TelstraData, multiclass_log_loss
from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment('refined_random_forest')
ex.observers.append(MongoObserver.create(db_name = "telstra"))

@ex.config
def my_config():
    series = "RRF"
    n_folds = 10
    featureparams = {"location_min_count": 0,
                     "n_common_events":20,
                     "n_common_log_features":40,
                     "n_common_resources":5,
                     "n_label_encoded_events":4,
                     "n_label_encoded_log_features":4}
    aggregateparams = {"loc_agg_prior_weight":3.0}
    include = []
    exclude = []
    clfparams = {'n_estimators': 200,
    'criterion': 'gini',
    'max_depth': None,
    'min_samples_split': 2,
Code example #43
File: biagioni.py  Project: renj/TrajMap
# write to console
ch = logging.StreamHandler()  
ch.setLevel(logging.DEBUG)  
  
# Handler format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -\n\t%(message)s')  
fh.setFormatter(formatter)  
ch.setFormatter(formatter)  
  
logger.addHandler(fh)
logger.addHandler(ch)
  

ex = Experiment('Biagioni')
ex.logger = logger
ex.observers.append(MongoObserver.create(url='10.60.43.110:27017', db_name='Biagioni'))

@ex.config
def cfg():
    # data_file = "../../Data/Shanghai/minsh_1000_biagioni"
    data_file = "../Data/Chicago/all_trips"


@ex.automain
def main(data_file, side, k, percent, width, alpha, _log, _run):
    _log.info('data_file: %s' % (data_file))


    _run.info['time'] = {}

    total_time = 0