Пример #1
0
def run_experiments(config):
    """Run the configured policy x dataset experiment grid.

    For each policy in ``config.experiment.policy_set`` every dataset in
    ``config.experiment.dataset_set`` is run once.  AUC summaries are
    appended to ``auc.res`` / ``auc_median.res`` under the configured
    result path (truncated up front), and per-metric output is streamed
    to ``metric.log``.
    """
    out_dir = config.experiment.result_path
    auc_path = out_dir + "auc.res"
    median_path = out_dir + "auc_median.res"

    auc_log = MLogger.CAUCLogger(order=config.experiment.dataset_set)
    auc_log.write(auc_path, append=False)  # truncate any previous results
    median_log = MLogger.CAUCLogger(order=config.experiment.dataset_set)
    median_log.write(median_path, append=False)

    with open(out_dir + "metric.log", "w") as metrics:
        for policy in config.experiment.policy_set:
            for dataset in config.experiment.dataset_set:
                # Fresh environment (and RNG) per run for reproducibility.
                env = MConfig.CEnvironment(config=config)
                env.experiment.random = np.random.RandomState(config.experiment.rand_seed)
                env.experiment.policy_name = policy
                env.experiment.dataset_name = dataset
                env.experiment.dataset = data.load_data(dataset, env.experiment.random, config.data.max_sz)
                env.experiment.data_dim = env.experiment.dataset.all_data[0].x.shape[0]

                #todo: remove following fields
                env.dataset = env.experiment.dataset
                config.example_dim = env.experiment.data_dim
                config.q0 = config.data.q0

                experiments.run_experiments(
                    config, env,
                    auclogger=auc_log, medianauc_logger=median_log,
                    log_file=metrics)
            # Flush and reset the accumulated AUC results once per policy.
            auc_log.write(auc_path, append=True)
            auc_log.clear()
            median_log.write(median_path, append=True)
            median_log.clear()
Пример #2
0
def main():
    """CLI entry point: run all experiments or a single named one.

    Raises:
        ValueError: if the requested experiment name is not recognized.
    """
    args = parse_args()
    if args.all:
        print("Running all experiments")
        run_experiments(all=True)
        return
    # Normalize once so validation and dispatch agree on the same value
    # (previously the raw, possibly upper-case string was passed through).
    experiment = args.e.lower()
    if experiment not in ('ada', 'dt', 'knn', 'nn', 'svm', 'all'):
        # bug fix: 'all' is accepted by the check but was missing from
        # the error message.
        raise ValueError(
            "Invalid experiment, please select from following: "
            "ada, dt, knn, nn, svm, all"
        )
    run_experiments(experiment=experiment)
Пример #3
0
def run_experiments(dataset_set, paras_set, algos, max_sz, result_folder, \
        batch_sz, batch_rate, label_budget=None, \
        debug=False):
    """Run every (parameter-set, dataset) combination and record AUC results.

    Creates ``result_folder`` if needed, backs up the current code there,
    truncates ``auc.res``, and streams metric output to ``metric.log``.
    AUC results are flushed to disk once per parameter set.
    """
    if not os.path.isdir(result_folder):
        os.makedirs(result_folder)
    utils.backup_code(result_folder)  # snapshot code alongside results

    auc_path = result_folder + "auc.res"
    logger = MLogger.CAUCLogger(order=dataset_set)
    logger.write(auc_path, append=False)  # start from an empty results file

    with open(result_folder + "metric.log", "w") as metric_log:
        for paras_name in paras_set:
            for ds_name in dataset_set:
                run_prefix = result_folder + ds_name + "-" + paras_name
                experiments.run_experiments(
                    data.load_data(ds_name, max_sz), paras_name,
                    ds_name, algos, run_prefix,
                    batch_sz, batch_rate,
                    auclogger=logger, log_file=metric_log,
                    label_budget=label_budget, debug=debug)
            # Persist and reset the logger after each parameter set.
            logger.write(auc_path, append=True)
            logger.clear()
Пример #4
0
# -*- coding: utf-8 -*-
# Script: train/evaluate an LVQMLN classifier on the pre-scaled FLC dataset.
import numpy as np
from lvqmln import LVQMLN
from experiments import run_experiments

# Import data
# Each row is a sample; the last column is the label, the rest are features.
train = np.load('./data/flc_scaled_train.npy')
test = np.load('./data/flc_scaled_test.npy')

x_train = train[:, :-1].astype('float32')
y_train = train[:, -1].astype('float32')
x_test = test[:, :-1].astype('float32')
y_test = test[:, -1].astype('float32')

# Build model
# Classes are derived from the distinct labels present in the training set.
clf = LVQMLN(x_train.shape[1], 3, len(set(y_train)), list(set(y_train)))

# fname's '{}' placeholder is presumably filled in per run by
# run_experiments — TODO confirm against its implementation.
run_experiments(
    fname='flc_3_10_{}.pth',
    clf=clf,
    x_train=x_train,
    y_train=y_train,
    x_test=x_test,
    y_test=y_test,
    p_list=[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0],
    n=1000,
)
    base_fn = base_dir + run
    log_fn = base_fn + ".log"

    # Run experiments
    if not only_evaluation:
        experiments.run_experiments(
            N,
            D,
            Ms,
            max_variance,
            variance_decay_rate,
            mu_beta,
            sigma_beta,
            n_random_iters,
            base_fn,
            log_fn,
            n_reps,
            plot_2D=False,
            include_bias=False,
            include_sampling=include_sampling,
            include_advi=include_advi,
            include_fast_sampling=include_fast_sampling,
            sketch=sketch,
            ftol=ftol,
            full_sm=full_sm,
            fast_sm=fast_sm)

    ### Evaluation
    approximation_methods = ["Laplace", "Diagonal_Laplace", "Prior"]

    if include_sampling:
Пример #6
0
# Script fragment: run the sampling/ADVI experiment suite for one
# dimensionality D, with a standard-normal prior over beta.

# set prior over beta
print("\n\nBeginning experiments for D=%d" % D)
mu_beta, sigma_beta = np.zeros(D, dtype=np.float64), np.ones(D,
                                                             dtype=np.float64)
# Encodes the run configuration in the output file names.
run = "D=%05d-MaxVar=%0.02f-VarDecayRate=%0.02f-SVDnIters=%02d-N=%05d" % (
    D, max_variance, variance_decay_rate, n_random_iters, N)

base_fn = base_dir + run
log_fn = base_fn + ".log"

# Run experiments and plot
# bug fix: removed leftover `import ipdb; ipdb.set_trace()` — a debugging
# breakpoint that halted every run (and crashed non-interactive runs) here.
# NOTE(review): include_fast_sampling is tied to include_sampling below,
# while other call sites pass a separate include_fast_sampling flag —
# confirm this aliasing is intended.
experiments.run_experiments(N,
                            D,
                            Ms,
                            max_variance,
                            variance_decay_rate,
                            mu_beta,
                            sigma_beta,
                            n_random_iters,
                            base_fn,
                            log_fn,
                            n_reps=1,
                            plot_2D=True,
                            include_bias=include_bias,
                            include_sampling=include_sampling,
                            include_fast_sampling=include_sampling,
                            regenerate_data=False)
Пример #7
0
def main(_):
    """Entry point: build the target/arch model, validate the requested
    action against the supplied checkpoint dirs, and dispatch to the
    matching train/test/RL/baseline/experiment routine.

    The leading underscore argument follows the tf.app.run convention of
    passing leftover argv, which this function ignores.
    """
    ## import main model ##
    # Model classes are registered in MODEL keyed by "<target>_<arch>".
    main_model_name = '{}_{}'.format(args.target, args.arch)
    if main_model_name in MODEL:
        MainModel = MODEL[main_model_name]
    else:
        raise Exception("Unsupported target-arch pair!")

    ## create save dir ##
    # The save location depends on the action first, then on the target.
    if args.action == 'baseline':
        save_dir = os.path.join('save_baseline',
                                '{}_{}'.format(args.arch, '_'.join(args.task)))
        # Only the baseline action pre-creates a record/ subdirectory here.
        if not os.path.exists(save_dir + '/record'):
            os.makedirs(save_dir + '/record', exist_ok=True)
    elif args.action == 'experiments_rl':
        save_dir = os.path.join('save_experiments_rl',
                                '{}_{}'.format(args.arch, '_'.join(args.task)))
    elif args.action == 'experiments_nonrl':
        save_dir = os.path.join('save_experiments_nonrl',
                                '{}_{}'.format(args.arch, '_'.join(args.task)))
    elif args.target == 'lm':
        # Language models are task-independent, so the task list is omitted.
        save_dir = os.path.join('save', '{}_{}'.format(args.target, args.arch))
    else:
        save_dir = os.path.join(
            'save', '{}_{}_{}'.format(args.target, args.arch,
                                      '_'.join(args.task)))
    args.save_dir = save_dir

    ## data set ##
    # 'all' expands to the 20 bAbI tasks; otherwise task ids are parsed as ints.
    if 'all' in args.task:
        args.task = list(range(1, 21))
    else:
        args.task = [int(i) for i in args.task]
    train, test, words, args.story_size, args.sentence_size, args.question_size = read_babi(
        args.task, args.batch_size, args.target == 'expert')
    # Validation split is carved out of the training set in place.
    val = train.split_dataset(args.val_ratio)
    print("training count: {}".format(train.count))
    print("testing count: {}".format(test.count))
    print("word2idx:", len(words.word2idx))
    print("idx2word:", len(words.idx2word))
    print("story size: {}".format(args.story_size))
    print("sentence size: {}".format(args.sentence_size))
    print("question size: {}".format(args.question_size))

    ## create params ##
    # Freeze the argparse namespace into an immutable namedtuple so later
    # overrides must go through params._replace.
    params_dict = vars(args)
    params_class = namedtuple('params_class', params_dict.keys())
    params = params_class(**params_dict)

    ## check target-action-dirs ##
    # Each target/action combination enforces which checkpoint dirs must
    # (or must not) be supplied, and binds the appropriate process.
    # NOTE(review): train_process/test_process are only bound on the
    # branches below that use them; the 'rl' and experiment actions
    # intentionally bind neither (they dispatch directly further down).
    if params.target == 'lm':
        if params.action == 'train':
            if params.load_dir or params.expert_dir or params.lm_dir:
                raise Exception("No dir needed while training %s model!" %
                                params.target)
            train_process = train_lm
        elif params.action == 'test':
            if not params.load_dir:
                raise Exception("Need a trained %s model to test!" %
                                params.target)
            if params.expert_dir or params.lm_dir:
                raise Exception(
                    "No other dirs needed while testing %s model!" %
                    params.target)
            test_process = test_lm
        else:
            raise Exception("Unsupported action for %s!" % params.target)
    elif params.target == 'expert':
        if params.action == 'train':
            if params.load_dir or params.expert_dir or params.lm_dir:
                raise Exception("No dir needed while training %s model!" %
                                params.target)
            train_process = train_normal
        elif params.action == 'test':
            if not params.load_dir:
                raise Exception("Need a trained %s model to test!" %
                                params.target)
            if params.expert_dir or params.lm_dir:
                raise Exception(
                    "No other dirs needed while testing %s model!" %
                    params.target)
            test_process = test_normal
        else:
            raise Exception("Unsupported action for %s!" % params.target)
    elif params.target == 'learner':
        # The learner always depends on both auxiliary models (expert + lm).
        if params.action == 'train':
            if params.load_dir:
                raise Exception(
                    "Learner in %s mode can only be trained from scratch!" %
                    params.action)
            if (not params.expert_dir) or (not params.lm_dir):
                raise Exception(
                    "Two auxiliary models are needed in learner %s mode! " %
                    params.action)
            train_process = train_normal
        elif params.action == 'test':
            if (not params.load_dir) or (not params.expert_dir) or (
                    not params.lm_dir):
                raise Exception("All dirs needed while in learner %s mode!" %
                                params.action)
            test_process = test_normal
        elif params.action == 'rl':
            # RL fine-tuning resumes from an existing learner checkpoint.
            if (not params.load_dir) or (not params.expert_dir) or (
                    not params.lm_dir):
                raise Exception("All dirs needed while in learner %s mode!" %
                                params.action)
        else:
            # Remaining actions (baseline / experiments_*) train from scratch
            # with both auxiliary models supplied.
            if params.load_dir:
                raise Exception(
                    "Learner in %s mode can only be trained from scratch!" %
                    params.action)
            if (not params.expert_dir) or (not params.lm_dir):
                raise Exception(
                    "Two auxiliary models are needed in learner %s mode! " %
                    params.action)

    ## load params from load_dir ##
    # Resuming: the saved params override the CLI ones (after compatibility
    # checks). Otherwise the save_dir is wiped and recreated.
    if not params.load_dir == '':
        params_filename = os.path.join(params.load_dir, 'params.json')
        load_params = load_params_dict(params_filename)
        if not load_params['task'] == params.task:
            raise Exception("incompatible task with load model!")
        if (not load_params['target']
                == params.target) or (not load_params['arch'] == params.arch):
            raise Exception("incompatible main model with load model!")
        params = params._replace(**load_params)
    else:
        # NOTE(review): tf.gfile is the TF1-era API (tf.io.gfile in TF2).
        if tf.gfile.Exists(params.save_dir):
            tf.gfile.DeleteRecursively(params.save_dir)
        os.makedirs(params.save_dir, exist_ok=True)

    ## load expert params from expert_dir ##
    # Expert runs in inference mode ('test') loaded from its own checkpoint.
    if not params.expert_dir == '':
        params_filename = os.path.join(params.expert_dir, 'params.json')
        load_params = load_params_dict(params_filename)
        if not load_params['task'] == params.task:
            raise Exception("incompatible task with expert model!")
        if not load_params['target'] == 'expert':
            raise Exception("dir contains no expert model!")
        expert_params = params._replace(action='test',
                                        load_dir=params.expert_dir,
                                        **load_params)
    else:
        expert_params = None

    ## load lm params from lm_dir ##
    # LM also runs in inference mode; num_steps=1 and the main batch size
    # are forced for step-wise scoring alongside the learner.
    if not params.lm_dir == '':
        params_filename = os.path.join(params.lm_dir, 'params.json')
        load_params = load_params_dict(params_filename)
        if not load_params['target'] == 'lm':
            raise Exception("dir contains no language model!")
        lm_params = params._replace(action='test',
                                    load_dir=params.lm_dir,
                                    lm_num_steps=1,
                                    lm_batch_size=params.batch_size,
                                    **load_params)
    else:
        lm_params = None

    ## run action ##
    if args.action == 'train':
        train_process(MainModel, params, expert_params, lm_params, words,
                      train, val)

    elif args.action == 'test':
        test_process(MainModel, params, expert_params, lm_params, words, test)

    elif args.action == 'rl':
        main_model = MainModel(words, params, expert_params, lm_params)
        main_model.rl_train(train, val)
        main_model.save_params()

    elif args.action == 'baseline':
        run_baseline(MainModel, params, expert_params, lm_params, words, train,
                     val)

    elif args.action == 'experiments_rl':
        run_experiments(MainModel,
                        params,
                        expert_params,
                        lm_params,
                        words,
                        train,
                        val,
                        RL=True)

    elif args.action == 'experiments_nonrl':
        run_experiments(MainModel,
                        params,
                        expert_params,
                        lm_params,
                        words,
                        train,
                        val,
                        RL=False)
Пример #8
0
# Script fragment: flatten/standardize MNIST-style data (x_train/x_test
# are defined earlier in the script) and run LVQMLN experiments.

# bug fix: np.product is a deprecated alias removed in NumPy 2.0;
# np.prod is the supported spelling with identical behavior.
INPUT_SHAPE = np.prod(x_train.shape[1:])
PROJECTION_DIM = x_train.shape[-1]
# Flatten each sample to a single feature vector.
x_train = x_train.reshape(-1, INPUT_SHAPE).astype('float32')
y_train = y_train.astype('float32')
x_test = x_test.reshape(-1, INPUT_SHAPE).astype('float32')
y_test = y_test.astype('float32')

# Standardize features using training-set statistics only.
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

# Build model
# Classes are derived from the distinct labels in the training set.
clf = LVQMLN(x_train.shape[1],
             PROJECTION_DIM,
             len(set(y_train)),
             list(set(y_train)),
             sigmoid_beta=1,
             swish_beta=0.1)

# fname's '{}' placeholder is presumably filled in per run by
# run_experiments — TODO confirm against its implementation.
run_experiments(
    fname=f'mnist_{PROJECTION_DIM}' + '_10_{}.pth',
    clf=clf,
    x_train=x_train,
    y_train=y_train,
    x_test=x_test,
    y_test=y_test,
    p_list=[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0],
    n=1000,
)
Пример #9
0
def main():
    """Entry point: run the full experiment suite with its defaults."""
    run_experiments()
# Dataset shuffle buffer size.
# NOTE(review): BUFFER_SIZE is defined here but the run_experiments call
# below hard-codes buffer_size=1000 — confirm which value is intended.
BUFFER_SIZE = 10000

# Epoch counts to sweep over in the experiment grid.
EPOCHS = [25, 50, 100]

# None lets the framework pick the GPU (or run on CPU) — TODO confirm.
_GPU_NUMBER = None

# Electric demand forecasting
run_experiments(
    train_file_name='data/hourly_20140102_20191101_train.csv',
    test_file_name='data/hourly_20140102_20191101_test.csv',
    result_file_name='files/results/experimental_results_electricity.csv',
    forecast_horizon=FORECAST_HORIZON,
    past_history_ls=PAST_HISTORY,
    batch_size_ls=BATCH_SIZE,
    epochs_ls=EPOCHS,
    tcn_params=TCN_PARAMS,
    lstm_params=LSTM_PARAMS,
    gpu_number=_GPU_NUMBER,
    metrics_ls=METRICS,
    buffer_size=1000,
    seed=1,
    show_plots=False,
    webhook=WEBHOOK,
    validation_size=0.)

# Electric vehicle power consumption forecasting
run_experiments(train_file_name='data/CECOVEL_train.csv',
                test_file_name='data/CECOVEL_test.csv',
                result_file_name='files/results/experimental_results_EV.csv',
                forecast_horizon=FORECAST_HORIZON,
                past_history_ls=PAST_HISTORY,