def main(argv):
    """Generate and dispatch neural-process meta-overfitting experiments.

    For each dataset named in ``FLAGS.datasets`` (comma-separated; only
    'sin' and 'cauchy' are supported), builds one launch command per
    hyper-parameter combination and then either submits each command as an
    LSF ``bsub`` job (when ``FLAGS.cluster`` is set) or runs them locally
    on ``FLAGS.n_workers`` asynchronous workers.  Always asks for an
    interactive 'yes' confirmation before launching anything.

    Raises:
        AssertionError: if a dataset other than 'sin'/'cauchy' is given.
    """
    from experiments.util import AsyncExecutor, generate_launch_commands
    import experiments.meta_overfitting_v2.neural_processes_overfitting_base

    command_list = []

    for dataset in FLAGS.datasets.split(','):
        # The number of context points shown per task is dataset-specific.
        if dataset == 'sin':
            n_context_samples = [5]
        elif dataset == 'cauchy':
            n_context_samples = [20]
        else:
            raise AssertionError('dataset must be either of [sin, cauchy]')

        # Each key maps to the list of values to sweep over; the Cartesian
        # product of all lists defines the experiment grid.
        exp_config = {
            'exp_name': ['meta-overfitting-v2-nps-%s' % dataset],
            'dataset': [dataset],
            'n_threads': [N_THREADS],
            'seed': list(range(30, 55)),
            'data_seed': [28],
            'weight_decay': [0.0, 1e-3, 1e-2, 1e-1, 2e-1, 4e-1],
            'r_dim': [256, 512],
            'n_iter_fit': [30000],
            'lr': [1e-3],
            'lr_decay': [0.97],
            'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512],
            'n_test_tasks': [200],
            'n_context_samples': n_context_samples,
            'n_test_samples': [100],
        }

        command_list.extend(
            generate_launch_commands(
                experiments.meta_overfitting_v2.
                neural_processes_overfitting_base, exp_config))

    if FLAGS.cluster:
        cluster_cmds = []
        for python_cmd in command_list:
            # Hash of the full command gives each job a unique stdout file.
            cmd_hash = hashlib.md5(python_cmd.encode()).hexdigest()

            bsub_cmd = 'bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out' \
                       ' -W 03:59'\
                       ' -R "rusage[mem=1048]"' \
                       ' -n %i '% (cmd_hash, N_THREADS)
            cluster_cmds.append(bsub_cmd + ' ' + python_cmd)
        answer = input(
            "About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n"
            % len(cluster_cmds))
        if answer == 'yes':
            for cmd in cluster_cmds:
                os.system(cmd)
    else:
        answer = input("About to run %i compute jobs locally on %i workers. "
                       "Proceed? [yes/no]\n" %
                       (len(command_list), FLAGS.n_workers))
        if answer == 'yes':
            # os.system can be passed directly; no lambda wrapper needed.
            executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
            executor.run(os.system, command_list)
def main(argv):
    """Build hyper-parameter-search launch commands for every algorithm in
    ``FLAGS.algos`` and submit them to the LSF cluster or run them one by
    one locally, after an interactive 'yes' confirmation."""

    def _dispatch(cmds):
        # In dry-run mode only print the commands instead of executing them.
        for cmd in cmds:
            if FLAGS.dry:
                print(cmd)
            else:
                os.system(cmd)

    search_modules = [algo_map_dict[name] for name in FLAGS.algos.split(',')]

    command_list = []
    for module in search_modules:
        config = {
            'dataset': ['cauchy_20', 'sin_20', 'physionet_0', 'physionet_2', 'swissfel'],
            'covar_module': ['NN'],
            'num_cpus': [2 * FLAGS.n_cpus],
            'metric': [FLAGS.metric],
        }
        # Optional flags are only added when set, so generated commands
        # omit them entirely otherwise.
        if FLAGS.load_analysis:
            config['load_analysis'] = [True]
        if FLAGS.resume:
            config['resume'] = [True]
        command_list += generate_launch_commands(module, config, check_flags=False)

    print(command_list)

    if FLAGS.cluster:
        # Shorter wall-clock limit suffices when only loading analyses.
        wall_hours = 3 if FLAGS.load_analysis else 23
        resource_flags = ' '.join([
            '-W %i:59' % wall_hours,
            '-R "rusage[mem=6000]"',
            '-R "rusage[ngpus_excl_p=%i]"' % FLAGS.n_gpus,
            '-R "span[hosts=1]"',
            '-n %i' % FLAGS.n_cpus,
        ])
        # The double space matches the original command text exactly.
        cluster_cmds = ['bsub ' + resource_flags + '  ' + python_cmd
                        for python_cmd in command_list]

        answer = input("About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n" % len(cluster_cmds))
        if answer == 'yes':
            _dispatch(cluster_cmds)
    else:
        answer = input("About to run %i compute jobs in a for loop. Proceed? [yes/no]\n" % len(command_list))
        if answer == 'yes':
            _dispatch(command_list)
N_THREADS = 1

# Hyper-parameter grid for the swissfel meta-overfitting sweep; each key
# maps to the list of values crossed into individual launch commands.
exp_config = {
    'exp_name': ['meta-overfitting-swissfel'],
    'dataset': ['swissfel'],
    'n_threads': [N_THREADS],
    'seed': [31, 32, 33, 34, 35],
    'weight_decay': list(np.arange(0.05, 1.1, step=0.05)),
    'covar_module': ['NN'],
    'mean_module': ['NN'],
    'num_layers': [4],
    'layer_size': [32],
    'n_iter_fit': [30000],
}

command_list = generate_launch_commands(experiments.meta_GPR_mll_base_exp, exp_config)

if cluster:
    submit_cmds = []
    for py_cmd in command_list:
        # Hash of the full command gives each job a unique stdout file.
        job_hash = hashlib.md5(py_cmd.encode()).hexdigest()
        bsub_prefix = ('bsub -oo /cluster/project/infk/krause/rojonas/stdout/'
                       'gp-priors/meta-overfitting/%s.out'
                       ' -W 3:59'
                       ' -R "rusage[mem=8048]"'
                       ' -n %i ' % (job_hash, N_THREADS))
        submit_cmds.append(bsub_prefix + ' ' + py_cmd)

    answer = input("About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n" % len(submit_cmds))
    if answer == 'yes':
        for cmd in submit_cmds:
            os.system(cmd)
NUM_CPUS = 20

# Collect one launch command per (module, config combination) for the
# three meta-learning hyper-parameter searches.
command_list = []
for search_module in (meta_mll_hparam, meta_svgd_hparam, meta_vi_hparam):
    grid = {
        'dataset':
        ['cauchy_20', 'sin_20', 'physionet_0', 'physionet_2', 'swissfel'],
        'covar_module': ['NN'],
        'num_cpus': [NUM_CPUS],
    }
    command_list.extend(
        generate_launch_commands(search_module, grid, check_flags=False))

print(command_list)

if CLUSTER:
    cluster_cmds = []
    for python_cmd in command_list:
        bsub_cmd = 'bsub' \
                   ' -W 23:00'\
                   ' -R "rusage[mem=4500]"' \
                   ' -R "rusage[ngpus_excl_p=1]"' \
                   ' -n %i '% (NUM_CPUS)
        cluster_cmds.append(bsub_cmd + ' ' + python_cmd)

    answer = input(