def main(args):
    # Create a new database
    db = sql.db('%s?table=%s' % (args.database, args.table_name))

    # Create a jobman state
    state = DD()

    # Loop over the search space and schedule jobs
    config_generator = gen_configurations(args.configurations)
    for i, params in enumerate(config_generator):
        # Complete parameters dictionary and add to the state
        state.parameters = params
        state.parameters['model'] = args.model
        state.parameters['dataset'] = args.dataset
        state.parameters['nb_epoch'] = args.n_epochs
        state.parameters['batch_size'] = args.batch_size

        # Insert the job into the database
        if args.model == 'ADIOS':
            state.parameters['labels_order'] = args.labels_order
            state.parameters['n_label_splits'] = args.n_label_splits
            for split_idx in xrange(args.n_label_splits):
                state.parameters['label_split'] = split_idx + 1
                sql.insert_job(train_adios, flatten(state), db)
        else:  # args.model == 'MLP'
            sql.insert_job(train_mlp, flatten(state), db)

    # Create a view for the new database table
    db.createView(args.table_name + '_view')
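
# A minimal, hedged sketch of what flatten() does to the nested state built
# above: nested DD/dict keys are joined with dots, which is the flat form that
# sql.insert_job() stores. (The output shown in the comment is illustrative,
# not captured from a real run.)
from jobman.tools import DD, flatten

state = DD()
state.parameters = DD()
state.parameters['model'] = 'MLP'
state.parameters['batch_size'] = 128
print flatten(state)
# -> {'parameters.model': 'MLP', 'parameters.batch_size': 128}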
Example #2
def insert(self, v=-1):
    print self.dbpath
    _db = db(self.dbpath)
    j = 0
    self.jobs = [DD(dic) for dic in self.sampler.create_hp(v)]
    for i, _job in enumerate(self.jobs):
        _job.update({EXPERIMENT: str(self.exppath)})
        insert_dict(_job, _db)
        j += 1
        print 'inserting job: ' + str(_job)
        if i == v:
            break
    print 'Inserted', j, 'jobs in', self.dbpath
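
# A self-contained, hedged sketch of the insert() pattern above: a plain list
# of dicts stands in for self.sampler.create_hp(v), which is project-specific,
# and the import locations below are an assumption (the original excerpt gets
# db, insert_dict and EXPERIMENT from elsewhere).
from jobman.tools import DD
from jobman.sql import db, insert_dict, EXPERIMENT

_db = db('postgres://user@host/my_db?table=my_table')  # hypothetical URL
hyperparams = [{'lr': 0.01, 'nhid': 100}, {'lr': 0.001, 'nhid': 200}]
for dic in hyperparams:
    job = DD(dic)
    # EXPERIMENT is the key naming the callable the jobman worker should run
    job.update({EXPERIMENT: 'my_module.my_experiment'})
    insert_dict(job, _db)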
Example #3
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    logger.info("Getting dataset info for %s" % dataset_name)
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + dataset_name)
    mask_file = path.join(data_path, "mask.npy")
    mask = np.load(mask_file)
    input_dim = (mask == 1).sum()
    if input_dim % 2 == 1:
        input_dim -= 1
    mri = MRI.MRI_Standard(which_set="full",
                           dataset_name=dataset_name,
                           unit_normalize=True,
                           even_input=True,
                           apply_mask=True)
    variance_map_file = path.join(data_path, "variance_map.npy")
    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in jg.nested_generator(
            jg.hidden_generator("nhid1", 1),
            jg.hidden_generator("nhid2", 1),
    ):

        state = DD()
        experiment_hyperparams = mlp_experiment.default_hyperparams(input_dim)

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value

        experiment_hyperparams["dataset_name"] = dataset_name

        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   flatten(experiment_hyperparams).values())))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        state.pid = 0

        sql.insert_job(mlp_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
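
# The inner loop over (key, value) pairs above walks dotted keys such as
# "nhid1" or "encoder.nhid" down into the nested hyperparameter dict. The same
# idea as a standalone helper (set_nested is hypothetical, not part of jobman):
def set_nested(d, dotted_key, value):
    keys = dotted_key.split(".")
    entry = d
    for k in keys[:-1]:
        entry = entry[k]
    entry[keys[-1]] = value

params = {"encoder": {"nhid": 100}, "weight_decay": {"coeff": 0.1}}
set_nested(params, "encoder.nhid", 200)
set_nested(params, "weight_decay.coeff", 0.01)
# params is now {"encoder": {"nhid": 200}, "weight_decay": {"coeff": 0.01}}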
Example #4
'''
  Script to add more targeted jobs into the db
'''

from jobman.tools import DD
from jobman import sql
import numpy

# import types of architectures we want to use

if __name__ == '__main__':

    TABLE_NAME = 'nips_2013_test_inpainting001'

    db = sql.db(
        'postgres://*****:*****@opter.iro.umontreal.ca/pascanur_db/' +
        TABLE_NAME)
    #import ipdb; ipdb.set_trace()
    #db = sql.postgres_serial( user = '******',
    #        password='******',
    #        host='gershwin',
    #        port = 5432,
    #        database='pascanur_db',
    #        table_prefix = TABLE_NAME)

    state = DD()

    ## DEFAULT VALUES ##
    state['jobman.experiment'] = 'run_dbm_inpainting.jobman'
    state['path'] = '/RQexec/pascanur/data/mnist.npz'
    state['samebatch'] = 1
Example #5
from jobman.tools import DD
from jobman import sql

# import types of architectures we want to use
import daa
import main
import mlp
import numpy


if __name__=='__main__':

    TABLE_NAME='dragonball_cifarmlp001'

    db = sql.db('postgres://*****:*****@gershwin.iro.umontreal.ca/pascanur_db/'+TABLE_NAME)
    #db = sql.postgres_serial( user = '******',
    #        password='******',
    #        host='colosse1',
    #        port = 5432,
    #        database='pascanur_db',
    #        table_prefix = TABLE_NAME)

    state = DD()

    ## DEFAULT VALUES ##
    state['jobman.experiment'] = 'main.main'
    n_jobs = 0
    state['data'] = '/home/pascanur/data/mnist.npz'
    state['profile'] = 1
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    logger.info("Getting dataset info for %s%s"
                % (dataset_name, ", transposed" if args.transposed else ""))
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)

    if args.transposed:
        logger.info("Data in transpose...")
        mri = MRI.MRI_Transposed(dataset_name=args.dataset_name,
                                 unit_normalize=True,
                                 even_input=True,
                                 apply_mask=True)
        input_dim = mri.X.shape[1]
        variance_map_file = path.join(data_path, "transposed_variance_map.npy")
    else:
        mask_file = path.join(data_path, "mask.npy")
        mask = np.load(mask_file)
        input_dim = (mask == 1).sum()
        if input_dim % 2 == 1:
            input_dim -= 1
        mri = MRI.MRI_Standard(which_set="full",
                               dataset_name=args.dataset_name,
                               unit_normalize=True,
                               even_input=True,
                               apply_mask=True)
        variance_map_file = path.join(data_path, "variance_map.npy")

    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in nested_generator(
            layer_depth_generator("encoder.layer_depths", xrange(4, 6), 5),
            hidden_generator("encoder.nhid", 4),
            float_generator("weight_decay.coeffs.z", 3, 0.1, 0.001,
                            log_scale=True)):
        # logger.info("Adding NICE experiment with hyperparameters %s" % (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams(input_dim)
        if args.transposed:
            experiment_hyperparams["data_class"] = "MRI_Transposed"
        if args.logistic:
            experiment_hyperparams["prior"]["__builder__"] =\
                "nice.pylearn2.models.nice.StandardLogistic"

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list) else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%d"
                                      % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(
            nice_experiment.experiment,
            flatten(state),
            db
            )

    db.createView("%s_view" % args.table)
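
# The hash above gives each hyperparameter combination a stable-ish directory
# name. Lists are not hashable, so they are converted to tuples before building
# the frozenset; a minimal sketch of that trick on a plain flat dict (note that
# Python's hash() is only an identifier, not a cryptographic digest):
flat = {"encoder.nhid": 200, "encoder.layer_depths": [3, 5, 5, 5, 3]}
hashable_values = [tuple(v) if isinstance(v, list) else v for v in flat.values()]
h = abs(hash(frozenset(flat.keys() + hashable_values)))
print "unique-ish id:", h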
Example #7
def load_experiments(args):
    dataset_name = args.dataset_name

    # Load the database and table.
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    # Don't worry about this yet.
    input_handler = InputHandler()

    # For generating models, we use a special set of jobman generators, made
    # for convenience.
    for items in jg.nested_generator(
            jg.float_generator("learning_rate",
                               3,
                               0.01,
                               0.0001,
                               log_scale=True),
            jg.list_generator("nhid", [50, 100, 200, 300]),
    ):

        logger.info("Adding RBM experiment across hyperparameters %s" %
                    (items, ))
        state = DD()

        # Load experiment hyperparams from experiment
        experiment_hyperparams = experiment.default_hyperparams()

        # Set them with values in our loop.
        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry, (
                "Key not found in hyperparams: %s" % split_keys[-1])
            entry[split_keys[-1]] = value

        # Set the dataset name
        experiment_hyperparams["dataset_name"] = dataset_name

        # Get the input dim and variance map. Don't worry about variance maps right now,
        # they aren't used here.
        input_dim, variance_map_file = input_handler.get_input_params(
            args, experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))

        # Set the input dimensionality by the data
        experiment_hyperparams["nvis"] = input_dim

        # Set the minimum learning rate relative to the initial learning rate.
        experiment_hyperparams["min_lr"] = \
            experiment_hyperparams["learning_rate"] / 10

        # Make a unique hash for experiments. Remember that lists, dicts, and other data
        # types may not be hashable, so you may need to do some special processing. In
        # this case we convert the lists to tuples.
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list)
                                    else v
                                    for v in flatten(experiment_hyperparams).values()])))

        # Save path for the experiments. In this case we are sharing a directory in my
        # export directory so IT can blame me.
        save_path = serial.preprocess(
            "/export/mialab/users/dhjelm/pylearn2_outs/rbm_demo/%d" % h)

        # We save file params separately as they aren't model specific.
        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        user = path.expandvars("$USER")
        state.created_by = user

        # Finally we add the experiment to the table.
        sql.insert_job(experiment.experiment, flatten(state), db)

    # A view can be used when querying the database using psql. May not be needed in future.
    db.createView("%s_view" % args.table)
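
# Hedged sketch of what the jg.nested_generator(...) loop above appears to
# yield: the cartesian product of the sub-generators, one (key, value) pair per
# sub-generator. The *_generator helpers below are simplified stand-ins for the
# jobman generators module (e.g. log_scale spacing is ignored), written only to
# illustrate the iteration pattern.
from itertools import product

def my_float_generator(key, num, start, finish):
    # num evenly spaced values between start and finish (no log scale here)
    step = (finish - start) / float(max(num - 1, 1))
    return [(key, start + i * step) for i in range(num)]

def my_list_generator(key, values):
    return [(key, v) for v in values]

for items in product(my_float_generator("learning_rate", 3, 0.01, 0.0001),
                     my_list_generator("nhid", [50, 100, 200, 300])):
    print dict(items)   # e.g. {'learning_rate': 0.01, 'nhid': 50}
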
def load_experiments(args):

    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:"
        "%(port)d/%(database)s?table=%(table)s" % {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })
    input_handler = InputHandler()

    for items in jg.nested_generator(
            jg.list_generator(
                "encoder.layer_depths",
                [[3, 5, 5, 5, 3], [5, 5, 5, 5, 5], [2, 4, 4, 2]]),
            jg.list_generator("variance_normalize", [False, 2]),
            jg.float_generator("weight_decay.coeff",
                               4,
                               0.1,
                               0.0001,
                               log_scale=True),
            jg.list_generator("prior.__builder__", [
                "nice.pylearn2.models.nice.StandardNormal",
                "nice.pylearn2.models.nice.StandardLogistic"
            ])):

        logger.info("Adding NICE experiment across hyperparameters %s" %
                    (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams()

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry,\
                ("Key not found in hyperparams: %s, "
                 "found: %s" % (split_keys[-1], entry.keys()))
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        input_dim, variance_map_file = input_handler.get_input_params(
            args, experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))
        experiment_hyperparams["nvis"] = input_dim
        experiment_hyperparams["encoder"]["nvis"] = input_dim

        h = abs(hash(frozenset(
            flatten(experiment_hyperparams).keys() +\
            [tuple(v) if isinstance(v, list)
             else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(nice_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
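
# The insert_job calls above register nice_experiment.experiment as the
# callable a jobman worker will later run. As I understand jobman's convention
# (treat the signature and channel usage as an assumption), that callable gets
# the re-nested state plus a channel object and signals completion through it:
def experiment(state, channel):
    # hypothetical body: read back the values stored by load_experiments()
    hyperparams = state.hyper_parameters
    save_path = state.file_parameters["save_path"]
    # ... build the model from hyperparams, train, save checkpoints under
    # save_path, and optionally write results back into state ...
    return channel.COMPLETE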
Example #9
from jobman.tools import DD, flatten
from jobman import sql
from DARPAscript import NLPSDAE

db = sql.db(
    'postgres://[email protected]/glorotxa_db/opentablegpu'
)  # you should change this line to match the database you need

state = DD()

state.act = ['tanh']
state.depth = 1
state.n_hid = [5000]

state.noise = ['binomial_NLP']

state.weight_regularization_type = 'l2'
state.weight_regularization_coeff = [0.0, 0.0]
state.activation_regularization_type = 'l1'

# Number of pretraining epochs, one entry per layer
state.nepochs = [30]

# Different validation runs
#        - 100 training examples (x20 different samples of 100 training examples)
#        - 1000 training examples (x10 different samples of 1000 training examples)
#        - 10000 training examples (x1 different sample of 10000 training examples)
# (because of jobman, the keys have to be strings, not ints)
# NOTE: Probably you don't want to make trainsize larger than 10K,
# because it will be too large for CPU memory.
state.validation_runs_for_each_trainingsize = {"100": 20, "1000": 10, "10000": 1}
Example #10
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    logger.info("Getting dataset info for %s%s" %
                (dataset_name, ", transposed" if args.transposed else ""))
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)

    if args.transposed:
        logger.info("Data in transpose...")
        mri = MRI.MRI_Transposed(dataset_name=args.dataset_name,
                                 unit_normalize=True,
                                 even_input=True,
                                 apply_mask=True)
        input_dim = mri.X.shape[1]
        variance_map_file = path.join(data_path, "transposed_variance_map.npy")
    else:
        mask_file = path.join(data_path, "mask.npy")
        mask = np.load(mask_file)
        input_dim = (mask == 1).sum()
        if input_dim % 2 == 1:
            input_dim -= 1
        mri = MRI.MRI_Standard(which_set="full",
                               dataset_name=args.dataset_name,
                               unit_normalize=True,
                               even_input=True,
                               apply_mask=True)
        variance_map_file = path.join(data_path, "variance_map.npy")

    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in nested_generator(
            layer_depth_generator("encoder.layer_depths", xrange(4, 6), 5),
            hidden_generator("encoder.nhid", 4),
            float_generator("weight_decay.coeffs.z",
                            3,
                            0.1,
                            0.001,
                            log_scale=True)):
        # logger.info("Adding NICE experiment with hyperparameters %s" % (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams(input_dim)
        if args.transposed:
            experiment_hyperparams["data_class"] = "MRI_Transposed"
        if args.logistic:
            experiment_hyperparams["prior"]["__builder__"] =\
                "nice.pylearn2.models.nice.StandardLogistic"

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list) else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(nice_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
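
# Both branches above build paths with environment-variable expansion
# (serial.preprocess for ${PYLEARN2_NI_PATH}, path.expandvars for $USER). A
# plain-Python sketch of the same idea without pylearn2, using a hypothetical
# output directory:
import os
from os import path

data_path = path.expandvars("${PYLEARN2_NI_PATH}/my_dataset")   # hypothetical dataset name
out_dir = path.join(path.expandvars("$HOME"), "pylearn2_outs", "12345")
if not path.isdir(out_dir):
    os.makedirs(out_dir)
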
from jobman import api0, sql, expand
from jobman.tools import DD, flatten

# Experiment function
import my_script

# Database
TABLE_NAME = 'JULIENS_023_DAE'
db = sql.db('postgres://[email protected]/ift6266h12_db/' +
            TABLE_NAME)

# Default values
state = DD()
state['jobman.experiment'] = 'my_script.execute'

# Parameters
learning_rate_values = [0.005, 0.01]
nb_hidden_values = [575, 600, 625]
corrupt_levels = [0.1, 0.4, 0.5]
n_comp_trans_pca = [10, 11, 12, 15, 17]

state['tied_weights'] = True
state['max_epochs'] = 50
state['work_dir'] = ('/data/lisa/exp/ift6266h12/juliensi/repos/'
                     'JulienS-ift6266h12/ift6266h12/results/avicenna/')

for learning_rate in learning_rate_values:
    for nhid in nb_hidden_values:
        for corruption_level in corrupt_levels:
            for n_components_trans_pca in n_comp_trans_pca:
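
# (The snippet above is cut off inside the grid loops.) Judging from the other
# examples in this collection, such loops usually end by copying the swept
# values into the state and inserting one row per combination; a hedged sketch
# of that ending, with hypothetical key names, kept in comments because the
# original loop body is not shown:
#
#                 state['learning_rate'] = learning_rate
#                 state['nb_hidden'] = nhid
#                 state['corruption_level'] = corruption_level
#                 state['n_components_trans_pca'] = n_components_trans_pca
#                 sql.insert_dict(flatten(state), db)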
Example #12
#!/usr/bin/python

from jobman.tools import DD, flatten
from jobman import sql
#from DARPAscript import NLPSDAE
from DARPAscript_simplified import NLPSDAE

#db = sql.db('postgres://[email protected]/glorotxa_db/opentablegpu') # you should change this line to match the database you need
#db = sql.db('postgres://[email protected]/ift6266h10_sandbox_db/opentablegpu')
db = sql.db('postgres://*****:*****@gershwin.iro.umontreal.ca/ift6266h10_sandbox_db/opentablegpu')


state = DD()

state.act = ['tanh']
state.depth = 1

state.noise = ['gaussian']

state.weight_regularization_type = 'l2'
state.weight_regularization_coeff = [0.0,0.0]
state.activation_regularization_type = 'l1'

# Number of pretraining epochs, one entry per layer
state.nepochs = [128]

# Different validation runs
#        - 100 training examples (x20 different samples of 100 training examples)
#        - 1000 training examples (x10 different samples of 1000 training examples)
#        - 10000 training examples (x1 different sample of 10000 training examples)
# (because of jobman, the keys have to be strings, not ints)
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    logger.info("Getting dataset info for %s" % dataset_name)
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + dataset_name)
    mask_file = path.join(data_path, "mask.npy")
    mask = np.load(mask_file)
    input_dim = (mask == 1).sum()
    if input_dim % 2 == 1:
        input_dim -= 1
    mri = MRI.MRI_Standard(which_set="full",
                           dataset_name=dataset_name,
                           unit_normalize=True,
                           even_input=True,
                           apply_mask=True)
    variance_map_file = path.join(data_path, "variance_map.npy")
    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in jg.nested_generator(jg.hidden_generator("nhid1", 1),
                                     jg.hidden_generator("nhid2", 1),
                                     ):

        state = DD()
        experiment_hyperparams = mlp_experiment.default_hyperparams(input_dim)

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        
        experiment_hyperparams["dataset_name"] = dataset_name

        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   flatten(experiment_hyperparams).values())))

        user = path.expandvars("$USER")
        save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%d"
                                      % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        state.pid = 0

        sql.insert_job(
            mlp_experiment.experiment,
            flatten(state),
            db
            )

    db.createView("%s_view" % args.table)
Example #14
#
# You can also submit the jobs on the command line like this example:
# jobman sqlschedules <tablepath> Experimentsbatchpretrain.finetuning dat="MNIST" depth=1 tie=True "n_hid={{100,250,500,750,1000}}" act=tanh "sup_lr={{.01,.001,.0001,.00001}}" seed=1 nbepochs_sup=1000 batchsize=10
#


from jobman.tools import DD, flatten
from jobman import sql
from jobman.parse import filemerge
from Experimentsbatchpretrain import *
import numpy

db = sql.db('postgres://[email protected]/glorotxa_db/pretrainexpe')

state = DD()
state.curridata = DD(filemerge('Curridata.conf'))

state.depth = 3
state.tie = True
state.n_hid = 1000  # number of units per layer
state.act = 'tanh'

state.sup_lr = 0.01
state.unsup_lr = 0.001
state.noise = 0.25

state.seed = 1

state.nbepochs_unsup = 30  # maximal number of unsupervised updates per layer
state.nbepochs_sup = 1000  # maximal number of supervised updates
state.batchsize = 10
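
# The jobman sqlschedules line in the header sweeps n_hid and sup_lr with the
# {{...}} multi-value syntax. A hedged sketch of the equivalent explicit
# scheduling loop in Python, reusing the defaults set above (finetuning is
# assumed to be the experiment function exported by Experimentsbatchpretrain):
for n_hid in [100, 250, 500, 750, 1000]:
    for sup_lr in [.01, .001, .0001, .00001]:
        state.n_hid = n_hid
        state.sup_lr = sup_lr
        sql.insert_job(finetuning, flatten(state), db)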
Example #15
from jobman.tools import DD, flatten
from jobman import sql
from DARPAscript import NLPSDAE

db = sql.db('postgres://[email protected]/glorotxa_db/opentablegpu') # you should change this line to match the database you need


state = DD()

state.act = ['tanh']
state.depth = 1
state.n_hid = [5000]

state.noise = ['binomial_NLP']

state.weight_regularization_type = 'l2'
state.weight_regularization_coeff = [0.0,0.0]
state.activation_regularization_type = 'l1'

# Number of pretraining epochs, one entry per layer
state.nepochs = [30]

# Different validation runs
#        - 100 training examples (x20 different samples of 100 training examples)
#        - 1000 training examples (x10 different samples of 1000 training examples)
#        - 10000 training examples (x1 different sample of 10000 training examples)
# (because of jobman, the keys have to be strings, not ints)
# NOTE: Probably you don't want to make trainsize larger than 10K,
# because it will be too large for CPU memory.
state.validation_runs_for_each_trainingsize = {"100": 20, "1000": 10, "10000": 1}
def load_experiments(args):
    dataset_name = args.dataset_name

    # Load the database and table.
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    # Don't worry about this yet.
    input_handler = InputHandler()

    # For generating models, we use a special set of jobman generators, made
    # for convenience.
    for items in jg.nested_generator(
        jg.float_generator("learning_rate", 3, 0.01, 0.0001, log_scale=True),
        jg.list_generator("nhid", [50, 100, 200, 300]),
        ):

        logger.info("Adding RBM experiment across hyperparameters %s" % (items, ))
        state = DD()

        # Load experiment hyperparams from experiment
        experiment_hyperparams = experiment.default_hyperparams()

        # Set them with values in our loop.
        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry, ("Key not found in hyperparams: %s"
                                             % split_keys[-1])
            entry[split_keys[-1]] = value

        # Set the dataset name
        experiment_hyperparams["dataset_name"] = dataset_name

        # Get the input dim and variance map. Don't worry about variance maps right now,
        # they aren't used here.
        input_dim, variance_map_file = input_handler.get_input_params(args,
                                                                      experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))

        # Set the input dimensionality by the data
        experiment_hyperparams["nvis"] = input_dim

        # Set the minimum learning rate relative to the initial learning rate.
        experiment_hyperparams["min_lr"] = experiment_hyperparams["learning_rate"] / 10

        # Make a unique hash for experiments. Remember that lists, dicts, and other data
        # types may not be hashable, so you may need to do some special processing. In
        # this case we convert the lists to tuples.
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list)
                                    else v 
                                    for v in flatten(experiment_hyperparams).values()])))

        # Save path for the experiments. In this case we are sharing a directory in my 
        # export directory so IT can blame me.
        save_path = serial.preprocess("/export/mialab/users/dhjelm/pylearn2_outs/rbm_demo/%d"
                                      % h)

        # We save file params separately as they aren't model specific.
        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        user = path.expandvars("$USER")
        state.created_by = user

        # Finally we add the experiment to the table.
        sql.insert_job(
            experiment.experiment,
            flatten(state),
            db
            )

    # A view can be used when querying the database using psql. May not be needed in future.
    db.createView("%s_view" % args.table)
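
# Each load_experiments(args) in this collection expects an argparse-style
# namespace with the attributes it reads (user, host, port, database, table,
# dataset_name, plus flags such as transposed/logistic in some variants). A
# minimal, hedged driver sketch with hypothetical defaults:
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_name")
    parser.add_argument("--user", default="myuser")        # hypothetical default
    parser.add_argument("--host", default="localhost")
    parser.add_argument("--port", type=int, default=5432)
    parser.add_argument("--database", default="my_db")     # hypothetical default
    parser.add_argument("--table", default="rbm_demo")     # hypothetical default
    load_experiments(parser.parse_args())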