def main(args):
    # Create a new database
    db = sql.db('%s?table=%s' % (args.database, args.table_name))

    # Create a jobman state
    state = DD()

    # Loop over the search space and schedule jobs
    config_generator = gen_configurations(args.configurations)
    for i, params in enumerate(config_generator):
        # Complete parameters dictionary and add to the state
        state.parameters = params
        state.parameters['model'] = args.model
        state.parameters['dataset'] = args.dataset
        state.parameters['nb_epoch'] = args.n_epochs
        state.parameters['batch_size'] = args.batch_size

        # Insert the job into the database
        if args.model == 'ADIOS':
            state.parameters['labels_order'] = args.labels_order
            state.parameters['n_label_splits'] = args.n_label_splits
            for split in xrange(args.n_label_splits):
                state.parameters['label_split'] = split + 1
                sql.insert_job(train_adios, flatten(state), db)
        else:  # args.model == 'MLP'
            sql.insert_job(train_mlp, flatten(state), db)

    # Create a view for the new database table
    db.createView(args.table_name + '_view')
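# A minimal sketch (assuming only that jobman is installed) of what flatten()
# does to the nested state above: nested DD keys are joined with '.', so each
# job is stored as flat key/value pairs in the SQL table.
from jobman.tools import DD, flatten

demo = DD()
demo.parameters = DD()
demo.parameters.model = 'MLP'
demo.parameters.batch_size = 128
print flatten(demo)
# keys come out dot-joined: 'parameters.model', 'parameters.batch_size'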
Example #2
def tfd(n_trials):
    ri = numpy.random.random_integers

    state = DD()
    with open('mnist_powerup_temp_l2.yaml') as ymtmp:
        state.yaml_string = ymtmp.read()

    state.powerup_nunits = 240
    state.powerup_npieces = 5

    state.powerup_nunits2 = 240
    state.powerup_npieces2 = 5

    state.W_lr_scale = 0.04
    state.p_lr_scale = 0.01
    state.lr_rate = 0.1
    state.init_mom = 0.5
    state.final_mom = 0.5
    state.decay_factor = 0.5
    state.max_col_norm = 1.9365

    state.save_path = './'

    n_pieces = [2, 3, 4, 5]
    n_units = [200, 240, 320, 360, 420, 480]

    learning_rates = numpy.logspace(numpy.log10(0.09), numpy.log10(1.2), 60)
    learning_rate_scalers = numpy.logspace(numpy.log10(0.1), numpy.log10(1), 50)
    decay_factors = numpy.logspace(numpy.log10(0.001), numpy.log10(0.06), 40)
    max_col_norms = [1.8365, 1.9365, 2.1365, 2.2365, 2.3486]

    ind = 0
    TABLE_NAME = "powerup_mnist_finest_large_2l"
    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)

    for i in xrange(n_trials):
        state.lr_rate = learning_rates[ri(learning_rates.shape[0]) - 1]

        state.powerup_nunits = n_units[ri(len(n_units)) - 1]
        state.powerup_npieces = n_pieces[ri(len(n_pieces)) - 1]

        state.powerup_nunits2 = state.powerup_nunits
        state.powerup_npieces2 = state.powerup_npieces

        state.W_lr_scale = numpy.random.uniform(low=0.09, high=1.0)
        state.p_lr_scale = numpy.random.uniform(low=0.09, high=1.0)

        state.init_mom = numpy.random.uniform(low=0.3, high=0.6)
        state.final_mom = numpy.random.uniform(low=state.init_mom + 0.1, high=0.9)
        state.decay_factor = decay_factors[ri(len(decay_factors)) - 1]
        state.max_col_norm = max_col_norms[ri(len(max_col_norms)) - 1]

        alphabet = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
        numpy.random.shuffle(alphabet)
        state.save_path = './'
        state.save_path += ''.join(alphabet[:7]) + '_'
        sql.insert_job(experiment, flatten(state), db)
        ind += 1

    db.createView(TABLE_NAME + '_view')
    print "{} jobs submitted".format(ind)
Example #3
def create_jobman_jobs():

    #Database operations
    TABLE_NAME = "arcade_post_mlp_cv_binary_8x8_40k"

    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)

    ri = numpy.random.random_integers

    # Default values
    state = DD()
    state.dataset = \
    "/home/gulcehre/dataset/pentomino/experiment_data/pento64x64_40k_seed_23112222.npy"

    state.no_of_folds = 5
    state.exid = 0

    state.n_hiddens = [100, 200, 300]
    state.n_hidden_layers = 3

    state.learning_rate = 0.001
    state.l1_reg = 1e-5
    state.l2_reg = 1e-3
    state.n_epochs = 2
    state.batch_size = 120
    state.save_exp_data = True
    state.no_of_patches = 64
    state.cost_type = "crossentropy"
    state.n_in = 8*8
    state.n_out = 1

    state.best_valid_error = 0.0

    state.best_test_error = 0.0

    state.valid_obj_path_error = 0.0
    state.test_obj_path_error = 0.0

    l1_reg_values = [0., 1e-6, 1e-5, 1e-4]
    l2_reg_values = [0., 1e-5, 1e-4]

    learning_rates = numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 36)
    num_hiddens = numpy.logspace(numpy.log10(256), numpy.log10(2048), 24)

    for i in xrange(NO_OF_TRIALS):
        state.exid = i
        state.n_hidden_layers = ri(4)
        n_hiddens = []

        for _ in xrange(state.n_hidden_layers):
            n_hiddens.append(int(num_hiddens[ri(num_hiddens.shape[0]) - 1]))

        state.n_hiddens = n_hiddens

        state.learning_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.l1_reg = l1_reg_values[ri(len(l1_reg_values)) - 1]
        state.l2_reg = l2_reg_values[ri(len(l2_reg_values)) - 1]
        sql.insert_job(experiment, flatten(state), db)

    db.createView(TABLE_NAME + "_view")
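# numpy.logspace(numpy.log10(a), numpy.log10(b), n), used above for the
# learning rates and hidden sizes, draws n points geometrically spaced between
# a and b, the usual way to search scale-like hyperparameters. For example:
import numpy
print numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 5)
# five values from 1e-4 to 1, each a factor of 10 apart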
Example #4
def pento(n_trials):
    ri = numpy.random.random_integers

    state = DD()
    with open('mnist_powerup_temp.yaml') as ymtmp:
        state.yaml_string = ymtmp.read()

    state.powerup_nunits = 240
    state.powerup_npieces = 5
    state.W_lr_scale = 0.04
    state.p_lr_scale = 0.01
    state.lr_rate = 0.1
    state.l2_pen = 1e-5
    state.l2_pen2 = 0.0000
    state.init_mom = 0.5
    state.final_mom = 0.5
    state.decay_factor = 0.5
    state.max_col_norm = 1.9365
    state.max_col_norm2 = 1.8365
    state.batch_size = 128

    state.save_path = './'

    n_pieces = [2, 3, 4, 5, 6, 8, 10, 12, 14, 16]
    n_units = [200, 240, 280, 320, 360, 420, 480]
    batch_sizes = [128, 256, 512]

    learning_rates = numpy.logspace(numpy.log10(0.001), numpy.log10(1.0), 30)
    learning_rate_scalers = numpy.logspace(numpy.log10(0.01), numpy.log10(1), 30)
    l2_pen = numpy.logspace(numpy.log10(1e-6), numpy.log10(8*1e-3), 100)
    max_col_norms = [1.7365, 1.8365, 1.9365, 2.1365, 2.2365, 2.4365]

    ind = 0
    TABLE_NAME = "powerup_mnist_1layer_fixed"
    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)

    for i in xrange(n_trials):

        state.lr_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.powerup_nunits = n_units[ri(len(n_units)) - 1]
        state.powerup_npieces = n_pieces[ri(len(n_pieces)) - 1]
        state.W_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.p_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.batch_size = batch_sizes[ri(len(batch_sizes)) - 1]

        state.l2_pen = l2_pen[ri(l2_pen.shape[0]) - 1]
        state.init_mom = numpy.random.uniform(low=0.3, high=0.6)
        state.final_mom = numpy.random.uniform(low=state.init_mom + 0.1, high=0.9)
        state.decay_factor = numpy.random.uniform(low=0.01, high=0.05)
        state.max_col_norm = max_col_norms[ri(len(max_col_norms)) - 1]

        alphabet = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
        numpy.random.shuffle(alphabet)
        state.save_path = './'
        state.save_path += ''.join(alphabet[:7]) + '_'
        sql.insert_job(experiment, flatten(state), db)
        ind += 1

    db.createView(TABLE_NAME + '_view')
    print "{} jobs submitted".format(ind)
Example #5
def tfd(n_trials):
    ri = numpy.random.random_integers

    state = DD()
    with open("tfd_powerup_temp.yaml") as ymtmp:
        state.yaml_string = ymtmp.read()

    state.powerup_nunits = 240
    state.powerup_npieces = 5
    state.W_lr_scale = 0.04
    state.p_lr_scale = 0.01
    state.lr_rate = 0.1
    state.l2_pen = 1e-5
    state.l2_pen2 = 0.0000
    state.init_mom = 0.5
    state.final_mom = 0.5
    state.decay_factor = 0.5
    state.max_col_norm = 1.9365
    state.max_col_norm2 = 1.8365

    state.save_path = "./"

    n_pieces = [2, 3, 4, 5, 6, 8, 10, 12, 14, 16]
    n_units = [200, 240, 280, 320, 420]
    learning_rates = numpy.logspace(numpy.log10(0.001), numpy.log10(1.0), 32)
    learning_rate_scalers = numpy.logspace(numpy.log10(0.01), numpy.log10(1), 30)
    l2_pen = numpy.logspace(numpy.log10(1e-6), numpy.log10(3 * 1e-3), 100)
    max_col_norms = [1.8365, 1.9365, 2.1365, 2.2365, 2.3486, 2.4365]

    ind = 0
    TABLE_NAME = "powerup_tfd_1layer_finer_large2"
    db = api0.open_db("postgresql://[email protected]/gulcehrc_db?table=" + TABLE_NAME)

    for i in xrange(n_trials):

        state.lr_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.powerup_nunits = n_units[ri(len(n_units)) - 1]

        if state.powerup_nunits >= 320:
            state.powerup_npieces = n_pieces[ri(low=0, high=5)]
        else:
            state.powerup_npieces = n_pieces[ri(low=3, high=(len(n_pieces) - 1))]

        state.W_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.p_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.l2_pen = l2_pen[ri(l2_pen.shape[0]) - 1]
        state.init_mom = numpy.random.uniform(low=0.3, high=0.6)
        state.final_mom = numpy.random.uniform(low=state.init_mom + 0.1, high=0.9)
        state.decay_factor = numpy.random.uniform(low=0.01, high=0.05)
        state.max_col_norm = max_col_norms[ri(len(max_col_norms)) - 1]

        alphabet = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUWXYZ0123456789")
        state.save_path = "./"
        state.save_path += "".join(alphabet[:7]) + "_"
        sql.insert_job(experiment, flatten(state), db)
        ind += 1

    db.createView(TABLE_NAME + "_view")
    print "{} jobs submitted".format(ind)
Example #6
def insert_jobexp(exp_args, jobman_args):
    """
    Insert jobman experiment jobs in a sql database.
    Remarks: We keep this in a separate module since we can't refer to the jobexp.run function
    from within the jobexp module by its absolute name (otherwise jobman won't find the experiment when run).
    :param exp_args:
    :param jobman_args:
    :return:
    """
    table_name = jobman_args.get("table_name", "experiment")
    db = api0.open_db('postgres://*****:*****@127.0.0.1/jobbase?table='+table_name)
    for arg in jobman_args:
        sql.insert_job(predictive_rl.rlglueexp.jobexp.run, flatten(arg), db)
Example #7
def load_experiments_jobman(experiment_module, jobargs):
    """
    Load jobs from experiment onto postgresql database table.
    """
    dbdescr = get_desc(jobargs)
    db = api0.open_db(dbdescr)

    experiment = imp.load_source("module.name", experiment_module)
    for i, items in enumerate(experiment.generator):
        hyperparams = experiment.default_hyperparams
        state = DD()
        set_hyper_parameters(hyperparams, **dict((k, v) for k, v in items))
        state.hyperparams = translate(hyperparams, "knex")
        state["out&path"] = path.abspath(jobargs["out_path"])
        state["experiment&module"] = path.abspath(experiment_module)
        state["dbdescr"] = dbdescr

        sql.insert_job(run_experiment_jobman, flatten(state), db)
    db.createView("%s" % jobargs["table"])
Example #9
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    logger.info("Getting dataset info for %s%s" %
                (dataset_name, ", transposed" if args.transposed else ""))
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)

    if args.transposed:
        logger.info("Data in transpose...")
        mri = MRI.MRI_Transposed(dataset_name=args.dataset_name,
                                 unit_normalize=True,
                                 even_input=True,
                                 apply_mask=True)
        input_dim = mri.X.shape[1]
        variance_map_file = path.join(data_path, "transposed_variance_map.npy")
    else:
        mask_file = path.join(data_path, "mask.npy")
        mask = np.load(mask_file)
        input_dim = (mask == 1).sum()
        if input_dim % 2 == 1:
            input_dim -= 1
        mri = MRI.MRI_Standard(which_set="full",
                               dataset_name=args.dataset_name,
                               unit_normalize=True,
                               even_input=True,
                               apply_mask=True)
        variance_map_file = path.join(data_path, "variance_map.npy")

    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in nested_generator(
            layer_depth_generator("encoder.layer_depths", xrange(4, 6), 5),
            hidden_generator("encoder.nhid", 4),
            float_generator("weight_decay.coeffs.z",
                            3,
                            0.1,
                            0.001,
                            log_scale=True)):
        #        logger.info("Adding NICE experiment with hyperparameters %s" % (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams(input_dim)
        if args.transposed:
            experiment_hyperparams["data_class"] = "MRI_Transposed"
        if args.logistic:
            experiment_hyperparams["prior"]["__builder__"] =\
                "nice.pylearn2.models.nice.StandardLogistic"

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list) else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(nice_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
Example #10
# to decode the tf*idf.
state.inputtype = 'binary'

state.seed = 123

state.activation_regularization_coeff = [0]

#here are the for loops that define the grid:

for i in [0.01,0.001]:
    state.lr = [i]
    for j in [0.5,0.25,0.125,0.05]:
        state.noise_lvl=[j]
        for k in [1400,2500,5000]:
            state.n_hid = [k]
            sql.insert_job(NLPSDAE, flatten(state), db) #this submits the current state DD to the db; if it already exists in the db, no additional job is added.
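# Taken together, the nested loops enumerate a full Cartesian grid:
# 2 learning rates x 4 noise levels x 3 hidden sizes = 24 jobs.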


db.createView('opentablegpuview')

# First run this script
# PYTHONPATH=$PYTHONPATH:.. python DARPAjobs.py 

# Test the jobs are in the database:
# psql -d ift6266h10_sandbox_db -h gershwin.iro.umontreal.ca -U ift6266h10
# select id,lr,noiselvl,nhid as reg,jobman_status from opentablegpuview;
# password: f0572cd63b
# Set some values
#  update opentablegpukeyval set ival=0 where name='jobman.status';
# update opentablegpukeyval set ival=0 where name='jobman.status' and dict_id=20;
Example #11
def load_experiments(args):
    dataset_name = args.dataset_name

    # Load the database and table.
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    # Don't worry about this yet.
    input_handler = InputHandler()

    # For generating models, we use a special set of jobman generators, made
    # for convenience.
    for items in jg.nested_generator(
        jg.float_generator("learning_rate", 3, 0.01, 0.0001, log_scale=True),
        jg.list_generator("nhid", [50, 100, 200, 300]),
        ):

        logger.info("Adding RBM experiment across hyperparameters %s" % (items, ))
        state = DD()

        # Load experiment hyperparams from experiment
        experiment_hyperparams = experiment.default_hyperparams()

        # Set them with values in our loop.
        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry, ("Key not found in hyperparams: %s"
                                             % split_keys[-1])
            entry[split_keys[-1]] = value

        # Set the dataset name
        experiment_hyperparams["dataset_name"] = dataset_name

        # Get the input dim and variance map. Don't worry about variance maps right now,
        # they aren't used here.
        input_dim, variance_map_file = input_handler.get_input_params(args,
                                                                      experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))

        # Set the input dimensionality by the data
        experiment_hyperparams["nvis"] = input_dim

        # Set the minimum learning rate relative to the initial learning rate.
        experiment_hyperparams["min_lr"] = experiment_hyperparams["learning_rate"] / 10

        # Make a unique hash for experiments. Remember that lists, dicts, and other data
        # types may not be hashable, so you may need to do some special processing. In
        # this case we convert the lists to tuples.
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list)
                                    else v 
                                    for v in flatten(experiment_hyperparams).values()])))

        # Save path for the experiments. In this case we are sharing a directory in my 
        # export directory so IT can blame me.
        save_path = serial.preprocess("/export/mialab/users/dhjelm/pylearn2_outs/rbm_demo/%d"
                                      % h)

        # We save file params separately as they aren't model specific.
        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        user = path.expandvars("$USER")
        state.created_by = user

        # Finally we add the experiment to the table.
        sql.insert_job(
            experiment.experiment,
            flatten(state),
            db
            )

    # A view can be used when querying the database using psql. May not be needed in future.
    db.createView("%s_view" % args.table)
Example #13
state.batch_size = 20
state.n_hidden = 10

# Hyperparameter exploration
for n_hidden in 20, 30:

    print "h_hidden =",h_hidden
    state.n_hidden = n_hidden

    # Explore L1 regularization w/o L2
    state.L2_reg = 0.
    for L1_reg in 0., 1e-6, 1e-5, 1e-4:
        print "L1_reg =",L1_reg
        state.L1_reg = L1_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)

    # Explore L2 regularization w/o L1
    state.L1_reg = 0.
    for L2_reg in 1e-5, 1e-4:
        print "L2_reg =",L2_reg
        state.L2_reg = L2_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)

# Create the view
db.createView(TABLE_NAME+'_view')
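# Taken together, the two loops insert 2 hidden sizes x (4 L1 settings + 2 L2
# settings) = 12 jobs.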

Example #14
from jobman import sql
from jobman.parse import filemerge
from Experimentsbatchpretrain import *
import numpy

db = sql.db('postgres://[email protected]/glorotxa_db/pretrainexpe')

state = DD()
state.curridata = DD(filemerge('Curridata.conf'))

state.depth = 3
state.tie = True
state.n_hid = 1000 #number of units per layer
state.act = 'tanh'

state.sup_lr = 0.01
state.unsup_lr = 0.001
state.noise = 0.25

state.seed = 1

state.nbepochs_unsup = 30 #maximal number of unsupervised updates per layer
state.nbepochs_sup = 1000 #maximal number of supervised updates
state.batchsize = 10

for i in ['MNIST','CIFAR10','ImageNet','shapesetbatch']:
    state.dat = i
    sql.insert_job(pretrain, flatten(state), db)

db.createView('pretrainexpeview')
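# The loop above schedules one pretraining job per dataset, i.e. 4 jobs total.
# Since insert_job skips states that already exist in the table, re-running
# this script does not create duplicates.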
Example #15
state = DD()
state.learning_rate = 0.01
state.L1_reg = 0.00
state.L2_reg = 0.0001
state.n_iter = 50
state.batch_size = 20
state.n_hidden = 10

# Hyperparameter exploration
for n_hidden in 20, 30:
    state.n_hidden = n_hidden

    # Explore L1 regularization w/o L2
    state.L2_reg = 0.
    for L1_reg in 0., 1e-6, 1e-5, 1e-4:
        state.L1_reg = L1_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)

    # Explore L2 regularization w/o L1
    state.L1_reg = 0.
    for L2_reg in 1e-5, 1e-4:
        state.L2_reg = L2_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)

# Create the view
db.createView(TABLE_NAME + '_view')
Example #16
from jobman.tools import DD, flatten
from jobman import api0, sql

from jobman.examples.def_addition import addition_example

TABLE_NAME = 'test_add_'

# DB path...
db = api0.open_db('postgres://<user>:<pass>@<server>/<database>?table=' +
                  TABLE_NAME)

state = DD()
for first in 0, 2, 4, 6, 8, 10:
    state.first = first
    for second in 1, 3, 5, 7, 9:
        state.second = second

        sql.insert_job(addition_example, flatten(state), db)
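# This grid inserts 6 x 5 = 30 addition jobs; each row in test_add_ stores the
# flattened 'first' and 'second' values plus jobman's bookkeeping columns
# (e.g. jobman.status, as queried in the psql examples elsewhere on this page).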
Example #17
# decoding activation function is for the first layer
# e.g. inputtype 'tfidf' uses activation function softplus
# to decode the tf*idf.
state.inputtype = 'binary'

state.seed = 123

#here are the for loops that define the grid:

for i in [0.01, 0.001, 0.0001]:
    state.lr = [i]
    for j in [(0.7, 0.0041), (0.5, 0.003), (0.8, 0.005)]:
        state.noise_lvl = [j]
        for k in [0.001, 0.00001, 0.0]:
            state.activation_regularization_coeff = [k]
            sql.insert_job(
                NLPSDAE, flatten(state), db
            )  #this submits the current state DD to the db; if it already exists in the db, no additional job is added.

db.createView('opentablegpuview')

#in order to access the db from a compute node you need to create an ssh tunnel connection on ang23:
#(do this once; I think you should keep the shell open, or you can create the tunnel in a screen session and detach it)

#ssh -v -f -o ServerAliveInterval=60 -o ServerAliveCountMax=60 -N -L *:5432:localhost:5432 gershwin.iro.umontreal.ca

#you will need to give your LISA password.

#here is the command you use to launch 1 job from the db.
#THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32  sqsub -q gpu -r 2d -n 1 --gpp=1 --memperproc=2.5G -o the_output_you_want jobman sql 'postgres://glorotxa@ang23/glorotxa_db/opentablegpu' /scratch/glorotxa/
Example #18
def load_experiments(args):

    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:"
        "%(port)d/%(database)s?table=%(table)s" % {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })
    input_handler = InputHandler()

    for items in jg.nested_generator(
            jg.list_generator(
                "encoder.layer_depths",
                [[3, 5, 5, 5, 3], [5, 5, 5, 5, 5], [2, 4, 4, 2]]),
            jg.list_generator("variance_normalize", [False, 2]),
            jg.float_generator("weight_decay.coeff",
                               4,
                               0.1,
                               0.0001,
                               log_scale=True),
            jg.list_generator("prior.__builder__", [
                "nice.pylearn2.models.nice.StandardNormal",
                "nice.pylearn2.models.nice.StandardLogistic"
            ])):

        logger.info("Adding NICE experiment across hyperparameters %s" %
                    (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams()

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry,\
                ("Key not found in hyperparams: %s, "
                 "found: %s" % (split_keys[-1], entry.keys()))
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        input_dim, variance_map_file = input_handler.get_input_params(
            args, experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))
        experiment_hyperparams["nvis"] = input_dim
        experiment_hyperparams["encoder"]["nvis"] = input_dim

        h = abs(hash(frozenset(
            flatten(experiment_hyperparams).keys() +\
            [tuple(v) if isinstance(v, list)
             else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(nice_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
Example #19
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    logger.info("Getting dataset info for %s" % dataset_name)
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + dataset_name)
    mask_file = path.join(data_path, "mask.npy")
    mask = np.load(mask_file)
    input_dim = (mask == 1).sum()
    if input_dim % 2 == 1:
        input_dim -= 1
    mri = MRI.MRI_Standard(which_set="full",
                           dataset_name=dataset_name,
                           unit_normalize=True,
                           even_input=True,
                           apply_mask=True)
    variance_map_file = path.join(data_path, "variance_map.npy")
    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in jg.nested_generator(jg.hidden_generator("nhid1", 1),
                                     jg.hidden_generator("nhid2", 1),
                                     ):

        state = DD()
        experiment_hyperparams = mlp_experiment.default_hyperparams(input_dim)

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        
        experiment_hyperparams["dataset_name"] = dataset_name

        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   flatten(experiment_hyperparams).values())))

        user = path.expandvars("$USER")
        save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%d"
                                      % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        state.pid = 0

        sql.insert_job(
            mlp_experiment.experiment,
            flatten(state),
            db
            )

    db.createView("%s_view" % args.table)
Example #20
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    logger.info("Getting dataset info for %s%s"
                % (dataset_name, ", transposed" if args.transposed else ""))
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)

    if args.transposed:
        logger.info("Data in transpose...")
        mri = MRI.MRI_Transposed(dataset_name=args.dataset_name,
                                 unit_normalize=True,
                                 even_input=True,
                                 apply_mask=True)
        input_dim = mri.X.shape[1]
        variance_map_file = path.join(data_path, "transposed_variance_map.npy")
    else:
        mask_file = path.join(data_path, "mask.npy")
        mask = np.load(mask_file)
        input_dim = (mask == 1).sum()
        if input_dim % 2 == 1:
            input_dim -= 1
        mri = MRI.MRI_Standard(which_set="full",
                               dataset_name=args.dataset_name,
                               unit_normalize=True,
                               even_input=True,
                               apply_mask=True)
        variance_map_file = path.join(data_path, "variance_map.npy")

    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in nested_generator(layer_depth_generator("encoder.layer_depths", 
                                                        xrange(4, 6), 5),
                                  hidden_generator("encoder.nhid", 4),
                                  float_generator("weight_decay.coeffs.z", 3, 0.1, 0.001, log_scale=True)):
#        logger.info("Adding NICE experiment with hyperparameters %s" % (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams(input_dim)
        if args.transposed:
            experiment_hyperparams["data_class"] = "MRI_Transposed"
        if args.logistic:
            experiment_hyperparams["prior"]["__builder__"] =\
                "nice.pylearn2.models.nice.StandardLogistic"

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list) else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%d"
                                      % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(
            nice_experiment.experiment,
            flatten(state),
            db
            )

    db.createView("%s_view" % args.table)
Example #21
state.mem_nel = 200
state.mem_size = 28

np.random.seed(3)

ri = np.random.random_integers
learning_rates = np.logspace(np.log10(lr_min), np.log10(lr_max), 100)
stds = np.random.uniform(std_min, std_max, 100)

# Change the table name every time you try
TABLE_NAME = "adam_grusoft_model_search_v0"

# You should have an account for jobman
db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)
ind = 0

for i in xrange(n_trials):
    state.lr = learning_rates[ri(learning_rates.shape[0]) - 1]
    state.std = stds[ri(len(stds)) - 1]
    state.batch_size = batches[ri(len(batches)) - 1]
    state.renormalization_scale = renormalization_scale[ri(len(renormalization_scale)) - 1]
    state.mem_nel = mem_nels[ri(len(mem_nels)) - 1]
    state.mem_size = mem_sizes[ri(len(mem_sizes)) - 1]
    sql.insert_job(memnet.train_model_adam_gru_soft.search_model_adam_gru_soft, flatten(state), db)
    ind += 1

db.createView(TABLE_NAME + "_view")
print "{} jobs submitted".format(ind)

Example #24
                        method_name: contraction_penalty
                    },
                    coefficient: %(coefficient)f
                }
            ],
            "termination_criterion" : %(term_crit)s,
        }
    }
    '''

    state.hyper_parameters = {
            "file": "${PYLEARN2_DATA_PATH}/UTLC/pca/sylvester_train_x_pca32.npy",
            "nvis": 32,
            "nhid": 6,
            "learning_rate": 0.1,
            "batch_size": 10,
            "coefficient": 0.5,
            "term_crit": {
                "__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
                "max_epochs": 2
                }
            }

    state.extract_results = "pylearn2.scripts.jobman.tester.result_extractor"

    sql.insert_job(
            experiment.train_experiment,
            flatten(state),
            db,
            force_dup=True)
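# force_dup=True asks insert_job to insert the state even if an identical row
# already exists in the table; without it, jobman skips duplicates (the
# behaviour the grid scripts above rely on).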
Example #25
def create_jobman_jobs():

    #Database operations
    TABLE_NAME = "arcade_multi_prmlp_cv_binary_8x8_40k"

    db = api0.open_db(
        'postgresql://[email protected]/gulcehrc_db?table=' +
        TABLE_NAME)

    ri = numpy.random.random_integers

    # Default values
    state = DD()
    state.dataset = \
    "/home/gulcehre/dataset/pentomino/experiment_data/pento64x64_40k_seed_23112222.npy"

    state.no_of_folds = 5
    state.exid = 0

    state.n_hiddens = [100, 200, 300]
    state.n_hidden_layers = 3

    state.learning_rate = 0.001
    state.l1_reg = 1e-5
    state.l2_reg = 1e-3
    state.n_epochs = 2
    state.batch_size = 120
    state.save_exp_data = True
    state.no_of_patches = 64
    state.cost_type = "crossentropy"
    state.n_in = 8 * 8
    state.n_out = 1

    state.best_valid_error = 0.0

    state.best_test_error = 0.0

    state.valid_obj_path_error = 0.0
    state.test_obj_path_error = 0.0

    l1_reg_values = [0., 1e-6, 1e-5, 1e-4]
    l2_reg_values = [0., 1e-5, 1e-4]

    learning_rates = numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 36)
    num_hiddens = numpy.logspace(numpy.log10(256), numpy.log10(2048), 24)

    for i in xrange(NO_OF_TRIALS):
        state.exid = i
        state.n_hidden_layers = ri(4)
        n_hiddens = []

        for _ in xrange(state.n_hidden_layers):
            n_hiddens.append(int(num_hiddens[ri(num_hiddens.shape[0]) - 1]))

        state.n_hiddens = n_hiddens

        state.learning_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.l1_reg = l1_reg_values[ri(len(l1_reg_values)) - 1]
        state.l2_reg = l2_reg_values[ri(len(l2_reg_values)) - 1]
        sql.insert_job(experiment, flatten(state), db)

    db.createView(TABLE_NAME + "_view")