Example 1
    def update_db(self):
        """
        Update the postgresql database with results.
        """

        if self.dbdescr is None:
            return
        assert self.job_id is not None
        db = api0.open_db(self.dbdescr)
        j = db.get(self.job_id)
        for key, value in self.d["logs"].iteritems():
            if key in ["cpu", "mem"]:
                j[translate_string("stats.%s" % key, "knex")] = value[key][-1]
            else:
                for channel, channel_value in value.iteritems():
                    if len(channel_value) > 0:
                        j[translate_string("results.%s" % channel, "knex")] =\
                            channel_value[-1]
        for stat in self.d["stats"]:
            j[translate_string("stats.%s" % stat, "knex")] =\
                self.d["stats"][stat]
        status = self.d["stats"]["status"]

        if status in ["STARTING", "RUNNING (afaik)"]:
            j["jobman.status"] = 1
        elif status == "KILLED":
            j["jobman.status"] = -1
        elif status == "COMPLETED":
            j["jobman.status"] = 2
        table = self.dbdescr.split("table=", 1)[1]
        db.createView("%s" % table)
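The dbdescr string consumed above follows jobman's URL convention, the same format the other examples pass to api0.open_db. A minimal sketch of the table= split used by the view-creation code (hypothetical values):

dbdescr = "postgresql://user@host/experiments_db?table=my_jobs"
table = dbdescr.split("table=", 1)[1]
assert table == "my_jobs"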
Example 2

def tfd(n_trials):
    ri = numpy.random.random_integers

    state = DD()
    with open('mnist_powerup_temp_l2.yaml') as ymtmp:
        state.yaml_string = ymtmp.read()

    state.powerup_nunits = 240
    state.powerup_npieces = 5

    state.powerup_nunits2 = 240
    state.powerup_npieces2 = 5

    state.W_lr_scale = 0.04
    state.p_lr_scale = 0.01
    state.lr_rate = 0.1
    state.init_mom = 0.5
    state.final_mom = 0.5
    state.decay_factor = 0.5
    state.max_col_norm = 1.9365

    state.save_path = './'

    n_pieces = [2, 3, 4, 5]
    n_units = [200, 240, 320, 360, 420, 480]

    learning_rates = numpy.logspace(numpy.log10(0.09), numpy.log10(1.2), 60)
    learning_rate_scalers = numpy.logspace(numpy.log10(0.1), numpy.log10(1), 50)
    decay_factors = numpy.logspace(numpy.log10(0.001), numpy.log10(0.06), 40)
    max_col_norms = [1.8365, 1.9365, 2.1365, 2.2365, 2.3486]

    ind = 0
    TABLE_NAME = "powerup_mnist_finest_large_2l"
    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)

    for i in xrange(n_trials):
        state.lr_rate = learning_rates[ri(learning_rates.shape[0]) - 1]

        state.powerup_nunits = n_units[ri(len(n_units)) - 1]
        state.powerup_npieces = n_pieces[ri(len(n_pieces)) - 1]

        state.powerup_nunits2 = state.powerup_nunits
        state.powerup_npieces2 = state.powerup_npieces

        state.W_lr_scale = numpy.random.uniform(low=0.09, high=1.0)
        state.p_lr_scale = numpy.random.uniform(low=0.09, high=1.0)

        state.init_mom = numpy.random.uniform(low=0.3, high=0.6)
        state.final_mom = numpy.random.uniform(low=state.init_mom + 0.1, high=0.9)
        state.decay_factor = decay_factors[ri(len(decay_factors)) - 1]
        state.max_col_norm = max_col_norms[ri(len(max_col_norms)) - 1]

        alphabet = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
        numpy.random.shuffle(alphabet)  # without shuffling, every job would get the same 'abcdefg_' prefix
        state.save_path = './'
        state.save_path += ''.join(alphabet[:7]) + '_'
        sql.insert_job(experiment, flatten(state), db)
        ind += 1

    db.createView(TABLE_NAME + '_view')
    print "{} jobs submitted".format(ind)
Example 3
def pento(n_trials):
    ri = numpy.random.random_integers

    state = DD()
    with open('mnist_powerup_temp.yaml') as ymtmp:
        state.yaml_string = ymtmp.read()

    state.powerup_nunits = 240
    state.powerup_npieces = 5
    state.W_lr_scale = 0.04
    state.p_lr_scale = 0.01
    state.lr_rate = 0.1
    state.l2_pen = 1e-5
    state.l2_pen2 = 0.0000
    state.init_mom = 0.5
    state.final_mom = 0.5
    state.decay_factor = 0.5
    state.max_col_norm = 1.9365
    state.max_col_norm2 = 1.8365
    state.batch_size = 128

    state.save_path = './'

    n_pieces = [2, 3, 4, 5, 6, 8, 10, 12, 14, 16]
    n_units = [200, 240, 280, 320, 360, 420, 480]
    batch_sizes = [128, 256, 512]

    learning_rates = numpy.logspace(numpy.log10(0.001), numpy.log10(1.0), 30)
    learning_rate_scalers = numpy.logspace(numpy.log10(0.01), numpy.log10(1), 30)
    l2_pen = numpy.logspace(numpy.log10(1e-6), numpy.log10(8*1e-3), 100)
    max_col_norms = [1.7365, 1.8365, 1.9365, 2.1365, 2.2365, 2.4365]

    ind = 0
    TABLE_NAME = "powerup_mnist_1layer_fixed"
    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)

    for i in xrange(n_trials):

        state.lr_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.powerup_nunits = n_units[ri(len(n_units)) - 1]
        state.powerup_npieces = n_pieces[ri(len(n_pieces)) - 1]
        state.W_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.p_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.batch_size = batch_sizes[ri(len(batch_sizes)) - 1]

        state.l2_pen = l2_pen[ri(l2_pen.shape[0]) - 1]
        state.init_mom = numpy.random.uniform(low=0.3, high=0.6)
        state.final_mom = numpy.random.uniform(low=state.init_mom + 0.1, high=0.9)
        state.decay_factor = numpy.random.uniform(low=0.01, high=0.05)
        state.max_col_norm = max_col_norms[ri(len(max_col_norms)) - 1]

        alphabet = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
        numpy.random.shuffle(alphabet)
        state.save_path = './'
        state.save_path += ''.join(alphabet[:7]) + '_'
        sql.insert_job(experiment, flatten(state), db)
        ind += 1

    db.createView(TABLE_NAME + '_view')
    print "{} jobs submitted".format(ind)
Example 5
def create_jobman_jobs():

    #Database operations
    TABLE_NAME = "arcade_post_mlp_cv_binary_8x8_40k"

    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)

    ri = numpy.random.random_integers

    # Default values
    state = DD()
    state.dataset = \
    "/home/gulcehre/dataset/pentomino/experiment_data/pento64x64_40k_seed_23112222.npy"

    state.no_of_folds = 5
    state.exid = 0

    state.n_hiddens = [100, 200, 300]
    state.n_hidden_layers = 3

    state.learning_rate = 0.001
    state.l1_reg = 1e-5
    state.l2_reg = 1e-3
    state.n_epochs = 2
    state.batch_size = 120
    state.save_exp_data = True
    state.no_of_patches = 64
    state.cost_type = "crossentropy"
    state.n_in = 8*8
    state.n_out = 1

    state.best_valid_error = 0.0

    state.best_test_error = 0.0

    state.valid_obj_path_error = 0.0
    state.test_obj_path_error = 0.0

    l1_reg_values = [0., 1e-6, 1e-5, 1e-4]
    l2_reg_values = [0., 1e-5, 1e-4]

    learning_rates = numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 36)
    num_hiddens = numpy.logspace(numpy.log10(256), numpy.log10(2048), 24)

    for i in xrange(NO_OF_TRIALS):
        state.exid = i
        state.n_hidden_layers = ri(4)
        n_hiddens = []

        for _ in xrange(state.n_hidden_layers):
            n_hiddens.append(int(num_hiddens[ri(num_hiddens.shape[0]) - 1]))

        state.n_hiddens = n_hiddens

        state.learning_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.l1_reg = l1_reg_values[ri(len(l1_reg_values)) - 1]
        state.l2_reg = l2_reg_values[ri(len(l2_reg_values)) - 1]
        sql.insert_job(experiment, flatten(state), db)

    db.createView(TABLE_NAME + "_view")
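For readers unfamiliar with jobman's DD/flatten pair used above: flatten collapses a (possibly nested) DD into a flat dict with dot-separated keys, which become the columns of the experiment table. A small sketch with made-up values:

from jobman.tools import DD, flatten

state = DD()
state.learning_rate = 0.001
state.model = DD()
state.model.n_hidden = 256
flat = flatten(state)
# flat == {'learning_rate': 0.001, 'model.n_hidden': 256}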
Example 6
def tfd(n_trials):
    ri = numpy.random.random_integers

    state = DD()
    with open("tfd_powerup_temp.yaml") as ymtmp:
        state.yaml_string = ymtmp.read()

    state.powerup_nunits = 240
    state.powerup_npieces = 5
    state.W_lr_scale = 0.04
    state.p_lr_scale = 0.01
    state.lr_rate = 0.1
    state.l2_pen = 1e-5
    state.l2_pen2 = 0.0000
    state.init_mom = 0.5
    state.final_mom = 0.5
    state.decay_factor = 0.5
    state.max_col_norm = 1.9365
    state.max_col_norm2 = 1.8365

    state.save_path = "./"

    n_pieces = [2, 3, 4, 5, 6, 8, 10, 12, 14, 16]
    n_units = [200, 240, 280, 320, 420]
    learning_rates = numpy.logspace(numpy.log10(0.001), numpy.log10(1.0), 32)
    learning_rate_scalers = numpy.logspace(numpy.log10(0.01), numpy.log10(1), 30)
    l2_pen = numpy.logspace(numpy.log10(1e-6), numpy.log10(3 * 1e-3), 100)
    max_col_norms = [1.8365, 1.9365, 2.1365, 2.2365, 2.3486, 2.4365]

    ind = 0
    TABLE_NAME = "powerup_tfd_1layer_finer_large2"
    db = api0.open_db("postgresql://[email protected]/gulcehrc_db?table=" + TABLE_NAME)

    for i in xrange(n_trials):

        state.lr_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.powerup_nunits = n_units[ri(len(n_units)) - 1]

        if state.powerup_nunits >= 320:
            state.powerup_npieces = n_pieces[ri(low=0, high=5)]
        else:
            state.powerup_npieces = n_pieces[ri(low=3, high=(len(n_pieces) - 1))]

        state.W_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.p_lr_scale = learning_rate_scalers[ri(len(learning_rate_scalers)) - 1]
        state.l2_pen = l2_pen[ri(l2_pen.shape[0]) - 1]
        state.init_mom = numpy.random.uniform(low=0.3, high=0.6)
        state.final_mom = numpy.random.uniform(low=state.init_mom + 0.1, high=0.9)
        state.decay_factor = numpy.random.uniform(low=0.01, high=0.05)
        state.max_col_norm = max_col_norms[ri(len(max_col_norms)) - 1]

        alphabet = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUWXYZ0123456789")
        state.save_path = "./"
        state.save_path += "".join(alphabet[:7]) + "_"
        sql.insert_job(experiment, flatten(state), db)
        ind += 1

    db.createView(TABLE_NAME + "_view")
    print "{} jobs submitted".format(ind)
Example 7
    def _open_db(self, table_name):

        db = self._dbs.get(table_name, None)

        if db is None:
            logger.debug("Open database's table %s..." % table_name)
            db = self._dbs[table_name] = open_db(
                self.get_db_string(table_name))

        return db
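For context, a minimal sketch of the object state this method assumes around it; the _dbs initialisation and the get_db_string body are assumptions, not part of the snippet:

import logging

logger = logging.getLogger(__name__)

class DBPool(object):  # hypothetical host class for _open_db above
    def __init__(self, db_string_template):
        self._dbs = {}  # table_name -> open database handle
        self.db_string_template = db_string_template

    def get_db_string(self, table_name):
        # assumed: formats a dbdescr URL like the ones in these examples
        return self.db_string_template % table_name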
Example 9
def insert_jobexp(exp_args, jobman_args):
    """
    Insert jobman experiment jobs in a sql database.
    Remarks: We have this in a separate module since we can't refer to the jobexp.run function
    from within the jobexp module by it's absolute name (else jobman won't find the experiment when run)
    :param exp_args:
    :param jobman_args:
    :return:
    """
    table_name = jobman_args.get("table_name", "experiment")
    db = api0.open_db('postgres://*****:*****@127.0.0.1/jobbase?table='+table_name)
    for arg in jobman_args:
        sql.insert_job(predictive_rl.rlglueexp.jobexp.run, flatten(arg), db)
Example 10
def load_experiments_jobman(experiment_module, jobargs):
    """
    Load jobs from experiment onto postgresql database table.
    """
    dbdescr = get_desc(jobargs)
    db = api0.open_db(dbdescr)

    experiment = imp.load_source("module.name", experiment_module)
    for i, items in enumerate(experiment.generator):
        hyperparams = experiment.default_hyperparams
        state = DD()
        set_hyper_parameters(hyperparams, **dict((k, v) for k, v in items))
        state.hyperparams = translate(hyperparams, "knex")
        state["out&path"] = path.abspath(jobargs["out_path"])
        state["experiment&module"] = path.abspath(experiment_module)
        state["dbdescr"] = dbdescr

        sql.insert_job(run_experiment_jobman, flatten(state), db)
    db.createView("%s" % jobargs["table"])
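get_desc is not shown in any of these snippets; a plausible, purely hypothetical implementation would assemble the dbdescr from the jobargs dict, mirroring the URL that Example 12 builds inline from the same fields:

def get_desc(jobargs):
    # hypothetical helper, mirroring Example 12's URL format
    return ("postgres://%(user)s@%(host)s:"
            "%(port)d/%(database)s?table=%(table)s" % jobargs)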
Example 12
def main(args):
    if not args.experiment:
        raise ValueError("Must include experiment source file")
    global experiment
    logger.info("Loading module %s" % args.experiment)
    experiment = imp.load_source("module.name", args.experiment)

    db = api0.open_db(
        "postgres://%(user)s@%(host)s:"
        "%(port)d/%(database)s?table=%(table)s" % {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    alerter = Alerter(prompt=args.table)
    alerter.start()

    manager = JobManager()
    manager.start()

    jobs = manager.dict()

    updater = Updater(manager, alerter, jobs, db)
    analyzer = Analyzer(manager, alerter, jobs, db)

    table = Table(jobs, db, args.table, updater, analyzer, alerter,
                  args.reload)

    auto_updater = mp.Process(target=auto_update, args=(table, ))
    auto_updater.start()

    command_line = CommandLine(alerter, table)
    command_line.run()

    updater.join()
    analyzer.join()
Example 14
def open_db(jobargs):
    dbdescr = get_desc(jobargs)
    db = api0.open_db(dbdescr)
    return db
Example 15
from jobman import api0, sql
from jobman.tools import DD, flatten

# Experiment function
from mlp_jobman import experiment

# Database
TABLE_NAME = 'mlp_dumi'
db = api0.open_db(
    'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db?table=' + TABLE_NAME)

# Default values
state = DD()
state.learning_rate = 0.01
state.L1_reg = 0.00
state.L2_reg = 0.0001
state.n_iter = 50
state.batch_size = 20
state.n_hidden = 10

# Hyperparameter exploration
for n_hidden in 20, 30:
    state.n_hidden = n_hidden

    # Explore L1 regularization w/o L2
    state.L2_reg = 0.
    for L1_reg in 0., 1e-6, 1e-5, 1e-4:
        state.L1_reg = L1_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)
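The two loops above enumerate a 2 × 4 grid (two n_hidden values times four L1_reg values), so eight jobs are inserted in total. Re-running the script should not double-insert them: jobman's insert_job checks whether an identical flattened row already exists before inserting (hedged: this duplicate check is jobman behaviour, not shown in the snippet).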
Example 16
from jobman.tools import DD, flatten
from jobman import api0, sql
import numpy

from pylearn2.scripts.jobman import experiment

rng = numpy.random.RandomState(4312987)

if __name__ == '__main__':
    db = api0.open_db('postgres://gershwin.iro.umontreal.ca/desjagui_db/aistats12_mnist_500_1000_diag')

    state = DD()

    state.yaml_template = """
            !obj:pylearn2.scripts.train.Train {
                "max_epochs": 250,
                "save_freq": 10,
                "save_path": "dbm",
                # dataset below is now (temporarily) obsolete
                "dataset": &data !obj:pylearn2.datasets.mnist.MNIST {
                    "which_set": 'train',
                    "shuffle": True,
                    "one_hot": True,
                    "binarize": %(binarize)i,
                },
                "model": &model !obj:DBM.dbm.DBM {
                    "seed" : 123141,
                    "n_u": [784, %(nu1)i, %(nu2)i],
                    "lr": %(lr)f,
                    "flags": {
                        'enable_natural': %(enable_natural)i,
Example 17

from jobman.tools import DD, flatten
from jobman import api0, sql
import numpy

from pylearn2.scripts.jobman import experiment

rng = numpy.random.RandomState(4312987)

if __name__ == '__main__':
    db = api0.open_db('postgres://opter.iro.umontreal.ca/desjagui_db/nips2013_modes_rbm_exp1_1')

    state = DD()

    state.yaml_template = """
        !obj:pylearn2.scripts.train.Train {
            "dataset": &data !obj:deep_tempering.data.test_modes.OnlineModesNIPSDLUFL {},
            "model": &model !obj:deep_tempering.rbm.RBM {
                        "seed" : %(seed)i,
                        "init_from" : "",
                        "batch_size" : &batch_size %(batch_size)i,
                        "n_v"  : 784,
                        "n_h"  : &n_h %(nh1)i,
                        "neg_sample_steps" : 1,
                        "flags": {
                            "ml_vbias": 0,
                        },
                        "lr_spec"  : {
                            'type': 'linear',
                            'start': %(lr_start)f,
                            'end': %(lr_end)f,
                        },
"""
Example 18

"""
IMPORTANT: actually runs on sampled MNIST, contrary to nips2013_mnist_tdbn_exp9_1,
which used thresholding to binarize (due to a discrepancy in Pylearn2).
"""

from jobman.tools import DD, flatten
from jobman import api0, sql
import numpy

from pylearn2.scripts.jobman import experiment

rng = numpy.random.RandomState(4312987)

if __name__ == '__main__':
    db = api0.open_db('postgres://opter.iro.umontreal.ca/desjagui_db/icml14_dtneg_mnist_tdbn_exp1_1')

    state = DD()

    state.yaml_template = """
        !obj:pylearn2.train.Train {
            "dataset": &data !obj:pylearn2.datasets.mnist.MNIST {
                "which_set": 'train',
                "center": False,
                "one_hot": True,
                "binarize": 'sample',
            },
            "model": &model !obj:deep_tempering.tempered_dbn.TemperedDBN {
                "max_updates": 2000000,
                "flags": {
                    'train_on_samples': 0,
                    'order': 'swap_sample_learn',
Example 19

from jobman.tools import DD, flatten
from jobman import api0, sql
import numpy

from pylearn2.scripts.jobman import experiment

rng = numpy.random.RandomState(4312987)

if __name__ == '__main__':
    db = api0.open_db('postgres://opter.iro.umontreal.ca/desjagui_db/nips2013_modes_tdbn_exp3_2')

    state = DD()

    state.yaml_template = """
        !obj:pylearn2.scripts.train.Train {
            "dataset": &data !obj:deep_tempering.data.test_modes.OnlineModesNIPSDLUFL {},
            "model": &model !obj:deep_tempering.tempered_dbn.TemperedDBN {
                "rbm1": &rbm1 !obj:deep_tempering.rbm.RBM {
                        "seed" : %(seed1)i,
                        "batch_size" : &batch_size %(batch_size)i,
                        "n_v"  : 784,
                        "n_h"  : &nh1 %(nh1)i,
                        "gibbs_vhv": True,
                        "neg_sample_steps" : 1,
                        "flags": {
                            'ml_vbias': 0,
                        },
                        "lr_spec"  : {
                            'type': 'linear',
                            'start': %(lr_start)f,
                            'end': %(lr_end)f,
Example 20

state.max_iters = 20000
state.n_hids = 200
state.mem_nel = 200
state.mem_size = 28

np.random.seed(3)

ri = np.random.random_integers
learning_rates = np.logspace(np.log10(lr_min), np.log10(lr_max), 100)
stds = np.random.uniform(std_min, std_max, 100)

# Change the table name every time you try
TABLE_NAME = "adam_grusoft_model_search_v0"

# You need an account set up for jobman
db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table=' + TABLE_NAME)
ind = 0

for i in xrange(n_trials):
    state.lr = learning_rates[ri(learning_rates.shape[0]) - 1]
    state.std = stds[ri(len(stds)) - 1]
    state.batch_size = batches[ri(len(batches)) - 1]
    state.renormalization_scale = renormalization_scale[ri(len(renormalization_scale)) - 1]
    state.mem_nel = mem_nels[ri(len(mem_nels)) - 1]
    state.mem_size = mem_sizes[ri(len(mem_sizes)) - 1]
    sql.insert_job(memnet.train_model_adam_gru_soft.search_model_adam_gru_soft, flatten(state), db)
    ind += 1

db.createView(TABLE_NAME + "_view")
print "{} jobs submitted".format(ind)
Example 21
def create_jobman_jobs():

    #Database operations
    TABLE_NAME = "arcade_multi_prmlp_cv_binary_8x8_40k"

    db = api0.open_db(
        'postgresql://[email protected]/gulcehrc_db?table=' +
        TABLE_NAME)

    ri = numpy.random.random_integers

    # Default values
    state = DD()
    state.dataset = \
    "/home/gulcehre/dataset/pentomino/experiment_data/pento64x64_40k_seed_23112222.npy"

    state.no_of_folds = 5
    state.exid = 0

    state.n_hiddens = [100, 200, 300]
    state.n_hidden_layers = 3

    state.learning_rate = 0.001
    state.l1_reg = 1e-5
    state.l2_reg = 1e-3
    state.n_epochs = 2
    state.batch_size = 120
    state.save_exp_data = True
    state.no_of_patches = 64
    state.cost_type = "crossentropy"
    state.n_in = 8 * 8
    state.n_out = 1

    state.best_valid_error = 0.0

    state.best_test_error = 0.0

    state.valid_obj_path_error = 0.0
    state.test_obj_path_error = 0.0

    l1_reg_values = [0., 1e-6, 1e-5, 1e-4]
    l2_reg_values = [0., 1e-5, 1e-4]

    learning_rates = numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 36)
    num_hiddens = numpy.logspace(numpy.log10(256), numpy.log10(2048), 24)

    for i in xrange(NO_OF_TRIALS):
        state.exid = i
        state.n_hidden_layers = ri(4)
        n_hiddens = []

        for _ in xrange(state.n_hidden_layers):
            n_hiddens.append(int(num_hiddens[ri(num_hiddens.shape[0]) - 1]))

        state.n_hiddens = n_hiddens

        state.learning_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.l1_reg = l1_reg_values[ri(len(l1_reg_values)) - 1]
        state.l2_reg = l2_reg_values[ri(len(l2_reg_values)) - 1]
        sql.insert_job(experiment, flatten(state), db)

    db.createView(TABLE_NAME + "_view")
Example 22
from jobman.tools import DD, flatten
from jobman import api0, sql
import numpy

from pylearn2.scripts.jobman import experiment

rng = numpy.random.RandomState(4312987)

if __name__ == '__main__':
    db = api0.open_db(
        'postgres://gershwin.iro.umontreal.ca/desjagui_db/aistats12_mnist_nat')

    state = DD()

    state.yaml_template = """
            !obj:pylearn2.scripts.train.Train {
                "max_epochs": 250,
                "save_freq": 10,
                "save_path": "dbm",
                # dataset below is now (temporarily) obsolete
                "dataset": &data !obj:pylearn2.datasets.mnist.MNIST {
                    "which_set": 'train',
                    "shuffle": True,
                    "one_hot": True,
                    "binarize": %(binarize)i,
                },
                "model": &model !obj:DBM.dbm.DBM {
                    "seed" : 123141,
                    "n_u": [784, %(nu1)i, %(nu2)i],
                    "lr": %(lr)f,
                    "flags": {
Example 23
print "Hello World"

from jobman import api0, sql
from jobman.tools import DD, flatten

print "jobman loaded"

# Experiment function
from test_exp import experiment

print "open database"

# Database
TABLE_NAME = 'mlp_dumi'
db = api0.open_db('postgres://ift6266h13@gershwin/ift6266h13_sandbox_db?table='+TABLE_NAME)

print "database loaded"

# Default values
state = DD()
state.learning_rate = 0.01
state.L1_reg = 0.00
state.L2_reg = 0.0001
state.n_iter = 50
state.batch_size = 20
state.n_hidden = 10

# Hyperparameter exploration
for n_hidden in 20, 30:

    print "h_hidden =",h_hidden
Example 24
from jobman import api0

TABLE_NAME = 'test_add_'
db = api0.open_db('postgres://<user>:<pass>@<server>/<database>?table=' +
                  TABLE_NAME)
db.createView(TABLE_NAME + 'view')
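For local experimentation without a Postgres server, the same call accepts an SQLite URL, as Example 26 below does. A minimal sketch (the file test.db is created in the working directory):

from jobman import api0

db = api0.open_db('sqlite:///test.db?table=test_add_')
db.createView('test_add_view')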
Example 26
    """
    import numpy

    channels = train_obj.model.monitor.channels
    train_cost = channels['sgd_cost(ExhaustiveSGD[X])']
    best_epoch = numpy.argmin(train_cost.val_record)
    best_rec_error = train_cost.val_record[best_epoch]
    batch_num = train_cost.batch_record[best_epoch]
    return dict(
            best_epoch=best_epoch,
            train_rec_error=best_rec_error,
            batch_num=batch_num)


if __name__ == '__main__':
    db = api0.open_db('sqlite:///test.db?table=test_jobman_pylearn2')

    state = DD()

    state.yaml_template = '''
        !obj:pylearn2.train.Train {
        "dataset": !obj:pylearn2.datasets.npy_npz.NpyDataset &dataset {
            "file" : "%(file)s"
        },
        "model": !obj:pylearn2.autoencoder.ContractiveAutoencoder {
            "nvis" : %(nvis)d,
            "nhid" : %(nhid)d,
            "irange" : 0.05,
            "act_enc": "sigmoid", #for some reason only sigmoid function works
            "act_dec": "sigmoid",
        },
Example 27
from jobman.tools import DD, flatten
from jobman import api0, sql

from jobman.examples.def_addition import addition_example

TABLE_NAME = 'test_add_'

# DB path...
db = api0.open_db(
    'postgres://<user>:<pass>@<server>/<database>?table=' + TABLE_NAME)

state = DD()
for first in 0, 2, 4, 6, 8, 10:
    state.first = first
    for second in 1, 3, 5, 7, 9:
        state.second = second

        sql.insert_job(addition_example, flatten(state), db)
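Once inserted, jobs are typically launched with jobman's command-line dispatcher, e.g. jobman sql 'postgres://<user>:<pass>@<server>/<database>?table=test_add_' . (per the jobman addition tutorial; the exact invocation can vary between jobman versions).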