Example 1
import numpy

from jobman import api0, sql
from jobman.tools import DD, flatten

# The experiment entry point lives in a separate module; the module
# name used here is hypothetical.
from prmlp_experiment import experiment

NO_OF_TRIALS = 100  # assumed value; the original constant is defined elsewhere


def create_jobman_jobs():

    # Database operations
    TABLE_NAME = "arcade_post_mlp_cv_binary_8x8_40k"

    db = api0.open_db('postgresql://[email protected]/gulcehrc_db?table='
                      + TABLE_NAME)

    # random_integers(n) draws uniformly from {1, ..., n}
    ri = numpy.random.random_integers

    # Default values
    state = DD()
    state.dataset = \
    "/home/gulcehre/dataset/pentomino/experiment_data/pento64x64_40k_seed_23112222.npy"

    state.no_of_folds = 5
    state.exid = 0

    state.n_hiddens = [100, 200, 300]
    state.n_hidden_layers = 3

    state.learning_rate = 0.001
    state.l1_reg = 1e-5
    state.l2_reg = 1e-3
    state.n_epochs = 2
    state.batch_size = 120
    state.save_exp_data = True
    state.no_of_patches = 64
    state.cost_type = "crossentropy"
    state.n_in = 8*8
    state.n_out = 1

    state.best_valid_error = 0.0

    state.best_test_error = 0.0

    state.valid_obj_path_error = 0.0
    state.test_obj_path_error = 0.0

    l1_reg_values = [0., 1e-6, 1e-5, 1e-4]
    l2_reg_values = [0., 1e-5, 1e-4]

    learning_rates = numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 36)
    num_hiddens = numpy.logspace(numpy.log10(256), numpy.log10(2048), 24)

    for i in xrange(NO_OF_TRIALS):
        state.exid = i
        state.n_hidden_layers = ri(4)
        n_hiddens = []

        for _ in xrange(state.n_hidden_layers):
            n_hiddens.append(int(num_hiddens[ri(num_hiddens.shape[0]) - 1]))

        state.n_hiddens = n_hiddens

        state.learning_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.l1_reg = l1_reg_values[ri(len(l1_reg_values)) - 1]
        state.l2_reg = l2_reg_values[ri(len(l2_reg_values)) - 1]
        sql.insert_job(experiment, flatten(state), db)

    db.createView(TABLE_NAME + "_view")
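
Note: the experiment callable passed to sql.insert_job above is jobman's entry
point. A minimal sketch of the contract it must satisfy, with a hypothetical
train_mlp helper standing in for the real training code:

def experiment(state, channel):
    # jobman invokes the entry point with the hyperparameters that
    # were inserted above (state) and a channel for reporting status.
    # train_mlp is a hypothetical stand-in for the actual training loop.
    valid_error, test_error = train_mlp(state)

    # Values written back into state are stored in the database row,
    # so they show up in the view created by db.createView.
    state.best_valid_error = valid_error
    state.best_test_error = test_error
    return channel.COMPLETE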
Example 2
import numpy

import jobman
import jobman.sql
from jobman.tools import DD


def jobman_insert_random(n_jobs):
    JOBDB = 'postgres://*****:*****@opter.iro.umontreal.ca/devries_db/lvq_mnist'
    EXPERIMENT_PATH = "lvq_mnist.jobman_entrypoint"

    jobs = []
    for _ in range(n_jobs):
        job = DD()

        job.n_hiddens = numpy.random.randint(1000, high=3000)
        job.n_out = numpy.random.randint(100, high=500)
        job.noise_std = numpy.random.uniform(low=0.0, high=0.8)
        job.learning_rate = 10.**numpy.random.uniform(-2, 0)
        job.momentum = 10.**numpy.random.uniform(-2, 0)
        job.gamma = numpy.random.uniform(low=1.0, high=3.0)

        job.tag = "lvq_mnist"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))
    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 'postgres://*****:*****@opter.iro.umontreal.ca/devries_db/lvq_mnist' ." % len(
            jobs)
Example 3
import numpy

import jobman
import jobman.sql
from jobman.tools import DD


def jobman_insert_random(n_jobs):
    JOBDB = 'postgres://*****:*****@opter.iro.umontreal.ca/dauphiya_db/saddle_mnist_ae'
    EXPERIMENT_PATH = "ilya_experiment.jobman_entrypoint"

    jobs = []
    for _ in range(n_jobs):
        job = DD()

        job.learning_rate = 10.**numpy.random.uniform(-2, 0)
        job.momentum = 10.**numpy.random.uniform(-2, 0)
        job.batch_size = 200
        job.tag = "ilya_fixed"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))
    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 'postgres://*****:*****@opter.iro.umontreal.ca/dauphiya_db/saddle_mnist_ae' ." % len(jobs)
Example 4
import numpy

from jobman import api0, sql
from jobman.tools import DD, flatten

# As in Example 1, the experiment entry point is imported from a
# separate module; the module name used here is hypothetical.
from prmlp_experiment import experiment

NO_OF_TRIALS = 100  # assumed value; the original constant is defined elsewhere


def create_jobman_jobs():

    # Database operations
    TABLE_NAME = "arcade_multi_prmlp_cv_binary_8x8_40k"

    db = api0.open_db(
        'postgresql://[email protected]/gulcehrc_db?table=' +
        TABLE_NAME)

    ri = numpy.random.random_integers

    # Default values
    state = DD()
    state.dataset = \
    "/home/gulcehre/dataset/pentomino/experiment_data/pento64x64_40k_seed_23112222.npy"

    state.no_of_folds = 5
    state.exid = 0

    state.n_hiddens = [100, 200, 300]
    state.n_hidden_layers = 3

    state.learning_rate = 0.001
    state.l1_reg = 1e-5
    state.l2_reg = 1e-3
    state.n_epochs = 2
    state.batch_size = 120
    state.save_exp_data = True
    state.no_of_patches = 64
    state.cost_type = "crossentropy"
    state.n_in = 8 * 8
    state.n_out = 1

    state.best_valid_error = 0.0

    state.best_test_error = 0.0

    state.valid_obj_path_error = 0.0
    state.test_obj_path_error = 0.0

    l1_reg_values = [0., 1e-6, 1e-5, 1e-4]
    l2_reg_values = [0., 1e-5, 1e-4]

    learning_rates = numpy.logspace(numpy.log10(0.0001), numpy.log10(1), 36)
    num_hiddens = numpy.logspace(numpy.log10(256), numpy.log10(2048), 24)

    for i in xrange(NO_OF_TRIALS):
        state.exid = i
        state.n_hidden_layers = ri(4)
        n_hiddens = []

        for _ in xrange(state.n_hidden_layers):
            n_hiddens.append(int(num_hiddens[ri(num_hiddens.shape[0]) - 1]))

        state.n_hiddens = n_hiddens

        state.learning_rate = learning_rates[ri(learning_rates.shape[0]) - 1]
        state.l1_reg = l1_reg_values[ri(len(l1_reg_values)) - 1]
        state.l2_reg = l2_reg_values[ri(len(l2_reg_values)) - 1]
        sql.insert_job(experiment, flatten(state), db)

    db.createView(TABLE_NAME + "_view")
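
db.createView builds a SQL view over jobman's key-value storage, exposing the
flattened state keys of each job as columns, so inserted hyperparameters and
written-back results can be inspected with plain SQL. Assuming the view name
created above, the simplest query is:

SELECT * FROM arcade_multi_prmlp_cv_binary_8x8_40k_view;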
Example 5
print "jobman loaded"

# Experiment function
from test_exp import experiment

print "open database"

# Database
TABLE_NAME = 'mlp_dumi'
db = api0.open_db('postgres://ift6266h13@gershwin/ift6266h13_sandbox_db?table=' + TABLE_NAME)

print "database loaded"

# Default values
state = DD()
state.learning_rate = 0.01
state.L1_reg = 0.00
state.L2_reg = 0.0001
state.n_iter = 50
state.batch_size = 20
state.n_hidden = 10

# Hyperparameter exploration
for n_hidden in 20, 30:

    print "h_hidden =",h_hidden
    state.n_hidden = n_hidden

    # Explore L1 regularization w/o L2
    state.L2_reg = 0.
    for L1_reg in 0., 1e-6, 1e-5, 1e-4:
        state.L1_reg = L1_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)
Example 6
from jobman import api0, sql
from jobman.tools import DD, flatten

# Experiment function
from mlp_jobman import experiment

# Database
TABLE_NAME = 'mlp_dumi'
db = api0.open_db(
    'postgres://ift6266h10@gershwin/ift6266h10_sandbox_db?table=' + TABLE_NAME)

# Default values
state = DD()
state.learning_rate = 0.01
state.L1_reg = 0.00
state.L2_reg = 0.0001
state.n_iter = 50
state.batch_size = 20
state.n_hidden = 10

# Hyperparameter exploration
for n_hidden in 20, 30:
    state.n_hidden = n_hidden

    # Explore L1 regularization w/o L2
    state.L2_reg = 0.
    for L1_reg in 0., 1e-6, 1e-5, 1e-4:
        state.L1_reg = L1_reg

        # Insert job
        sql.insert_job(experiment, flatten(state), db)
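
All of these examples lean on the same two helpers from jobman.tools: DD, a
dict whose keys can also be read and written as attributes, and flatten, which
turns a possibly nested DD into a flat dict keyed by dotted paths, suitable
for one database row. A small illustration of the assumed behavior:

from jobman.tools import DD, flatten

state = DD()
state.learning_rate = 0.01
state.model = DD()
state.model.n_hidden = 20

# flatten yields a flat dict; nested DDs become dotted keys, which is
# the column layout that sql.insert_job writes to the table.
print flatten(state)
# {'learning_rate': 0.01, 'model.n_hidden': 20}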