Example 1
#!/usr/bin/python

from jobman.tools import DD, flatten
from jobman import sql
#from DARPAscript import NLPSDAE
from DARPAscript_simplified import NLPSDAE

#db = sql.db('postgres://[email protected]/glorotxa_db/opentablegpu') # you should change this line to match the database you need
#db = sql.db('postgres://[email protected]/ift6266h10_sandbox_db/opentablegpu')
db = sql.db('postgres://*****:*****@gershwin.iro.umontreal.ca/ift6266h10_sandbox_db/opentablegpu')
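# The table path follows jobman's 'postgres://user[:password]@host/dbname/tablename'
# convention (the credentials here are masked); point it at a database and table
# you control before inserting jobs.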


state = DD()

state.act = ['tanh']
state.depth = 1

state.noise = ['gaussian']

state.weight_regularization_type = 'l2'
state.weight_regularization_coeff = [0.0, 0.0]
state.activation_regularization_type = 'l1'
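# Note (assumption about how DARPAscript_simplified interprets these settings):
# 'l2' on the weights is the usual weight-decay penalty, while 'l1' on the
# activations encourages sparse hidden representations.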

# Number of pretraining epochs, one-per-layer
state.nepochs = [128]

# Different validation runs
#        - 100 training examples (x20 different samples of 100 training examples)
#        - 1000 training examples (x10 different samples of 1000 training examples)
#        - 10000 training examples (x1 different sample of 10000 training examples)
# (because of jobman, the keys have to be strings, not ints)
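# The snippet is truncated here; judging from the comment block above, the
# mapping would look roughly like the hypothetical sketch below (keys are
# training-set sizes as strings, values are the number of validation runs):
#   state.validation_runs_for_each_trainingsize = {'100': 20, '1000': 10, '10000': 1}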
Example 2
from jobman.tools import DD, flatten
from jobman import sql
from DARPAscript import NLPSDAE

db = sql.db(
    'postgres://[email protected]/glorotxa_db/opentablegpu'
)  # you should change this line to match the database you need

state = DD()

state.act = ['tanh']
state.depth = 1
state.n_hid = [5000]

state.noise = ['binomial_NLP']

state.weight_regularization_type = 'l2'
state.weight_regularization_coeff = [0.0, 0.0]
state.activation_regularization_type = 'l1'

# Number of pretraining epochs, one-per-layer
state.nepochs = [30]

# Different validation runs
#        - 100 training examples (x20 different samples of 100 training examples)
#        - 1000 training examples (x10 different samples of 1000 training examples)
#        - 10000 training examples (x1 different sample of 10000 training examples)
# (because of jobman, the keys have to be strings, not ints)
# NOTE: Probably you don't want to make trainsize larger than 10K,
# because it will be too large for CPU memory.
# Values inferred from the comment block above: number of validation runs
# for each training-set size (keys must be strings because of jobman).
state.validation_runs_for_each_trainingsize = {'100': 20, '1000': 10, '10000': 1}
Example 3
from jobman.tools import DD, flatten
from jobman import sql
from jobman.parse import filemerge
from Experimentsbatchpretrain import *
import numpy

db = sql.db('postgres://[email protected]/glorotxa_db/pretrainexpe')

state = DD()
state.curridata = DD(filemerge('Curridata.conf'))
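# filemerge() reads the 'key = value' entries of Curridata.conf into a dict,
# and DD wraps it so the values can be read with attribute access
# (e.g. state.curridata.<some_key>).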

state.depth = 3
state.tie = True
state.n_hid = 1000 # number of units per layer
state.act = 'tanh'

state.sup_lr = 0.01
state.unsup_lr = 0.001
state.noise = 0.25

state.seed = 1

state.nbepochs_unsup = 30 # maximal number of unsupervised epochs per layer
state.nbepochs_sup = 1000 # maximal number of supervised epochs
state.batchsize = 10

for i in ['MNIST', 'CIFAR10', 'ImageNet', 'shapesetbatch']:
    state.dat = i
    sql.insert_job(pretrain, flatten(state), db)
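# Once inserted, the jobs are typically consumed by jobman workers, e.g.
# (assuming the standard jobman command-line dispatcher; <user>, <host> and
# <experiment_root> are placeholders):
#   jobman sql 'postgres://<user>@<host>/glorotxa_db/pretrainexpe' <experiment_root>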