# Grid search #1: submit SDAE jobs for 5000-input models, sweeping
# learning rate, (noise level, ...) pairs, and the activation
# regularization coefficient. Relies on module-level `state`, `sql`,
# `db`, `NLPSDAE`, and `flatten` defined elsewhere in this file.
state.model_to_build_upon = None
state.ninputs = 5000
# inputtype ('binary', 'tfidf', other options?) determines what the
# decoding activation function is for the first layer
# e.g. inputtype 'tfidf' ('tf*idf'?) uses activation function softplus
# to decode the tf*idf.
state.inputtype = 'binary'
state.seed = 123

# Here are the for loops that define the grid:
for i in [0.01, 0.001, 0.0001]:
    state.lr = [i]
    for j in [(0.7, 0.0041), (0.5, 0.003), (0.8, 0.005)]:
        state.noise_lvl = [j]
        for k in [0.001, 0.00001, 0.0]:
            state.activation_regularization_coeff = [k]
            # This submits the current state DD to the db; if it already
            # exists in the db, no additional job is added.
            sql.insert_job(NLPSDAE, flatten(state), db)

db.createView('opentablegpuview')

# In order to access the db from a compute node you need to create an
# ssh tunnel connection on ang23:
# (do this once; keep the shell open, or create the tunnel in a screen
# session and detach it)
# ssh -v -f -o ServerAliveInterval=60 -o ServerAliveCountMax=60 -N -L *:5432:localhost:5432 gershwin.iro.umontreal.ca
# Grid search #2: submit SDAE jobs with a fixed (zero) activation
# regularization coefficient, sweeping learning rate, noise level, and
# hidden-layer size. Relies on module-level `state`, `sql`, `db`,
# `NLPSDAE`, and `flatten` defined elsewhere in this file.
#state.ninputs = 1000
# inputtype ('binary', 'tfidf', other options?) determines what the
# decoding activation function is for the first layer
# e.g. inputtype 'tfidf' ('tf*idf'?) uses activation function softplus
# to decode the tf*idf.
state.inputtype = 'binary'
state.seed = 123
state.activation_regularization_coeff = [0]

# Here are the for loops that define the grid:
for i in [0.01, 0.001]:
    state.lr = [i]
    for j in [0.5, 0.25, 0.125, 0.05]:
        state.noise_lvl = [j]
        for k in [1400, 2500, 5000]:
            state.n_hid = [k]
            # This submits the current state DD to the db; if it already
            # exists in the db, no additional job is added.
            sql.insert_job(NLPSDAE, flatten(state), db)

db.createView('opentablegpuview')

# First run this script:
#   PYTHONPATH=$PYTHONPATH:.. python DARPAjobs.py
# Test that the jobs are in the database:
#   psql -d ift6266h10_sandbox_db -h gershwin.iro.umontreal.ca -U ift6266h10
#   select id,lr,noiselvl,nhid as reg,jobman_status from opentablegpuview;