# Tail of a commented-out network-architecture definition (the start of this
# dict lies before this chunk); kept for reference.
# n_z=n_z) # dimensionality of latent space
#
# jnt_network_architecture = \
#     dict(scope='joint',
#          hidden_conv=False,
#          n_hidden_recog_1=200,  # 1st layer encoder neurons
#          n_hidden_recog_2=200,  # 2nd layer encoder neurons
#          n_hidden_gener_1=200,  # 1st layer decoder neurons
#          n_hidden_gener_2=200,  # 2nd layer decoder neurons
#          n_input=147,           # 21 bases for each function approximator
#          n_z=n_z)               # dimensionality of latent space

# Clear the default graph so TensorFlow resources held by any previous run
# are released before a fresh training session starts.
tf.reset_default_graph()

# Train the associative VAE on the two modalities (image + joint trajectory).
# NOTE(review): data_sets, img_network_architecture, jnt_network_architecture,
# weights, assoc_lambda, batch_size and n_z are defined elsewhere in this
# file — confirm they are in scope at this point.
vae_assoc_model, cost_hist = vae_assoc.train(
    data_sets,
    [img_network_architecture, jnt_network_architecture],
    binary=[True, False],       # image modality is binary, joint modality is not
    weights=weights,
    assoc_lambda=assoc_lambda,
    learning_rate=0.001,
    batch_size=batch_size,
    training_epochs=1000,
    display_step=5)

# Persist the trained model; the checkpoint name encodes the key
# hyper-parameters so multiple runs do not overwrite one another.
vae_assoc_model.save_model('output/model_batchsize{}_nz{}_lambda{}_weight{}.ckpt'.format(batch_size, n_z, assoc_lambda, weights[0]))

# Alternative path: restore a previously trained model instead of training
# from scratch; kept commented out for reference.
# time.sleep(10)
# tf.reset_default_graph()
#
# vae_assoc_model = vae_assoc.AssocVariationalAutoEncoder(
#     [img_network_architecture, jnt_network_architecture],
#     binary=[True, False],
#     transfer_fct=tf.nn.relu,
#     assoc_lambda=5,
#     learning_rate=0.0001,
#     batch_size=batch_size)
# vae_assoc_model.restore_model()