def _objective(self):
    # Load 50k training examples split into labeled/unlabeled parts,
    # plus the validation and test sets.
    n_train_examples = 50000
    train_x_l, train_l_y, train_u_x, train_u_y, self.valid_x, self.valid_y, \
        self.test_x, self.test_y = extract_data(n_train_examples)
    self.train_x = np.concatenate((train_x_l, train_u_x), axis=0)
    self.train_y = np.concatenate((train_l_y, train_u_y), axis=0)
    num_batches = int(n_train_examples / self.batch_size)
    logging.debug("num batches:{}, batch_size:{}, epochs:{}".format(
        num_batches, self.batch_size, int(self.num_iterations / num_batches)))
    # Build the classifier graph and minimize its cost with Adam.
    self.y_logits, self.y_pred_cls, self.cost = self.build_model()
    tf.summary.scalar('cost', self.cost)
    self.optimizer = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate,
        beta1=self.beta1,
        beta2=self.beta2).minimize(self.cost)
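# The helper extract_data is called above but not defined in this section.
# A minimal sketch of one plausible implementation, assuming TF1.x-era MNIST
# loading and a labeled/unlabeled split of the training set; the function name
# comes from the source, everything inside the body is an assumption.
import numpy as np

def extract_data(n_labeled, seed=31415):
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    # Shuffle the training set, then take the first n_labeled examples as the
    # labeled split and the remainder as the unlabeled split.
    idx = np.random.RandomState(seed).permutation(mnist.train.num_examples)
    lab, unlab = idx[:n_labeled], idx[n_labeled:]
    return (mnist.train.images[lab], mnist.train.labels[lab],
            mnist.train.images[unlab], mnist.train.labels[unlab],
            mnist.validation.images, mnist.validation.labels,
            mnist.test.images, mnist.test.labels)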
def _objective(self):
    # Same data pipeline as the classifier objective; the VAE trains on the
    # concatenation of the labeled and unlabeled examples.
    n_train_examples = 50000
    train_x_l, train_l_y, train_u_x, train_u_y, self.valid_x, self.valid_y, \
        self.test_x, self.test_y = extract_data(n_train_examples)
    num_batches = int(n_train_examples / self.batch_size)
    logging.debug("num batches:{}, batch_size:{}, epochs:{}".format(
        num_batches, self.batch_size, int(self.num_iterations / num_batches)))
    self.train_x = np.concatenate((train_x_l, train_u_x), axis=0)
    self.train_y = np.concatenate((train_l_y, train_u_y), axis=0)
    # Build the VAE graph; elbo is the minibatch evidence lower bound.
    elbo, self.x_recon_mu, self.z_sample, self.z_mu, self.z_logvar, \
        self.loglik = self.build_model()
    # Scale the minibatch ELBO to the full dataset, add the log-prior over the
    # weights once, and normalize to a per-example cost for minimization.
    self.cost = (elbo * num_batches + prior_weights()) / (-self.batch_size * num_batches)
    self.optimizer = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate,
        beta1=self.beta1,
        beta2=self.beta2).minimize(self.cost)
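# prior_weights is called above but not defined here. The cost expression reads
# as a MAP objective: num_batches * elbo estimates the dataset-level ELBO,
# prior_weights() adds log p(w) once, and dividing by -(batch_size * num_batches)
# = -n_train yields a per-example loss. A sketch under that assumption, using a
# zero-mean Gaussian prior over all trainable variables with additive constants
# dropped; the actual prior in the source may differ.
import tensorflow as tf

def prior_weights(stddev=1.0):
    # Sum of log N(w | 0, stddev^2) over every trainable weight, up to constants.
    return sum(-0.5 * tf.reduce_sum(tf.square(w)) / stddev ** 2
               for w in tf.trainable_variables())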
FLAGS = {
    'n_labeled': 50000,
    'alpha': 1,  # 0.1 - 2 TODO change alpha back to 0.1
    'latent_dim': 22,  # should be 50 TODO change back to 50
    'require_improvement': 5000,
    'n_train': 50000,
    'learning_rate': 3e-4,
    'beta1': 0.9,
    'beta2': 0.999,
    'input_dim': 28 * 28,
    'num_classes': 10,
    'min_std': 0.1,  # dimensions with std < min_std are removed before training with GC
    'l2_weight': 1e-6
}
train_x_lab, train_l_y, train_x_unlab, train_u_y, valid_x, valid_y, test_x, test_y = \
    extract_data(FLAGS['n_labeled'])
# Encode every split into latent means and log-variances with the trained VAE encoder.
train_x_l_mu, train_x_l_logvar, train_x_u_mu, train_x_u_logvar, valid_x_mu, \
    valid_x_logvar, test_x_mu, test_x_logvar = encode_dataset(
        FLAGS=FLAGS, train_lab=train_x_lab, train_unlab=train_x_unlab,
        valid=valid_x, test=test_x, min_std=FLAGS['min_std'])
train_lab = [train_x_l_mu, train_x_l_logvar, train_l_y]
train_unlab = [train_x_u_mu, train_x_u_logvar, train_u_y]
valid = [valid_x_mu, valid_x_logvar, valid_y]
test = [test_x_mu, test_x_logvar, test_y]
print("train lab: mu {}, var:{}, y:{}".format(
    train_x_l_mu.shape, train_x_l_logvar.shape, train_l_y.shape))
print("train unlab: mu {}, var:{}, y:{}".format(
    train_x_u_mu.shape, train_x_u_logvar.shape, train_u_y.shape))
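# encode_dataset is called above but not shown. A hypothetical sketch of what
# it might do: push each split through a trained VAE encoder to get (mu, logvar)
# pairs, then drop latent dimensions whose std is below min_std, matching the
# comment on the 'min_std' flag. vae_encode and the filtering details are
# assumptions, not the repository's actual code.
import numpy as np

def encode_dataset(FLAGS, train_lab, train_unlab, valid, test, min_std):
    # vae_encode: hypothetical trained-encoder call returning (mu, logvar) arrays.
    splits = [vae_encode(x) for x in (train_lab, train_unlab, valid, test)]
    # Keep latent dimensions whose marginal std over the labeled training means
    # exceeds min_std; near-constant dimensions carry no signal for the GC.
    keep = np.std(splits[0][0], axis=0) > min_std
    out = []
    for mu, logvar in splits:
        out.extend([mu[:, keep], logvar[:, keep]])
    return tuple(out)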
if __name__ == '__main__':
    FLAGS = {
        'num_iterations': 40000,  # should be 3000 epochs
        'batch_size': 200,
        'seed': 31415,
        'alpha': 0.1,
        'require_improvement': 5000,
        'n_train': 50000,
        'learning_rate': 3e-4,
        'beta1': 0.9,
        'beta2': 0.999,
        'num_classes': 10,
        'n_components': 22
    }
    train_x_l, train_l_y, train_u_x, train_u_y, valid_x, valid_y, test_x, test_y = \
        extract_data(FLAGS['n_train'])
    train_x = np.concatenate((train_x_l, train_u_x), axis=0)
    train_y = np.concatenate((train_l_y, train_u_y), axis=0)
    # Fit PCA on the full training set, then project each split into the
    # n_components-dimensional space.
    pca = pca_components(train_x, FLAGS['n_components'])
    train_x_tran = pca.transform(train_x)
    train = [train_x_tran, train_y]
    valid_x_tran = pca.transform(valid_x)
    valid = [valid_x_tran, valid_y]
    test_x_tran = pca.transform(test_x)
    test = [test_x_tran, test_y]
    print("train_x_tran:{}, valid_x_tran:{}, test_x_tran:{}".format(
        train_x_tran.shape, valid_x_tran.shape, test_x_tran.shape))
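# pca_components is used above but defined elsewhere in the repo. A minimal
# sketch assuming scikit-learn; whatever the real helper does, it must return
# a fitted object exposing .transform(), as used above.
from sklearn.decomposition import PCA

def pca_components(x, n_components):
    pca = PCA(n_components=n_components)
    pca.fit(x)
    return pca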