        st, ed = ed, ed + FLAGS.batch_size  # advance the batch window
        times += 1
    # Average loss/accuracy over all batches in the epoch.
    loss /= times
    acc /= times
    return acc, loss


def inference(model, sess, X):  # Test Process
    return sess.run([model.pred_val], {model.x_: X})[0]


with tf.Session() as sess:
    if not os.path.exists(FLAGS.train_dir):
        os.mkdir(FLAGS.train_dir)
    if FLAGS.is_train:
        X_train, X_test, y_train, y_test = load_cifar_2d(FLAGS.data_dir)
        # Hold out the last 10,000 training examples as a validation set.
        X_val, y_val = X_train[40000:], y_train[40000:]
        X_train, y_train = X_train[:40000], y_train[:40000]

        mlp_model = Model()
        writer = tf.summary.FileWriter(FLAGS.train_dir)
        writer.add_graph(tf.get_default_graph())
        writer.flush()
        # Resume from the latest checkpoint if one exists;
        # otherwise initialize all variables from scratch.
        if tf.train.get_checkpoint_state(FLAGS.train_dir):
            mlp_model.saver.restore(sess, tf.train.latest_checkpoint(FLAGS.train_dir))
        else:
            tf.global_variables_initializer().run()

        pre_losses = [1e18] * 3
        best_val_acc = 0.0
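# The fragment above relies on a `Model` class that exposes `x_` (the input
# placeholder), `pred_val` (the predicted class ids fetched by `inference`),
# and a `tf.train.Saver` at `model.saver`; its definition sits outside this
# excerpt. Below is a minimal TF 1.x sketch consistent with those names.
# The hidden size, loss, and optimizer here are assumptions for
# illustration, not the original implementation.
import tensorflow as tf

class Model:
    def __init__(self, num_classes=10, hidden_size=256, learning_rate=1e-3):
        # Flattened 32x32x3 CIFAR images and integer labels.
        self.x_ = tf.placeholder(tf.float32, [None, 32 * 32 * 3], name='x')
        self.y_ = tf.placeholder(tf.int32, [None], name='y')
        hidden = tf.layers.dense(self.x_, hidden_size, activation=tf.nn.relu)
        logits = tf.layers.dense(hidden, num_classes)
        self.loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.y_, logits=logits))
        # `pred_val` is the tensor inference() fetches via sess.run.
        self.pred_val = tf.argmax(logits, axis=1, output_type=tf.int32)
        self.acc = tf.reduce_mean(
            tf.cast(tf.equal(self.pred_val, self.y_), tf.float32))
        self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
        # Restored/saved by the checkpoint logic in the main block.
        self.saver = tf.train.Saver(max_to_keep=3)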
    acc /= times
    return acc, loss


def inference(model, X):  # Test Process
    model.eval()  # disable dropout and use running batch-norm statistics
    pred_ = model(torch.from_numpy(X).to(device))
    return pred_.cpu().data.numpy()


if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if not os.path.exists(args.train_dir):
        os.mkdir(args.train_dir)
    if args.is_train:
        X_train, X_test, y_train, y_test = load_cifar_2d(args.data_dir)
        # Hold out the last 10,000 training examples as a validation set.
        X_val, y_val = X_train[40000:], y_train[40000:]
        X_train, y_train = X_train[:40000], y_train[:40000]

        mlp_model = Model(batch_norm=args.batch_norm, drop_rate=args.drop_rate)
        mlp_model.to(device)
        print(mlp_model)
        optimizer = optim.Adam(mlp_model.parameters(), lr=args.learning_rate)
        # model_path = os.path.join(args.train_dir, 'checkpoint_%d.pth.tar' % args.inference_version)
        # if os.path.exists(model_path):
        #     mlp_model = torch.load(model_path)

        pre_losses = [1e18] * 3
        best_val_acc = 0.0
        epochs = []
        train_data = {"loss": [], "acc": []}
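# Both the TensorFlow and PyTorch versions call `load_cifar_2d(data_dir)`,
# which is defined elsewhere in the repo. A hypothetical sketch matching its
# usage above (returning X_train, X_test, y_train, y_test with images
# flattened to [N, 3072]) could look like the following; the file layout
# assumed is the standard pickled CIFAR-10 distribution.
import os
import pickle
import numpy as np

def load_cifar_2d(data_dir):
    """Hypothetical loader: reads the pickled CIFAR-10 batches and returns
    flattened float images in [0, 1] plus integer labels."""
    def _load_batch(path):
        with open(path, 'rb') as f:
            batch = pickle.load(f, encoding='bytes')
        data = batch[b'data'].astype(np.float32) / 255.0
        labels = np.array(batch[b'labels'], dtype=np.int64)
        return data, labels

    xs, ys = [], []
    for i in range(1, 6):  # CIFAR-10 ships five training batches
        x, y = _load_batch(os.path.join(data_dir, 'data_batch_%d' % i))
        xs.append(x)
        ys.append(y)
    X_train, y_train = np.concatenate(xs), np.concatenate(ys)
    X_test, y_test = _load_batch(os.path.join(data_dir, 'test_batch'))
    return X_train, X_test, y_train, y_test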