def main(args):
    """Train an MNIST classifier (CNN or dense) and report test accuracy.

    args must provide: data_dir, model_type ("cnn" or "dense"), lr,
    batch_size and num_ep. Prints per-epoch wall time and the evaluator's
    accuracy result after every epoch.
    """
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)  # labels -> one-hot over 10 classes

    # The original tested model_type == "cnn" twice back-to-back (once for
    # the reshape, once for the architecture); merged into a single branch
    # so the input layout and the network can never drift apart.
    if args.model_type == "cnn":
        # Conv2D consumes NHWC images, not flat 784-vectors.
        train_x = train_x.reshape((-1, 28, 28, 1))
        test_x = test_x.reshape((-1, 28, 28, 1))
        net = Net([
            Conv2D(kernel=[5, 5, 1, 8], stride=[2, 2], padding="SAME"),
            ReLU(),
            Conv2D(kernel=[5, 5, 8, 16], stride=[2, 2], padding="SAME"),
            ReLU(),
            Conv2D(kernel=[5, 5, 16, 32], stride=[2, 2], padding="SAME"),
            ReLU(),
            Flatten(),
            Dense(10)
        ])
    elif args.model_type == "dense":
        net = Net([
            Dense(200),
            ReLU(),
            Dense(100),
            ReLU(),
            Dense(70),
            ReLU(),
            Dense(30),
            ReLU(),
            Dense(10)
        ])
    else:
        raise ValueError(
            "Invalid argument model_type! Must be 'cnn' or 'dense'")

    model = Model(net=net, loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))
    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate on the test split in inference mode
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(test_y)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
def main(args):
    """Train a small dense binary classifier with sigmoid cross-entropy.

    The network ends in a single-unit Dense layer producing one raw logit
    per sample; predictions threshold that logit at 0 (i.e. sigmoid
    probability 0.5). Prints per-epoch wall time, layer timer reports, and
    the evaluator's accuracy result.

    args must provide: data_dir, lr, batch_size and num_ep.
    """
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    # NOTE: removed dead commented-out one-hot conversion; the sigmoid loss
    # consumes the raw 0/1 labels directly.

    net = Net([Dense(100), ReLU(), Dense(30), ReLU(), Dense(1)])
    model = Model(net=net, loss=SigmoidCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))
    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        for timer in model.timers.values():
            timer.report()
        # evaluate
        model.set_phase("TEST")
        test_y_idx = np.asarray(test_y).reshape(-1)
        test_pred = model.forward(test_x)
        # threshold the raw logit at 0 -> hard class labels 0/1
        test_pred[test_pred > 0] = 1
        test_pred[test_pred <= 0] = 0
        test_pred_idx = test_pred.reshape(-1)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
def main(args):
    """Train a dense MNIST classifier using the autograd (Tensor) API.

    Seeds the RNG when args.seed >= 0, wraps the data in Tensors, runs the
    zero_grad/forward/loss/backward/step loop, and prints per-epoch wall
    time and test accuracy.

    args must provide: seed, data_dir, lr, batch_size and num_ep.
    """
    if args.seed >= 0:
        random_seed(args.seed)
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)  # labels -> one-hot over 10 classes
    # wrap raw arrays in Tensors so the autograd engine can track them
    train_x = Tensor(train_x)
    train_y = Tensor(train_y)
    test_x = Tensor(test_x)
    test_y = Tensor(test_y)

    net = Net([
        Dense(200),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(70),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(10)
    ])
    model = Model(net=net, loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))
    loss_layer = SoftmaxCrossEntropyLoss()
    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            model.zero_grad()
            pred = model.forward(batch.inputs)
            loss = loss_layer.loss(pred, batch.targets)
            loss.backward()
            model.step()
            loss_list.append(loss.values)
        # fixed typo in progress message: "tim cost" -> "time cost"
        print("Epoch %d time cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = test_y.values  # raw labels; test_y was never one-hotted
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
def main(args):
    """Train a fully-connected MNIST classifier with explicit layer dims.

    Runs the forward/backward/apply_grad loop for args.num_ep epochs and
    prints one line per epoch with the wall time and the evaluator result
    on the test split.

    args must provide: data_dir, lr, batch_size and num_ep.
    """
    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    x_train, y_train = train_set
    x_test, y_test = test_set
    y_train = get_one_hot(y_train, 10)  # labels -> one-hot over 10 classes

    # (fan_in, fan_out) per Dense layer; ReLU between consecutive layers.
    dims = [(784, 200), (200, 100), (100, 70), (70, 30), (30, 10)]
    layers = []
    for i, (fan_in, fan_out) in enumerate(dims):
        layers.append(Dense(fan_in, fan_out))
        if i < len(dims) - 1:
            layers.append(ReLU())
    net = Net(layers)

    model = Model(net=net, loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))
    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = []
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(x_train, y_train):
            pred = model.forward(batch.inputs)
            loss, grads = model.backward(pred, batch.targets)
            model.apply_grad(grads)
            loss_list.append(loss)
        t_end = time.time()
        # evaluate on the held-out test split after each epoch
        test_pred = model.forward(x_test)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = np.asarray(y_test)
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print("Epoch %d time cost: %.4f\t %s" % (epoch, t_end - t_start, res))