# Script chunk: train a "control" model on true/teacher labels and begin a
# "copy" model on the BigML dataset selected by the CLI args.
# NOTE(review): `parser`, `datasets`, `layers`, `pack`, and `getAbsPath` are
# defined/imported earlier in this file (outside this chunk).
args = parser.parse_args()
ratio = args.ratio          # train/test split ratio passed to getBigML
epoch = args.epoch
nBatch = args.nBatch        # mini-batch size
lr = args.lr
# Print progress every `pPerEpoch` epochs only when detailed output is on;
# -1 presumably disables per-epoch printing — TODO confirm against train().
pPerEpoch = args.pPerEpoch if args.pDetail else -1

# NOTE(review): `traget_pred` looks like a typo for `target_pred`; left as-is
# because later (unseen) code may reference this exact name.
x_train, _, y_value, y_pred, x_test, y_test, traget_pred = datasets.getBigML(
    args.data, ratio)
layer = layers.getLayer(args.data)

# control model: trained on the hard predictions `y_pred`
if args.pDetail:
    print("\n======================== control model ========================")
control = pack.classifyModel(layer, optimArgs={"lr": lr})
control.train(x_train, y_pred, epoch, nBatch,
              printPerEpoch=pPerEpoch, yType="long", yTo2D=False,
              printData=args.pDetail, printAcc=args.pDetail)
control.save(getAbsPath("model/BigML_" + args.data + "_control.model"))

# copy model: uses the loss function selected on the command line; its
# training call continues past this chunk.
if args.pDetail:
    print("\n======================== copy model ========================")
copy = pack.classifyModel(layer, loss_func=args.loss, optimArgs={"lr": lr})
# Train a small classifier on the iris dataset, save it, and print the
# predicted vs. true class index for every training sample.
import datasets
import packagedModel as pack

x_train, y_train = datasets.getTrainedIris()

# 5 input features -> 2 hidden units -> 3 classes, softmax over dim 1.
layers = [("Linear", (5, 2)),
          ("ReLU", ()),
          ("Linear", (2, 3)),
          ("Softmax", 1)]

model = pack.classifyModel(layers,
                           loss_func="BCELoss",
                           optimArgs={"lr": 1e-3})
# Two mini-batches per epoch (batch = half the training set).
model.train(x_train, y_train, epoch=1000,
            batch=len(x_train) // 2, printPerEpoch=50)
model.save("iris.model")

# Print "predicted true" class indices, one sample per line.
# (zip replaces the old index loop; the unused `x_result` local is removed.)
for sample, label in zip(x_train, y_train):
    predict = model.predict(sample)
    print(predict.argmax(), end=" ")
    print(label.argmax())
# Train an MNIST classifier and report test accuracy.
# FIX: `os.path` was used below but `os` was never imported in this chunk,
# which would raise NameError at import time.
import os
import sys

# Make the parent directory importable so `packagedModel`/`datasets` resolve.
sys.path.insert(1, os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))

import packagedModel as pack
import datasets
import numpy as np

x_train, y_train, x_test, y_test = datasets.getMnist()

# 784-pixel input -> two 100-unit hidden layers -> 10 classes.
layers = [("Linear", (784, 100)),
          ("ReLU", ()),
          ("Linear", (100, 100)),
          ("ReLU", ()),
          ("Linear", (100, 10)),
          ("Softmax", 1)]
epoch = 10
batch = 100

if __name__ == '__main__':
    model = pack.classifyModel(layers,
                               loss_func="CrossEntropyLoss",
                               optimArgs={"lr": 5e-5})
    model.train(x_train, y_train, epoch, batch)
    # Save next to this script so other modules can load it by relative path.
    model.save(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "mnist.model"))
    print(model.getAccuracy(x_test, y_test))
# Script chunk: train an "origin" MNIST model on true labels and a "copy"
# model on a pretrained model's soft outputs (knowledge-distillation style).
# NOTE(review): `pre_model`, `nTrain`, `epoch`, `nBatch`, `pPerEpoch`, `np`,
# `os`, `datasets`, and `pack` are defined/imported before this chunk.
x_train, y_train, x_test, y_test = datasets.getMnist()

# Randomly subsample nTrain training examples (without replacement).
mask = np.random.choice(x_train.shape[0], nTrain, replace=False)
x_train = x_train[mask]
y_train = y_train[mask]
# Soft targets for the copy model: the pretrained model's predictions on
# the same subsample.
y_value = pre_model.predict(x_train)

# 784-pixel input -> two 100-unit hidden layers -> 10 classes.
layers = [("Linear", (784,100)), \
          ("ReLU", ()), \
          ("Linear", (100,100)), \
          ("ReLU", ()), \
          ("Linear", (100,10)), \
          ("Softmax", 1)]

# Origin model: trained on the true integer labels.
print("\n======================== origin model ========================")
origin = pack.classifyModel(layers, optimArgs={"lr": 5e-5})
origin.train(x_train, y_train, epoch, nBatch,
             printPerEpoch=pPerEpoch, yType="long", yTo2D=False)
origin.save(
    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                 "model/origin_mnist.model"))

# Copy model: trained against the pretrained model's outputs with BCELoss.
print("\n======================== copy model ========================")
copy = pack.classifyModel(layers, loss_func="BCELoss", optimArgs={"lr": 5e-5})
copy.train(x_train, y_value, epoch, nBatch, printPerEpoch=pPerEpoch)
def getMnistModel():
    """Return a classifyModel restored from the pretrained MNIST checkpoint."""
    # Imported lazily so pretrain_mnist is only loaded when a model is needed.
    import pretrain_mnist as mnist

    checkpoint = getAbsPath("pre_model/mnist.model")
    classifier = pack.classifyModel(mnist.layers)
    classifier.load(checkpoint)
    return classifier