def evaluate(net_file, model_file):
    """Build the LeNet network, load trained weights, and evaluate on MNIST.

    Args:
        net_file: path to the python file defining ``LeNet``; its directory
            is prepended to ``sys.path`` so the definition can be imported.
        model_file: path to the trained weights — either a ``.npy`` dump or
            a fluid model directory (dispatch is on the filename suffix).
    """
    # 1. build model: make the network definition importable, then wire up
    #    the input layers, the network, and the accuracy metric.
    net_path = os.path.dirname(net_file)
    if net_path not in sys.path:
        sys.path.insert(0, net_path)

    from lenet import LeNet as MyNet

    # define network topology (MNIST: 1x28x28 grayscale images)
    images = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    net = MyNet({'data': images})
    prediction = net.layers['prob']
    acc = fluid.layers.accuracy(input=prediction, label=label)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # 2. load weights. FIX: the original used ``model_file.find('.npy') > 0``,
    # a substring test that misses a file named exactly '.npy' (index 0) and
    # also matches names like 'model.npy.bak'; a suffix check states the
    # intent directly.
    if model_file.endswith('.npy'):
        net.load(data_path=model_file, exe=exe, place=place)
    else:
        net.load(data_path=model_file, exe=exe)

    # 3. test this model against the MNIST test set
    test_program = fluid.default_main_program().clone()
    test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128)
    feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
    fetch_list = [acc, prediction]

    print('go to test model using test set')
    acc_val = test_model(exe, test_program, fetch_list, test_reader, feeder)
    print('test accuracy is [%.4f], expected value[0.919]' % (acc_val))
    transforms.Normalize(norm_mean, norm_std),
])

# Build MyDataset instances (custom Dataset reading dog/cat images from disk)
train_data = DogCatDataset(data_dir=train_dir, transform=train_transform)
valid_data = DogCatDataset(data_dir=valid_dir, transform=valid_transform)

# Build DataLoaders — shuffle only the training split
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================
net = MyNet(classes=2)
net.initialize_weights()  # model-defined weight init; implementation not visible here
net.to("cuda")  # NOTE(review): hard-coded device — fails on CPU-only hosts; consider a device check

# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss()  # choose loss function

# ============================ step 4/5 optimizer ============================
# NOTE(review): weight_decay=1e-1 is unusually large for Adam — confirm intentional
optimizer = optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.999), eps=1e-08,
                       weight_decay=1e-1)  # choose optimizer
# optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=1e-1)  # choose optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10,
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# Build MyDataset instances (custom Dataset reading dog/cat images from disk)
train_data = DogCatDataset(data_dir=train_dir, transform=train_transform)
valid_data = DogCatDataset(data_dir=valid_dir, transform=valid_transform)

# Build DataLoaders — shuffle only the training split
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================
net = MyNet(classes=2)
net.initialize_weights()  # model-defined weight init; implementation not visible here
net.to("cuda")  # NOTE(review): hard-coded device — fails on CPU-only hosts; consider a device check

# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss()  # choose loss function

# ============================ step 4/5 optimizer ============================
# NOTE(review): weight_decay=1e-1 is unusually large for Adam — confirm intentional
optimizer = optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.999), eps=1e-08,
                       weight_decay=1e-1)  # choose optimizer
# optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=1e-1)  # choose optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10,
                                            gamma=0.1)  # learning-rate decay policy: x0.1 every 10 epochs

# ============================ step 5/5 training ============================
# accumulators for per-iteration / per-epoch loss curves
train_curve = list()
valid_curve = list()
# Inspect the MyNet architecture: instantiate it on CPU and print a
# per-layer shape/parameter summary for a 3x40x40 input.
from lenet import LeNet, MyNet
from torchsummary import summary

net = MyNet(classes=2)
net.initialize_weights()  # model-defined weight init; implementation not visible here
summary(net, input_size=(3, 40, 40), device='cpu')
# print(net)
    transforms.Normalize(norm_mean, norm_std),
])

# Build MyDataset instances (custom Dataset reading dog/cat images from disk)
train_data = DogCatDataset(data_dir=train_dir, transform=train_transform)
valid_data = DogCatDataset(data_dir=valid_dir, transform=valid_transform)

# Build DataLoaders — shuffle only the training split
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================
net = MyNet(classes=2)
net.initialize_weights()  # model-defined weight init; implementation not visible here
net.to("cuda")  # NOTE(review): hard-coded device — fails on CPU-only hosts; consider a device check

# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss()  # choose loss function

# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9,
                      weight_decay=1e-1)  # choose optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10,
                                            gamma=0.1)  # learning-rate decay policy: x0.1 every 10 epochs

# ============================ step 5/5 training ============================
# accumulator for the per-iteration training loss curve
train_curve = list()