# Validation set: no augmentation
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(dataPath + '/val', transform=data_transforms),
    batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=use_gpu)

# Neural network and optimizer
from model import Net
model = Net()
# Load the pretrained checkpoint, then swap in a 4-class head
model.load_state_dict(torch.load('./model/40.pth'))
fc2_infeature = model.fc2.in_features
model.fc2 = nn.Linear(fc2_infeature, 4)
if use_gpu:
    model.cuda()

# Optimizer and learning-rate schedule settings
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                 patience=5, factor=0.5, verbose=True)


def train(epoch):
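    # The body of train() is cut off in this excerpt. The lines below are a
    # minimal sketch, assuming a train_loader defined earlier in this script,
    # torch.nn.functional imported as F, and a plain cross-entropy loop; they
    # are not the original implementation.
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if use_gpu:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} samples]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset), loss.item()))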
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as datasets
import numpy as np
import time

from model import Net

parser = argparse.ArgumentParser(description='evaluation script')
parser.add_argument('--c', type=int, default=60, metavar='D',
                    help="folder where data is located. train_data.zip and test_data.zip need to be found in the folder")
args = parser.parse_args()

print('- - - - - - step3 Class inference Begin - - - - - - ')
start = time.time()

# 121-class classifier head
model_class = Net()
fc2_features = model_class.fc2.in_features
model_class.fc2 = nn.Linear(fc2_features, 121)
model_class.load_state_dict(torch.load('./new_model/class_121.pth'))
model_class.cuda()
model_class.eval()

# 2-class classifier head
model_temporary = Net()
fc2_features = model_temporary.fc2.in_features
model_temporary.fc2 = nn.Linear(fc2_features, 2)
model_temporary.load_state_dict(torch.load('./new_model/temporary_2.pth'))
model_temporary.cuda()
model_temporary.eval()

# 20-class classifier head
model_data = Net()
fc2_features = model_data.fc2.in_features
model_data.fc2 = nn.Linear(fc2_features, 20)
model_data.load_state_dict(torch.load('./new_model/data_20.pth'))
model_data.cuda()
model_data.eval()
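# The per-image inference loop is not shown in this excerpt. The sketch below is
# an assumption of how the three heads could be applied to a single PIL image;
# the transform, input size, and helper name are hypothetical, not the original code.
from torchvision import transforms

infer_transform = transforms.Compose([
    transforms.Resize((224, 224)),   # assumed input size
    transforms.ToTensor(),
])


def predict_all(img):
    """Hypothetical helper: run one PIL image through all three heads."""
    x = infer_transform(img).unsqueeze(0).cuda()
    with torch.no_grad():
        class_pred = model_class(x).argmax(dim=1).item()
        temporary_pred = model_temporary(x).argmax(dim=1).item()
        data_pred = model_data(x).argmax(dim=1).item()
    return class_pred, temporary_pred, data_pred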
# (tail of the embedding scatter helper; the start of the function is cut off
# in this excerpt)
                       edgecolors='none')
        else:
            ax.scatter(location[i, 0], location[i, 1], c=colors[l], s=10,
                       alpha=0.7, edgecolors='none')


test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=100, shuffle=False)

model = Net()
model = load_network(model)
model.fc2 = nn.Sequential()   # drop fc2 so the network outputs the feature fed to the (removed) classifier
model = model.eval()
model = model.cuda()

fig, ax = plt.subplots()
test(model, test_loader)
ax.grid(True)
ax.legend(loc='best')
fig.savefig('train.jpg')
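# test() is referenced above but not defined in this excerpt. The sketch below is
# an assumption of what such a helper typically does in this setup: collect the
# features for the whole test set and hand them to the scatter helper. The name
# and the numpy import are hypothetical, not the original code.
import numpy as np


def test_sketch(model, loader):
    feats, labels = [], []
    with torch.no_grad():
        for data, target in loader:
            out = model(data.cuda())          # per-sample feature, fc2 removed
            feats.append(out.cpu().numpy())
            labels.append(target.numpy())
    location = np.concatenate(feats)
    labels = np.concatenate(labels)
    # each point would then be drawn by the scatter helper at the top of this
    # file, coloured by its label
    return location, labels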
# from tqdm import tqdm
import os
import PIL.Image as Image
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as datasets
import numpy as np
import time

from model import Net

print('- - - - - - step3 Class inference Begin - - - - - - ')
start = time.time()

# 121-class classifier head
model_class = Net()
fc2_features = model_class.fc2.in_features
model_class.fc2 = nn.Linear(fc2_features, 121)
model_class.load_state_dict(torch.load('./new model/class_121.pth'))

# 20-class classifier head
model_data = Net()
fc2_features = model_data.fc2.in_features
model_data.fc2 = nn.Linear(fc2_features, 20)
model_data.load_state_dict(torch.load('./new model/data_20.pth'))


def pil_loader(path):
    # open path as file to avoid ResourceWarning
    # (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')
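# Hypothetical usage of pil_loader (not part of the original script): load one
# image, apply an assumed test-time transform, and query the 121-class head.
from torchvision import transforms

example_transform = transforms.Compose([
    transforms.Resize((224, 224)),   # assumed input size
    transforms.ToTensor(),
])

img = pil_loader('./data/test/example.jpg')   # hypothetical path
x = example_transform(img).unsqueeze(0)       # add a batch dimension
model_class.eval()
with torch.no_grad():
    pred = model_class(x).argmax(dim=1).item()
print('predicted class index:', pred)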
    num_workers=0, pin_memory=use_gpu)
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(dataPath + '/val', transform=data_transforms),
    batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=use_gpu)

# Neural Network and Optimizer
from model import Net
model = Net()
model.load_state_dict(torch.load('./model/domestic40.pth'))
fc2_features = model.fc2.in_features
model.fc2 = nn.Linear(fc2_features, 20)
if use_gpu:
    model.cuda()

optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                 patience=5, factor=0.5, verbose=True)


def train(epoch):
    model.train()
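    # the rest of train() is cut off in this excerpt; it would mirror the
    # fine-tuning loop sketched for the sibling script above


# ReduceLROnPlateau('min') needs a metric every epoch. The missing validation
# step is sketched below as an assumption (the helper name, F = torch.nn.functional,
# and the epoch loop are hypothetical, not the original code).
def validate_sketch():
    model.eval()
    total_loss, n = 0.0, 0
    with torch.no_grad():
        for data, target in val_loader:
            if use_gpu:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            total_loss += F.cross_entropy(output, target, reduction='sum').item()
            n += target.size(0)
    return total_loss / n

# typical epoch loop (sketch):
# for epoch in range(1, epochs + 1):
#     train(epoch)
#     val_loss = validate_sketch()
#     scheduler.step(val_loss)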