def main(args):
    # Build the test dataset and loader; evaluation order is fixed (no shuffle).
    test_ds = MnistDataset(
        args.test_image_file,
        args.test_label_file,
        transform=transforms.Compose([ToTensor()]),
    )
    test_loader = torch.utils.data.DataLoader(
        test_ds,
        batch_size=args.batch_size,
        collate_fn=collate_fn,
        shuffle=False,
    )

    # Restore the trained weights and switch to inference mode.
    model = Net().to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()

    predicts = []
    truths = []
    with torch.no_grad():
        for i, sample in enumerate(test_loader):
            X, Y_true = sample["X"].to(device), sample["Y"].to(device)
            output = model(X)
            predicts.append(torch.argmax(output, dim=1))
            truths.append(Y_true)

    # Concatenate per-batch results and compute overall accuracy.
    predicts = torch.cat(predicts, dim=0)
    truths = torch.cat(truths, dim=0)
    acc = torch.sum(torch.eq(predicts, truths)).item()
    print("Acc: {:.4f}".format(acc / len(predicts)))
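# Hypothetical CLI wiring for main() above; the flag names mirror the four
# attributes it reads, and the defaults are illustrative assumptions only.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate a trained MNIST model")
    parser.add_argument("--test-image-file", dest="test_image_file", required=True)
    parser.add_argument("--test-label-file", dest="test_label_file", required=True)
    parser.add_argument("--batch-size", dest="batch_size", type=int, default=64)
    parser.add_argument("--checkpoint", required=True)
    main(parser.parse_args())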
model = Net()  # load the model
print(model)
if cpu:
    checkpoint = torch.load('./weights/best_model-20200904.pth.tar',
                            map_location=lambda storage, loc: storage)
else:
    checkpoint = torch.load('./weights/best_model-20200904.pth.tar')
# new_state_dict = OrderedDict()  # checkpoints saved with nn.DataParallel must be remapped before use on CPU
'''for k, v in checkpoint.items():
    name = k[7:]  # remove "module."
    new_state_dict[name] = v
model.load_state_dict(new_state_dict)'''
# model.load_state_dict(torch.load('./weights/best-8-24.pth.tar'))
model.load_state_dict(checkpoint)  # reuse the map_location-aware load above
model.eval()
model = model.to(device)
a = 0
b = 0
model = Net()  # load the model
print(model)
if cpu:
    checkpoint = torch.load('./weights/chen_liveness_mobilenetv2.pth.tar',
                            map_location=lambda storage, loc: storage)
else:
    checkpoint = torch.load('./weights/chen_liveness_mobilenetv2.pth.tar')
    print('gpu')
new_state_dict = OrderedDict()  # checkpoints saved with nn.DataParallel must be remapped before use on CPU
'''for k, v in checkpoint.items():
    name = k[7:]  # remove "module."
    new_state_dict[name] = v
model.load_state_dict(new_state_dict)'''
model.load_state_dict(checkpoint)  # reuse the map_location-aware load above
model.eval()
model = model.to(device)
a = 0
b = 0
start_time = time.time()
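# The commented-out remapping above handles checkpoints saved through
# nn.DataParallel, whose parameter keys all carry a "module." prefix. A
# minimal working sketch of that remapping, assuming `checkpoint` is a plain
# state_dict rather than a wrapper dict:
from collections import OrderedDict

def strip_data_parallel_prefix(state_dict):
    # Drop the "module." prefix nn.DataParallel adds to every parameter key,
    # so the weights load into a bare (non-parallel) model.
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v
    return new_state_dict

# Usage: model.load_state_dict(strip_data_parallel_prefix(checkpoint))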
# print('Pre-train from:', pre_path)
################################
################################
print('---------- Networks architecture -------------')
print_network(model)
print('----------------------------------------------')

if opt.resume:
    if os.path.isfile(opt.resume):
        print("=> loading checkpoint '{}'".format(opt.resume))
        checkpoint = torch.load(opt.resume)
        opt.start_epoch = checkpoint["epoch"] + 1
        print(opt.start_epoch)
        model.load_state_dict(checkpoint["model"].state_dict())
    else:
        print("=> no checkpoint found at '{}'".format(opt.resume))

if cuda:
    model = model.cuda()
    criterion = criterion.cuda()

optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9, 0.999),
                       eps=1e-8)

for epoch in range(opt.start_epoch, opt.nEpochs + 1):
    train(model, epoch)
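# The resume branch above expects a dict holding the last finished epoch under
# "epoch" and the full pickled model under "model". A hypothetical
# save_checkpoint() producing exactly that layout (the output path scheme is
# an illustrative assumption):
import os
import torch

def save_checkpoint(model, epoch, out_dir='checkpoints'):
    os.makedirs(out_dir, exist_ok=True)
    state = {"epoch": epoch, "model": model}  # whole module, as the loader assumes
    torch.save(state, os.path.join(out_dir, 'model_epoch_{}.pth'.format(epoch)))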
# time: 28/06/2017
import os

import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable

from config import MODEL_PATH, DATA_PATH
from models.model import transform, Net
from data import loadTrainData

trainset, trainloader = loadTrainData()

net = Net()
if os.path.exists(MODEL_PATH):
    net.load_state_dict(torch.load(MODEL_PATH))

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        # wrap them in Variable
        inputs, labels = Variable(inputs), Variable(labels)
        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
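# The load-if-exists check above suggests the script persists weights after
# training; a plausible closing step (assumed here, not confirmed by the
# excerpt):
torch.save(net.state_dict(), MODEL_PATH)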
state_dict = torch.load(config["model"]) df = pd.read_csv('kaggle_template.csv') features_test = np.load( os.path.join(config["load_dir"] + '/_feature_test_assigment.npy')) features_test_crop = np.load( os.path.join(config["load_dir"] + '/_feature_test_assigment_crop.npy')) if config["concatenate"]: features_test = np.concatenate((features_test, features_test_crop), axis=1) #Model model = Net(features_test.shape[1]) model.load_state_dict(state_dict['model']) model.eval() if use_cuda: print('Using GPU') model.cuda() else: print('Using CPU') for dossier, sous_dossiers, fichiers in os.walk(config["test_dir"]): for num, fichier in enumerate(fichiers): num_photo = df.loc[df['Id'] == fichier.split('.')[0]].index[0] data = torch.tensor(features_test[num]) if use_cuda: data = data.cuda() output = model(data) prout, pred = torch.max(output.data, 0)