def trainCNN(modelName='resnet'):
    """Build and return a CNN for the ImageDataset label set.

    Args:
        modelName: One of 'resnet', 'inception', or 'segnet', selecting the
            architecture to instantiate.

    Returns:
        The constructed model, moved to GPU when available (wrapped in
        ``nn.DataParallel`` when the module-level ``multiGPU`` flag is set)
        and switched to eval mode.

    Raises:
        ValueError: If ``modelName`` is not one of the supported choices.
    """
    # More hyperparameters
    dataset = ImageDataset()
    dataset_labels = dataset.get_all_labels()
    num_classes = len(dataset_labels)

    if modelName == 'resnet':
        model = resnet_dropout_18(num_classes=num_classes, p=cnnDropout)
    elif modelName == 'inception':
        model = Inception3(num_classes=num_classes, aux_logits=False)
    elif modelName == 'segnet':
        # TODO: Figure out how dims need to be changed based off of NYU dataset
        model = SegNet(input_channels=3, output_channels=1, pretrained_vgg=True)
    else:
        # ValueError is the idiomatic exception for a bad argument value;
        # it is a subclass of Exception, so existing handlers still catch it.
        raise ValueError("Please select one of 'resnet' or 'inception' or "
                         "'segnet'")

    # setup the device for running; a single .to(device) replaces the
    # original's redundant .cuda() followed by .to(device).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available() and multiGPU:
        model = nn.DataParallel(model)
    model = model.to(device)

    # NOTE(review): optimizer and scheduler are created but never used or
    # returned; kept for parity with the original, but callers cannot reach
    # them — consider returning them or moving this into the training loop.
    optimizer = optim.Adam(model.parameters(), lr=cnnLr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                           patience=2)

    model.eval()
    return model
# test_loader = DataLoader(dataset=cv_dataset, batch_size=10, shuffle=True, num_workers=2) # cv_dataset = CamVidDataset('./CamVid/', mold='test', transforms=transformations, output_size=(352, 480), predct=True) cv_dataset = DatasetFrombaidu('./baidu/', mold='val', transforms=transformations, output_size=(720, 720), predct=True) # cv_loader = DataLoader(dataset=cv_dataset, batch_size=1, shuffle=False, num_workers=8, drop_last=True) # 定义预测函数 # print(cv_dataset.datalen) # cm = np.array(COLORMAP).astype('uint8') # cm = np.array(CamVid_colours).astype('uint8') n_class = 9 net = SegNet(num_classes=9) # net=SegNet(num_classes=12) net.cuda() net.eval() dir = './checkpoints/baiduSegNet5.pth' state = t.load(dir) net.load_state_dict(state['net']) test_data, test_label = cv_dataset[1] print(test_data.size()) # out=net(Variable(test_data.unsqueeze(0)).cuda()) # print(out.data.size()) # pred = out.max(1)[1].squeeze().cpu().data.numpy() # print(pred.shape) def predict(im, label): # 预测结果 im = Variable(im.unsqueeze(0)).cuda()
# --- Training setup: SGD optimizer, masked cross-entropy loss, checkpoint restore ---
# Print the class index -> class name mapping for reference.
for i, key in enumerate(class_encoding.keys()):
    print("{} \t {}".format(i, key))
optimizer = optim.SGD(own_net.parameters(), lr=1e-3, weight_decay=5e-4, momentum=0.9)
# Evaluation metric
# Exclude the 'unlabeled' class from the loss via its index in the encoding.
ignore_index = list(class_encoding).index('unlabeled')
criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
if use_gpu:
    own_net = own_net.cuda()
trainer = train.Trainer(criterion, optimizer, own_net, single_sample=single_sample)
# NOTE(review): non-raw Windows path — sequences like \C or \T only survive
# because they are not recognized escapes; consider a raw string r"..." here.
chkpt_path = "D:\CloudStorage\ODWork\Work Dropbox\MB\Tmp\SGD_lr_0.001_momentum_0.9_dampening_0_weight_decay_0.0005_nesterov_False_CrossEntropyLoss_RGB_2\Checkpoints\epoch150.ckpt"
trainer.load_checkpoint(chkpt_path)
# Resume from the restored model and switch to eval mode.
own_net = trainer.model
own_net.eval()
# NOTE(review): loop body is truncated in this chunk — it continues beyond
# the visible source.
for i, (inputs, labels) in enumerate(trainloader, 0):