# --- Test-set loading & pre-trained model import ----------------------------
# Times dataset construction, reports its shape, then loads the MobileNet
# U-Net weights for evaluation.
start_time = time.time()
depth_dataset = DepthDataset(root_dir=data,
                             transform=transforms.Compose([ToTensor()]))
train_loader = torch.utils.data.DataLoader(depth_dataset, batchSize)
dataiter = iter(train_loader)
# FIX: DataLoader iterators no longer expose a `.next()` method (removed in
# PyTorch 1.13); the builtin `next()` is the supported spelling.
images = next(dataiter)
print("\n Time taken to load Images: %s " % (time.time() - start_time))
print("\n Test Dataset Shape: {shape}".format(shape=np.shape(depth_dataset)))

# ### Importing the Model
from Mobile_model import Model

model = Model().cuda()
model = nn.DataParallel(model)

# Import the Pre-trained Model
model.load_state_dict(torch.load(pretrained_path))
print("\n Loaded MobileNet U-Net Weights successfully\n")
model.eval()

# ### Model Variables (state_dict)
# print("\n\nModel's state_dict:\n\n")
# for param_tensor in model.state_dict():
#     print(param_tensor, "\t", model.state_dict()[param_tensor].size())
# --- Tail of `load_images(...)` ---------------------------------------------
# NOTE(review): the enclosing `def load_images(...)` header and the start of
# its loop (where `t1` and `preds` are built) lie outside this chunk; the
# indented lines below are its tail, reconstructed from a newline-stripped
# paste. Confirm indentation against the original file.
    t2 = torch.from_numpy(t1).float().div(255)  # divide by 255 — presumably t1 holds 0-255 pixel data; verify
    t3 = model(t2.cuda())                       # forward pass on the GPU
    t4 = t3.detach().cpu().numpy()              # detach from the graph, move to a host ndarray
    t5 = t4
    # t5 = cv2.resize(t5, (320, 240))
    preds.append(t5[0][0])                      # strip the two leading axes before collecting
    return preds


# Gather synthetic test inputs and their ground-truth counterparts.
image_list = glob.glob('rgb_syn_test/*.jpg')
test_images = load_images(image_list)
image_list_gt = glob.glob('gt_syn_test/*.png')
test_images_gt = load_images(image_list_gt)

# model checkpoints
# Restore model + optimizer state from the epoch-8 checkpoint for evaluation.
model = Model().cuda()
# model.load_state_dict(torch.load(r'models\01-27-2021_12-51-33-n17183-e20-bs4-lr0.0001\weights.epoch8_model.pth'))
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# epoch = checkpoint['epoch']
# loss = checkpoint['loss']
optimizer = torch.optim.Adam(model.parameters(), 0.0001)
checkpoint = torch.load(
    r'models\01-27-2021_12-51-33-n17183-e20-bs4-lr0.0001\weights.epoch8_model.pth'
)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']    # epoch the checkpoint was saved at
loss = checkpoint['loss']      # loss recorded in the checkpoint
model.eval()
# --- Training setup ----------------------------------------------------------
import datetime
import torch
import torch.nn as nn
import torch.nn.utils as utils
import torchvision.utils as vutils
from tensorboardX import SummaryWriter

# TensorBoard event writer (logs to the default ./runs directory).
writer = SummaryWriter()

# from data import getTrainingTestingData
# from utils import AverageMeter, DepthNorm, colorize
# model = Model().cuda()

# Wrap the model for multi-GPU data parallelism when more than one GPU
# is visible.
# NOTE(review): `model` must already be bound earlier in the file (see the
# commented-out creation line above) — confirm against the full script.
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    model = nn.DataParallel(model)

#load trained model if needed
#model.load_state_dict(torch.load('/workspace/1.pth'))

print('Model created.')

# [['rr-1.ppm', 'd-1.jpg'], [], []]
# Training dataset: augmentation is applied with probability 0.5, then
# samples are converted to tensors; batches are shuffled each epoch.
depth_dataset = DepthDataset(traincsv=traincsv,
                             transform=transforms.Compose(
                                 [Augmentation(0.5), ToTensor()]))
train_loader = DataLoader(depth_dataset, batch_size, shuffle=True)
help='Image size of network input') parser.add_argument('--data_dir', default='comarision_datasets\input', type=str, help='Data path') parser.add_argument( '--result_dir', default='demo_results', type=str, help='Directory for saving results, default: demo_results') parser.add_argument('--gpu_id', default=0, type=int, help='GPU id, default:0') args = parser.parse_args() if not os.path.exists(args.result_dir): os.makedirs(args.result_dir) gpu_id = args.gpu_id torch.cuda.device(gpu_id) net = Model().cuda() net.load_state_dict(torch.load('weights_model.pth')) net.eval() print('Begin to test ...') with torch.no_grad(): demo(net, args) print('Finished!')