def main():
    """Smoke-test the MapNet training dataloader.

    Debug script: loads the configured dataset, then for a few minibatches
    prints the image/mask tensor shapes and pops up the first mask channel
    as a PIL image.  Interactive only -- not meant for automation.
    """
    # Function-scope imports keep this heavyweight debug code out of the
    # module import path.  (The original imported os.path twice and used
    # torch.mean without ever importing torch -- both fixed here.)
    from torchvision import transforms
    import torch

    from common.utils import get_configuration
    from mapnet.config import MapNetConfigurator
    from dataloaders import get_mapnet_train_dataloader

    config = get_configuration(MapNetConfigurator())
    data_loader, val_dataloader = get_mapnet_train_dataloader(config)

    batch_count = 0
    N = 3  # number of minibatches to inspect before stopping
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        # assumes each batch is ((image_tensor, mask_tensor), target) -- TODO confirm
        data, targ = batch
        img, mask = data
        print(img.size())
        print(mask.size())
        # first mask channel of the first sample in the batch
        img = mask[0][0]
        print(img.size())
        print(torch.mean(mask[0][0]))
        # ToPILImage expects a CxHxW or HxW tensor; opens an OS image viewer
        img = transforms.ToPILImage()(img)
        img.show()
        batch_count += 1
        if batch_count >= N:
            break
def main():
    """Visualise RobotCar training data.

    Debug script: builds the RobotCar dataset with mask_image enabled,
    plots the ground-truth trajectory, then shows an image grid for the
    first two minibatches.  Interactive only.
    """
    import os.path as osp

    from common.vis_utils import show_batch
    from torchvision.utils import make_grid
    import torchvision.transforms as transforms
    import matplotlib.pyplot as plt
    from torch.utils import data
    from mapnet.config import MapNetConfigurator
    from common.utils import get_configuration
    # NOTE(review): RobotCar, osp and data were referenced but never
    # imported in the original (NameError at runtime).  Module path for
    # RobotCar assumed from the sibling script -- confirm.
    from maskrobotcar import RobotCar

    scene = 'full'
    num_workers = 4
    # transforms.Scale is the deprecated predecessor of transforms.Resize
    transform = transforms.Compose([
        transforms.Scale(256),
        # transforms.CenterCrop(224),
        transforms.ToTensor()])

    config = get_configuration(MapNetConfigurator())
    config.uniform_sampling = False
    config.mask_sampling = False
    config.mask_image = True

    data_path = "/home/drinkingcoder/Dataset/robotcar/"
    dset = RobotCar(scene, data_path, train=True, real=False,
                    transform=transform,
                    data_dir=osp.join("..", "data", "RobotCar"),
                    config=config)
    print('Loaded RobotCar scene {:s}, length = {:d}'.format(scene, len(dset)))

    # plot the poses -- presumably columns 0/1 are x/y translation; verify
    plt.figure()
    plt.plot(dset.poses[:, 0], dset.poses[:, 1])
    plt.show()

    print(len(dset))
    data_loader = data.DataLoader(dset, batch_size=10, shuffle=True,
                                  num_workers=num_workers)

    batch_count = 0
    N = 2  # minibatches to visualise
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        show_batch(make_grid(batch[0], nrow=5, padding=25, normalize=True))
        batch_count += 1
        if batch_count >= N:
            break
def main():
    """Visualise Cambridge (KingsCollege) training data.

    Debug script: builds the Cambridge dataset, scatter-plots two pose
    projections, then shows an image grid for the first two minibatches.
    Interactive only.
    """
    import os.path as osp

    from common.vis_utils import show_batch
    from torchvision.utils import make_grid
    import torchvision.transforms as transforms
    import matplotlib.pyplot as plt
    from torch.utils import data
    from mapnet.config import MapNetConfigurator
    from common.utils import get_configuration
    # NOTE(review): Cambridge, osp and data were referenced but never
    # imported in the original (NameError at runtime).  Module path for
    # Cambridge guessed by analogy with the RobotCar script -- TODO confirm.
    from cambridge import Cambridge

    scene = 'KingsCollege'
    num_workers = 4
    # transforms.Scale is the deprecated predecessor of transforms.Resize
    transform = transforms.Compose([
        transforms.Scale(256),
        # transforms.CenterCrop(224),
        transforms.ToTensor()])

    config = get_configuration(MapNetConfigurator())

    data_path = "/home/drinkingcoder/Dataset/Cambridge/"
    dset = Cambridge(scene, data_path, train=True, real=False,
                     transform=transform,
                     data_dir=osp.join("..", "data", "Cambridge", "KingsCollege"),
                     config=config)
    # fixed: original message said "RobotCar" for the Cambridge dataset
    print('Loaded Cambridge scene {:s}, length = {:d}'.format(scene, len(dset)))

    # scatter two projections of the poses -- presumably columns 0/1 are
    # translation and 3/5 rotation components; verify against the dataset
    plt.figure()
    plt.scatter(dset.poses[:, 0], dset.poses[:, 1])
    plt.show()
    plt.figure()
    plt.scatter(dset.poses[:, 3], dset.poses[:, 5])
    plt.show()

    print(len(dset))
    data_loader = data.DataLoader(dset, batch_size=10, shuffle=True,
                                  num_workers=num_workers)

    batch_count = 0
    N = 2  # minibatches to visualise
    for batch in data_loader:
        print('Minibatch {:d}'.format(batch_count))
        show_batch(make_grid(batch[0], nrow=5, padding=25, normalize=True))
        batch_count += 1
        if batch_count >= N:
            break
from mapnet.MapNet import MapNet


def get_args():
    """Parse command-line arguments: an optional --config file path."""
    parser = argparse.ArgumentParser(description='Training script for mapnet')
    parser.add_argument('--config', type=str, default=None)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    # configuration
    # NOTE(review): argparse, get_configuration, AttentionMapNetConfigurator,
    # models, SEAttentionPoseNet, LearnGAPoseNet, get_dataloader and
    # Evaluator are not imported in this chunk -- presumably imported
    # earlier in the file; verify.
    configuration = get_configuration(AttentionMapNetConfigurator(), get_args())
    # model
    feature_extractor = models.resnet34(pretrained=True)
    # Select the pose regressor named in the config.
    # NOTE(review): if configuration.model matches neither branch, `posenet`
    # is left unbound and the Evaluator construction below raises NameError.
    if configuration.model == "SEAttentionPoseNet":
        posenet = SEAttentionPoseNet(resnet=feature_extractor,
                                     config=configuration,
                                     drop_rate=configuration.dropout)
    elif configuration.model == "LearnGAPoseNet":
        posenet = LearnGAPoseNet(feature_extractor=feature_extractor,
                                 drop_rate=configuration.dropout)
    dataloader = get_dataloader(configuration)
    # (call is cut off here -- it continues beyond this chunk)
    evaluator = Evaluator(config=configuration, model=posenet,
def get_args():
    """Parse command-line arguments: an optional --config file path."""
    parser = argparse.ArgumentParser(description='Training script for mapnet')
    parser.add_argument('--config', type=str, default=None)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = get_args()
    print("Parse configuration...")
    # NOTE(review): argparse, get_configuration, MapNetConfigurator, models,
    # PoseNet, MapNet and nn are not imported in this chunk -- presumably
    # imported earlier in the file; verify.
    configuration = get_configuration(MapNetConfigurator(), args)
    # Model
    print("Load model...")
    feature_extractor = models.resnet34(pretrained=True)
    posenet = PoseNet(feature_extractor, drop_rate=configuration.dropout)
    model = MapNet(mapnet=posenet)
    # Single parameter group for the optimiser.
    param_list = [{'params': model.parameters()}]
    # Loss settings: the relative-pose terms reuse the absolute beta/gamma,
    # and both learnable flags follow configuration.learn_beta.
    kwargs = dict(loss_fn=nn.L1Loss(),
                  beta=configuration.beta,
                  gamma=configuration.gamma,
                  rel_beta=configuration.beta,
                  rel_gamma=configuration.gamma,
                  learn_beta=configuration.learn_beta,
                  learn_rel_beta=configuration.learn_beta)
    # (script continues beyond this chunk)