def train_factory(args, preprocess, target_transforms):
    """Build the training and validation datasets and loaders.

    One CocoKeypoints dataset is created per training annotation file and
    the results are concatenated into a single training dataset.

    Returns:
        (train_loader, val_loader, train_data, val_data)
    """
    train_sets = []
    for ann_file in args.train_annotations:
        train_sets.append(datasets.CocoKeypoints(
            root=args.train_image_dir,
            annFile=ann_file,
            preprocess=preprocess,
            image_transform=transforms.image_transform_train,
            target_transforms=target_transforms,
            n_images=args.n_images,
        ))
    train_data = torch.utils.data.ConcatDataset(train_sets)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=args.pin_memory,
        num_workers=args.loader_workers,
        drop_last=True,
    )

    # NOTE(review): validation reuses the *train* image transform and also
    # sets drop_last=True (partial final batch is discarded) — confirm both
    # are intentional.
    val_data = datasets.CocoKeypoints(
        root=args.val_image_dir,
        annFile=args.val_annotations,
        preprocess=preprocess,
        image_transform=transforms.image_transform_train,
        target_transforms=target_transforms,
        n_images=args.n_images,
    )
    val_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=args.pin_memory,
        num_workers=args.loader_workers,
        drop_last=True,
    )
    return train_loader, val_loader, train_data, val_data
def test_dataloader(self):
    """Return the validation DataLoader (args-driven configuration)."""
    # NOTE(review): `preprocess` here is resolved from the enclosing module
    # scope, not from self — confirm a module-level `preprocess` exists.
    dataset = datasets.CocoKeypoints(
        root=self.args.val_image_dir,
        annFile=self.args.val_annotations,
        preprocess=preprocess,
        image_transform=transforms.image_transform_train,
        target_transforms=self.target_transforms,
        n_images=self.args.n_images,
    )
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=self.args.batch_size,
        shuffle=False,
        pin_memory=self.args.pin_memory,
        num_workers=self.args.loader_workers,
        drop_last=True,
    )
def test_dataloader(self):
    """Return the validation DataLoader (cfg-driven configuration)."""
    # n_images=None loads the full annotation set (no subsampling limit
    # is passed, unlike the args-driven variant).
    dataset = datasets.CocoKeypoints(
        root=cfg.DATASET.VAL_IMAGE_DIR,
        annFile=cfg.DATASET.VAL_ANNOTATIONS,
        preprocess=preprocess,
        image_transform=transforms.image_transform_train,
        target_transforms=self.target_transforms,
        n_images=None,
    )
    # Effective batch size scales with the number of configured GPUs.
    effective_batch = cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=effective_batch,
        shuffle=False,
        pin_memory=cfg.PIN_MEMORY,
        num_workers=cfg.WORKERS,
        drop_last=True,
    )
def tng_dataloader(self):
    """Build and return the training DataLoader (args-driven configuration).

    One CocoKeypoints dataset is created per annotation file listed in
    ``self.args.train_annotations``; they are concatenated into a single
    training dataset.
    """
    train_sets = [
        datasets.CocoKeypoints(
            root=self.args.train_image_dir,
            annFile=ann_file,
            preprocess=preprocess,
            image_transform=transforms.image_transform_train,
            target_transforms=self.target_transforms,
            # Bug fix: was `args.n_images` (a bare global, NameError unless a
            # module-level `args` happens to exist); every other option in
            # this method reads from self.args.
            n_images=self.args.n_images,
        )
        for ann_file in self.args.train_annotations
    ]
    train_data = torch.utils.data.ConcatDataset(train_sets)
    return torch.utils.data.DataLoader(
        train_data,
        batch_size=self.args.batch_size,
        shuffle=True,
        pin_memory=self.args.pin_memory,
        num_workers=self.args.loader_workers,
        drop_last=True,
    )
def tng_dataloader(self):
    """Build and return the training DataLoader (cfg-driven configuration)."""
    per_annotation_sets = []
    for ann_file in cfg.DATASET.TRAIN_ANNOTATIONS:
        per_annotation_sets.append(datasets.CocoKeypoints(
            root=cfg.DATASET.TRAIN_IMAGE_DIR,
            annFile=ann_file,
            preprocess=preprocess,
            image_transform=transforms.image_transform_train,
            target_transforms=self.target_transforms,
            n_images=None,
        ))
    combined = torch.utils.data.ConcatDataset(per_annotation_sets)
    # Effective batch size scales with the number of configured GPUs.
    effective_batch = cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS)
    return torch.utils.data.DataLoader(
        combined,
        batch_size=effective_batch,
        shuffle=True,
        pin_memory=cfg.PIN_MEMORY,
        num_workers=cfg.WORKERS,
        drop_last=True,
    )
stds = [0.229, 0.224, 0.225] image = image.transpose((1,2,0)) for i in range(3): image[:, :, i] = image[:, :, i] * stds[i] image[:, :, i] = image[:, :, i] + means[i] image = image.copy()[:,:,::-1] image = image*255 return image train_datas = [datasets.CocoKeypoints( root=cfg.DATASET.TRAIN_IMAGE_DIR, annFile=item, preprocess=preprocess, image_transform=transforms.image_transform_train, target_transforms=None, n_images=None, ) for item in cfg.DATASET.TRAIN_ANNOTATIONS] train_data = torch.utils.data.ConcatDataset(train_datas) train_loader = torch.utils.data.DataLoader( train_data, batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU*len(cfg.GPUS), shuffle=True, pin_memory=cfg.PIN_MEMORY, num_workers=cfg.WORKERS, drop_last=True) val_data = datasets.CocoKeypoints( root=cfg.DATASET.VAL_IMAGE_DIR, annFile=cfg.DATASET.VAL_ANNOTATIONS, preprocess=preprocess,