Example #1
        def dense_process_data(index):
            images = list()
            for ind in indices['dense']:
                ptr = int(ind)

                if ptr <= record.num_frames:
                    imgs = self._load_image(record.path, ptr)
                else:
                    imgs = self._load_image(record.path, record.num_frames)
                images.extend(imgs)

            if self.phase == 'Fntest':

                images = [np.asarray(im) for im in images]
                clip_input = np.concatenate(images, axis=2)

                self.t = transforms.Compose([
                    transforms.Resize(256)])
                clip_input = self.t(clip_input)

                normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                 std=[0.229, 0.224, 0.225])

                if record.crop_pos == 0:
                    self.transform = transforms.Compose([
                        transforms.CenterCrop((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])
                elif record.crop_pos == 1:
                    self.transform = transforms.Compose([
                        transforms.CornerCrop2((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])
                elif record.crop_pos == 2:
                    self.transform = transforms.Compose([
                        transforms.CornerCrop1((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])

                return self.transform(clip_input)

            return self.transform(images)
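For comparison, the test-time branch above depends on project-specific pieces (the record object, CornerCrop1/CornerCrop2). A minimal, self-contained sketch of the same center-crop pipeline using only stock torchvision transforms (the frame path is a hypothetical placeholder) could look like this:

from PIL import Image
from torchvision import transforms

# Stock torchvision equivalent of the crop_pos == 0 (center-crop) branch above.
eval_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

frame = Image.open("frame_0001.jpg").convert("RGB")  # hypothetical frame path
clip_input = eval_transform(frame)                   # tensor of shape (3, 256, 256)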
Example #2
    print("Preprocessing finished!")

cuda_available = torch.cuda.is_available()

# directory results
if not os.path.exists(RESULTS_PATH):
    os.makedirs(RESULTS_PATH)

# Load dataset
mean = m
std_dev = s

transform_train = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(0.1, 0.1, 0.1, 0.1)],
                           p=0.5),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std_dev)
])

transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std_dev)
])

training_set = LocalDataset(IMAGES_PATH,
                            TRAINING_PATH,
                            transform=transform_train)
validation_set = LocalDataset(IMAGES_PATH,
                              VALIDATION_PATH,
                              transform=transform_test)
Example #3
    default="facades",
    help="Name of the dataset: ['facades', 'maps', 'cityscapes']")
parser.add_argument("--batch_size",
                    type=int,
                    default=1,
                    help="Size of the batches")
parser.add_argument("--lr",
                    type=float,
                    default=0.0002,
                    help="Adams learning rate")
args = parser.parse_args()

device = ('cuda:0' if torch.cuda.is_available() else 'cpu')

transforms = T.Compose([
    T.Resize((256, 256)),
    T.ToTensor(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
# models
print('Defining models!')
generator = UnetGenerator().to(device)
discriminator = ConditionalDiscriminator().to(device)
# optimizers
g_optimizer = torch.optim.Adam(generator.parameters(),
                               lr=args.lr,
                               betas=(0.5, 0.999))
d_optimizer = torch.optim.Adam(discriminator.parameters(),
                               lr=args.lr,
                               betas=(0.5, 0.999))
# loss functions
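The snippet is cut off at the loss definitions. A minimal sketch of the losses a pix2pix-style setup commonly defines at this point (adversarial BCE plus an L1 reconstruction term; the names and the 100x weight are assumptions, not taken from this code) would be:

# Hedged sketch only: typical pix2pix-style losses, not confirmed by the snippet above.
adversarial_criterion = torch.nn.BCEWithLogitsLoss()   # real/fake loss on discriminator logits
reconstruction_criterion = torch.nn.L1Loss()           # pixel-wise L1 between generated and target images
lambda_l1 = 100                                        # assumed weight of the L1 term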
Example #4
if args.preprocess:
    print ("Preprocessing..")
    preprocessing()
    print ("Preprocessing finished!")

cuda_available = torch.cuda.is_available()

# directory results
if not os.path.exists(RESULTS_PATH):
    os.makedirs(RESULTS_PATH)

# Load dataset
mean = m
std_dev = s

transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor(),
                                transforms.Normalize(mean, std_dev)])

training_set = LocalDataset(IMAGES_PATH, TRAINING_PATH, transform=transform)
validation_set = LocalDataset(IMAGES_PATH, VALIDATION_PATH, transform=transform)

training_set_loader = DataLoader(dataset=training_set, batch_size=BATCH_SIZE, num_workers=THREADS, shuffle=True)
validation_set_loader = DataLoader(dataset=validation_set, batch_size=BATCH_SIZE, num_workers=THREADS, shuffle=False)

def train_model(model_name, model, lr=LEARNING_RATE, epochs=EPOCHS, momentum=MOMENTUM, weight_decay=0, train_loader=training_set_loader, test_loader=validation_set_loader):

    if not os.path.exists(RESULTS_PATH + "/" + model_name):
        os.makedirs(RESULTS_PATH + "/" + model_name)
    
    criterion = nn.CrossEntropyLoss()
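train_model is cut off right after the loss. Given the lr, momentum, and weight_decay parameters it accepts, an SGD optimizer of roughly this form plausibly comes next (a sketch inferred from the signature, not the original code):

    # Sketch of the likely next step, inferred only from the function signature above.
    optimizer = torch.optim.SGD(model.parameters(), lr=lr,
                                momentum=momentum, weight_decay=weight_decay)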
Example #5
    parser.add_argument('--device', type=str, default='cuda:0',
                        help='cpu or cuda:0 or cuda:1')

    args = parser.parse_args() if string is None else parser.parse_args(string)
    return args


if __name__ == '__main__':

    args = parse_args()

    wandb.init(config=args,
               project=f'dlcv_naive_{args.source}2{args.target}')

    size = 64
    t0 = transforms.Compose([
            transforms.Resize(size),
            transforms.ColorJitter(),
            transforms.RandomRotation(15, fill=(0,)),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])
    t1 = transforms.Compose([
            transforms.Resize(size),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])

    root = '../hw3_data/digits/'
    # dataset
Example #6
def inference(args):
    
    if args.target == 'mnistm':
        args.source = 'usps'
    elif args.target == 'usps':
        args.source = 'svhn'
    elif args.target == 'svhn':
        args.source = 'mnistm'
    else:
        raise NotImplementedError(f"{args.target}: not implemented!")
    
    size = args.img_size
    t1 = transforms.Compose([
            transforms.Resize(size),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])

    valid_target_dataset = Digits_Dataset_Test(args.dataset_path, t1)

    valid_target_dataloader = DataLoader(valid_target_dataset,
                                         batch_size=512,
                                         num_workers=6)

    load = torch.load(
        f"./p3/result/3_2/{args.source}2{args.target}/best_model.pth",
        map_location='cpu')

    feature_extractor = FeatureExtractor()
    feature_extractor.load_state_dict(load['F'])
    feature_extractor.cuda()
    feature_extractor.eval()

    label_predictor = LabelPredictor()
    label_predictor.load_state_dict(load['C'])
    label_predictor.cuda()
    label_predictor.eval()
           
    out_preds = []
    out_fnames = []
    count = 0
    for i, (imgs, fnames) in enumerate(valid_target_dataloader):
        bsize = imgs.size(0)

        imgs = imgs.cuda()

        features = feature_extractor(imgs)
        class_output = label_predictor(features)
        
        _, preds = class_output.max(1)
        preds = preds.detach().cpu()
        
        out_preds.append(preds)
        out_fnames += fnames
        
        count += bsize
        print(f"\t [{count}/{len(valid_target_dataloader.dataset)}]",
              end="   \r")
        
    out_preds = torch.cat(out_preds)
    out_preds = out_preds.cpu().numpy()
    
    d = {'image_name':out_fnames, 'label':out_preds}
    df = pd.DataFrame(data=d)
    df = df.sort_values('image_name')
    df.to_csv(args.out_csv, index=False)
    print(f' [Info] finish predicting {args.dataset_path}')
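inference only needs the parsed arguments. A minimal entry point in the style of the other examples (assuming the script defines a parse_args() like the ones shown above) would be:

if __name__ == '__main__':
    # Hypothetical entry point; the script's own parse_args() is not shown in the snippet.
    args = parse_args()
    inference(args)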
Example #7
                        type=str,
                        default='cuda:0',
                        help='cpu or cuda:0 or cuda:1')

    args = parser.parse_args() if string is None else parser.parse_args(string)
    return args


if __name__ == '__main__':

    args = parse_args()

    wandb.init(config=args, project='dlcv_gan_face')

    transform = transforms.Compose([
        transforms.Resize(args.img_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5] * 3, [0.5] * 3)
    ])
    train_dataset = Face_Dataset('../hw3_data/face/train', transform)
    valid_dataset = Face_Dataset('../hw3_data/face/test', transform)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch,
                                  shuffle=True,
                                  num_workers=args.num_workers)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=args.batch,
                                  num_workers=args.num_workers)

    train(args, train_dataloader, valid_dataloader)
Example #8
    def transform_val(self, input_data):
        rgb = np.array(input_data["image"]).astype(np.float32)
        lidar_depth = np.array(input_data["lidar_depth"]).astype(np.float32)
        radar_depth = np.array(input_data["radar_depth"]).astype(np.float32)
        if 'index_map' in input_data.keys():
            # np.int was removed in NumPy 1.24; the builtin int is equivalent
            index_map = np.array(input_data["index_map"]).astype(int)

        # Then, we add model-aware resizing
        if self.transform_mode == "DORN":
            if cfg.scaling is True:
                h, w, _ = tuple((np.array(rgb.shape)).astype(np.int32))
            else:
                h, w, _ = tuple((np.array(rgb.shape) * 0.5).astype(np.int32))

            h_new = self.t_cfg.crop_size_train[0]
            w_new = w
            resize_image_method = transforms.Resize([h_new, w_new],
                                                    interpolation="bilinear")
            resize_depth_method = transforms.Resize([h_new, w_new],
                                                    interpolation="nearest")
        elif self.transform_mode == "sparse-to-dense":
            h_new = self.t_cfg.crop_size_train[0]
            w_new = self.t_cfg.crop_size_train[1]
            resize_image_method = transforms.Resize([h_new, w_new],
                                                    interpolation="bilinear")
            resize_depth_method = transforms.Resize([h_new, w_new],
                                                    interpolation="nearest")

        transform_rgb = transforms.Compose([
            # resize_image_method,
            transforms.CenterCrop(self.t_cfg.crop_size_val)
        ])
        transform_depth = transforms.Compose([
            # resize_depth_method,
            transforms.CenterCrop(self.t_cfg.crop_size_val)
        ])

        rgb = transform_rgb(rgb)
        rgb = rgb / 255.
        lidar_depth = transform_depth(lidar_depth)

        rgb = np.array(rgb).astype(np.float32)
        lidar_depth = np.array(lidar_depth).astype(np.float32)

        rgb = to_tensor(rgb)
        lidar_depth = to_tensor(lidar_depth)

        radar_depth = transform_depth(radar_depth)
        radar_depth = np.array(radar_depth).astype(np.float32)
        radar_depth = to_tensor(radar_depth)

        # Perform transform on index map
        if 'index_map' in input_data.keys():
            index_map = transform_depth(index_map)
            index_map = np.array(index_map).astype(int)
            index_map = to_tensor(index_map)
            index_map = index_map.unsqueeze(0)

        # Normalize to imagenet mean and std
        if self.transform_mode == "DORN":
            rgb = transforms.normalization_imagenet(rgb)

        ####################
        ## Filtering part ##
        ####################
        if self.sparsifier == "radar_filtered":
            # Indicating the invalid entries
            invalid_mask = ~input_data['valid_mask']
            invalid_index = np.where(invalid_mask)[0]
            invalid_index_mask = invalid_index[None, None,
                                               ...].transpose(2, 0, 1)

            # Constructing mask for dense depth
            dense_mask = torch.ByteTensor(
                np.sum(index_map.numpy() == invalid_index_mask, axis=0))
            radar_depth_filtered = radar_depth.clone()
            radar_depth_filtered[dense_mask.to(torch.bool)] = 0.
            radar_depth_filtered = radar_depth_filtered.unsqueeze(0)
            # ipdb.set_trace()
            ####################

        ######################################
        ## Filtering using predicted labels ##
        ######################################
        if self.sparsifier == "radar_filtered2":
            # ipdb.set_trace()
            invalid_mask = ~input_data['pred_labels']
            invalid_index = np.where(invalid_mask)[0]
            invalid_index_mask = invalid_index[None, None,
                                               ...].transpose(2, 0, 1)

            dense_mask = torch.ByteTensor(
                np.sum(index_map.numpy() == invalid_index_mask, axis=0))
            radar_depth_filtered2 = radar_depth.clone()
            radar_depth_filtered2[dense_mask.to(torch.bool)] = 0.
            radar_depth_filtered2 = radar_depth_filtered2.unsqueeze(0)
            ######################################

        lidar_depth = lidar_depth.unsqueeze(0)
        radar_depth = radar_depth.unsqueeze(0)

        # Return different data for different modality
        ################ Input sparsifier #########
        if self.modality == "rgb":
            inputs = rgb
        elif self.modality == "rgbd":
            if self.sparsifier == "radar":
                # Filter out the points exceeding max_depth
                mask = (radar_depth > self.max_depth)
                radar_depth[mask] = 0
                inputs = torch.cat((rgb, radar_depth), dim=0)
            elif self.sparsifier == "radar_filtered":
                # Filter out the points exceeding max_depth
                mask = (radar_depth_filtered > self.max_depth)
                radar_depth_filtered[mask] = 0
                inputs = torch.cat((rgb, radar_depth_filtered), dim=0)
            # Using the learned classifier
            elif self.sparsifier == "radar_filtered2":
                # Filter out the points exceeding max_depth
                mask = (radar_depth_filtered2 > self.max_depth)
                radar_depth_filtered2[mask] = 0
                inputs = torch.cat((rgb, radar_depth_filtered2), dim=0)
            else:
                s_depth = self.get_sparse_depth(lidar_depth, radar_depth)
                inputs = torch.cat((rgb, s_depth), dim=0)
        else:
            raise ValueError("[Error] Unsupported modality. Consider ",
                             self.avail_modality)
        labels = lidar_depth

        output_dict = {
            "rgb": rgb,
            "lidar_depth": lidar_depth,
            "radar_depth": radar_depth,
            "inputs": inputs,
            "labels": labels
        }

        if self.sparsifier == "radar_filtered":
            output_dict["radar_depth_filtered"] = radar_depth_filtered

        if self.sparsifier == "radar_filtered2":
            output_dict["radar_depth_filtered2"] = radar_depth_filtered2

        # For 'index_map' compatibility
        if 'index_map' in input_data.keys():
            output_dict["index_map"] = index_map

        return output_dict
    def transform_train(self, input_data):
        # import ipdb; ipdb.set_trace()
        # Fetch the data
        rgb = np.array(input_data["image"]).astype(np.float32)
        lidar_depth = np.array(input_data["lidar_depth"]).astype(np.float32)
        radar_depth = np.array(input_data["radar_depth"]).astype(np.float32)
        if 'index_map' in input_data.keys():
            index_map = np.array(input_data["index_map"]).astype(int)

        # Define augmentation factor
        scale_factor = np.random.uniform(
            self.t_cfg.scale_factor_train[0],
            self.t_cfg.scale_factor_train[1])  # random scaling
        angle_factor = np.random.uniform(
            -self.t_cfg.rotation_factor,
            self.t_cfg.rotation_factor)  # random rotation degrees
        flip_factor = np.random.uniform(0.0,
                                        1.0) < 0.5  # random horizontal flip

        # Compose customized transform for RGB and Depth separately
        color_jitter = transforms.ColorJitter(0.2, 0.2, 0.2)
        resize_image = transforms.Resize(scale_factor,
                                         interpolation="bilinear")
        resize_depth = transforms.Resize(scale_factor, interpolation="nearest")

        # # First, we uniformly downsample all the images by half
        # resize_image_initial = transforms.Resize(0.5, interpolation="bilinear")
        # resize_depth_initial = transforms.Resize(0.5, interpolation="nearest")

        # Then, we add model-aware resizing
        if self.transform_mode == "DORN":
            if cfg.scaling is True:
                h, w, _ = tuple((np.array(rgb.shape)).astype(np.int32))
            else:
                h, w, _ = tuple((np.array(rgb.shape) * 0.5).astype(np.int32))

            # ipdb.set_trace()
            h_new = self.t_cfg.crop_size_train[0]
            w_new = w
            resize_image_method = transforms.Resize([h_new, w_new],
                                                    interpolation="bilinear")
            resize_depth_method = transforms.Resize([h_new, w_new],
                                                    interpolation="nearest")
        elif self.transform_mode == "sparse-to-dense":
            h_new = self.t_cfg.crop_size_train[0]
            w_new = self.t_cfg.crop_size_train[1]
            resize_image_method = transforms.Resize([h_new, w_new],
                                                    interpolation="bilinear")
            resize_depth_method = transforms.Resize([h_new, w_new],
                                                    interpolation="nearest")

        # Get the border of random crop
        h_scaled, w_scaled = math.floor(h_new * scale_factor), math.floor(
            (w_new * scale_factor))
        h_bound, w_bound = h_scaled - self.t_cfg.crop_size_train[
            0], w_scaled - self.t_cfg.crop_size_train[1]
        h_startpoint = round(np.random.uniform(0, h_bound))
        w_startpoint = round(np.random.uniform(0, w_bound))

        # Compose the transforms for RGB
        transform_rgb = transforms.Compose([
            transforms.Rotate(angle_factor), resize_image,
            transforms.Crop(h_startpoint, w_startpoint,
                            self.t_cfg.crop_size_train[0],
                            self.t_cfg.crop_size_train[1]),
            transforms.HorizontalFlip(flip_factor)
        ])

        # Compose the transforms for Depth
        transform_depth = transforms.Compose([
            transforms.Rotate(angle_factor), resize_depth,
            transforms.Crop(h_startpoint, w_startpoint,
                            self.t_cfg.crop_size_train[0],
                            self.t_cfg.crop_size_train[1]),
            transforms.HorizontalFlip(flip_factor)
        ])

        # Perform transform on rgb data
        # ToDo: decide whether we need to subtract the ImageNet mean here
        rgb = transform_rgb(rgb)
        rgb = color_jitter(rgb)
        rgb = rgb / 255.

        # Perform transform on lidar depth data
        lidar_depth /= float(scale_factor)
        lidar_depth = transform_depth(lidar_depth)

        rgb = np.array(rgb).astype(np.float32)
        lidar_depth = np.array(lidar_depth).astype(np.float32)

        rgb = to_tensor(rgb)
        lidar_depth = to_tensor(lidar_depth)

        # Perform transform on radar depth data
        radar_depth /= float(scale_factor)
        radar_depth = transform_depth(radar_depth)

        radar_depth = np.array(radar_depth).astype(np.float32)
        radar_depth = to_tensor(radar_depth)

        # Perform transform on index map
        if 'index_map' in input_data.keys():
            index_map = transform_depth(index_map)
            index_map = np.array(index_map).astype(int)
            index_map = to_tensor(index_map)
            index_map = index_map.unsqueeze(0)

        # Normalize rgb using imagenet mean and std
        # ToDo: only do imagenet normalization on DORN
        if self.transform_mode == "DORN":
            rgb = transforms.normalization_imagenet(rgb)

        if self.sparsifier == "radar_filtered":
            ####################
            ## Filtering part ##
            ####################
            # Indicating the invalid entries
            invalid_mask = ~input_data['valid_mask']
            invalid_index = np.where(invalid_mask)[0]
            invalid_index_mask = invalid_index[None, None,
                                               ...].transpose(2, 0, 1)

            # Constructing mask for dense depth
            dense_mask = torch.ByteTensor(
                np.sum(index_map.numpy() == invalid_index_mask, axis=0))
            radar_depth_filtered = radar_depth.clone()
            radar_depth_filtered[dense_mask.to(torch.bool)] = 0.
            radar_depth_filtered = radar_depth_filtered.unsqueeze(0)

        if self.sparsifier == "radar_filtered2":
            ######################################
            ## Filtering using predicted labels ##
            ######################################
            invalid_mask = ~input_data['pred_labels']
            invalid_index = np.where(invalid_mask)[0]
            invalid_index_mask = invalid_index[None, None,
                                               ...].transpose(2, 0, 1)

            dense_mask = torch.ByteTensor(
                np.sum(index_map.numpy() == invalid_index_mask, axis=0))
            radar_depth_filtered2 = radar_depth.clone()
            radar_depth_filtered2[dense_mask.to(torch.bool)] = 0.
            radar_depth_filtered2 = radar_depth_filtered2.unsqueeze(0)
            ######################################

        lidar_depth = lidar_depth.unsqueeze(0)
        radar_depth = radar_depth.unsqueeze(0)

        # Return different data for different modality
        if self.modality == "rgb":
            inputs = rgb
        elif self.modality == "rgbd":
            if self.sparsifier == "radar":
                # Filter out the points exceeding max_depth
                mask = (radar_depth > self.max_depth)
                radar_depth[mask] = 0
                inputs = torch.cat((rgb, radar_depth), dim=0)
            # Using the generated ground truth
            elif self.sparsifier == "radar_filtered":
                # Filter out the points exceeding max_depth
                mask = (radar_depth_filtered > self.max_depth)
                radar_depth_filtered[mask] = 0
                inputs = torch.cat((rgb, radar_depth_filtered), dim=0)
            # Using the learned classifier
            elif self.sparsifier == "radar_filtered2":
                # Filter out the points exceeding max_depth
                mask = (radar_depth_filtered2 > self.max_depth)
                radar_depth_filtered2[mask] = 0
                inputs = torch.cat((rgb, radar_depth_filtered2), dim=0)
            else:
                s_depth = self.get_sparse_depth(lidar_depth, radar_depth)
                inputs = torch.cat((rgb, s_depth), dim=0)
        else:
            raise ValueError("[Error] Unsupported modality. Consider ",
                             self.avail_modality)
        labels = lidar_depth

        # Gathering output results
        output_dict = {
            "rgb": rgb,
            "lidar_depth": lidar_depth,
            "radar_depth": radar_depth,
            "inputs": inputs,
            "labels": labels
        }
        if self.sparsifier == "radar_filtered":
            output_dict["radar_depth_filtered"] = radar_depth_filtered

        if self.sparsifier == "radar_filtered2":
            output_dict["radar_depth_filtered2"] = radar_depth_filtered2

        if 'index_map' in input_data.keys():
            output_dict["index_map"] = index_map

        return output_dict
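transform_train and transform_val are instance methods; a dataset class in this style would typically dispatch between them per split inside __getitem__, roughly as below (a sketch that assumes a split attribute and a load_sample helper, neither of which appears in the snippet):

    def __getitem__(self, index):
        # Hypothetical dispatch; the attribute and helper names are assumptions.
        input_data = self.load_sample(index)  # dict with "image", "lidar_depth", "radar_depth", ...
        if self.split == "train":
            return self.transform_train(input_data)
        return self.transform_val(input_data)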