Example 1
        def dense_process_data(index):
            images = list()
            for ind in indices['dense']:
                ptr = int(ind)
                # Clamp the frame pointer to the last available frame.
                if ptr <= record.num_frames:
                    imgs = self._load_image(record.path, ptr)
                else:
                    imgs = self._load_image(record.path, record.num_frames)
                images.extend(imgs)

            if self.phase == 'Fntest':
                # Stack all frames into a single clip along the channel axis.
                images = [np.asarray(im) for im in images]
                clip_input = np.concatenate(images, axis=2)

                self.t = transforms.Compose([
                    transforms.Resize(256)])
                clip_input = self.t(clip_input)

                normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                 std=[0.229, 0.224, 0.225])

                # Select the crop transform according to the crop position of
                # this record (center crop or one of the corner crops).
                if record.crop_pos == 0:
                    self.transform = transforms.Compose([
                        transforms.CenterCrop((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])
                elif record.crop_pos == 1:
                    self.transform = transforms.Compose([
                        transforms.CornerCrop2((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])
                elif record.crop_pos == 2:
                    self.transform = transforms.Compose([
                        transforms.CornerCrop1((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])

                return self.transform(clip_input)

            return self.transform(images)
Example 2
cuda_available = torch.cuda.is_available()

# Create the results directory if it does not exist
if not os.path.exists(RESULTS_PATH):
    os.makedirs(RESULTS_PATH)

# Load dataset: normalization statistics m and s are computed elsewhere
mean = m
std_dev = s

transform_train = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(0.1, 0.1, 0.1, 0.1)],
                           p=0.5),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std_dev)
])

transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std_dev)
])

training_set = LocalDataset(IMAGES_PATH,
                            TRAINING_PATH,
                            transform=transform_train)
validation_set = LocalDataset(IMAGES_PATH,
                              VALIDATION_PATH,
                              transform=transform_test)
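The snippet stops after building the datasets. A minimal sketch of the corresponding loaders follows; the batch size and worker count are assumptions, not values from the original script.

BATCH_SIZE = 32  # assumed value

# Wrap the datasets in loaders; only the training loader is shuffled.
training_loader = torch.utils.data.DataLoader(training_set,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=4)
validation_loader = torch.utils.data.DataLoader(validation_set,
                                                batch_size=BATCH_SIZE,
                                                shuffle=False,
                                                num_workers=4)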
Example 3
# valloader = torch.utils.data.DataLoader(CSDataSet(args.data_dir, './dataset/list/cityscapes/val.lst', crop_size=(1024, 2048), mean=IMG_MEAN, scale=False, mirror=False),
#                                 batch_size=2, shuffle=False, pin_memory=True)

value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
train_transform = my_trans.Compose([
    # my_trans.Resize((args.height, args.width)),
    # my_trans.RandScale([0.5, 2.0]),
    # my_trans.RandomGaussianBlur(),
    my_trans.RandomHorizontalFlip(),
    # my_trans.Crop([args.height, args.width],crop_type='rand', padding=mean, ignore_label=255),
    my_trans.ToTensor(),  # no division by 255, hence mean/std are scaled to [0, 255] above
    my_trans.Normalize(mean=mean, std=std)
])
val_transform = my_trans.Compose([
    # my_trans.Resize((args.height, args.width)),
    my_trans.ToTensor(),  # no division by 255, hence mean/std are scaled to [0, 255] above
    my_trans.Normalize(mean=mean, std=std)
])

data_dir = '/data/zzg/CamVid/'
train_dataset = CamVid(data_dir,
                       mode='train',
                       p=None,
                       transform=train_transform)
trainloader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True)
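The excerpt ends before the validation loader. A hedged sketch of a matching loader, modeled on the commented-out Cityscapes valloader above; the 'val' mode name and the loader settings are assumptions.

val_dataset = CamVid(data_dir,
                     mode='val',  # assumed split name
                     p=None,
                     transform=val_transform)
valloader = torch.utils.data.DataLoader(val_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        pin_memory=True)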
Example 4
def main():
    global args, best_record
    args = parser.parse_args()

    if args.augment:
        transform_train = joint_transforms.Compose([
            joint_transforms.RandomCrop(256),
            joint_transforms.Normalize(),
            joint_transforms.ToTensor(),
        ])
    else:
        transform_train = None

    dataset_train = Data.WData(args.data_root, transform_train)
    dataloader_train = data.DataLoader(dataset_train,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=16)

    # Note: the validation set reuses the training transform.
    dataset_val = Data.WData(args.val_root, transform_train)
    dataloader_val = data.DataLoader(dataset_val,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=16)

    model = SFNet(input_channels=37, dilations=[2, 4, 8], num_class=2)

    # multi gpu
    model = torch.nn.DataParallel(model)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()
    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss(ignore_index=-1).cuda()
    optimizer = torch.optim.SGD([{
        'params': get_1x_lr_params(model)
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(dataloader_train, model, criterion, optimizer, epoch)

        # evaluate on validation set
        acc, mean_iou, val_loss = validate(dataloader_val, model, criterion,
                                           epoch)

        is_best = mean_iou > best_record['miou']
        if is_best:
            best_record['epoch'] = epoch
            best_record['val_loss'] = val_loss.avg
            best_record['acc'] = acc
            best_record['miou'] = mean_iou
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'val_loss': val_loss.avg,
                'accuracy': acc,
                'miou': mean_iou,
                'model': model,
            }, is_best)

        print(
            '------------------------------------------------------------------------------------------------------'
        )
        print('[epoch: %d], [val_loss: %5f], [acc: %.5f], [miou: %.5f]' %
              (epoch, val_loss.avg, acc, mean_iou))
        print(
            'best record: [epoch: {epoch}], [val_loss: {val_loss:.5f}], [acc: {acc:.5f}], [miou: {miou:.5f}]'
            .format(**best_record))
        print(
            '------------------------------------------------------------------------------------------------------'
        )
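main() calls an adjust_learning_rate helper that is not shown in the excerpt. A minimal sketch of what it could look like, assuming polynomial decay over the two parameter groups created above; the schedule itself is an assumption, not the author's code.

def adjust_learning_rate(optimizer, epoch, power=0.9):
    # Polynomial decay of both group learning rates over the training run
    # (assumed schedule, for illustration only).
    scale = (1 - epoch / args.epochs) ** power
    optimizer.param_groups[0]['lr'] = args.learning_rate * scale        # 1x params
    optimizer.param_groups[1]['lr'] = 10 * args.learning_rate * scale   # 10x params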
Example 5
parser.add_argument("--batch_size",
                    type=int,
                    default=1,
                    help="Size of the batches")
parser.add_argument("--lr",
                    type=float,
                    default=0.0002,
                    help="Adams learning rate")
args = parser.parse_args()

device = ('cuda:0' if torch.cuda.is_available() else 'cpu')

transforms = T.Compose([
    T.Resize((256, 256)),
    T.ToTensor(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
# models
print('Defining models!')
generator = UnetGenerator().to(device)
discriminator = ConditionalDiscriminator().to(device)
# optimizers
g_optimizer = torch.optim.Adam(generator.parameters(),
                               lr=args.lr,
                               betas=(0.5, 0.999))
d_optimizer = torch.optim.Adam(discriminator.parameters(),
                               lr=args.lr,
                               betas=(0.5, 0.999))
# loss functions
g_criterion = GeneratorLoss(alpha=100)
d_criterion = DiscriminatorLoss()
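The snippet sets up the pix2pix-style models, optimizers and losses but not the update loop. A sketch of one training step under assumed call signatures for GeneratorLoss and DiscriminatorLoss (both are custom classes whose interfaces are not shown here), with a hypothetical dataloader yielding (input, target) image pairs.

for x, real in dataloader:  # hypothetical loader of (input, target) pairs
    x, real = x.to(device), real.to(device)

    # Discriminator step: real pairs vs. detached fakes
    # (assumed signature: d_criterion(fake_pred, real_pred)).
    fake = generator(x).detach()
    d_loss = d_criterion(discriminator(fake, x), discriminator(real, x))
    d_optimizer.zero_grad()
    d_loss.backward()
    d_optimizer.step()

    # Generator step: adversarial term plus alpha-weighted reconstruction term
    # (assumed signature: g_criterion(fake, real, fake_pred)).
    fake = generator(x)
    g_loss = g_criterion(fake, real, discriminator(fake, x))
    g_optimizer.zero_grad()
    g_loss.backward()
    g_optimizer.step()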
Example 6
if __name__ == '__main__':

    args = parse_args()

    wandb.init(config=args,
               project=f'dlcv_naive_{args.source}2{args.target}')

    size = 64
    t0 = transforms.Compose([
            transforms.Resize(size),
            transforms.ColorJitter(),
            transforms.RandomRotation(15, fill=(0,)),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])
    t1 = transforms.Compose([
            transforms.Resize(size),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])

    root = '../hw3_data/digits/'
    # dataset
    source, target = args.source, args.target 
    train_source_dataset = Digits_Dataset(root+f'{source}/train', source, t0)
    train_target_dataset = Digits_Dataset(root+f'{target}/train', target, t0)
    valid_source_dataset = Digits_Dataset(root+f'{source}/test', source, t1)
    valid_target_dataset = Digits_Dataset(root+f'{target}/test', target, t1)
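Only the datasets are built in this excerpt. A hedged continuation wrapping them in loaders, assuming DataLoader is imported from torch.utils.data; the batch size and worker count are guesses, not values from the original script.

    batch_size = 128  # assumed; the real value comes from parse_args() in the original script

    train_source_loader = DataLoader(train_source_dataset, batch_size=batch_size,
                                     shuffle=True, num_workers=4)
    train_target_loader = DataLoader(train_target_dataset, batch_size=batch_size,
                                     shuffle=True, num_workers=4)
    valid_source_loader = DataLoader(valid_source_dataset, batch_size=batch_size,
                                     shuffle=False, num_workers=4)
    valid_target_loader = DataLoader(valid_target_dataset, batch_size=batch_size,
                                     shuffle=False, num_workers=4)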
Example 7
def inference(args):
    
    if args.target=='mnistm':
        args.source = 'usps'
    elif args.target=='usps':
        args.source = 'svhn'
    elif args.target=='svhn':
        args.source = 'mnistm'
    else:
        raise NotImplementedError(f"{args.target}: not implemented!")
    
    size = args.img_size
    t1 = transforms.Compose([
            transforms.Resize(size),
            transforms.Grayscale(3),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ])

    valid_target_dataset = Digits_Dataset_Test(args.dataset_path, t1)
    valid_target_dataloader = DataLoader(valid_target_dataset,
                                         batch_size=512,
                                         num_workers=6)

    load = torch.load(
        f"./p3/result/3_2/{args.source}2{args.target}/best_model.pth",
        map_location='cpu')
        
    feature_extractor = FeatureExtractor()
    feature_extractor.load_state_dict(load['F'])
    feature_extractor.cuda()
    feature_extractor.eval()

    label_predictor = LabelPredictor()
    label_predictor.load_state_dict(load['C'])
    label_predictor.cuda()
    label_predictor.eval()
           
    out_preds = []
    out_fnames = []
    count = 0
    for imgs, fnames in valid_target_dataloader:
        bsize = imgs.size(0)

        imgs = imgs.cuda()

        features = feature_extractor(imgs)
        class_output = label_predictor(features)

        _, preds = class_output.max(1)
        preds = preds.detach().cpu()

        out_preds.append(preds)
        out_fnames += fnames

        count += bsize
        print(f"\t [{count}/{len(valid_target_dataloader.dataset)}]",
              end="   \r")
        
    out_preds = torch.cat(out_preds)
    out_preds = out_preds.cpu().numpy()
    
    d = {'image_name':out_fnames, 'label':out_preds}
    df = pd.DataFrame(data=d)
    df = df.sort_values('image_name')
    df.to_csv(args.out_csv, index=False)
    print(f' [Info] finished predicting {args.dataset_path}')
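inference() is never invoked in the excerpt. A hypothetical entry point, assuming an argument parser that mirrors the fields used above; the flag names and defaults are guesses.

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--target', choices=['mnistm', 'usps', 'svhn'])  # assumed flag
    parser.add_argument('--img_size', type=int, default=28)             # assumed default
    parser.add_argument('--dataset_path', type=str)
    parser.add_argument('--out_csv', type=str)
    inference(parser.parse_args())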
Example 8
                        default='cuda:0',
                        help='cpu or cuda:0 or cuda:1')

    args = parser.parse_args() if string is None else parser.parse_args(string)
    return args


if __name__ == '__main__':

    args = parse_args()

    wandb.init(config=args, project='dlcv_gan_face')

    transform = transforms.Compose([
        transforms.Resize(args.img_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5] * 3, [0.5] * 3)
    ])
    train_dataset = Face_Dataset('../hw3_data/face/train', transform)
    valid_dataset = Face_Dataset('../hw3_data/face/test', transform)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch,
                                  shuffle=True,
                                  num_workers=args.num_workers)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=args.batch,
                                  num_workers=args.num_workers)

    train(args, train_dataloader, valid_dataloader)