Example no. 1
        def dense_process_data(index):
            images = list()
            for ind in indices['dense']:
                ptr = int(ind)

                if ptr <= record.num_frames:
                    imgs = self._load_image(record.path, ptr)
                else:
                    imgs = self._load_image(record.path, record.num_frames)
                images.extend(imgs)

            if self.phase == 'Fntest':

                images = [np.asarray(im) for im in images]
                clip_input = np.concatenate(images, axis=2)

                self.t = transforms.Compose([
                    transforms.Resize(256)])
                clip_input = self.t(clip_input)

                normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                 std=[0.229, 0.224, 0.225])

                if record.crop_pos == 0:
                    self.transform = transforms.Compose([
                        transforms.CenterCrop((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])
                elif record.crop_pos == 1:
                    self.transform = transforms.Compose([
                        transforms.CornerCrop2((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])
                elif record.crop_pos == 2:
                    self.transform = transforms.Compose([
                        transforms.CornerCrop1((256, 256)),
                        transforms.ToTensor(),
                        normalize,
                    ])

                return self.transform(clip_input)

            return self.transform(images)
Example no. 2
    def trans(is_training=True):

        transforms = []
        transforms.append(T.ToTensor())
        if is_training:
            transforms.append(T.RandomHorizontalFlip(0.5))

        return T.Compose(transforms)
Example no. 3
def make_coco_transforms(image_set):

    normalize = T.Compose([T.ToTensor()])

    if image_set == 'train':
        return T.Compose([
            T.RandomHorizontalFlip(0.5),
            normalize,
        ])

    if image_set == 'val':
        return T.Compose([
            normalize,
        ])

    raise ValueError(f'unknown {image_set}')
Example no. 4
cuda_available = torch.cuda.is_available()

# create the results directory if it does not exist
if not os.path.exists(RESULTS_PATH):
    os.makedirs(RESULTS_PATH)

# Load dataset
mean = m
std_dev = s

transform_train = transforms.Compose([
    transforms.RandomApply([transforms.ColorJitter(0.1, 0.1, 0.1, 0.1)],
                           p=0.5),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std_dev)
])

transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std_dev)
])

training_set = LocalDataset(IMAGES_PATH,
                            TRAINING_PATH,
                            transform=transform_train)
validation_set = LocalDataset(IMAGES_PATH,
                              VALIDATION_PATH,
                              transform=transform_test)
Example no. 5
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example no. 6
#                 batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
# valloader = torch.utils.data.DataLoader(CSDataSet(args.data_dir, './dataset/list/cityscapes/val.lst', crop_size=(1024, 2048), mean=IMG_MEAN, scale=False, mirror=False),
#                                 batch_size=2, shuffle=False, pin_memory=True)

value_scale = 255
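# ToTensor in my_trans keeps pixel values in [0, 255] (see the "without div 255"
# comments below), so the ImageNet mean/std are scaled to that range.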
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
train_transform = my_trans.Compose([
    # my_trans.Resize((args.height, args.width)),
    # my_trans.RandScale([0.5, 2.0]),
    # my_trans.RandomGaussianBlur(),
    my_trans.RandomHorizontalFlip(),
    # my_trans.Crop([args.height, args.width],crop_type='rand', padding=mean, ignore_label=255),
    my_trans.ToTensor(),  # without div 255
    my_trans.Normalize(mean=mean, std=std)
])
val_transform = my_trans.Compose([
    # my_trans.Resize((args.height, args.width)),
    my_trans.ToTensor(),  # without div 255
    my_trans.Normalize(mean=mean, std=std)
])

data_dir = '/data/zzg/CamVid/'
train_dataset = CamVid(data_dir,
                       mode='train',
                       p=None,
                       transform=train_transform)
trainloader = torch.utils.data.DataLoader(train_dataset,
                                          batch_size=args.batch_size,
Example no. 7
def main():
    global args, best_record
    args = parser.parse_args()

    if args.augment:
        transform_train = joint_transforms.Compose([
            joint_transforms.RandomCrop(256),
            joint_transforms.Normalize(),
            joint_transforms.ToTensor(),
        ])
    else:
        transform_train = None

    dataset_train = Data.WData(args.data_root, transform_train)
    dataloader_train = data.DataLoader(dataset_train,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=16)

    dataset_val = Data.WData(args.val_root, transform_train)
    dataloader_val = data.DataLoader(dataset_val,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     num_workers=16)

    model = SFNet(input_channels=37, dilations=[2, 4, 8], num_class=2)

    # multi gpu
    model = torch.nn.DataParallel(model)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()
    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss(ignore_index=-1).cuda()
    optimizer = torch.optim.SGD([{
        'params': get_1x_lr_params(model)
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(dataloader_train, model, criterion, optimizer, epoch)

        # evaluate on validation set
        acc, mean_iou, val_loss = validate(dataloader_val, model, criterion,
                                           epoch)

        is_best = mean_iou > best_record['miou']
        if is_best:
            best_record['epoch'] = epoch
            best_record['val_loss'] = val_loss.avg
            best_record['acc'] = acc
            best_record['miou'] = mean_iou
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'val_loss': val_loss.avg,
                'accuracy': acc,
                'miou': mean_iou,
                'model': model,
            }, is_best)

        print(
            '------------------------------------------------------------------------------------------------------'
        )
        print('[epoch: %d], [val_loss: %5f], [acc: %.5f], [miou: %.5f]' %
              (epoch, val_loss.avg, acc, mean_iou))
        print(
            'best record: [epoch: {epoch}], [val_loss: {val_loss:.5f}], [acc: {acc:.5f}], [miou: {miou:.5f}]'
            .format(**best_record))
        print(
            '------------------------------------------------------------------------------------------------------'
        )
Example no. 8
def train(args,
          model,
          optimizer,
          loss,
          log_file=None,
          test_cnf=None,
          val_cnf=None,
          models_folder=None,
          tb_writer=None):
    log = log_file
    start = time.time()
    train_set = NTUSkeletonDataset.NTUSkeletonDataset(
        args['train_path'],
        cache_dir=args['cache_path'],
        selected_actions=args['selected_actions'],
        selected_joints=args['selected_joints'],
        transform=transforms.Compose([
            skeleton_transforms.MoveOriginToJoint(),
            skeleton_transforms.GaussianFilter(),
            skeleton_transforms.ResizeSkeletonSegments(),
            skeleton_transforms.UniformSampleOrPad(
                args['maximum_sample_size']),
            skeleton_transforms.ToTensor(),
            skeleton_transforms.MovingPoseDescriptor(
                args['maximum_sample_size'])
        ]),
        use_cache=args['use_cache'],
        use_validation=args['use_validation'],
        validation_fraction=args['validation_fraction'],
        preprocessing_threads=args['preprocessing_threads'])
    end = time.time()
    num_train_samples = len(train_set)
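    # Temporarily switch the dataset to VALIDATION mode to count the validation
    # samples, then switch back to TRAIN mode.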
    train_set.set_use_mode(NTUSkeletonDataset.DatasetMode.VALIDATION)
    num_val_samples = len(train_set)
    train_set.set_use_mode(NTUSkeletonDataset.DatasetMode.TRAIN)
    print(
        end - start,
        "Loaded {} train samples and {} validation samples".format(
            num_train_samples, num_val_samples))

    start = time.time()
    test_set = NTUSkeletonDataset.NTUSkeletonDataset(
        args['test_path'],
        cache_dir=args['cache_path'],
        selected_actions=args['selected_actions'],
        selected_joints=args['selected_joints'],
        transform=transforms.Compose([
            skeleton_transforms.MoveOriginToJoint(),
            skeleton_transforms.GaussianFilter(),
            skeleton_transforms.ResizeSkeletonSegments(),
            skeleton_transforms.UniformSampleOrPad(
                args['maximum_sample_size']),
            skeleton_transforms.ToTensor(),
            skeleton_transforms.MovingPoseDescriptor(
                args['maximum_sample_size'])
        ]),
        use_cache=args['use_cache'],
        use_validation=False,
        preprocessing_threads=args['preprocessing_threads'])
    end = time.time()
    print(end - start, "Loaded {} test samples".format(len(test_set)))
    train_loader = DataLoader(train_set,
                              batch_size=args['batch_size'],
                              shuffle=True)

    min_train_loss = np.inf
    min_train_epoch = -1

    min_validation_loss = np.inf
    min_validation_epoch = -1
    max_validation_acc = 0
    max_validation_acc_epoch = -1

    min_test_loss = np.inf
    min_test_epoch = -1
    max_test_acc = 0
    max_test_acc_epoch = -1

    for epoch in range(1, args['epochs'] + 1):
        __log('\n################\n### EPOCH {}\n################\n'.format(
            epoch),
              color='cyan',
              log=log)

        train_loss, (mean_max_param, mean_avg_param, mean_max_grad,
                     mean_avg_grad) = train_epoch(epoch, args, model,
                                                  train_loader, optimizer,
                                                  loss, log)

        if train_loss < min_train_loss:
            color = "green"
            min_train_loss = train_loss
            min_train_epoch = epoch
        else:
            color = "red"

        __log('[TRAIN] Mean loss: {}\t'
              'Best_train_loss: {}\t at epoch: {}'.format(
                  train_loss, min_train_loss, min_train_epoch),
              color=color,
              log=log)
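        # Run validation only every 5 epochs; testing happens below when the
        # validation accuracy improves.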
        if epoch % 5 != 0:
            continue

        # Perform validation
        train_set.set_use_mode(NTUSkeletonDataset.DatasetMode.VALIDATION)
        validation_loss, validation_acc = validate(epoch, model, train_set,
                                                   args, val_cnf, loss, log)
        train_set.set_use_mode(NTUSkeletonDataset.DatasetMode.TRAIN)
        test_color = None
        test_loss = 0.
        if validation_loss < min_validation_loss:
            min_validation_loss = validation_loss
            min_validation_epoch = epoch
        if validation_acc > max_validation_acc:
            color = "green"
            max_validation_acc = validation_acc
            max_validation_acc_epoch = epoch

            # Perform test on best models
            test_loss, test_acc = test(epoch, model, test_set, args, test_cnf,
                                       loss)
            if test_loss < min_test_loss:
                min_test_loss = test_loss
                min_test_epoch = epoch
            if test_acc > max_test_acc:
                max_test_acc = test_acc
                max_test_acc_epoch = epoch
                test_color = "blue"
                save_best_model(model, models_folder)
            else:
                test_color = "red"
        else:
            color = "red"

        __log('[VALIDATION] Mean loss\t: {}\t Best_validation_loss: {}\t'
              'at epoch: {}'.format(validation_loss, min_validation_loss,
                                    min_validation_epoch),
              log=log)
        __log('[VALIDATION] Accuracy\t: {}\t Best_validation_acc: {}\t '
              'at epoch: {}'.format(validation_acc, max_validation_acc,
                                    max_validation_acc_epoch),
              color=color,
              log=log)

        if test_color:
            __log('[TEST] Mean loss\t: {}\t Best_test_loss: {}\t'
                  'at epoch: {}'.format(test_loss, min_test_loss,
                                        min_test_epoch),
                  log=log)
            __log('[TEST] Accuracy\t: {}\t Best_test_acc: {}\t '
                  'at epoch: {}'.format(test_acc, max_test_acc,
                                        max_test_acc_epoch),
                  color=test_color,
                  log=log)

        if test_color:
            tb_writer.add_scalars(
                'Loss', {
                    'Train': train_loss,
                    'Validation': validation_loss,
                    'Test': test_loss
                }, epoch)
            tb_writer.add_scalar('Test-Accuracy', test_acc, epoch)
        else:
            tb_writer.add_scalars('Loss', {
                'Train': train_loss,
                'Validation': validation_loss
            }, epoch)
        tb_writer.add_scalar('Validation-Accuracy', validation_acc, epoch)
        tb_writer.add_scalars('Params', {
            'Max': mean_max_param,
            'Avg': mean_avg_param
        }, epoch)
        tb_writer.add_scalars('Grads', {
            'Max': mean_max_grad,
            'Avg': mean_avg_grad
        }, epoch)

        log.flush()
Example no. 9
    help="Name of the dataset: ['facades', 'maps', 'cityscapes']")
parser.add_argument("--batch_size",
                    type=int,
                    default=1,
                    help="Size of the batches")
parser.add_argument("--lr",
                    type=float,
                    default=0.0002,
                    help="Adam learning rate")
args = parser.parse_args()

device = ('cuda:0' if torch.cuda.is_available() else 'cpu')

transforms = T.Compose([
    T.Resize((256, 256)),
    T.ToTensor(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
# models
print('Defining models!')
generator = UnetGenerator().to(device)
discriminator = ConditionalDiscriminator().to(device)
# optimizers
g_optimizer = torch.optim.Adam(generator.parameters(),
                               lr=args.lr,
                               betas=(0.5, 0.999))
d_optimizer = torch.optim.Adam(discriminator.parameters(),
                               lr=args.lr,
                               betas=(0.5, 0.999))
# loss functions
g_criterion = GeneratorLoss(alpha=100)
Example no. 10
def main():
    global best_acc

    if not os.path.isdir(args.out):
        mkdir_p(args.out)

    # Data
    print('==> Preparing cifar10')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32),
        transforms.RandomFlip(),
        transforms.ToTensor(),
    ])

    transform_val = transforms.Compose([
        transforms.CenterCrop(32),
        transforms.ToTensor(),
    ])

    train_labeled_set, train_unlabeled_set, _, val_set, test_set = dataset.get_cifar10(
        './data',
        args.n_labeled,
        args.outdata,
        transform_train=transform_train,
        transform_val=transform_val)

    labeled_trainloader = data.DataLoader(train_labeled_set,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=0,
                                          drop_last=True)
    unlabeled_trainloader = data.DataLoader(train_unlabeled_set,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0,
                                            drop_last=True)
    val_loader = data.DataLoader(val_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=0)
    test_loader = data.DataLoader(test_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=0)

    # Model
    print("==> creating WRN-28-2")

    def create_model(ema=False):
        model = models.WideResNet(num_classes=10)
        model = model.cuda()

        if ema:
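            # The EMA model is updated by WeightEMA rather than by backprop,
            # so its parameters are detached from the autograd graph.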
            for param in model.parameters():
                param.detach_()

        return model

    model = create_model()
    ema_model = create_model(ema=True)

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    train_criterion = SemiLoss()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    ema_optimizer = WeightEMA(model, ema_model, alpha=args.ema_decay)
    start_epoch = 0

    # Resume
    title = 'noisy-cifar-10'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint file found!'
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        ema_model.load_state_dict(checkpoint['ema_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.out, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
        logger.set_names([
            'Train Loss', 'Train Loss X', 'Train Loss U', 'Valid Loss',
            'Valid Acc.', 'Test Loss', 'Test Acc.'
        ])

    writer = SummaryWriter(args.out)
    step = 0
    test_accs = []
    # Train and val
    for epoch in range(start_epoch, args.epochs):

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_loss_x, train_loss_u = train(
            labeled_trainloader, unlabeled_trainloader, model, optimizer,
            ema_optimizer, train_criterion, epoch, use_cuda)
        _, train_acc = validate(labeled_trainloader,
                                ema_model,
                                criterion,
                                epoch,
                                use_cuda,
                                mode='Train Stats')
        val_loss, val_acc = validate(val_loader,
                                     ema_model,
                                     criterion,
                                     epoch,
                                     use_cuda,
                                     mode='Valid Stats')
        test_loss, test_acc = validate(test_loader,
                                       ema_model,
                                       criterion,
                                       epoch,
                                       use_cuda,
                                       mode='Test Stats ')

        step = args.val_iteration * (epoch + 1)

        writer.add_scalar('losses/train_loss', train_loss, step)
        writer.add_scalar('losses/valid_loss', val_loss, step)
        writer.add_scalar('losses/test_loss', test_loss, step)

        writer.add_scalar('accuracy/train_acc', train_acc, step)
        writer.add_scalar('accuracy/val_acc', val_acc, step)
        writer.add_scalar('accuracy/test_acc', test_acc, step)

        # append logger file
        logger.append([
            train_loss, train_loss_x, train_loss_u, val_loss, val_acc,
            test_loss, test_acc
        ])

        # save model
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'ema_state_dict': ema_model.state_dict(),
                'acc': val_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
        test_accs.append(test_acc)
    logger.close()
    writer.close()

    print('Mean acc:')
    print(np.mean(test_accs[-20:]))
Example no. 11
def inference(args):
    if args.target == 'mnistm':
        args.source = 'usps'
    elif args.target == 'usps':
        args.source = 'svhn'
    elif args.target == 'svhn':
        args.source = 'mnistm'
    else:
        raise NotImplementedError(f"{args.target}: not implemented!")

    size = args.img_size
    t1 = transforms.Compose([
        transforms.Resize(size),
        transforms.Grayscale(3),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    valid_target_dataset = Digits_Dataset_Test(args.dataset_path, t1)
    valid_target_dataloader = DataLoader(valid_target_dataset,
                                         batch_size=512,
                                         num_workers=6)

    load = torch.load(
        f"./p3/result/3_2/{args.source}2{args.target}/best_model.pth",
        map_location='cpu')

    feature_extractor = FeatureExtractor()
    feature_extractor.load_state_dict(load['F'])
    feature_extractor.cuda()
    feature_extractor.eval()

    label_predictor = LabelPredictor()
    label_predictor.load_state_dict(load['C'])
    label_predictor.cuda()
    label_predictor.eval()

    out_preds = []
    out_fnames = []
    count = 0
    for imgs, fnames in valid_target_dataloader:
        bsize = imgs.size(0)
        imgs = imgs.cuda()

        features = feature_extractor(imgs)
        class_output = label_predictor(features)

        _, preds = class_output.max(1)
        preds = preds.detach().cpu()

        out_preds.append(preds)
        out_fnames += fnames

        count += bsize
        print(f"\t [{count}/{len(valid_target_dataloader.dataset)}]",
              end="   \r")

    out_preds = torch.cat(out_preds)
    out_preds = out_preds.cpu().numpy()

    d = {'image_name': out_fnames, 'label': out_preds}
    df = pd.DataFrame(data=d)
    df = df.sort_values('image_name')
    df.to_csv(args.out_csv, index=False)
    print(f' [Info] finish predicting {args.dataset_path}')
Example no. 12
    # others
    parser.add_argument('--device',
                        type=str,
                        default='cuda:0',
                        help='cpu or cuda:0 or cuda:1')

    args = parser.parse_args() if string is None else parser.parse_args(string)
    return args


if __name__ == '__main__':

    args = parse_args()

    wandb.init(config=args, project='dlcv_gan_face')

    transform = transforms.Compose(
        [transforms.RandomHorizontalFlip(),
         transforms.ToTensor()])
    train_dataset = Face_Dataset('../hw3_data/face/train', transform)
    valid_dataset = Face_Dataset('../hw3_data/face/test', transform)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch,
                                  shuffle=True,
                                  num_workers=args.num_workers)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=args.batch,
                                  num_workers=args.num_workers)

    train(args, train_dataloader, valid_dataloader)
Example no. 13
sys.path.insert(0, "/cluster/home/julin/workspace/Semester_project_release")

import os
import torch
import numpy as np
from torch.utils.data import Dataset
from dataset.nuscenes_dataset import Nuscenes_dataset
from config.config_nuscenes import config_nuscenes as cfg
from dataset.dense_to_sparse import UniformSampling, LidarRadarSampling
from dataset import transforms as transforms
from dataset.radar_preprocessing import filter_radar_points_gt
import math
import h5py
import pickle
import matplotlib.pyplot as plt
to_tensor = transforms.ToTensor()

####################################
## Sparsifier documentation:
## 1. uniform: Uniformly sampled LiDAR points.
## 2. lidar_radar: Sampled LiDAR points using the radar pattern.
## 3. radar: Raw radar points (accumulated from three time steps).
## 4. radar_filtered: Filtered radar points using the heuristic algorithm.
## 5. radar_filtered2: Filtered radar points using the trained point classifier.
####################################
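
## A minimal sketch (not part of the original file) showing how the sparsifier
## names documented above could be mapped to the imported samplers. The
## constructor keyword arguments (num_samples, max_depth) are assumptions,
## not necessarily the project's actual API.
def build_sparsifier(name, num_samples=500, max_depth=100.0):
    if name == 'uniform':
        return UniformSampling(num_samples=num_samples, max_depth=max_depth)
    if name == 'lidar_radar':
        return LidarRadarSampling(num_samples=num_samples, max_depth=max_depth)
    if name in ('radar', 'radar_filtered', 'radar_filtered2'):
        # Raw / filtered radar points are presumably used directly, no sampling.
        return None
    raise ValueError(f'unknown sparsifier: {name}')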


# Define the dataset object for torch
class nuscenes_dataset_torch(Dataset):
    def __init__(
        self,
Example no. 14
import onnx
import onnxruntime
import numpy as np
import torch
import torchvision
from detrac import Detrac
import dataset.transforms as T

root = r"D:\dataset\UA-DETRAC\Detrac_dataset"
transforms = []
transforms.append(T.ToTensor())
transformscompose = T.Compose(transforms)
detrac = Detrac(root, imgformat='jpg', transforms=transformscompose)
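# torchvision detection models expect a list of image tensors, so a single
# sample is wrapped in a list.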
img = [detrac[0][0]]

onnx_model = onnx.load("carmodel2.onnx")
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession("carmodel2.onnx")

checkpoint = torch.load(r"D:\dataset\UA-DETRAC\model_9.pth",
                        map_location='cpu')
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(num_classes=5,
                                                             pretrained=False)
model.load_state_dict(checkpoint['model'])
model.eval()
torch_out = model(img)
print(torch_out)


def to_numpy(tensor):
    return tensor.detach().cpu().numpy()