Example #1
def init_dataset(use_gpu):
    """Build train/val/test transforms, datasets, and data loaders."""
    normalize = Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])

    transform_tr = Compose([
        Random2DTranslation(args.height, args.width, p=0.5),
        RandomHorizontalFlip(),
        ToTensor(), normalize
    ])

    transform_te = Compose(
        [Resize([args.height, args.width]),
         ToTensor(), normalize])

    trainset = datasets.init_dataset(args.dataset,
                                     root=args.root,
                                     transform=transform_tr,
                                     mode='train',
                                     verbose=True)

    valset = datasets.init_dataset(args.dataset,
                                   root=args.root,
                                   transform=transform_te,
                                   mode='val',
                                   verbose=False)

    testset = datasets.init_dataset(args.dataset,
                                    root=args.root,
                                    transform=transform_te,
                                    mode='test',
                                    verbose=False)

    num_attrs = trainset.num_attrs
    attr_dict = trainset.attr_dict

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.workers,
                                              pin_memory=use_gpu,
                                              drop_last=True)

    valloader = torch.utils.data.DataLoader(valset,
                                            batch_size=args.batch_size,
                                            shuffle=False,
                                            num_workers=args.workers,
                                            pin_memory=use_gpu,
                                            drop_last=False)

    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=use_gpu,
                                             drop_last=False)

    return trainloader, valloader, testloader, num_attrs, attr_dict
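
For context, a minimal sketch of how the returned loaders might be consumed; the caller below, the global `args`, and the per-batch layout `(imgs, attrs)` are assumptions, since the snippet does not show them:

use_gpu = torch.cuda.is_available()
trainloader, valloader, testloader, num_attrs, attr_dict = init_dataset(use_gpu)

for imgs, attrs in trainloader:
    if use_gpu:
        # pin_memory=True above makes these host-to-device copies cheaper
        imgs, attrs = imgs.cuda(non_blocking=True), attrs.cuda(non_blocking=True)
    # ... forward/backward pass ...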
Example #2
def run():
    args = parse_opts()

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = "9"

    # GLOBAL VARS #
    MODE = args.mode
    CLASS_WEIGHT = False
    N_EP = 20
    FLATTEN = args.flatten
    RNN = args.rnn
    BATCH_SIZE = args.batch_size
    ####

    datasets, dataloaders = init_dataset(
        BATCH_SIZE, single_channel=args.single_channel)

    print('[Train] class counts', np.unique(
        datasets['train'].target_vals, return_counts=True))
    print('[Test] class counts', np.unique(
        datasets['test'].target_vals, return_counts=True))

    n_ch = 1 if args.single_channel else 3

    if MODE == 'min':
        in_channels = datasets['train'].min_depth * n_ch
    elif MODE == 'max':
        in_channels = datasets['train'].max_depth * n_ch
    else:
        raise ValueError("unexpected mode: {}".format(MODE))

    torch.manual_seed(0)

    # init net
    net = init_net(opt=args.model_idx, in_channels=in_channels)

    class_weight = None
    if CLASS_WEIGHT:
        cnts = Counter(datasets['train'].target_vals)
        class_weight = [max(cnts.values()) / cnts['0'],
                        max(cnts.values()) / cnts['1']]
        class_weight = torch.FloatTensor(class_weight)

    cross_entropy_loss = nn.CrossEntropyLoss(weight=class_weight).cuda()
    focal_loss = FocalLoss().cuda()  # unused below; kept as an alternative criterion

    optimizer = optim.Adam(net.parameters(), lr=0.000027)

    criterion = cross_entropy_loss

    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    #     optimizer, 'min', verbose=True, patience=7)

    for ep in range(N_EP):
        train_epoch(net, dataloaders['train'], optimizer,
                    criterion, ep, scheduler=None, flatten=FLATTEN, MODE=MODE, rnn=RNN)
        valid_loss = evaluate(net, dataloaders['test'], criterion,
                              ep, flatten=FLATTEN, MODE=MODE, rnn=RNN)
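
`FocalLoss` is not defined in this snippet; a minimal sketch of the standard focal loss formulation (Lin et al., 2017) it presumably implements, with `gamma=2.0` as an assumed default:

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    """Cross entropy scaled by (1 - p_t) ** gamma, so confidently
    classified examples contribute less to the loss."""
    def __init__(self, gamma=2.0):
        super().__init__()
        self.gamma = gamma

    def forward(self, logits, targets):
        ce = F.cross_entropy(logits, targets, reduction='none')
        p_t = torch.exp(-ce)  # probability assigned to the true class
        return ((1 - p_t) ** self.gamma * ce).mean()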
Example #3
def main():
    global args
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
    cudnn.benchmark = True

    args.resume = osp.join(args.resume, 'best_model.pth.tar')

    save_root = osp.join(osp.dirname(args.resume), 'results')
    os.makedirs(save_root, exist_ok=True)
    print("==========\nArgs:{}\n==========".format(args))
    # create model
    print("=> creating model '{}'".format(args.arch))
    model = models.init_model(name=args.arch)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))
        return

    model = torch.nn.DataParallel(model).cuda()

    print("Initializing dataset {}".format(args.dataset))
    dataset = datasets.init_dataset(args.dataset, root=osp.join(args.data_root, args.dataset))

    to_pil = T.ToPILImage()

    model.eval()
    print("===> Start testing")
    with torch.no_grad():
        for img in dataset.valset_select['raw']:
            raw_path = osp.join(args.data_root, args.dataset, img)
            raw_pil = Image.open(raw_path)
            raw = depth_transform(raw_pil)
            raw = TF.to_tensor(raw).float()
            valid_mask = (raw > 0).float()  # pixels that carry a measured depth

            inp = torch.unsqueeze(raw, 0).cuda()
            output = torch.clamp(model(inp), min=0, max=255)
            output = output * 256.
            raw = raw * 256.

            output = output[0].cpu()
            # keep measured depth where valid; fill the holes with the prediction
            output = raw * valid_mask + output * (1 - valid_mask)
            pil_img = to_pil(output.int())
            pil_img.save(osp.join(save_root, osp.basename(img)))
            print(img + ' finished.')
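
`depth_transform` is also undefined here; a plausible stand-in, labeled as an assumption (the real transform may crop or rescale differently), that resizes the sparse depth map with nearest-neighbour interpolation so zero (invalid) pixels do not bleed into valid measurements:

from PIL import Image

def depth_transform(pil_img, size=(228, 304)):  # (height, width); hypothetical default
    return pil_img.resize(size[::-1], Image.NEAREST)  # PIL expects (width, height)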
Example #4
train_transforms = T.Compose([
    T.Resize([384, 128]),
    T.RandomHorizontalFlip(p=0.5),
    T.Pad(10),
    T.RandomCrop([384, 128]),
    T.ToTensor(), normalize_transform,
    RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])

# val_transforms = T.Compose([
#     T.Resize([384, 128]),
#     T.ToTensor(),
#     normalize_transform
# ])

dataset = init_dataset('mars', root='../')
dataset_sizes = {}
dataset_sizes['train'] = dataset.num_train_imgs
train_set = VideoDataset(dataset.train, opt.seq_len, opt.sample_method,
                         train_transforms)
dataloaders = {}
dataloaders['train'] = DataLoader(train_set,
                                  batch_size=opt.batchsize,
                                  drop_last=True,
                                  sampler=RandomIdentitySampler(
                                      dataset.train, opt.batchsize, 4),
                                  num_workers=8)

# val_set = VideoDataset(dataset.query + dataset.gallery, 4, val_transforms)
# dataloaders['val'] = DataLoader(
#     val_set, batch_size=opt.batchsize, drop_last=True, shuffle=False, num_workers=8)
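
`RandomIdentitySampler` is what makes this loader suitable for triplet-style losses: each batch holds P identities with K instances apiece (K=4 here). A minimal sketch of that P×K sampling idea, assuming each entry of `dataset.train` is a `(paths, pid, camid)` tuple:

import random
from collections import defaultdict
from torch.utils.data import Sampler

class RandomIdentitySampler(Sampler):
    """Yield indices so every num_instances consecutive samples share one
    identity; with batch_size = P * num_instances, a batch then covers P
    distinct identities."""
    def __init__(self, data_source, batch_size, num_instances):
        self.index_dic = defaultdict(list)
        for idx, (_, pid, _) in enumerate(data_source):  # (paths, pid, camid) assumed
            self.index_dic[pid].append(idx)
        self.pids = list(self.index_dic)
        self.num_instances = num_instances

    def __iter__(self):
        for pid in random.sample(self.pids, len(self.pids)):
            idxs = self.index_dic[pid]
            if len(idxs) >= self.num_instances:
                yield from random.sample(idxs, self.num_instances)
            else:
                yield from random.choices(idxs, k=self.num_instances)

    def __len__(self):
        return len(self.pids) * self.num_instances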
Example #5
class_names = image_datasets['train'].classes

use_gpu = torch.cuda.is_available()

# since = time.time()
# inputs, classes = next(iter(dataloaders['train']))
# print(time.time()-since)


######################################################################
# New Train Loader
# --------
#

dataset = init_dataset('market1501', root='../')
train_set = ImageDataset(dataset.train, data_transforms['train'])
dataloaders['train'] = DataLoader(
    train_set, batch_size=opt.batchsize, drop_last=True,
    sampler=RandomIdentitySampler(dataset.train, opt.batchsize, 4),
    num_workers=8)


######################################################################
# Training the model
# --------
#

y_loss = {}  # loss history
y_loss['train'] = []
y_loss['val'] = []
Example #6
def init_dataset(self):
    """Instantiate the configured dataset and propagate its class count
    to the model config."""
    self.dataset = init_dataset(**self.params['dataset'])
    self.params['model']['num_classes'] = self.dataset.num_classes
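
A hedged usage sketch; the owning class (called `Trainer` here) and the params layout are assumptions inferred from the attribute accesses above:

params = {
    'dataset': {'name': 'market1501', 'root': '../'},  # kwargs forwarded to init_dataset (assumed)
    'model': {'arch': 'resnet50'},                     # num_classes is filled in by the call below
}
trainer = Trainer(params)  # hypothetical owner of this method
trainer.init_dataset()     # now params['model']['num_classes'] matches the dataset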