Example #1
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    h, w = map(int, args.input_size.split(','))

    input_size = (h, w)

    model = get_cls_net(config=config, is_train=False)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    lip_dataset = LIPDataSet(args.data_dir,
                             'val',
                             crop_size=input_size,
                             transform=transform)
    num_samples = len(lip_dataset)

    valloader = data.DataLoader(lip_dataset,
                                batch_size=args.batch_size * len(gpus),
                                shuffle=False,
                                pin_memory=True)

    restore_from = args.restore_from

    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    # Checkpoints saved under nn.DataParallel prefix every key with 'module.';
    # the zipped iteration assumes both dicts share the same key order.
    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            # strip the 'module.' prefix from 'key'
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)

    model.eval()
    model.cuda()

    parsing_preds, scales, centers, time_list, parsing_logits = valid(
        model, valloader, input_size, num_samples, len(gpus))
    print(len(parsing_logits))
    mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes,
                            args.data_dir, input_size)
    print(mIoU)
    print('Write Results!')
    write_results(parsing_preds,
                  scales,
                  centers,
                  args.data_dir,
                  'val',
                  args.save_dir,
                  input_size=input_size)
    print('Write Logits!')
    write_logits(parsing_logits,
                 scales,
                 centers,
                 args.data_dir,
                 'val',
                 args.save_dir,
                 input_size=input_size)

    print('total time is ', sum(time_list))
    print('avg time is ', sum(time_list) / len(time_list))
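
The key-remapping loop above relies on both state dicts iterating in the same
order. A minimal, more defensive sketch (the helper name strip_module_prefix
is ours, not the repo's):

from collections import OrderedDict

def strip_module_prefix(old_state_dict):
    """Return a copy of a checkpoint state_dict with the 'module.' prefix
    (added when the model was saved under nn.DataParallel) removed."""
    cleaned = OrderedDict()
    for key, value in old_state_dict.items():
        new_key = key[len('module.'):] if key.startswith('module.') else key
        cleaned[new_key] = value
    return cleaned

# usage:
# model.load_state_dict(strip_module_prefix(torch.load(restore_from)['state_dict']))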
Example #2
def main():
    """Create the model and start the training."""
    print(args)
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    # cudnn related setting
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = get_cls_net(config=config, is_train=True)
    # model = DataParallelModel(deeplab)

    print('-------Load Weight', args.restore_from)
    saved_state_dict = torch.load(args.restore_from)

    if args.start_epoch > 0:
        model = DataParallelModel(deeplab)
        model.load_state_dict(saved_state_dict['state_dict'])
    else:
        new_params = deeplab.state_dict().copy()
        state_dict_pretrain = saved_state_dict
        for state_name in state_dict_pretrain:
            if state_name in new_params:
                new_params[state_name] = state_dict_pretrain[state_name]
                print('LOAD', state_name)
            else:
                print('NOT LOAD', state_name)
        deeplab.load_state_dict(new_params)
        model = DataParallelModel(deeplab)
    print('-------Load Weight Finish', args.restore_from)

    model.cuda()

    criterion = CriterionAll(loss_type=args.loss)
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    print("-------Loading data...")
    if 'vehicle_parsing_dataset' in args.data_dir:
        parsing_dataset = VPDataSet(args.data_dir,
                                    args.dataset,
                                    crop_size=input_size,
                                    transform=transform)
    elif 'LIP' in args.data_dir:
        parsing_dataset = LIPDataSet(args.data_dir,
                                     args.dataset,
                                     crop_size=input_size,
                                     transform=transform)
    else:
        # fail fast instead of hitting a NameError at the DataLoader below
        raise ValueError(f'Unrecognized data_dir: {args.data_dir}')
    print("Data dir : ", args.data_dir)
    print("Dataset : ", args.dataset)
    trainloader = data.DataLoader(parsing_dataset,
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=8,
                                  pin_memory=True)
    '''
    list_map = []

    for part in deeplab.path_list:
        list_map = list_map + list(map(id, part.parameters()))
    
    base_params = filter(lambda p: id(p) not in list_map,
                         deeplab.parameters())
    params_list = []
    params_list.append({'params': base_params, 'lr':args.learning_rate*0.1})
    for part in deeplab.path_list:
        params_list.append({'params': part.parameters()})
    print ('len(params_list)',len(params_list))
    '''
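    # (The commented block above would train the backbone parameters at a 10x
    #  smaller learning rate than the path_list branches; it is disabled in
    #  the original, so a single parameter group is used below.)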

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    if args.start_epoch > 0:
        optimizer.load_state_dict(saved_state_dict['optimizer'])
        print('========Load Optimizer', args.restore_from)

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        for i_iter, batch in enumerate(trainloader):
            i_iter += len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, _ = batch
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)

            loss = criterion(preds, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i_iter % 100 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            print(
                f'epoch = {epoch}, iter = {i_iter}/{total_iters}, lr={lr:.6f}, loss = {loss.data.cpu().numpy():.6f}'
            )

        if (epoch + 1) % args.save_step == 0 or epoch == args.epochs - 1:
            time.sleep(10)
            print("-------Saving checkpoint...")
            save_checkpoint(model, epoch, optimizer)

    time.sleep(10)
    save_checkpoint(model, epoch, optimizer)
    end = timeit.default_timer()
    print(end - start, 'seconds')
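
adjust_learning_rate is not shown in these examples. For reference, a hedged
sketch of the polynomial ("poly") decay that parsing codebases like this one
commonly use; the default power of 0.9 is an assumption, not taken from the
repo:

def adjust_learning_rate(optimizer, i_iter, total_iters,
                         base_lr=7e-3, power=0.9):
    """Poly schedule: lr = base_lr * (1 - i_iter / total_iters) ** power.
    Hypothetical re-implementation; the repo's own function may differ."""
    lr = base_lr * (1.0 - float(i_iter) / total_iters) ** power
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr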
Example #3
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    h, w = map(int, args.input_size.split(','))
    
    input_size = (h, w)

    model = get_cls_net(config=config, num_classes=args.num_classes, is_train=False)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    print('-------Load Data', args.data_dir)
    if 'vehicle_parsing_dataset' in args.data_dir:
        parsing_dataset = VPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    elif 'LIP' in args.data_dir:
        parsing_dataset = LIPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    elif 'WeiyiAll' in args.data_dir:
        parsing_dataset = WYDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    else:
        raise ValueError(f'Unrecognized data_dir: {args.data_dir}')
    
    # compute_mean_ioU below expects list_path; assume the dataset exposes its
    # list file as in the final example (shown there for WYDataSet).
    list_path = os.path.join(args.data_dir, parsing_dataset.list_file)
    num_samples = len(parsing_dataset)
    valloader = data.DataLoader(parsing_dataset, batch_size=args.batch_size * len(gpus), shuffle=False, pin_memory=True)

    print('-------Load Weight', args.restore_from)
    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            # strip the 'module.' prefix that nn.DataParallel adds
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)

    model.eval()
    model.cuda()

    print('-------Start Evaluation...')
    parsing_preds, scales, centers, time_list = valid(model, valloader, input_size, num_samples, len(gpus))
    mIoU, no_test_class = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, list_path, input_size, dataset=args.dataset)
    print(mIoU)
    print('No test class : ', no_test_class)

    print('-------Saving Results', args.save_dir)
    write_results(parsing_preds, scales, centers, args.data_dir, args.dataset, args.save_dir, input_size=input_size)

    print('total time is ', sum(time_list))
    print('avg time is ', sum(time_list) / len(time_list))
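
The if/elif dataset selection recurs across these examples; a sketch of a
registry-based alternative (build_dataset and DATASETS are hypothetical names)
that fails loudly on an unknown path:

DATASETS = {
    'vehicle_parsing_dataset': VPDataSet,
    'LIP': LIPDataSet,
    'WeiyiAll': WYDataSet,
}

def build_dataset(data_dir, split, crop_size, transform):
    # dispatch on a marker substring of data_dir, as the examples do
    for marker, dataset_cls in DATASETS.items():
        if marker in data_dir:
            return dataset_cls(data_dir, split,
                               crop_size=crop_size, transform=transform)
    raise ValueError(f'No dataset registered for {data_dir!r}')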
Example #4
def main():
    """Create the model and start the training."""
    print(args)
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    writer = SummaryWriter(args.snapshot_dir)
    gpus = [int(i) for i in args.gpu.split(',')]
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = [h, w]

    # cudnn related setting
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.enabled = True

    deeplab = get_cls_net(config=config,
                          num_classes=args.num_classes,
                          is_train=True)

    print('-------Load Weight', args.restore_from)
    saved_state_dict = torch.load(args.restore_from)

    if args.start_epoch > 0:
        model = DataParallelModel(deeplab)
        model.load_state_dict(saved_state_dict['state_dict'])
    else:
        new_params = deeplab.state_dict().copy()
        state_dict_pretrain = saved_state_dict
        for state_name in state_dict_pretrain:
            if state_name in new_params:
                new_params[state_name] = state_dict_pretrain[state_name]
            else:
                print('NOT LOAD', state_name)
        deeplab.load_state_dict(new_params)
        model = DataParallelModel(deeplab)
    print('-------Load Weight Finish', args.restore_from)

    model.cuda()

    criterion0 = CriterionAll(loss_type='ohem')
    criterion0 = DataParallelCriterion(criterion0)
    criterion0.cuda()

    criterion1 = LovaszSoftmax(input_size=input_size)
    criterion1 = DataParallelCriterion(criterion1)
    criterion1.cuda()

    transform = build_transforms(args)

    print("-------Loading data...")
    parsing_dataset = WYDataSet(args.data_dir,
                                args.dataset,
                                crop_size=input_size,
                                transform=transform)
    print("Data dir : ", args.data_dir)
    print("Dataset : ", args.dataset, "Sample Number: ",
          parsing_dataset.number_samples)
    trainloader = data.DataLoader(parsing_dataset,
                                  batch_size=args.batch_size * len(gpus),
                                  shuffle=True,
                                  num_workers=8,
                                  collate_fn=fast_collate_fn_mask,
                                  pin_memory=True)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    if args.start_epoch > 0:
        optimizer.load_state_dict(saved_state_dict['optimizer'])
        print('========Load Optimizer', args.restore_from)

    total_iters = args.epochs * len(trainloader)
    for epoch in range(args.start_epoch, args.epochs):
        model.train()
        tng_prefetcher = data_prefetcher_mask(trainloader)
        batch = tng_prefetcher.next()
        n_batch = 0
        while batch[0] is not None:
            # (replaces: for i_iter, batch in enumerate(trainloader))
            i_iter = n_batch + len(trainloader) * epoch
            lr = adjust_learning_rate(optimizer, i_iter, total_iters)

            images, labels, _ = batch
            labels = labels.squeeze(1)
            labels = labels.long().cuda(non_blocking=True)
            preds = model(images)

            loss0 = criterion0(preds, labels)
            loss1 = criterion1(preds, labels)
            loss = loss0 + loss1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch = tng_prefetcher.next()
            n_batch += 1

            # '% 1' is always true: these scalars are logged every iteration
            if i_iter % 1 == 0:
                writer.add_scalar('learning_rate', lr, i_iter)
                writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)
                writer.add_scalar('loss0', loss0.data.cpu().numpy(), i_iter)
                writer.add_scalar('loss1', loss1.data.cpu().numpy(), i_iter)

            # adjacent f-strings avoid the stray indentation that the original
            # backslash continuations embedded in the printed line
            print(f'epoch = {epoch}, iter = {i_iter}/{total_iters}, '
                  f'lr = {lr:.6f}, '
                  f'loss = {loss.data.cpu().numpy():.6f}, '
                  f'loss0 = {loss0.data.cpu().numpy():.6f}, '
                  f'loss1 = {loss1.data.cpu().numpy():.6f}')

        if (epoch + 1) % args.save_step == 0 or epoch == args.epochs - 1:
            time.sleep(10)
            print("-------Saving checkpoint...")
            save_checkpoint(model, epoch, optimizer)

    time.sleep(10)
    save_checkpoint(model, epoch, optimizer)
    end = timeit.default_timer()
    print(end - start, 'seconds')
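
data_prefetcher_mask is not shown here. A minimal sketch of the prefetcher
pattern it presumably follows (overlapping host-to-device copies with compute
on a side CUDA stream); the real class also yields the metadata element that
the training loop unpacks as _:

import torch

class DataPrefetcher:
    """Sketch of an apex-style prefetcher: stage the next batch on the GPU
    while the current one is being processed."""

    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self._preload()

    def _preload(self):
        try:
            self.next_images, self.next_labels = next(self.loader)
        except StopIteration:
            self.next_images, self.next_labels = None, None
            return
        with torch.cuda.stream(self.stream):
            self.next_images = self.next_images.cuda(non_blocking=True)
            self.next_labels = self.next_labels.cuda(non_blocking=True)

    def next(self):
        # block the default stream until the staged copies have finished
        torch.cuda.current_stream().wait_stream(self.stream)
        images, labels = self.next_images, self.next_labels
        if images is not None:
            self._preload()
        return images, labels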
Example #5
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    image_size = (2788, 1400)

    model = get_cls_net(config=config,
                        num_classes=args.num_classes,
                        is_train=False)

    transform = build_transforms(args)

    print('-------Load Data : ', args.data_dir)
    parsing_dataset = WYDataSet(args.data_dir,
                                args.dataset,
                                crop_size=input_size,
                                transform=transform)
    list_path = os.path.join(args.data_dir, parsing_dataset.list_file)

    num_samples = len(parsing_dataset)
    if 'test_no_label' not in args.dataset:
        valloader = data.DataLoader(parsing_dataset,
                                    batch_size=args.batch_size * len(gpus),
                                    shuffle=False,
                                    collate_fn=fast_collate_fn_mask,
                                    pin_memory=True)
    else:
        valloader = data.DataLoader(parsing_dataset,
                                    batch_size=args.batch_size * len(gpus),
                                    shuffle=False,
                                    collate_fn=fast_collate_fn,
                                    pin_memory=True)

    print('-------Load Weight', args.restore_from)
    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            # strip the 'module.' prefix that nn.DataParallel adds
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)

    model.eval()
    model.cuda()

    print('-------Start Evaluation...')
    parsing_preds, is_rotated, during_time = valid(args, model, valloader,
                                                   image_size, input_size,
                                                   num_samples, len(gpus))
    if 'test_no_label' not in args.dataset:
        mIoU, no_test_class = compute_mean_ioU_wy(parsing_preds,
                                                  is_rotated,
                                                  args.num_classes,
                                                  args.data_dir,
                                                  input_size,
                                                  dataset=args.dataset,
                                                  list_path=list_path)
        print(mIoU)
        print('No test class : ', no_test_class)

    print('-------Saving Results', args.save_dir)
    write_results_wy(parsing_preds,
                     is_rotated,
                     args.data_dir,
                     args.dataset,
                     args.save_dir,
                     input_size=input_size,
                     list_path=list_path)

    print('total time is ', during_time)
    print('avg time is ', during_time / num_samples)
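
compute_mean_ioU_wy is repo-specific, but the underlying metric is standard.
A hedged sketch of mean IoU computed from an accumulated confusion matrix
(mean_iou is an illustrative helper, not the repo's function):

import numpy as np

def mean_iou(preds, gts, num_classes):
    """Accumulate a confusion matrix over all images, then average the
    per-class IoU = TP / (TP + FP + FN)."""
    conf = np.zeros((num_classes, num_classes), dtype=np.int64)
    for pred, gt in zip(preds, gts):
        mask = (gt >= 0) & (gt < num_classes)  # ignore out-of-range labels
        conf += np.bincount(
            num_classes * gt[mask].astype(int) + pred[mask].astype(int),
            minlength=num_classes ** 2).reshape(num_classes, num_classes)
    tp = np.diag(conf)
    union = conf.sum(axis=0) + conf.sum(axis=1) - tp
    return (tp / np.maximum(union, 1)).mean()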