Code example #1
0
def main():
    """Create the model and start the evaluation process."""
    # The docstring must be the first statement in the function; the original
    # placed it after set_start_method, where it was a no-op string expression.
    torch.multiprocessing.set_start_method("spawn", force=True)
    args = get_arguments()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    # Wrap in DataParallelModel only when more than one GPU was requested.
    deeplab = CorrPM_Model(args.num_classes, args.num_points)
    if len(gpus) > 1:
        model = DataParallelModel(deeplab)
    else:
        model = deeplab

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    if args.data_name == 'lip':
        lip_dataset = LIPDataSet(args.data_dir, VAL_POSE_ANNO_FILE, args.dataset, crop_size=input_size, transform=transform)
        num_samples = len(lip_dataset)
        valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus),
                                    shuffle=False, num_workers=4, pin_memory=True)
    else:
        # Previously `valloader`/`num_samples` were silently left undefined
        # here and the script crashed later with a NameError; fail fast.
        raise ValueError("Unsupported data_name: %s (only 'lip' is supported)" % args.data_name)

    # Load checkpoint weights. Checkpoints saved from DataParallel carry a
    # 'module.' prefix that must be stripped when keys do not line up.
    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)

    for key in state_dict.keys():
        if key not in state_dict_old.keys():
            print(key)  # report model keys missing from the checkpoint
    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model.eval()
    model.cuda()

    parsing_preds, scales, centers = valid(model, valloader, input_size, num_samples, len(gpus))

    mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size, args.dataset)
    print(mIoU)

    # NOTE(review): `start` is not defined in this function — presumably a
    # module-level timestamp taken at import time; confirm it exists at file
    # scope, otherwise this line raises NameError.
    end = datetime.datetime.now()
    print(end - start, 'seconds')
    print(end)
Code example #2
0
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)

    gpus = [int(g) for g in args.gpu.split(',')]
    if args.gpu != 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    height, width = (int(v) for v in args.input_size.split(','))
    input_size = (height, width)

    model = get_seg_model(cfg=config, num_classes=args.num_classes, is_train=False)

    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    lip_dataset = LIPDataSet(args.data_dir, 'val', args.list_path, crop_size=input_size, transform=transform)
    num_samples = len(lip_dataset)
    valloader = data.DataLoader(lip_dataset,
                                batch_size=args.batch_size * len(gpus),
                                shuffle=False, pin_memory=True)

    # Load checkpoint weights, stripping the 'module.' prefix that
    # DataParallel checkpoints carry when the key names do not line up.
    state_dict = model.state_dict().copy()
    checkpoint = torch.load(args.restore_from)['state_dict']
    for old_key, new_key in zip(checkpoint.keys(), state_dict.keys()):
        target = old_key if old_key == new_key else old_key[7:]
        state_dict[target] = deepcopy(checkpoint[old_key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)
    model.eval()
    model.cuda()

    parsing_preds, scales, centers, time_list = valid(model, valloader, input_size, num_samples, len(gpus))
    mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes,
                            args.data_dir, input_size, args.dataset, args.list_path)

    print(mIoU)
    print('total time is ', sum(time_list))
    print('avg time is ', sum(time_list) / len(time_list))
Code example #3
0
def main():
    """Train the RGB-D segmentation network and evaluate on the val set every 5 epochs.

    NOTE(review): reads module-level globals `args` and `NAME`; neither is
    defined in this function — confirm they exist at file scope.
    """
    args.time = get_currect_time()

    # Experiment bookkeeping: scalar visualizer plus a text log that records
    # system parameters and a copy of the source files.
    visualizer = Visualizer(args)
    log = Log(args)
    log.record_sys_param()
    log.record_file()

    """Set GPU Environment"""
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # Training loader with augmentation; validation loader on full-size images.
    trainloader = data.DataLoader(NYUDataset_crop_fast(args.data_list, args.random_scale, args.random_mirror, args.random_crop,
                args.batch_size, args.colorjitter),batch_size=args.batch_size,
                shuffle=True, num_workers=4, pin_memory=True)
    valloader = data.DataLoader(NYUDataset_val_full(args.data_val_list, args.random_scale, args.random_mirror, args.random_crop,
                       1), batch_size=8, shuffle=False, pin_memory=True)

    """Create Network"""
    deeplab = Res_Deeplab(num_classes=args.num_classes)
    print(deeplab)

    """Load pretrained Network"""
    saved_state_dict = torch.load(args.restore_from)
    print(args.restore_from)
    new_params = deeplab.state_dict().copy()
    # Copy every pretrained tensor except the classifier head ('fc.*'),
    # which has a different number of output classes here.
    for i in saved_state_dict:
        # Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        # if not i_parts[1]=='layer5':
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]

    deeplab.load_state_dict(new_params)

    model = deeplab
    model.cuda()
    model.train()
    model = model.float()
    # Two-GPU data parallelism; `deeplab` keeps a reference to the wrapped
    # module, so checkpoints saved below have no 'module.' key prefix.
    model = DataParallelModel(model, device_ids=[0, 1])

    criterion = CriterionDSN()
    criterion = DataParallelCriterion(criterion)

    optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': args.learning_rate }],
                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)

    optimizer.zero_grad()

    i_iter = 0
    args.num_steps = len(trainloader) * args.epoch
    best_iou = 0.0
    # Total parameter count, reported in millions.
    total = sum([param.nelement() for param in model.parameters()])
    print('  + Number of params: %.2fM' % (total / 1e6))

    for epoch in range(args.epoch):
        ## Train one epoch
        model.train()
        for batch in trainloader:
            start = timeit.default_timer()
            i_iter = i_iter + 1
            images = batch['image'].cuda()
            labels = batch['seg'].cuda()
            HHAs = batch['HHA'].cuda()
            depths = batch['depth'].cuda()
            labels = torch.squeeze(labels,1).long()
            # Skip the trailing partial batch (batch must split evenly across GPUs).
            if (images.size(0) != args.batch_size):
                break
            optimizer.zero_grad()
            preds = model(images, HHAs, depths)
            loss = criterion(preds, labels)
            loss.backward()
            optimizer.step()
            if i_iter % 100 == 0:
                visualizer.add_scalar('learning_rate', args.learning_rate, i_iter)
                visualizer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

            current_lr = optimizer.param_groups[0]['lr']
            end = timeit.default_timer()
            # ETA (hh:mm:ss) is extrapolated from the current iteration's duration.
            log.log_string(
                '====================> epoch=%03d/%d, iter=%05d/%05d, loss=%.3f, %.3fs/iter, %02d:%02d:%02d, lr=%.6f' % (
                    epoch, args.epoch, i_iter, len(trainloader)*args.epoch, loss.data.cpu().numpy(), (end - start),
                    (int((end - start) * (args.num_steps - i_iter)) // 3600),
                    (int((end - start) * (args.num_steps - i_iter)) % 3600 // 60),
                    (int((end - start) * (args.num_steps - i_iter)) % 3600 % 60), current_lr))
        # Step the learning-rate schedule every 40 epochs.
        if (epoch+1) % 40 == 0:
            adjust_learning_rate(optimizer, i_iter, args)

        if epoch % 5 == 0:
            # ---- Validation pass ----
            model.eval()
            confusion_matrix = np.zeros((args.num_classes, args.num_classes))
            loss_val = 0
            log.log_string("====================> evaluating")
            for batch_val in valloader:
                images_val = batch_val['image'].cuda()
                labels_val = batch_val['seg'].cuda()
                labels_val = torch.squeeze(labels_val,1).long()
                HHAs_val = batch_val['HHA'].cuda()
                depths_val = batch_val['depth'].cuda()

                with torch.no_grad():
                    preds_val = model(images_val, HHAs_val, depths_val)
                    loss_val += criterion(preds_val, labels_val)
                    # Gather the primary output from each GPU replica into one batch.
                    preds_val = torch.cat([preds_val[i][0] for i in range(len(preds_val))], 0)
                    # NOTE(review): F.upsample is deprecated in newer torch
                    # (use F.interpolate) — left unchanged here.
                    preds_val = F.upsample(input=preds_val, size=(480, 640), mode='bilinear', align_corners=True)

                    preds_val = np.asarray(np.argmax(preds_val.cpu().numpy(), axis=1), dtype=np.uint8)

                    # NOTE(review): np.int was removed in numpy>=1.24; this
                    # needs dtype=int on modern numpy.
                    labels_val = np.asarray(labels_val.cpu().numpy(), dtype=np.int)
                    # Mask out ignored (255) pixels before scoring.
                    ignore_index = labels_val != 255

                    labels_val = labels_val[ignore_index]
                    preds_val = preds_val[ignore_index]

                    confusion_matrix += get_confusion_matrix(labels_val, preds_val, args.num_classes)
            loss_val = loss_val / len(valloader)
            # Per-class IoU from the accumulated confusion matrix:
            # IoU = TP / (pred + gt - TP), clamped to avoid division by zero.
            pos = confusion_matrix.sum(1)
            res = confusion_matrix.sum(0)
            tp = np.diag(confusion_matrix)

            IU_array = (tp / np.maximum(1.0, pos + res - tp))
            mean_IU = IU_array.mean()

            # getConfusionMatrixPlot(confusion_matrix)
            log.log_string('val loss' + ' ' + str(loss_val.cpu().numpy()) + ' ' + 'meanIU' + str(mean_IU) + 'IU_array' + str(IU_array))

            visualizer.add_scalar('val loss', loss_val.cpu().numpy(), epoch)
            visualizer.add_scalar('meanIU', mean_IU, epoch)

            # Keep a separate checkpoint for the best mIoU seen so far.
            if mean_IU > best_iou:
                best_iou = mean_IU
                log.log_string('save best model ...')
                torch.save(deeplab.state_dict(),
                           osp.join(args.snapshot_dir, 'model', args.dataset + NAME + 'best_iu' + '.pth'))

        if epoch % 5 == 0:
            log.log_string('save model ...')
            torch.save(deeplab.state_dict(),osp.join(args.snapshot_dir,'model', args.dataset+ NAME + str(epoch)+'.pth'))
Code example #4
0
File: sgnet_agent.py  Project: yyyyy1231/SGNet
class SGNetAgent(BaseAgent):
    """
    This class will be responsible for handling the whole process of our architecture.
    """

    def __init__(self, config):
        """Build the agent: select the network variant, seed all RNG sources,
        and create the test data loader and the model.

        Args:
            config: experiment configuration namespace (network name, seed,
                gpu string, dataset paths, mode, ...).
        """
        super().__init__(config)
        ## Select network implementation; the *_fps variants are used only
        ## when measuring inference speed.
        if config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet" and config.mode != "measure_speed":
            from graphs.models.SGNet.SGNet import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet":
            from graphs.models.SGNet.SGNet_fps import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet_ASPP" and config.mode != "measure_speed":
            from graphs.models.SGNet.SGNet_ASPP import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet_ASPP":
            from graphs.models.SGNet.SGNet_ASPP_fps import SGNet

        # Seed every RNG source for reproducibility.
        random.seed(self.config.seed)
        os.environ['PYTHONHASHSEED'] = str(self.config.seed)
        np.random.seed(self.config.seed)
        torch.manual_seed(self.config.seed)
        torch.cuda.manual_seed(self.config.seed)
        torch.cuda.manual_seed_all(self.config.seed)
        cudnn.enabled = True
        # NOTE(review): benchmark=True and deterministic=True pull in opposite
        # directions; confirm which is intended.
        cudnn.benchmark = True
        cudnn.deterministic = True
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
        # Create the test data loader (one image per batch).
        if config.dataset == "NYUD":
            self.testloader = data.DataLoader(NYUDataset_val_full(
                self.config.val_list_path),
                                              batch_size=1,
                                              shuffle=False,
                                              pin_memory=True)
        # Create an instance from the Model
        self.logger.info("Loading encoder pretrained in imagenet...")
        self.model = SGNet(self.config.num_classes)
        print(self.model)

        self.model.cuda()
        self.model.train()
        self.model.float()
        print(config.gpu)
        if config.mode != 'measure_speed':
            self.model = DataParallelModel(self.model, device_ids=[0])
            print('parallel....................')

        # Report the parameter count in millions.
        total = sum(param.nelement() for param in self.model.parameters())
        print('  + Number of params: %.2fM' % (total / 1e6))
        print_cuda_statistics()

    def load_checkpoint(self, filename):
        """Restore model weights and epoch/iteration counters from *filename*;
        log and continue (fresh training) if the file cannot be opened."""
        try:
            self.logger.info("Loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)

            self.current_epoch = checkpoint['epoch']
            self.current_iteration = checkpoint['iteration']
            self.model.load_state_dict(checkpoint['state_dict'])

            self.logger.info(
                "Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\n"
                .format(filename, checkpoint['epoch'],
                        checkpoint['iteration']))
        except OSError:
            self.logger.info(
                "No checkpoint exists from '{}'. Skipping...".format(
                    self.config.checkpoint_dir))
            self.logger.info("**First time to train**")

    def run(self):
        """Dispatch to the operation selected by ``config.mode``.

        NOTE(review): 'train' and 'train_iters' pass the assert but are not
        handled here — presumably implemented elsewhere; confirm.
        """
        assert self.config.mode in [
            'train', 'test', 'measure_speed', 'train_iters'
        ]
        try:
            if self.config.mode == 'test':
                self.test()
            elif self.config.mode == 'measure_speed':
                with torch.no_grad():
                    self.measure_speed(input_size=[1, 3, 480, 640])
        except KeyboardInterrupt:
            self.logger.info("You have entered CTRL+C.. Wait to finalize")

    def test(self):
        """Run inference over the test loader, save color-coded prediction
        images, and print IoU/accuracy metrics."""
        tqdm_batch = tqdm(self.testloader,
                          total=len(self.testloader),
                          desc="Testing...")
        self.model.eval()
        # The original constructed this metric object twice; once is enough.
        metrics = IOUMetric(self.config.num_classes)
        palette = get_palette(256)
        if (not os.path.exists(self.config.output_predict_dir)):
            os.mkdir(self.config.output_predict_dir)
        self.load_checkpoint(self.config.trained_model_path)
        index = 0
        for batch_val in tqdm_batch:
            image = batch_val['image'].cuda()
            label = batch_val['seg'].cuda()
            label = torch.squeeze(label, 1).long()
            HHA = batch_val['HHA'].cuda()
            depth = batch_val['depth'].cuda()
            input_size = (label.size(1), label.size(2))

            with torch.no_grad():
                # Multi-scale prediction when config.ms is set, else single scale.
                if self.config.ms:
                    output = predict_multiscale(self.model, image, depth,
                                                input_size, [0.8, 1.0, 2.0],
                                                self.config.num_classes, False)
                else:
                    output = predict_multiscale(self.model, image, depth,
                                                input_size, [1.0],
                                                self.config.num_classes, False)
                # np.int was removed in numpy>=1.24; the builtin int is the
                # documented replacement. Compute argmax once and reuse it.
                seg_pred = np.asarray(np.argmax(output, axis=2), dtype=int)
                output_im = Image.fromarray(seg_pred.astype(np.uint8))
                output_im.putpalette(palette)
                output_im.save(self.config.output_predict_dir + '/' +
                               str(index) + '.png')
                seg_gt = np.asarray(label[0].cpu().numpy(), dtype=int)

                # Drop ignored (255) pixels before scoring.
                ignore_index = seg_gt != 255
                seg_gt = seg_gt[ignore_index]
                seg_pred = seg_pred[ignore_index]

                metrics.add_batch(seg_pred, seg_gt, ignore_index=255)

                index = index + 1
        acc, acc_cls, iu, mean_iu, fwavacc = metrics.evaluate()
        print({
            'meanIU': mean_iu,
            'IU_array': iu,
            'acc': acc,
            'acc_cls': acc_cls
        })

    def finalize(self):
        """
        Finalize all the operations of the 2 Main classes of the process the operator and the data loader
        :return:
        """
        # TODO
        pass

    def measure_speed(self, input_size, iteration=500):
        """
        Measure the speed of model
        :return: speed_time
                 fps
        """
        self.model.eval()
        # 'image' instead of 'input' — avoid shadowing the builtin.
        image = torch.randn(*input_size).cuda()
        depth = torch.randn(*input_size).cuda()

        # Warm up (cuDNN autotune, allocator) before timing.
        for _ in range(100):
            self.model(image, depth)
        print('=========Speed Testing=========')
        # One synchronize is sufficient (the original called it twice).
        torch.cuda.synchronize()
        t_start = time.time()

        for _ in range(iteration):
            self.model(image, depth)
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        speed_time = elapsed_time / iteration * 1000
        fps = iteration / elapsed_time
        print(iteration)
        print('Elapsed Time: [%.2f s / %d iter]' % (elapsed_time, iteration))
        print('Speed Time: %.2f ms / iter   FPS: %.2f' % (speed_time, fps))
        return speed_time, fps
Code example #5
0
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(i) for i in args.gpu.split(',')]

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    model = get_cls_net(config=config, num_classes=args.num_classes, is_train=False)

    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # Pick the dataset class based on the data directory path.
    print('-------Load Data', args.data_dir)
    if 'vehicle_parsing_dataset' in args.data_dir:
        parsing_dataset = VPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    elif 'LIP' in args.data_dir:
        parsing_dataset = LIPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    elif 'WeiyiAll' in args.data_dir:
        parsing_dataset = WYDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform)
    else:
        # Previously `parsing_dataset` was silently left undefined for an
        # unrecognized path and the script crashed later with a NameError.
        raise ValueError('Unrecognized data_dir: %s' % args.data_dir)

    num_samples = len(parsing_dataset)
    valloader = data.DataLoader(parsing_dataset, batch_size=args.batch_size * len(gpus), shuffle=False, pin_memory=True)

    print('-------Load Weight', args.restore_from)
    restore_from = args.restore_from
    state_dict = model.state_dict().copy()
    state_dict_old = torch.load(restore_from)
    state_dict_old = state_dict_old['state_dict']

    # Remap checkpoint keys: strip the 'module.' prefix that DataParallel
    # checkpoints carry when loading into a bare (unwrapped) model.
    for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):
        if key != nkey:
            state_dict[key[7:]] = deepcopy(state_dict_old[key])
        else:
            state_dict[key] = deepcopy(state_dict_old[key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)

    model.eval()
    model.cuda()

    print('-------Start Evaluation...')
    parsing_preds, scales, centers, time_list = valid(model, valloader, input_size, num_samples, len(gpus))
    # NOTE(review): `list_path` is not defined in this function — presumably
    # a module-level global; confirm it exists at file scope, otherwise this
    # call raises NameError.
    mIoU, no_test_class = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, list_path, input_size, dataset=args.dataset)
    print(mIoU)
    print('No test class : ', no_test_class)

    print('-------Saving Results', args.save_dir)
    write_results(parsing_preds, scales, centers, args.data_dir, args.dataset, args.save_dir, input_size=input_size)

    print('total time is ', sum(time_list))
    print('avg time is ', sum(time_list) / len(time_list))
Code example #6
0
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    update_config(config, args)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpus = [int(g) for g in args.gpu.split(',')]

    height, width = (int(v) for v in args.input_size.split(','))
    input_size = (height, width)
    image_size = (2788, 1400)

    model = get_cls_net(config=config, num_classes=args.num_classes, is_train=False)

    transform = build_transforms(args)

    print('-------Load Data : ', args.data_dir)
    parsing_dataset = WYDataSet(args.data_dir,
                                args.dataset,
                                crop_size=input_size,
                                transform=transform)
    list_path = os.path.join(args.data_dir, parsing_dataset.list_file)
    num_samples = len(parsing_dataset)

    # Labeled splits need the mask-aware collate function.
    collate = fast_collate_fn if 'test_no_label' in args.dataset else fast_collate_fn_mask
    valloader = data.DataLoader(parsing_dataset,
                                batch_size=args.batch_size * len(gpus),
                                shuffle=False,
                                collate_fn=collate,
                                pin_memory=True)

    print('-------Load Weight', args.restore_from)
    # Load checkpoint weights, stripping the 'module.' prefix that
    # DataParallel checkpoints carry when the key names do not line up.
    state_dict = model.state_dict().copy()
    checkpoint = torch.load(args.restore_from)['state_dict']
    for old_key, new_key in zip(checkpoint.keys(), state_dict.keys()):
        target = old_key if old_key == new_key else old_key[7:]
        state_dict[target] = deepcopy(checkpoint[old_key])

    model.load_state_dict(state_dict)
    model = DataParallelModel(model)
    model.eval()
    model.cuda()

    print('-------Start Evaluation...')
    parsing_preds, is_rotated, during_time = valid(args, model, valloader,
                                                   image_size, input_size,
                                                   num_samples, len(gpus))
    # mIoU can only be computed for splits that have ground-truth labels.
    if 'test_no_label' not in args.dataset:
        mIoU, no_test_class = compute_mean_ioU_wy(parsing_preds,
                                                  is_rotated,
                                                  args.num_classes,
                                                  args.data_dir,
                                                  input_size,
                                                  dataset=args.dataset,
                                                  list_path=list_path)
        print(mIoU)
        print('No test class : ', no_test_class)

    print('-------Saving Results', args.save_dir)
    write_results_wy(parsing_preds,
                     is_rotated,
                     args.data_dir,
                     args.dataset,
                     args.save_dir,
                     input_size=input_size,
                     list_path=list_path)

    print('total time is ', during_time)
    print('avg time is ', during_time / num_samples)