Example #1
File: demo.py Project: coderzbx/ademxapp
def do(args, model_specs, logger):
    meta = get_dataset_specs(args, model_specs)
    id_2_label = meta['id_2_label']
    cmap = meta['cmap']
    input_h = 1024
    input_w = 2048
    classes = model_specs['classes']
    label_stride = model_specs['feat_stride']

    start_idx = args.start
    end_idx = args.end
    print('{}-{}\n'.format(start_idx, end_idx))

    image_list = []
    idx = 0
    with open(args.file_list) as f:
        for item in f.readlines():
            if idx < start_idx:
                idx += 1
                continue

            if idx > end_idx:
                break

            item = item.strip()
            image_list.append(os.path.join(args.data_root, item))
            idx += 1

    net_args, net_auxs = mxutil.load_params_from_file(args.weights)
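    # fcrna_model_a1 builds the fully-convolutional segmentation symbol (ResNet-38 'a1' in ademxapp)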
    net = fcrna_model_a1(classes, label_stride, bootstrapping=True)
    if net is None:
        raise NotImplementedError('Unknown network')
    contexts = [mx.gpu(int(_)) for _ in args.gpus.split(',')]
    mod = mx.mod.Module(net, context=contexts)

    crop_size = 2048
    save_dir = args.output

    x_num = len(image_list)

    transformers = [ts.Scale(crop_size, Image.CUBIC, False)]
    transformers += _get_transformer_image()
    transformer = ts.Compose(transformers)

    start = time.time()

    for i in range(x_num):
        time1 = time.time()

        sample_name = osp.splitext(osp.basename(image_list[i]))[0]
        out_path = osp.join(save_dir, '{}.png'.format(sample_name))
        if os.path.exists(out_path):
            continue

        im_path = image_list[i]  # entries already contain args.data_root (joined above)
        rim = np.array(Image.open(im_path).convert('RGB'), np.uint8)

        h, w = rim.shape[:2]
        need_resize = False
        if h != input_h or w != input_w:
            need_resize = True
            im = np.array(
                Image.fromarray(rim.astype(np.uint8, copy=False)).resize(
                    (input_w, input_h), Image.NEAREST))
        else:
            im = rim
        im = transformer(im)
        imh, imw = im.shape[:2]

        # init
        label_h, label_w = input_h // label_stride, input_w // label_stride
        test_steps = 1
        pred_stride = label_stride // test_steps
        pred_h, pred_w = label_h * test_steps, label_w * test_steps

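        # a dummy NDArrayIter with the fixed input shape is used only to (re)bind the
        # module; the real image is copied into batch.data[0] below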
        input_data = np.zeros((1, 3, input_h, input_w), np.single)
        input_label = 255 * np.ones((1, label_h * label_w), np.single)
        dataiter = mx.io.NDArrayIter(input_data, input_label)
        batch = dataiter.next()
        mod.bind(dataiter.provide_data,
                 dataiter.provide_label,
                 for_training=False,
                 force_rebind=True)
        if not mod.params_initialized:
            mod.init_params(arg_params=net_args, aux_params=net_auxs)

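        # embed the transformed image into a zero canvas padded by one label stride,
        # offset by half a stride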
        nim = np.zeros((3, imh + label_stride, imw + label_stride), np.single)
        sy = sx = label_stride // 2
        nim[:, sy:sy + imh, sx:sx + imw] = im.transpose(2, 0, 1)

        net_preds = np.zeros((classes, pred_h, pred_w), np.single)
        # sy = sx = pred_stride // 2 + np.arange(test_steps) * pred_stride
        # sy = sx = sy[0]
        input_data = np.zeros((1, 3, input_h, input_w), np.single)
        input_data[0, :, :imh, :imw] = nim[:, sy:sy + imh, sx:sx + imw]
        batch.data[0] = mx.nd.array(input_data)
        mod.forward(batch, is_train=False)
        this_call_preds = mod.get_outputs()[0].asnumpy()[0]
        if args.test_flipping:
            batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
            mod.forward(batch, is_train=False)
            this_call_preds = 0.5 * (
                this_call_preds +
                mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
        net_preds[:, 0:0 + pred_h:test_steps,
                  0:0 + pred_w:test_steps] = this_call_preds

        # compute pixel-wise predictions
        interp_preds = interp_preds_as(rim.shape[:2], net_preds, pred_stride,
                                       imh, imw)
        pred_label = interp_preds.argmax(0)
        if id_2_label is not None:
            pred_label = id_2_label[pred_label]

        # save predicted labels into an image
        im_to_save = Image.fromarray(pred_label.astype(np.uint8))
        if cmap is not None:
            im_to_save.putpalette(cmap.ravel())

        if need_resize:
            im_to_save = im_to_save.resize((w, h), Image.NEAREST)

        im_to_save.save(out_path)

        time2 = time.time()
        print("{}/{} {} finish in {} s\n".format(i, x_num, out_path,
                                                 time2 - time1))

    logger.info('Done in %.2f s.', time.time() - start)
Example #2
File: solve_ST.py Project: yzou2/CBST
def _val_impl(args, model_specs, logger):
    if len(args.output) > 0:
        _make_dirs(args.output)
    # dataiter
    dataset_specs_tgt = get_dataset_specs_tgt(args, model_specs)
    scale, mean_, _ = _get_scalemeanstd()
    if scale > 0:
        mean_ /= scale
    margs = argparse.Namespace(**model_specs)
    dargs = argparse.Namespace(**dataset_specs_tgt)
    mod = _get_module(args, margs, dargs)
    addr_weights = args.weights  # first weights should be xxxx_ep-0000.params!
    addr_output = args.output
    # current round index
    cround = args.idx_round

    net_args = None
    net_auxs = None
    ###
    if addr_weights is not None:
        net_args, net_auxs = mxutil.load_params_from_file(addr_weights)
    ######
    save_dir = osp.join(args.output, str(cround), 'results')
    save_dir_self_train = osp.join(args.output, str(cround), 'self_train')

    # pseudo labels
    save_dir_pseudo_labelIds = osp.join(save_dir_self_train, 'pseudo_labelIds')
    save_dir_pseudo_color = osp.join(save_dir_self_train, 'pseudo_color')
    # without sp
    save_dir_nplabelIds = osp.join(save_dir, 'nplabelIds')
    save_dir_npcolor = osp.join(save_dir, 'npcolor')
    # probability map
    save_dir_probmap = osp.join(args.output, 'probmap')
    save_dir_stats = osp.join(args.output, 'stats')

    _make_dirs(save_dir)
    _make_dirs(save_dir_pseudo_labelIds)
    _make_dirs(save_dir_pseudo_color)
    _make_dirs(save_dir_nplabelIds)
    _make_dirs(save_dir_npcolor)
    _make_dirs(save_dir_probmap)
    _make_dirs(save_dir_stats)
    if args.with_prior == 'True':
        # with sp
        save_dir_splabelIds = osp.join(save_dir_self_train, 'splabelIds')
        save_dir_spcolor = osp.join(save_dir_self_train, 'spcolor')
        _make_dirs(save_dir_splabelIds)
        _make_dirs(save_dir_spcolor)
    if args.kc_policy == 'cb':
        # reweighted prediction map
        save_dir_rwlabelIds = osp.join(save_dir_self_train, 'rwlabelIds')
        save_dir_rwcolor = osp.join(save_dir_self_train, 'rwcolor')
        _make_dirs(save_dir_rwlabelIds)
        _make_dirs(save_dir_rwcolor)
    ######
    dataset_tgt = model_specs['dataset_tgt']
    image_list_tgt, label_gt_list_tgt, image_tgt_list = parse_split_file_tgt(
        margs.dataset_tgt, args.split_tgt)
    has_gt = args.split_tgt in ('train', 'val')
    crop_sizes = sorted([int(_) for _ in args.test_scales.split(',')])[::-1]
    crop_size = crop_sizes[0]
    assert len(crop_sizes) == 1, 'multi-scale testing not implemented'
    label_stride = margs.feat_stride
    x_num = len(image_list_tgt)
    do_forward = True
    # for all images that have the same resolution
    if do_forward:
        batch = None
        transformers = [ts.Scale(crop_size, Image.CUBIC, False)]
        transformers += _get_transformer_image()
        transformer = ts.Compose(transformers)

    scorer_np = ScoreUpdater(dargs.valid_labels_tgt, margs.classes, x_num,
                             logger)
    scorer_np.reset()
    # with prior
    if args.with_prior == 'True':
        scorer = ScoreUpdater(dargs.valid_labels_tgt, margs.classes, x_num,
                              logger)
        scorer.reset()

    done_count = 0  # for multi-scale testing
    num_classes = margs.classes
    init_tgt_port = float(args.init_tgt_port)
    # class-wise
    cls_exist_array = np.zeros([1, num_classes], dtype=int)
    cls_thresh = np.zeros([num_classes])  # confidence thresholds for all classes
    cls_size = np.zeros([num_classes])  # number of predictions in each class
    array_pixel = 0.0
    # prior
    if args.with_prior == 'True':
        in_path_prior = 'spatial_prior/{}/prior_array.mat'.format(args.dataset)
        sprior = scipy.io.loadmat(in_path_prior)
        prior_array = sprior["prior_array"].astype(np.float32)
        #prior_array = np.maximum(prior_array,0)
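        # prior_array holds the class-wise spatial prior; it is multiplied element-wise
        # with the interpolated softmax output below when args.with_prior == 'True'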
    ############################ network forward
    for i in xrange(x_num):
        start = time.time()
        ############################ network forward on single image (from official ResNet-38 implementation)
        sample_name = osp.splitext(osp.basename(image_list_tgt[i]))[0]
        im_path = osp.join(args.data_root_tgt, image_list_tgt[i])
        rim = np.array(Image.open(im_path).convert('RGB'), np.uint8)
        if do_forward:
            im = transformer(rim)
            imh, imw = im.shape[:2]
            # init
            if batch is None:
                if dargs.ident_size:
                    input_h = make_divisible(imh, margs.feat_stride)
                    input_w = make_divisible(imw, margs.feat_stride)
                else:
                    input_h = input_w = make_divisible(crop_size,
                                                       margs.feat_stride)
                label_h, label_w = input_h / label_stride, input_w / label_stride
                test_steps = args.test_steps
                pred_stride = label_stride / test_steps
                pred_h, pred_w = label_h * test_steps, label_w * test_steps
                input_data = np.zeros((1, 3, input_h, input_w), np.single)
                input_label = 255 * np.ones((1, label_h * label_w), np.single)
                dataiter_tgt = mx.io.NDArrayIter(input_data, input_label)
                batch = dataiter_tgt.next()

                mod.bind(dataiter_tgt.provide_data,
                         dataiter_tgt.provide_label,
                         for_training=False,
                         force_rebind=True)
                if not mod.params_initialized:
                    mod.init_params(arg_params=net_args, aux_params=net_auxs)

            nim = np.zeros((3, imh + label_stride, imw + label_stride),
                           np.single)
            sy = sx = label_stride // 2
            nim[:, sy:sy + imh, sx:sx + imw] = im.transpose(2, 0, 1)

            net_preds = np.zeros((margs.classes, pred_h, pred_w), np.single)
            sy = sx = pred_stride // 2 + np.arange(test_steps) * pred_stride
            for ix in xrange(test_steps):
                for iy in xrange(test_steps):
                    input_data = np.zeros((1, 3, input_h, input_w), np.single)
                    input_data[0, :, :imh, :imw] = nim[:, sy[iy]:sy[iy] + imh,
                                                       sx[ix]:sx[ix] + imw]
                    batch.data[0] = mx.nd.array(input_data)
                    mod.forward(batch, is_train=False)
                    this_call_preds = mod.get_outputs()[0].asnumpy()[0]
                    if args.test_flipping:
                        batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
                        mod.forward(batch, is_train=False)
                        # average the original and flipped image prediction
                        this_call_preds = 0.5 * (
                            this_call_preds +
                            mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
                    net_preds[:, iy:iy + pred_h:test_steps,
                              ix:ix + pred_w:test_steps] = this_call_preds
        interp_preds_np = interp_preds_as(rim.shape[:2], net_preds,
                                          pred_stride, imh, imw)
        ############################ save predicted labels and confidence score vectors in target domains
        # no prior prediction with trainIDs
        pred_label_np = interp_preds_np.argmax(0)
        # no prior prediction with labelIDs
        if dargs.id_2_label_tgt is not None:
            pred_label_np = dargs.id_2_label_tgt[pred_label_np]
        # no prior color prediction
        im_to_save_np = Image.fromarray(pred_label_np.astype(np.uint8))
        im_to_save_npcolor = im_to_save_np.copy()
        if dargs.cmap is not None:
            im_to_save_npcolor.putpalette(dargs.cmap.ravel())
        # save no prior prediction with labelIDs and colors
        out_path_np = osp.join(save_dir_nplabelIds,
                               '{}.png'.format(sample_name))
        out_path_npcolor = osp.join(save_dir_npcolor,
                                    '{}.png'.format(sample_name))
        im_to_save_np.save(out_path_np)
        im_to_save_npcolor.save(out_path_npcolor)
        # with prior
        if args.with_prior == 'True':
            probmap = np.multiply(prior_array,
                                  interp_preds_np).astype(np.float32)
        elif args.with_prior == 'False':
            probmap = interp_preds_np.copy().astype(np.float32)
        pred_label = probmap.argmax(0)
        probmap_max = np.amax(probmap, axis=0)
        ############################ save confidence scores of target domain as class-wise vectors
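        # accumulate the max softmax score of every pixel predicted as class idx_cls
        # into a dynamically named array 'array_cls<idx_cls>' (Python 2 exec idiom),
        # keeping every 4th value to bound memory; the arrays are sorted later to
        # derive the class-wise thresholds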
        for idx_cls in np.arange(0, num_classes):
            idx_temp = pred_label == idx_cls
            sname = 'array_cls' + str(idx_cls)
            if not (sname in locals()):
                exec("%s = np.float32(0)" % sname)
            if idx_temp.any():
                cls_exist_array[0, idx_cls] = 1
                probmap_max_cls_temp = probmap_max[idx_temp].astype(np.float32)
                len_cls = probmap_max_cls_temp.size
                # downsampling by rate 4
                probmap_cls = probmap_max_cls_temp[0:len_cls:4]
                exec("%s = np.append(%s,probmap_cls)" % (sname, sname))
        ############################ save prediction
        # save the prediction probability map
        out_path_probmap = osp.join(save_dir_probmap,
                                    '{}.npy'.format(sample_name))
        np.save(out_path_probmap, probmap.astype(np.float32))
        # save predictions with spatial priors, if sp exist.
        if args.with_prior == 'True':
            if dargs.id_2_label_tgt is not None:
                pred_label = dargs.id_2_label_tgt[pred_label]
            im_to_save_sp = Image.fromarray(pred_label.astype(np.uint8))
            im_to_save_spcolor = im_to_save_sp.copy()
            if dargs.cmap is not None:  # save color seg map
                im_to_save_spcolor.putpalette(dargs.cmap.ravel())
            out_path_sp = osp.join(save_dir_splabelIds,
                                   '{}.png'.format(sample_name))
            out_path_spcolor = osp.join(save_dir_spcolor,
                                        '{}.png'.format(sample_name))
            im_to_save_sp.save(out_path_sp)
            im_to_save_spcolor.save(out_path_spcolor)
        # log information
        done_count += 1
        if not has_gt:
            logger.info('Done {}/{} with speed: {:.2f}/s'.format(
                i + 1, x_num, 1. * done_count / (time.time() - start)))
            continue
        if args.split_tgt in ('train', 'val'):
            # evaluate with ground truth
            label_path = osp.join(args.data_root_tgt, label_gt_list_tgt[i])
            label = np.array(Image.open(label_path), np.uint8)
            if args.with_prior == 'True':
                scorer.update(pred_label, label, i)
            scorer_np.update(pred_label_np, label, i)
    # save target training list
    fout = 'issegm/data_list/{}/{}_training_gpu{}.lst'.format(
        args.dataset_tgt, args.split_tgt, args.gpus)
    fo = open(fout, "w")
    for idx_image in range(x_num):
        sample_name = osp.splitext(osp.basename(image_list_tgt[idx_image]))[0]
        fo.write(
            image_tgt_list[idx_image] + '\t' +
            osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name)) +
            '\n')
    fo.close()
    ############################ kc generation
    start_sort = time.time()
    # threshold for each class
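    # 'global': pool the confidences of all classes and take one threshold at the
    # init_tgt_port quantile; 'cb' (class-balanced): sort each class separately so
    # every class keeps its own top init_tgt_port fraction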
    if args.kc_policy == 'global':
        for idx_cls in np.arange(0, num_classes):
            tname = 'array_cls' + str(idx_cls)
            exec(
                "array_pixel = np.append(array_pixel,%s)" % tname
            )  # reverse=False for ascending losses and reverse=True for descending confidence
        array_pixel = sorted(array_pixel, reverse=True)
        len_cls = len(array_pixel)
        len_thresh = int(math.floor(len_cls * init_tgt_port))
        cls_size[:] = len_cls
        cls_thresh[:] = array_pixel[len_thresh - 1].copy()
        array_pixel = 0.0
    if args.kc_policy == 'cb':
        for idx_cls in np.arange(0, num_classes):
            tname = 'array_cls' + str(idx_cls)
            if cls_exist_array[0, idx_cls] == 1:
                exec(
                    "%s = sorted(%s,reverse=True)" % (tname, tname)
                )  # reverse=False for ascending losses and reverse=True for descending confidence
                exec("len_cls = len(%s)" % tname)
                cls_size[idx_cls] = len_cls
                len_thresh = int(math.floor(len_cls * init_tgt_port))
                if len_thresh != 0:
                    exec("cls_thresh[idx_cls] = %s[len_thresh-1].copy()" %
                         tname)
                exec("%s = %d" % (tname, 0.0))

    # threshold for mine_id with priority
    mine_id_priority = np.nonzero(
        cls_size / np.sum(cls_size) < args.mine_thresh)[0]
    # chosen mine_id
    mine_id_all = np.argsort(cls_size / np.sum(cls_size))
    mine_id = mine_id_all[:args.mine_id_number]
    print(mine_id)
    np.save(save_dir_stats + '/mine_id.npy', mine_id)
    np.save(save_dir_stats + '/mine_id_priority.npy', mine_id_priority)
    np.save(save_dir_stats + '/cls_thresh.npy', cls_thresh)
    np.save(save_dir_stats + '/cls_size.npy', cls_size)
    logger.info('Kc determination done in %.2f s.', time.time() - start_sort)
    ############################ pseudo-label generation
    for i in xrange(x_num):
        sample_name = osp.splitext(osp.basename(image_list_tgt[i]))[0]
        sample_pseudo_label_name = osp.join(save_dir_pseudo_labelIds,
                                            '{}.png'.format(sample_name))
        sample_pseudocolor_label_name = osp.join(save_dir_pseudo_color,
                                                 '{}.png'.format(sample_name))
        out_path_probmap = osp.join(save_dir_probmap,
                                    '{}.npy'.format(sample_name))
        probmap = np.load(out_path_probmap)
        rw_probmap = np.zeros(probmap.shape, np.single)
        cls_thresh[cls_thresh == 0] = 1.0  # cls_thresh == 0 means there is no prediction in this class
        ############# pseudo-label assignment
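        # dividing each class probability map by its class threshold re-weights the
        # scores so that a value >= 1 marks a confident pixel for that class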
        for idx_cls in np.arange(0, num_classes):
            cls_thresh_temp = cls_thresh[idx_cls]
            cls_probmap = probmap[idx_cls, :, :]
            cls_rw_probmap = np.true_divide(cls_probmap, cls_thresh_temp)
            rw_probmap[idx_cls, :, :] = cls_rw_probmap.copy()

        rw_probmap_max = np.amax(rw_probmap, axis=0)
        pseudo_label = np.argmax(rw_probmap, axis=0)
        ############# pseudo-label selection
        idx_unconfid = rw_probmap_max < 1
        idx_confid = rw_probmap_max >= 1
        # pseudo-labels with labelID
        pseudo_label = pseudo_label.astype(np.uint8)
        pseudo_label_labelID = dargs.id_2_label_tgt[pseudo_label]
        rw_pred_label = pseudo_label_labelID.copy()
        # ignore label assignment, compatible with labelIDs
        pseudo_label_labelID[idx_unconfid] = 0
        ############# save pseudo-label
        im_to_save_pseudo = Image.fromarray(
            pseudo_label_labelID.astype(np.uint8))
        im_to_save_pseudocol = im_to_save_pseudo.copy()
        if dargs.cmap is not None:  # save segmentation prediction with color
            im_to_save_pseudocol.putpalette(dargs.cmap.ravel())
        out_path_pseudo = osp.join(save_dir_pseudo_labelIds,
                                   '{}.png'.format(sample_name))
        out_path_colpseudo = osp.join(save_dir_pseudo_color,
                                      '{}.png'.format(sample_name))
        im_to_save_pseudo.save(out_path_pseudo)
        im_to_save_pseudocol.save(out_path_colpseudo)
        ############# save reweighted pseudo-label in cbst
        if args.kc_policy == 'cb':
            im_to_save_rw = Image.fromarray(rw_pred_label.astype(np.uint8))
            im_to_save_rwcolor = im_to_save_rw.copy()
            if dargs.cmap is not None:
                im_to_save_rwcolor.putpalette(dargs.cmap.ravel())
            out_path_rw = osp.join(save_dir_rwlabelIds,
                                   '{}.png'.format(sample_name))
            out_path_rwcolor = osp.join(save_dir_rwcolor,
                                        '{}.png'.format(sample_name))
            # save no prior prediction with labelIDs and colors
            im_to_save_rw.save(out_path_rw)
            im_to_save_rwcolor.save(out_path_rwcolor)

    ## remove probmap folder
    import shutil
    shutil.rmtree(save_dir_probmap)
Example #3
def _val_impl(args, model_specs, logger):
    assert args.prefetch_threads == 1
    assert args.weights is not None
    net_args, net_auxs = util.load_params_from_file(args.weights)
    mod = _get_module(model_specs)
    has_gt = args.split in ('train', 'val')
    scale_, mean_, std_ = _get_scalemeanstd()
    if args.test_scales is None:
        crop_sizes = [model_specs['crop_size']]
    else:
        crop_sizes = sorted([int(_)
                             for _ in args.test_scales.split(',')])[::-1]

    batch_images = args.batch_images

    if has_gt:
        gt_labels = np.array(
            parse_split_file(model_specs['split_filename'], args.data_root)[1])
    save_dir = os.path.join(args.output, os.path.splitext(args.log_file)[0])
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    preds = []
    for crop_size in crop_sizes:
        save_path = os.path.join(save_dir, 'preds_sz{}'.format(crop_size))
        if os.path.isfile(save_path):
            logger.info('File %s exists, skipped crop size %d', save_path,
                        crop_size)
        with open(save_path, 'rb') as f:
                preds.append(cPickle.load(f))
            continue
        ts_list = [
            ts.Scale(crop_size),
            ts.ThreeCrops(crop_size)
            if args.test_3crops else ts.CenterCrop(crop_size),
        ]
        if scale_ > 0:
            ts_list.append(ts.ListInput(ts.ColorScale(np.single(scale_))))
        ts_list += [ts.ListInput(ts.ColorNormalize(mean_, std_))]
        transformer = ts.Compose(ts_list)
        dataiter = FileIter(
            split_filename=model_specs['split_filename'],
            data_root=args.data_root,
            has_gt=has_gt,
            batch_images=batch_images,
            transformer=transformer,
            prefetch_threads=args.prefetch_threads,
            prefetcher_type=args.prefetcher,
        )
        dataiter.reset()
        mod.bind(dataiter.provide_data,
                 dataiter.provide_label,
                 for_training=False,
                 force_rebind=True)
        if not mod.params_initialized:
            mod.init_params(arg_params=net_args, aux_params=net_auxs)
        this_call_preds = []
        start = time.time()
        counter = [0, 0]
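        # counter[0]: images seen so far, counter[1]: correct top-1 predictions
        # (used for the running accuracy below)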
        for nbatch, batch in enumerate(dataiter):
            mod.forward(batch, is_train=False)
            outputs = mod.get_outputs()[0].asnumpy()
            outputs = outputs.reshape(
                (batch_images, -1, model_specs['classes'])).mean(1)
            this_call_preds.append(outputs)
            if args.test_flipping:
                batch.data[0] = mx.nd.flip(batch.data[0], axis=3)
                mod.forward(batch, is_train=False)
                outputs = mod.get_outputs()[0].asnumpy()
                outputs = outputs.reshape(
                    (batch_images, -1, model_specs['classes'])).mean(1)
                this_call_preds[-1] = (this_call_preds[-1] + outputs) / 2
            score_str = ''
            if has_gt:
                counter[0] += batch_images
                counter[1] += (this_call_preds[-1].argmax(1) ==
                               gt_labels[nbatch * batch_images:(nbatch + 1) *
                                         batch_images]).sum()
                score_str = ', Top1 {:.4f}%'.format(100.0 * counter[1] /
                                                    counter[0])
            logger.info('Crop size {}, done {}/{} at speed: {:.2f}/s{}'.\
                format(crop_size, nbatch+1, dataiter.batches_per_epoch, 1.*(nbatch+1)*batch_images / (time.time()-start), score_str))
        logger.info('Done crop size {} in {:.4f}s.'.format(
            crop_size,
            time.time() - start))
        this_call_preds = np.vstack(this_call_preds)
        with open(save_path, 'wb') as f:
            cPickle.dump(this_call_preds, f)
        preds.append(this_call_preds)
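    # evaluate every crop size on its own and the average over all crop sizes;
    # top-1/top-5 are read off the sorted class scores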
    for num_sizes in set((1, len(crop_sizes))):
        for this_pred_inds in itertools.combinations(xrange(len(crop_sizes)),
                                                     num_sizes):
            this_pred = np.mean([preds[_] for _ in this_pred_inds], axis=0)
            this_pred_label = this_pred.argsort(1)[:, -1 - np.arange(5)]
            logger.info('Done testing crop_size %s',
                        [crop_sizes[_] for _ in this_pred_inds])
            if has_gt:
                top1 = 100. * (this_pred_label[:, 0]
                               == gt_labels).sum() / gt_labels.size
                top5 = 100. * sum(
                    map(lambda x, y: y in x.tolist(), this_pred_label,
                        gt_labels)) / gt_labels.size
                logger.info('Top1 %.4f%%, Top5 %.4f%%', top1, top5)
            else:
                # TODO: Save predictions for submission
                raise NotImplementedError('Save predictions for submission')
Example #4
def _val_impl(args, model_specs, logger):
    assert args.prefetch_threads == 1
    assert args.weights is not None

    margs = argparse.Namespace(**model_specs)
    dargs = argparse.Namespace(**get_dataset_specs(args, model_specs))

    image_list, label_list = parse_split_file(margs.dataset, args.split)
    net_args, net_auxs = mxutil.load_params_from_file(args.weights)
    net = None
    mod = _get_module(args, margs, dargs, net)
    has_gt = args.split in ('train', 'val')

    crop_sizes = sorted([int(_) for _ in args.test_scales.split(',')])[::-1]
    # TODO: multi-scale testing
    assert len(crop_sizes) == 1, 'multi-scale testing not implemented'
    label_stride = margs.feat_stride
    crop_size = crop_sizes[0]

    save_dir = osp.join(args.output, osp.splitext(args.log_file)[0])
    _make_dirs(save_dir)

    x_num = len(image_list)

    do_forward = True
    if do_forward:
        batch = None
        transformers = [ts.Scale(crop_size, Image.CUBIC, False)]
        transformers += _get_transformer_image()
        transformer = ts.Compose(transformers)

    scorer = ScoreUpdater(dargs.valid_labels, margs.classes, x_num, logger)
    scorer.reset()
    start = time.time()
    done_count = 0
    for i in xrange(x_num):
        sample_name = osp.splitext(osp.basename(image_list[i]))[0]

        # skip computed images
        if args.save_predictions:
            pred_save_path = osp.join(save_dir, 'predictions',
                                      '{}.h5'.format(sample_name))
            if osp.isfile(pred_save_path):
                logger.info('Skipped {} {}/{}'.format(sample_name, i + 1,
                                                      x_num))
                continue

        im_path = osp.join(args.data_root, image_list[i])
        rim = np.array(Image.open(im_path).convert('RGB'), np.uint8)

        if do_forward:
            im = transformer(rim)
            imh, imw = im.shape[:2]

            # init
            if batch is None:
                if dargs.ident_size:
                    input_h = make_divisible(imh, margs.feat_stride)
                    input_w = make_divisible(imw, margs.feat_stride)
                else:
                    input_h = input_w = make_divisible(crop_size,
                                                       margs.feat_stride)
                label_h, label_w = input_h / label_stride, input_w / label_stride
                test_steps = args.test_steps
                pred_stride = label_stride / test_steps
                pred_h, pred_w = label_h * test_steps, label_w * test_steps

                input_data = np.zeros((1, 3, input_h, input_w), np.single)
                input_label = 255 * np.ones((1, label_h * label_w), np.single)
                dataiter = mx.io.NDArrayIter(input_data, input_label)
                batch = dataiter.next()
                mod.bind(dataiter.provide_data,
                         dataiter.provide_label,
                         for_training=False,
                         force_rebind=True)
                if not mod.params_initialized:
                    mod.init_params(arg_params=net_args, aux_params=net_auxs)

            nim = np.zeros((3, imh + label_stride, imw + label_stride),
                           np.single)
            sy = sx = label_stride // 2
            nim[:, sy:sy + imh, sx:sx + imw] = im.transpose(2, 0, 1)

            net_preds = np.zeros((margs.classes, pred_h, pred_w), np.single)
            sy = sx = pred_stride // 2 + np.arange(test_steps) * pred_stride
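            # slide the crop over a test_steps x test_steps grid of offsets and
            # interleave the outputs into a prediction map of stride pred_stride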
            for ix in xrange(test_steps):
                for iy in xrange(test_steps):
                    input_data = np.zeros((1, 3, input_h, input_w), np.single)
                    input_data[0, :, :imh, :imw] = nim[:, sy[iy]:sy[iy] + imh,
                                                       sx[ix]:sx[ix] + imw]
                    batch.data[0] = mx.nd.array(input_data)
                    mod.forward(batch, is_train=False)
                    this_call_preds = mod.get_outputs()[0].asnumpy()[0]
                    if args.test_flipping:
                        batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
                        mod.forward(batch, is_train=False)
                        this_call_preds = 0.5 * (
                            this_call_preds +
                            mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
                    net_preds[:, iy:iy + pred_h:test_steps,
                              ix:ix + pred_w:test_steps] = this_call_preds

        # save predicted probabilities
        if args.save_predictions:
            _make_dirs(osp.dirname(pred_save_path))
            tmp = (rim.shape[:2], net_preds.astype(np.float16), pred_stride,
                   imh, imw)
            util.h5py_save(pred_save_path, *tmp)

        if args.save_results:
            # compute pixel-wise predictions
            interp_preds = interp_preds_as(rim.shape[:2], net_preds,
                                           pred_stride, imh, imw)
            pred_label = interp_preds.argmax(0)
            if dargs.id_2_label is not None:
                pred_label = dargs.id_2_label[pred_label]

            # save predicted labels into an image
            out_path = osp.join(save_dir, '{}.png'.format(sample_name))
            im_to_save = Image.fromarray(pred_label.astype(np.uint8))
            if dargs.cmap is not None:
                im_to_save.putpalette(dargs.cmap.ravel())
            im_to_save.save(out_path)
        else:
            assert not has_gt

        done_count += 1
        if not has_gt:
            logger.info('Done {}/{} with speed: {:.2f}/s'.format(
                i + 1, x_num, 1. * done_count / (time.time() - start)))
            continue

        label_path = osp.join(args.data_root, label_list[i])
        label = np.array(Image.open(label_path), np.uint8)

        # save correctly labeled pixels into an image
        out_path = osp.join(save_dir, 'correct', '{}.png'.format(sample_name))
        _make_dirs(osp.dirname(out_path))
        invalid_mask = np.logical_not(np.in1d(
            label, dargs.valid_labels)).reshape(label.shape)
        Image.fromarray(
            (invalid_mask * 255 + (label == pred_label) * 127).astype(
                np.uint8)).save(out_path)

        scorer.update(pred_label, label, i)
    logger.info('Done in %.2f s.', time.time() - start)
Example #5
def _val_impl(args, model_specs, logger):
    assert args.prefetch_threads == 1
    assert args.weights is not None

    margs = argparse.Namespace(**model_specs)
    dargs = argparse.Namespace(**get_dataset_specs(args, model_specs))

    image_list, label_list = parse_split_file(margs.dataset, args.split)
    net_args, net_auxs = mxutil.load_params_from_file(args.weights)
    net = None
    mod = _get_module(args, margs, dargs, net)
    has_gt = args.split in ('train', 'val')

    crop_sizes = sorted([int(_) for _ in args.test_scales.split(',')])[::-1]
    crop_size = crop_sizes[0]
    # TODO: multi-scale testing
    assert len(crop_sizes) == 1, 'multi-scale testing not implemented'
    label_stride = margs.feat_stride

    save_dir_color = osp.join(args.output, 'color')
    save_dir_labelId = osp.join(args.output, 'labelId')
    save_dir_trainId = osp.join(args.output, 'trainId')
    _make_dirs(save_dir_color)
    _make_dirs(save_dir_labelId)
    _make_dirs(save_dir_trainId)

    x_num = len(image_list)

    do_forward = True
    # for all images that have the same resolution
    if do_forward:
        batch = None
        transformers = [ts.Scale(crop_size, Image.CUBIC, False)]
        transformers += _get_transformer_image()
        transformer = ts.Compose(transformers)

    scorer = ScoreUpdater(dargs.valid_labels, margs.classes, x_num, logger)
    scorer.reset()
    start = time.time()
    done_count = 0
    ############################
    #for i in xrange(x_num):
    for i in range(5):
        ############################ network forward on single image
        sample_name = osp.splitext(osp.basename(image_list[i]))[0]
        im_path = osp.join(args.data_root, image_list[i])
        rim = np.array(Image.open(im_path).convert('RGB'), np.uint8)
        if do_forward:
            im = transformer(rim)
            print('Image shape :', im.shape, rim.shape)
            imh, imw = im.shape[:2]
            # init
            if batch is None:
                if dargs.ident_size:
                    input_h = make_divisible(imh, margs.feat_stride)
                    input_w = make_divisible(imw, margs.feat_stride)
                else:
                    input_h = input_w = make_divisible(crop_size,
                                                       margs.feat_stride)
                label_h, label_w = input_h / label_stride, input_w / label_stride
                test_steps = args.test_steps
                pred_stride = label_stride / test_steps
                pred_h, pred_w = label_h * test_steps, label_w * test_steps
                input_data = np.zeros((1, 3, input_h, input_w), np.single)
                input_label = 255 * np.ones((1, label_h * label_w), np.single)
                dataiter = mx.io.NDArrayIter(input_data, input_label)
                batch = dataiter.next()

                mod.bind(dataiter.provide_data,
                         dataiter.provide_label,
                         for_training=False,
                         force_rebind=True)
                # since we could use different transformers, but the same variables in the loop.
                if not mod.params_initialized:
                    mod.init_params(arg_params=net_args, aux_params=net_auxs)

            nim = np.zeros((3, imh + label_stride, imw + label_stride),
                           np.single)
            sy = sx = label_stride // 2
            nim[:, sy:sy + imh, sx:sx + imw] = im.transpose(2, 0, 1)

            nim = im.transpose(2, 0, 1)

            net_preds = np.zeros((margs.classes, pred_h, pred_w), np.single)
            #sy = sx = pred_stride // 2 + np.arange(test_steps) * pred_stride

            patchSize = 512
            nPatches = 100
            pixelCount = np.ones((imh, imw))

            interp_preds = np.zeros((19, imh, imw))

            net_preds = np.zeros((margs.classes, pred_h, pred_w), np.single)

            for ix in xrange(test_steps):
                for iy in xrange(test_steps):
                    input_data = np.zeros((1, 3, input_h, input_w), np.single)
                    #print('input_data shape:', input_data.shape, imh, imw, nim.shape, sx, sy)
                    #input_data[0, :, :imh, :imw] = nim[:, sy[iy]:sy[iy] + imh, sx[ix]:sx[ix] + imw]
                    input_data[0, :, :imh, :imw] = nim
                    batch.data[0] = mx.nd.array(input_data)
                    #print('shape :', batch.data[0].shape)
                    mod.forward(batch, is_train=False)
                    this_call_preds = mod.get_outputs()[0].asnumpy()[0]
                    #print('shape :', this_call_preds.shape)
                    if args.test_flipping:
                        batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
                        mod.forward(batch, is_train=False)
                        # average the original and flipped image prediction
                        this_call_preds = 0.5 * (
                            this_call_preds +
                            mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
                    net_preds[:, iy:iy + pred_h:test_steps,
                              ix:ix + pred_w:test_steps] = this_call_preds

            # batch = None
            # print('I am here')
            interp_preds = interp_preds_as(rim.shape[:2], net_preds,
                                           pred_stride, imh, imw)

            startRow = np.random.randint(0, imh - patchSize - 4, size=nPatches)
            startCol = np.random.randint(0, imw - patchSize - 4, size=nPatches)
            #print(startRow, startCol)

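            # fork-specific patch-wise inference: run the network on nPatches random
            # 512x512 crops of the (unpadded) image, accumulate the interpolated scores
            # in interp_preds and track per-pixel coverage in pixelCount for averaging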
            for patch in range(nPatches):

                for ix in xrange(test_steps):
                    for iy in xrange(test_steps):
                        #input_data = np.zeros((1, 3, input_h, input_w), np.single)
                        #print('input_data shape:', input_data.shape, imh, imw, nim.shape, sx, sy)
                        #input_data[0, :, :imh, :imw] = nim[:, sy[iy]:sy[iy] + imh, sx[ix]:sx[ix] + imw]

                        input_data = np.zeros((1, 3, patchSize, patchSize),
                                              np.single)
                        pixelCount_temp = np.zeros((imh, imw))
                        #print('input_data shape:', input_data.shape, imh, imw, nim.shape, sx, sy)

                        #rowIdx = sy[iy] + startRow[patch]
                        #colIdx = sx[ix] + startCol[patch]

                        rowIdx = startRow[patch]
                        colIdx = startCol[patch]

                        input_data[0, :, :, :] = nim[:,
                                                     rowIdx:rowIdx + patchSize,
                                                     colIdx:colIdx + patchSize]
                        pixelCount_temp[rowIdx:rowIdx + patchSize,
                                        colIdx:colIdx + patchSize] = 1
                        pixelCount += pixelCount_temp

                        batch.data[0] = mx.nd.array(input_data)
                        #print('shape :', batch.data[0].shape)
                        #np.save('/media/itu/CV_Lab/JavedData/cbst-master/debug_own/imBatch/' + str(ix)+ str(iy)+ '.npy', batch.data[0].asnumpy())
                        mod.forward(batch, is_train=False)
                        this_call_preds = mod.get_outputs()[0].asnumpy()[0]
                        #np.save('/media/itu/CV_Lab/JavedData/cbst-master/debug_own/imBatch/' + str(ix)+ str(iy)+ '.npy', this_call_preds)
                        #print('shape :', this_call_preds.shape)
                        if args.test_flipping:
                            batch.data[0] = mx.nd.array(
                                input_data[:, :, :, ::-1])
                            mod.forward(batch, is_train=False)
                            # average the original and flipped image prediction
                            this_call_preds = 0.5 * (
                                this_call_preds +
                                mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
                        #net_preds[:, iy:iy + pred_h:test_steps, ix:ix + pred_w:test_steps] = this_call_preds

                interp_preds_temp = interp_preds_as(input_data.shape[2:],
                                                    this_call_preds,
                                                    pred_stride, patchSize,
                                                    patchSize)
                #print(rowIdx, colIdx)
                interp_preds[:, rowIdx:rowIdx + patchSize,
                             colIdx:colIdx + patchSize] += interp_preds_temp

        interp_preds = interp_preds / pixelCount
        # batch = None
        #print('I am here')
        #interp_preds = interp_preds_as(rim.shape[:2], net_preds, pred_stride, imh, imw)
        if args.save_results:
            # compute pixel-wise predictions
            pred_label = interp_preds.argmax(0)
            # with trainIDs
            pred_label_trainId = pred_label.copy()
            # with labelIDs
            if dargs.id_2_label is not None:
                pred_label = dargs.id_2_label[pred_label_trainId]
            # #
            # save predicted label with trainIDs, labelIDs, colors
            out_path_trainId = osp.join(save_dir_trainId,
                                        '{}.png'.format(sample_name))
            out_path_labelId = osp.join(save_dir_labelId,
                                        '{}.png'.format(sample_name))
            out_path_color = osp.join(save_dir_color,
                                      '{}.png'.format(sample_name))
            im_to_save_trainId = Image.fromarray(
                pred_label_trainId.astype(np.uint8))
            im_to_save = Image.fromarray(pred_label.astype(np.uint8))
            im_to_save_labelId = im_to_save.copy()
            # save color prediction
            im_to_save.putpalette(dargs.cmap.ravel())
            im_to_save.save(out_path_color)
            im_to_save_trainId.save(out_path_trainId)
            im_to_save_labelId.save(out_path_labelId)

        else:
            assert not has_gt

        done_count += 1

        if not has_gt:
            logger.info('Done {}/{} with speed: {:.2f}/s'.format(
                i + 1, x_num, 1. * done_count / (time.time() - start)))
            continue
        if args.split in ('train', 'train+', 'val'):
            label_path = osp.join(args.data_root, label_list[i])
            label = np.array(Image.open(label_path), np.uint8)

            # save correctly labeled pixels into an image
            out_path = osp.join(save_dir_color, 'correct',
                                '{}.png'.format(sample_name))
            _make_dirs(osp.dirname(out_path))
            invalid_mask = np.logical_not(np.in1d(
                label, dargs.valid_labels)).reshape(label.shape)
            Image.fromarray(
                (invalid_mask * 255 + (label == pred_label) * 127).astype(
                    np.uint8)).save(out_path)
            scorer.update(pred_label, label, i)
    print('End is here :', scorer.overall_scores(logger))
    logger.info('Done in %.2f s.', time.time() - start)
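All five examples share the same MXNet inference skeleton: load the trained parameters, bind a Module against a dummy iterator with a fixed input shape, copy each image (and optionally its horizontal flip) into batch.data[0], and average the two outputs. The following is a minimal sketch of that pattern, assuming a stand-in convolution symbol; the helper names build_module and forward_with_flip are illustrative only and not part of the projects above.

import mxnet as mx
import numpy as np


def build_module(input_h, input_w, arg_params=None, aux_params=None, ctx=None):
    # stand-in symbol; the examples above build ResNet-38 ('fcrna_model_a1') instead
    data = mx.sym.Variable('data')
    sym = mx.sym.Convolution(data=data, num_filter=19, kernel=(3, 3), pad=(1, 1))
    mod = mx.mod.Module(sym, data_names=('data',), label_names=None,
                        context=ctx if ctx is not None else mx.cpu())
    # a dummy iterator only fixes the input shape for binding
    dummy = mx.io.NDArrayIter(np.zeros((1, 3, input_h, input_w), np.single))
    batch = dummy.next()
    mod.bind(dummy.provide_data, for_training=False, force_rebind=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params,
                    allow_missing=True)
    return mod, batch


def forward_with_flip(mod, batch, image_chw, flip=True):
    # image_chw: (3, h, w) array already normalized by the transformer;
    # h and w must not exceed the bound input size
    _, _, input_h, input_w = batch.data[0].shape
    c, h, w = image_chw.shape
    input_data = np.zeros((1, c, input_h, input_w), np.single)
    input_data[0, :, :h, :w] = image_chw
    batch.data[0] = mx.nd.array(input_data)
    mod.forward(batch, is_train=False)
    preds = mod.get_outputs()[0].asnumpy()[0]
    if flip:
        # average the predictions of the original and the horizontally flipped image
        batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
        mod.forward(batch, is_train=False)
        preds = 0.5 * (preds + mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
    return preds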