Example #1
0
def get_spixel_image(given_img,
                     spix_index,
                     n_spixels=600,
                     b_enforce_connect=False):

    if not isinstance(given_img, np.ndarray):
        # torch tensor (C, H, W) -> numpy (H, W, C)
        given_img_np_ = given_img.detach().cpu().numpy().transpose(1, 2, 0)
    else:  # already an ndarray, e.g. after a Lab-to-RGB conversion
        given_img_np_ = given_img

    if not isinstance(spix_index, np.ndarray):
        spix_index_np = spix_index.detach().cpu().numpy().transpose(0, 1)
    else:
        spix_index_np = spix_index

    h, w = spix_index_np.shape
    given_img_np = cv2.resize(given_img_np_,
                              dsize=(w, h),
                              interpolation=cv2.INTER_CUBIC)

    if b_enforce_connect:
        spix_index_np = spix_index_np.astype(np.int64)
        segment_size = (given_img_np_.shape[0] *
                        given_img_np_.shape[1]) / (int(n_spixels) * 1.0)
        min_size = int(0.06 * segment_size)
        max_size = int(3 * segment_size)
        spix_index_np = enforce_connectivity(spix_index_np[None, :, :],
                                             min_size, max_size)[0]
    cur_max = np.max(given_img_np)
    spixel_bd_image = mark_boundaries(given_img_np / cur_max,
                                      spix_index_np.astype(int),
                                      color=(0, 1, 1))  # cyan boundaries
    return (cur_max * spixel_bd_image).astype(np.float32).transpose(
        2, 0, 1), spix_index_np
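# Hedged usage sketch (not part of the original example): it feeds a toy torch
# image and a synthetic grid of superpixel labels through get_spixel_image.
# It assumes the example's own imports (numpy as np, cv2, skimage's
# mark_boundaries) are in scope and that get_spixel_image is defined as above.
import numpy as np
import torch
import imageio

if __name__ == '__main__':
    h, w = 320, 480
    img_tensor = torch.rand(3, h, w)                  # fake RGB image, (C, H, W) in [0, 1]
    yy, xx = np.mgrid[0:h, 0:w]
    toy_labels = (yy // 16) * (w // 16) + (xx // 16)  # 600 rectangular "superpixels"
    viz, _ = get_spixel_image(img_tensor, torch.from_numpy(toy_labels),
                              n_spixels=600, b_enforce_connect=False)
    # viz is (3, H, W) float32; convert back to (H, W, 3) uint8 for saving
    imageio.imwrite('toy_bdry.jpg',
                    np.clip(viz.transpose(1, 2, 0) * 255, 0, 255).astype(np.uint8))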
Example #2
0
def compute_spixels(data_type, n_spixels, num_steps,
                    caffe_model, out_folder, is_connected=True):

    image_list = IMG_LIST[data_type]

    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

    p_scale = 0.40
    color_scale = 0.26

    with open(image_list) as list_f:
        for imgname in list_f:
            print(imgname)
            imgname = imgname[:-1]  # strip the trailing newline
            [inputs, height, width] = \
                fetch_and_transform_data(imgname, data_type,
                                         ['img', 'label', 'problabel'],
                                         int(n_spixels))

            height = inputs['img'].shape[2]
            width = inputs['img'].shape[3]
            [spixel_initmap, feat_spixel_initmap, num_spixels_h, num_spixels_w] =\
                transform_and_get_spixel_init(int(n_spixels), [height, width])

            dinputs = {}
            dinputs['img'] = inputs['img']
            dinputs['spixel_init'] = spixel_initmap
            dinputs['feat_spixel_init'] = feat_spixel_initmap

            pos_scale_w = (1.0 * num_spixels_w) / (float(p_scale) * width)
            pos_scale_h = (1.0 * num_spixels_h) / (float(p_scale) * height)
            pos_scale = np.max([pos_scale_h, pos_scale_w])

            net = load_ssn_net(height, width, int(num_spixels_w * num_spixels_h),
                               float(pos_scale), float(color_scale),
                               num_spixels_h, num_spixels_w, int(num_steps))

            if caffe_model is not None:
                net.copy_from(caffe_model)
            else:
                net = initialize_net_weight(net)

            num_spixels = int(num_spixels_w * num_spixels_h)
            result = net.forward_all(**dinputs)

            given_img = np.array(Image.open(IMG_FOLDER[data_type] + imgname + '.jpg'))
            spix_index = np.squeeze(net.blobs['new_spix_indices'].data).astype(int)

            if is_connected:  # use the flag; the bare `enforce_connectivity` function is always truthy
                segment_size = (given_img.shape[0] * given_img.shape[1]) / (int(n_spixels) * 1.0)
                min_size = int(0.06 * segment_size)
                max_size = int(3 * segment_size)
                spix_index = enforce_connectivity(spix_index[None, :, :], min_size, max_size)[0]
            
            spixel_image = get_spixel_image(given_img, spix_index)
            out_img_file = out_folder + imgname + '_bdry.jpg'
            imsave(out_img_file, spixel_image)
            out_file = out_folder + imgname + '.npy'
            np.save(out_file, spix_index)

    return
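# Sanity check of the connectivity size heuristic used above (my own
# illustration, not from the original script): for a 481x321 BSDS image and
# 600 requested superpixels the bounds work out to roughly 15 and 772 pixels.
if __name__ == '__main__':
    height, width, n_spixels = 321, 481, 600
    segment_size = (height * width) / (n_spixels * 1.0)     # ~257.3 px per superpixel
    print(int(0.06 * segment_size), int(3 * segment_size))  # 15 772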
Example #3
0
def test(model, img_paths, save_path, spixeIds, idx, scale):
    # Data loading code
    input_transform = transforms.Compose([
        flow_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
        transforms.Normalize(mean=[0.411, 0.432, 0.45], std=[1, 1, 1])
    ])

    img_file = img_paths[idx]
    load_path = str(img_file)
    imgId = os.path.basename(img_file)[:-4]

    # original size is 481*321 or 321*481
    img_ = imread(load_path)
    H_, W_, _ = img_.shape

    # choose the right spixel index map
    spixl_map_idx_tensor = spixeIds[0]
    img = cv2.resize(img_, (int(1024 * scale), int(512 * scale)),
                     interpolation=cv2.INTER_CUBIC)
    # if H_ == 321 and W_==481:
    #     spixl_map_idx_tensor = spixeIds[0]
    #     img = cv2.resize(img_, (int(480 * scale), int(320 * scale)), interpolation=cv2.INTER_CUBIC)
    # elif H_ == 481 and W_ == 321:
    #     spixl_map_idx_tensor = spixeIds[1]
    #     img = cv2.resize(img_, (int(320 * scale), int(480 * scale)), interpolation=cv2.INTER_CUBIC)
    # else:
    #     print('The image size is wrong!')
    #     return

    img1 = input_transform(img)
    ori_img = input_transform(img_)
    mean_values = torch.tensor([0.411, 0.432, 0.45],
                               dtype=img1.dtype).view(3, 1, 1)

    # compute output
    tic = time.time()
    output = model(img1.cuda().unsqueeze(0))

    # assign the spixel map and resize it to the original size
    curr_spixl_map = update_spixl_map(spixl_map_idx_tensor, output)
    ori_sz_spixel_map = F.interpolate(curr_spixl_map.type(torch.float),
                                      size=(H_, W_),
                                      mode='nearest').type(torch.int)

    spix_index_np = ori_sz_spixel_map.squeeze().detach().cpu().numpy(
    ).transpose(0, 1)
    spix_index_np = spix_index_np.astype(np.int64)
    segment_size = (spix_index_np.shape[0] *
                    spix_index_np.shape[1]) / (int(600 * scale * scale) * 1.0)
    min_size = int(0.06 * segment_size)
    max_size = int(3 * segment_size)
    spixel_label_map = enforce_connectivity(spix_index_np[None, :, :],
                                            min_size, max_size)[0]

    torch.cuda.synchronize()
    toc = time.time() - tic

    n_spixel = len(np.unique(spixel_label_map))
    given_img_np = (ori_img + mean_values).clamp(
        0, 1).detach().cpu().numpy().transpose(1, 2, 0)
    spixel_bd_image = mark_boundaries(given_img_np / np.max(given_img_np),
                                      spixel_label_map.astype(int),
                                      color=(0, 1, 1))
    spixel_viz = spixel_bd_image.astype(np.float32).transpose(2, 0, 1)

    # ************************ Save all result********************************************
    # save img, uncomment it if needed
    # if not os.path.isdir(os.path.join(save_path, 'img')):
    #     os.makedirs(os.path.join(save_path, 'img'))
    # spixl_save_name = os.path.join(save_path, 'img', imgId + '.jpg')
    # img_save = (ori_img + mean_values).clamp(0, 1)
    # imsave(spixl_save_name, img_save.detach().cpu().numpy().transpose(1, 2, 0))

    # save spixel viz
    if not os.path.isdir(os.path.join(save_path, 'spixel_viz')):
        os.makedirs(os.path.join(save_path, 'spixel_viz'))
    spixl_save_name = os.path.join(save_path, 'spixel_viz',
                                   imgId + '_sPixel.png')
    imsave(spixl_save_name, spixel_viz.transpose(1, 2, 0))

    # save the unique maps as csv for eval
    # if not os.path.isdir(os.path.join(save_path, 'map_csv')):
    #     os.makedirs(os.path.join(save_path, 'map_csv'))
    # output_path = os.path.join(save_path, 'map_csv', imgId + '.csv')
    # # plus 1 to make it consistent with the toolkit format
    # np.savetxt(output_path, (spixel_label_map + 1).astype(int), fmt='%i', delimiter=",")

    if idx % 10 == 0:
        print("processing %d" % idx)

    return toc, n_spixel
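# Note on input_transform above (my own illustration, not from the original
# script): the two Normalize calls compose to img / 255 - mean, i.e. the uint8
# image is scaled to [0, 1] and the per-channel mean is subtracted while the
# std stays 1, which is why (ori_img + mean_values) later undoes it.
if __name__ == '__main__':
    import numpy as np
    pixel = np.array([105., 110., 115.], dtype=np.float32)  # one RGB pixel
    mean = np.array([0.411, 0.432, 0.45], dtype=np.float32)
    print(pixel / 255.0 - mean)  # what input_transform produces for that pixel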
Example #4
0
def compute_spixels(data_type,
                    n_spixels,
                    num_steps,
                    caffe_model,
                    out_folder,
                    is_connected=True):

    image_list = IMG_LIST[data_type]

    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

    p_scale = 0.40
    color_scale = 0.26

    asa_total = 0.0
    asa_count = 0
    with open(image_list) as list_f:
        for imgname in list_f:
            print(imgname)
            imgname = imgname[:-1]
            [inputs, height, width] = \
                fetch_and_transform_data(imgname, data_type,
                                         ['img', 'label', 'problabel'],
                                         int(n_spixels))

            height = inputs['img'].shape[2]
            width = inputs['img'].shape[3]
            [spixel_initmap, feat_spixel_initmap, num_spixels_h, num_spixels_w] =\
                transform_and_get_spixel_init(int(n_spixels), [height, width])

            dinputs = {}
            dinputs['img'] = inputs['img']
            dinputs['spixel_init'] = spixel_initmap
            dinputs['feat_spixel_init'] = feat_spixel_initmap

            pos_scale_w = (1.0 * num_spixels_w) / (float(p_scale) * width)
            pos_scale_h = (1.0 * num_spixels_h) / (float(p_scale) * height)
            pos_scale = np.max([pos_scale_h, pos_scale_w])

            net = load_ssn_net(height, width,
                               int(num_spixels_w * num_spixels_h),
                               float(pos_scale), float(color_scale),
                               num_spixels_h, num_spixels_w, int(num_steps))

            if caffe_model is not None:
                net.copy_from(caffe_model)
            else:
                net = initialize_net_weight(net)

            num_spixels = int(num_spixels_w * num_spixels_h)
            result = net.forward_all(**dinputs)

            given_img = fromimage(
                Image.open(IMG_FOLDER[data_type] + imgname + '.jpg'))
            spix_index = np.squeeze(
                net.blobs['new_spix_indices'].data).astype(int)

            if is_connected:  # honor the flag; the bare function object is always truthy
                segment_size = (given_img.shape[0] *
                                given_img.shape[1]) / (int(n_spixels) * 1.0)
                min_size = int(0.06 * segment_size)
                max_size = int(3 * segment_size)
                spix_index = enforce_connectivity(spix_index[None, :, :],
                                                  min_size, max_size)[0]
            # evaluate ASA against every human annotation in the BSDS ground truth
            spList = spix_index.flatten().tolist()
            gt_filename = 'data/BSR/BSDS500/data/groundTruth/test/{}.mat'.format(
                imgname)
            from scipy.io import loadmat
            gtseg_all = loadmat(gt_filename)['groundTruth'][0]
            nlabel = len(gtseg_all)
            for t in range(nlabel):
                # [0][0][0] unpacks the segmentation map from the nested MATLAB struct
                gtseg = gtseg_all[t][0][0][0]
                gtList = gtseg.flatten().tolist()
                asa = computeASA(spList, gtList, 0)
                asa_total += asa
                asa_count += 1
            print('count: {}, asa: {}'.format(asa_count,
                                              asa_total / asa_count))
            # save image
            # spixel_image = get_spixel_image(given_img, spix_index)
            # out_img_file = out_folder + imgname + '_bdry.jpg'
            # imsave(out_img_file, spixel_image)
            # out_file = out_folder + imgname + '.npy'
            # np.save(out_file, spix_index)
    return
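# For reference, a minimal sketch of what computeASA measures (my own
# illustration, not the original implementation): achievable segmentation
# accuracy assigns every superpixel to the ground-truth segment it overlaps
# most and reports the fraction of pixels covered by those best overlaps.
def asa_score(sp_labels, gt_labels):
    import numpy as np
    sp_labels = np.asarray(sp_labels, dtype=np.int64)
    gt_labels = np.asarray(gt_labels, dtype=np.int64)
    covered = 0
    for sp in np.unique(sp_labels):
        gt_in_sp = gt_labels[sp_labels == sp]
        covered += np.bincount(gt_in_sp).max()  # largest overlap with any GT segment
    return covered / float(gt_labels.size)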
Example #5
0
def compute_spixels(num_spixel, num_steps, pre_model, out_folder):

    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
        # os.makedirs(out_folder+'png')
        # os.makedirs(out_folder + 'mat')

    dtype = 'test'
    dataloader = data.DataLoader(Dataset_T(num_spixel=num_spixel),
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=1)
    model = create_ssn_net(num_spixels=num_spixel,
                           num_iter=num_steps,
                           num_spixels_h=10,
                           num_spixels_w=10,
                           dtype=dtype,
                           ssn=0)
    model = torch.nn.DataParallel(model)
    if pre_model is not None:
        if torch.cuda.is_available():
            model.load_state_dict(torch.load(pre_model))
        else:
            model.load_state_dict(torch.load(pre_model, map_location='cpu'))
    else:
        raise ValueError('no pretrained model provided')
    criten = Loss()
    device = torch.device('cpu')
    if torch.cuda.is_available():
        model.cuda()
        device = torch.device('cuda')
    for iter, [
            inputs, num_h, num_w, init_index, cir_index, p2sp_index_,
            invisible, file_name
    ] in enumerate(dataloader):
        with torch.no_grad():
            img = inputs['img'].to(device)
            label = inputs['label'].to(device)
            problabel = inputs['problabel'].to(device)
            num_h = num_h.to(device)
            num_w = num_w.to(device)
            init_index = [x.to(device) for x in init_index]
            cir_index = [x.to(device) for x in cir_index]
            p2sp_index_ = p2sp_index_.to(device)
            invisible = invisible.to(device)
            recon_feat2, recon_label, new_spix_indices = model(
                img, p2sp_index_, invisible, init_index, cir_index, problabel,
                num_h, num_w, device)
            # loss, loss_1, loss_2 = criten(recon_feat2, img, recon_label, label)

            given_img = np.asarray(Image.open(file_name[0]))
            h, w = given_img.shape[0], given_img.shape[1]
            new_spix_indices = new_spix_indices[:, :h, :w].contiguous()
            spix_index = new_spix_indices.cpu().numpy()[0]
            spix_index = spix_index.astype(int)

            if enforce_connectivity:  # NOTE: tests the imported function object, so this always runs
                segment_size = (given_img.shape[0] * given_img.shape[1]) / (
                    int(num_h * num_w) * 1.0)
                min_size = int(0.06 * segment_size)
                max_size = int(3 * segment_size)
                spix_index = enforce_connectivity(spix_index[np.newaxis, :, :],
                                                  min_size, max_size)[0]
            # given_img_ = np.zeros([spix_index.shape[0], spix_index.shape[1], 3], dtype=np.int)
            # h, w = given_img.shape[0], given_img.shape[1]
            # given_img_[:h, :w] = given_img

            counter_image = np.zeros_like(given_img)
            counter_image = get_spixel_image(counter_image, spix_index)
            spixel_image = get_spixel_image(given_img, spix_index)

            imgname = file_name[0].split('/')[-1][:-4]
            out_img_file = out_folder + imgname + '_bdry_.jpg'
            imageio.imwrite(out_img_file, spixel_image)
            # out_file = out_folder + imgname + '.npy'
            # np.save(out_file, spix_index)

            # validation code only for sp_pix 400
            # out_file_mat = out_folder + 'mat/'+ imgname + '.mat'
            # scio.savemat(out_file_mat, {'segs': spix_index})

            # out_count_file = out_folder + 'png/' + imgname + '.png'
            # imageio.imwrite(out_count_file, counter_image)
            print(iter)
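# Hedged usage sketch for the function above; the checkpoint and output paths
# are placeholders, not names taken from the original script.
# compute_spixels(num_spixel=100,
#                 num_steps=10,
#                 pre_model='checkpoints/ssn_model.pth',  # hypothetical path
#                 out_folder='output/spixel_viz/')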
Example #6
0
def compute_spixels(data_type,
                    n_spixels,
                    num_steps,
                    caffe_model,
                    out_folder,
                    is_connected=True):

    image_list = IMG_LIST[data_type]

    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

    p_scale = 0.23
    color_scale = 0.26
    start_time = time.time()
    bound = range(25, 33, 2)
    # bound = range(31, 71, 2)
    for i in range(len(bound)):
        threshold = bound[i] / 100.0
        if threshold <= 0.3:
            min_size = [0]
        else:
            # min_size = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
            min_size = [0]

        for j in range(len(min_size)):
            new_save_root = os.path.join(
                out_folder,
                'threshold_{:.2f}_{}'.format(threshold, min_size[j]))
            if not os.path.exists(new_save_root):
                os.mkdir(new_save_root)

            with open(image_list) as list_f:
                for imgname in list_f:
                    print(imgname)
                    imgname = imgname[:-1]
                    [inputs, height, width] = \
                        fetch_and_transform_data(imgname, data_type,
                                                 ['img', 'label', 'problabel'],
                                                 int(n_spixels))

                    height = inputs['img'].shape[2]
                    width = inputs['img'].shape[3]
                    [spixel_initmap, feat_spixel_initmap, num_spixels_h, num_spixels_w] = \
                        transform_and_get_spixel_init(int(n_spixels), [height, width])

                    dinputs = {}
                    dinputs['img'] = inputs['img']
                    dinputs['spixel_init'] = spixel_initmap
                    dinputs['feat_spixel_init'] = feat_spixel_initmap

                    print(j)
                    # newly added: constant-valued blobs that carry the boundary
                    # threshold and minimum segment size into the network
                    thresholda = np.random.randint(bound[i],
                                                   bound[i] + 1,
                                                   size=(1, 1, 1, 1))
                    thresholda = thresholda / 100.0
                    min_sizea = np.random.randint(min_size[j],
                                                  min_size[j] + 1,
                                                  size=(1, 1, 1, 1))
                    dinputs['bound_param'] = thresholda
                    dinputs['minsize_param'] = min_sizea

                    pos_scale_w = (1.0 * num_spixels_w) / (float(p_scale) *
                                                           width)
                    pos_scale_h = (1.0 * num_spixels_h) / (float(p_scale) *
                                                           height)
                    pos_scale = np.max([pos_scale_h, pos_scale_w])

                    net = load_ssn_net(height, width,
                                       int(num_spixels_w * num_spixels_h),
                                       float(pos_scale), float(color_scale),
                                       num_spixels_h, num_spixels_w,
                                       int(num_steps))

                    if caffe_model is not None:
                        net.copy_from(caffe_model)
                    else:
                        net = initialize_net_weight(net)

                    num_spixels = int(num_spixels_w * num_spixels_h)
                    result = net.forward_all(**dinputs)

                    given_img = fromimage(
                        Image.open(IMG_FOLDER[data_type] + imgname + '.jpg'))

                    spix_index = np.squeeze(
                        net.blobs['new_spix_indices'].data).astype(int)

                    # spix_index = spix_index[None, :, :]
                    print(spix_index.shape)

                    if is_connected:  # honor the flag; the bare function object is always truthy
                        segment_size = (given_img.shape[0] * given_img.shape[1]
                                        ) / (int(n_spixels) * 1.0)
                        min_size_sp = int(0.06 * segment_size)
                        max_size_sp = int(3 * segment_size)
                        spix_index = enforce_connectivity(
                            spix_index[None, :, :], min_size_sp,
                            max_size_sp)[0]

                    net.blobs['new_spix_indices'].data[0] = spix_index

                    out = net.forward(start='segmentation')

                    # save in .mat format
                    # out2 = np.squeeze(net.blobs['segmentation'].data).astype(int)

                    # # enforce_connectivity
                    out2 = net.blobs['segmentation'].data[0].copy().astype(int)
                    # #
                    # #
                    if is_connected:
                        segment_size = (given_img.shape[0] * given_img.shape[1]
                                        ) / (int(n_spixels) * 1.0)
                        min_size1 = int(0.24 * segment_size)
                        max_size1 = int(200 * segment_size)
                        out2 = enforce_connectivity(out2, min_size1,
                                                    max_size1)[0]

                    out2 = out2[None, :, :]
                    out2 = out2.transpose((1, 2, 0)).astype(dtype=np.uint16)
                    sio.savemat(new_save_root + '/' + imgname + '.mat',
                                {'Segmentation': out2})

    diff_time = time.time() - start_time

    print('Processing took {:.3f}s in total'.format(diff_time))

    return
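# Hedged follow-up (not in the original script): each saved file holds an
# H x W x 1 uint16 label map under the key 'Segmentation', the layout the
# BSDS500 benchmark toolkit reads back, e.g.:
#     seg = sio.loadmat(new_save_root + '/' + imgname + '.mat')['Segmentation']
#     print(seg.shape, seg.dtype, seg.max())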