Example #1
    def display_results(self, mode='show'):
        if mode == 'show':
            print("displaying top {} matches...".format(
                self.__relevant_images_count))
            for counter, result in enumerate(
                    sorted(self.results.items(),
                           key=lambda element: element[1],
                           reverse=False)):
                if counter == self.__relevant_images_count:
                    break
                print(result[1])
                io.imshow_collection(
                    [self.__sample_image,
                     io.imread(result[0])])
                io.show()
        else:
            # `name` is assumed to be defined in the enclosing scope
            os.mkdir('D:/UGWork/res_slic_exp_new/' + name)
            for counter, result in enumerate(
                    sorted(self.results.items(),
                           key=lambda element: element[1],
                           reverse=False)):
                if counter == self.__relevant_images_count:
                    break
                print(result[0].split('/')[-1], result[1])
                io.imsave(fname='D:/UGWork/res_slic_exp_new/' + name + '/' +
                          str(counter + 1) + '.jpg',
                          arr=io.imread(result[0]))
Example #2
def main():
    # read the image as grayscale, then invert it
    image_data = io.imread("test.png", as_gray=True)
    image_data = util.invert(image_data)
    ceed = [[1, 1, 1], [0, 0, 0], [0, 0, 0]]  # 3x3 convolution kernel (top-edge detector)
    new_data = convolute(image_data, ceed)
    max_data = maxpul(new_data, [3, 3])
    #    new_data = convolute(max_data, ceed)
    #    max_data = maxpul(new_data, [2,2])
    io.imshow_collection([image_data, new_data, max_data])
    io.show()
    print(new_data)
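# `convolute` and `maxpul` are helpers defined elsewhere in this project.
# A minimal sketch of compatible implementations (the exact semantics are
# guesses from the call sites: valid-mode convolution, non-overlapping pooling):
import numpy as np

def convolute(image, kernel):
    # Valid-mode 2D convolution of a grayscale image with a small kernel
    kernel = np.flipud(np.fliplr(np.asarray(kernel, dtype=float)))
    kh, kw = kernel.shape
    out = np.zeros((image.shape[0] - kh + 1, image.shape[1] - kw + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(image[i:i + kh, j:j + kw] * kernel)
    return out

def maxpul(image, pool_shape):
    # Max-pooling with non-overlapping windows of shape pool_shape
    ph, pw = pool_shape
    h = image.shape[0] - image.shape[0] % ph  # crop to a multiple of the window
    w = image.shape[1] - image.shape[1] % pw
    pooled = image[:h, :w].reshape(h // ph, ph, w // pw, pw)
    return pooled.max(axis=(1, 3))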
Example #3
    def debug():
        marked = np.zeros(image.shape, dtype=np.uint8)

        for rectangle in rectangles:
            rr, cc = rectangle.pixels(marked.shape)
            randcolor = randint(0, 255), randint(0, 255), randint(0, 255)
            marked[rr, cc] = randcolor

        print(image.shape, segments.shape, marked.shape)

        io.imshow_collection([image, segments, marked])
        io.show()
Example #4
def show(img):
    """
    Inputs: a list of NumPy arrays or a single NumPy array
    Outputs: None
    Example:
    >>> img = np.ones((10, 10))
    >>> show(img)
    >>> imgs = [img, np.random.rand(10, 10)]
    >>> show(imgs)
    """
    if isinstance(img, list):
        io.imshow_collection(img)
    else:
        io.imshow(img)
    io.show()  # called in both branches so lists are displayed too
Example #5
def match_images(petal_image, vein_image, s1, s2):
    sz = petal_image.shape
    # Construct an initial guess of the transformation required to align the two images
    (PetalCenter, PetalArea, PetalAngle, PetalLength,
     PetalWidth) = shapeStatistics(s1)
    (VeinCenter, VeinArea, VeinAngle, VeinLength,
     VeinWidth) = shapeStatistics(s2)
    scale = math.sqrt(VeinArea / PetalArea)
    number_of_iterations = 100
    termination_eps = 1e-5
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                number_of_iterations, termination_eps)
    AngleDifference = VeinAngle - PetalAngle
    warp_matrix = cv2.getRotationMatrix2D(PetalCenter, AngleDifference,
                                          scale).astype(np.float32)
    warp_matrix[0][2] += VeinCenter[0] - PetalCenter[0]
    warp_matrix[1][2] += VeinCenter[1] - PetalCenter[1]
    #Getting annotations; recreate new dimensional conditions

    #    print('vein_image: ',vein_image.shape)

    #    test = cv2.warpAffine(cv2.bitwise_and(vein_image,s2), warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    #    io.imshow_collection([s1, test])
    #    io.show()
    try:
        (cc, final_warp) = cv2.findTransformECC(s1,
                                                s2,
                                                warp_matrix,
                                                cv2.MOTION_AFFINE,
                                                criteria,
                                                inputMask=None,
                                                gaussFiltSize=5)
    except cv2.error:
        cc = 0

    if cc == 0:
        # checked before the quality threshold so the branch is reachable,
        # and raising here avoids returning an undefined `final_warp`
        raise ValueError("Cannot find any alignment for the images provided.")
    elif cc < 0.9:
        print(
            "Error: can't find satisfactory alignment, displaying masks for debug"
        )
        io.imshow_collection([s1, s2])
        io.show()
    return cv2.warpAffine(cv2.bitwise_and(vein_image, s2),
                          final_warp, (sz[1], sz[0]),
                          flags=cv2.INTER_LINEAR +
                          cv2.WARP_INVERSE_MAP), final_warp
Example #6
def load_dump_test_data():
    paths = glob.glob(os.path.join(data_root_path, 'imgs/test/*.jpg'))[0:500]
    x_test = []
    for img_path in tqdm(paths):
        img = io.imread(img_path)
        img = scale(img)
        img = normalize(img)
        x_test.append(img)

    x_test = np.array(x_test)

    if DISPLAY_SAMPLE:
        io.imshow_collection(x_test)
        io.show()

    with open("test_data.pkl", 'wb') as f:
        pickle.dump(x_test, f)
Example #7
    def _compose_images_for_slice(im, msk, inner, outer, out_labels):
        def colorize_regions(image,
                             region1,
                             region2,
                             h1=0.35,
                             h2=0.,
                             s1=0.7,
                             s2=0.7):
            hsv = np.zeros(image.shape + (3, ), dtype=float)
            hsv[..., 2] = 0.5 + 0.5 * image / 255.
            hsv[..., 1] = region1 * s1 + region2 * s2
            hsv[..., 0] = h1 * region1 + h2 * region2
            return hsv2rgb(hsv)

        original_segm = colorize_regions(im, msk, 1 - msk)
        inner_and_outer = colorize_regions(im, inner, outer)
        walker_results = colorize_regions(im, out_labels == 1, out_labels == 2)

        io.imshow_collection((original_segm, inner_and_outer, walker_results))
        io.show()
Example #8
def load_dump_training_data():
    training_imgs_list = pd.read_csv(
        os.path.join(data_root_path, 'driver_imgs_list.csv'))
    print(list(training_imgs_list))
    print(training_imgs_list[1000:1020])
    number_of_classes = len(training_imgs_list.classname.value_counts())
    grouped_training_img_list = training_imgs_list.groupby("classname")
    print(grouped_training_img_list.describe())
    x_train = []
    y_train = []

    for class_number in range(number_of_classes):
        training_group = grouped_training_img_list.get_group(
            'c{}'.format(class_number))
        group_path = os.path.join(
            data_root_path,
            'imgs/train/c{}/'.format(class_number)) + training_group.img
        paths = group_path.tolist()[0:SAMPLE_SIZE]
        # sanity check
        # print("number of image files for class {} is {}".format(class_number, len(group_path)))

        for img_path in tqdm(paths):
            img = io.imread(img_path)
            img = scale(img)
            img = normalize(img)
            x_train.append(img)
            y_train.append(class_number)

    x_train = np.array(x_train)
    y_train = np.array(y_train)
    #
    # print("sample image data: shape[{}] max [{}] min[{}]".format(img.shape, img.max(),
    #                                                              img.min()))
    if DISPLAY_SAMPLE:
        io.imshow_collection(x_train)
        io.show()

    with open("preprocessed_data.pkl", 'wb') as f:
        pickle.dump((x_train, y_train), f)
Example #9
def show_boxes(images, labels, color=(1, 0, 0), **kwargs):
    o = int(kwargs.get('width', 4) / 2)
    path = kwargs.get('path', 'detected/')
    only = kwargs.get('only')
    save = kwargs.get('save')
    display = kwargs.get('display', True)

    if save:
        if not os.path.exists(path):
            os.makedirs(path)

    boxed_images = []
    for index in tqdm(range(len(images)), desc='Processing images'):
        if only is not None and index not in only:
            continue
        img = images[index].copy()
        img_h, img_l = img.shape[:2]
        boxes = labels[labels[:, 0] == index + 1]

        # Draw the boxes
        for box in boxes:
            x, y, h, l = box[1:5]
            xe, xs = max(0, x - o), min(x + o, img_h)
            ye, ys = max(0, y - o), min(y + o, img_l)

            img[xe:xs + h, ye:ys] = color
            img[xe:xs + h, ye + l:ys + l] = color
            img[xe:xs, ye:ys + l] = color
            img[xe + h:xs + h, ye:ys + l] = color

        if save:
            imsave(os.path.join(path, f"{index}-{len(boxes)}d.jpg"), img)
        if display:
            boxed_images.append(img)

    if display:
        imshow_collection(boxed_images)
Example #10
            start = end
        return z_unflatten
    
    def set_actnorm_init(self):
        # a method to set actnorm to True to prevent re-initializing when resuming training
        for i in range(len(self.glow_modules)):
            module_name = self.glow_modules[i].__class__.__name__
            if module_name == "Flow":
                self.glow_modules[i].actnorm.initialized = True
                self.glow_modules[i].coupling.net.actnorm1.initialized = True
                self.glow_modules[i].coupling.net.actnorm2.initialized = True

if __name__ == "__main__":
    size = (16,3,64,64)
    images = sio.imread_collection("./images/*.png")
    x = np.array([ img.astype("float")/255 for img in images ]).transpose([0,3,1,2])
    x = torch.tensor(x, device=device, dtype=torch.float, requires_grad=True)
    logdet = torch.tensor(0.0,requires_grad=False,device=device,dtype=torch.float)
    
    with torch.no_grad():
        glow = Glow((3,64,64),K=32,L=4,
                    coupling="affine",nn_init_last_zeros=True,
                    device=device)
        z,logdet, actloss = glow(x, logdet=logdet, reverse=False)
        x_rev = glow(z, reverse=True)
    print(torch.norm(x_rev - x).item())       
    reconstructed = x_rev.data.cpu().numpy().transpose([0,2,3,1])
    sio.imshow_collection(images)
    sio.imshow_collection(reconstructed)
Example #11
    def explore(self, k: int = 12):
        rnd_indexes = torch.randperm(self.df.shape[0])[:k]
        samples = [self[int(i)]["image"] for i in rnd_indexes]
        samples = map(lambda x: x.numpy().transpose(1, 2, 0), samples)
        io.imshow_collection(list(samples))
        io.show()
Example #12
    X_HSD = np.concatenate((cx, cy, D), 2)
    return X_HSD
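# The snippet above begins mid-way through RGB2HSD2; only its last two lines
# survive. Judging from the inverse transform HSD2RGB2 below, the missing head
# plausibly computes optical density and two chromatic coordinates. A sketch,
# not the original code (assumes X_RGB is float in (0, 1]):
def RGB2HSD2(X_RGB, eps=1e-8):
    X_OD = -np.log(np.clip(X_RGB, eps, 1.0))       # optical density; inverse of exp(-X_OD) below
    D = np.clip(np.mean(X_OD, axis=2), eps, None)  # density = mean optical density
    cx = np.expand_dims(X_OD[..., 0] / D - 1.0, 2)
    cy = np.expand_dims((X_OD[..., 1] - X_OD[..., 2]) / (np.sqrt(3.0) * D), 2)
    D = np.expand_dims(D, 2)
    X_HSD = np.concatenate((cx, cy, D), 2)
    return X_HSD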


def HSD2RGB2(X_HSD):
    X_HSD_0 = X_HSD[..., 2]
    X_HSD_1 = X_HSD[..., 0]
    X_HSD_2 = X_HSD[..., 1]
    D_R = np.expand_dims(np.multiply(X_HSD_1 + 1, X_HSD_0), 2)
    D_G = np.expand_dims(
        np.multiply(0.5 * X_HSD_0, 2 - X_HSD_1 + np.sqrt(3.0) * X_HSD_2), 2)
    D_B = np.expand_dims(
        np.multiply(0.5 * X_HSD_0, 2 - X_HSD_1 - np.sqrt(3.0) * X_HSD_2), 2)

    X_OD = np.concatenate((D_R, D_G, D_B), axis=2)
    X_RGB = 1.0 * np.exp(-X_OD)

    return X_RGB


if __name__ == '__main__':
    from skimage import data, io

    image = data.astronaut()  # RGB image
    # rgb2hsd / hsd2rgb are a second implementation defined elsewhere in this module
    hsd_img = rgb2hsd(image)
    rgb_img = hsd2rgb(hsd_img)
    hsd_img2 = RGB2HSD2(image)
    rgb_img2 = HSD2RGB2(hsd_img2)
    io.imshow_collection([image, hsd_img, rgb_img, hsd_img2, rgb_img2])
    io.show()
Example #13
def show_imgs_skimage(imgs):
    io.imshow_collection(imgs)
    io.show()
Example #14
# Editing an image
# Applying filters (using the filters module)
# assists in various thresholding techniques
# assists in applying numerous filter algorithms to an image

from skimage import filters
from skimage import data, io

image = data.astronaut()
image_median = filters.median(image)  # median returns a smoothed image

io.imshow_collection([image_median, image])
io.show()
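# The comments above also mention thresholding, which this snippet never shows;
# a minimal sketch of Otsu thresholding using the same modules:
gray = data.camera()                       # already-grayscale sample image
threshold = filters.threshold_otsu(gray)   # global threshold from the histogram
binary = gray > threshold                  # boolean foreground mask

io.imshow_collection([gray, binary])
io.show()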
Example #15
def test_acc(image_name, nnet_model=nnet):
    angle_indicator = int(image_name.split('_')[1])
    # scipy.misc.imread is removed in modern SciPy; imageio.v2.imread is the usual replacement
    image = misc.imread('test_sample/' + image_name + '.jpg', flatten=True).astype(float)
    image_rgb = misc.imread('test_sample/' + image_name + '.jpg')
    image_float = image_rgb.astype(float)
    image_mask = misc.imread('train_masks/' + image_name + '_mask.gif', flatten=True)
    image_mask = image_mask / 255
    # io.imshow(image_mask)
    image_index = np.where(image >= 0)
    sobel = filters.sobel(image)  # working
    # io.imshow(sobel)
    sobel_blurred = filters.gaussian(sobel, sigma=1)  # working
    # io.imshow(sobel_blurred)
    canny_filter_image = canny(image / 255.)
    # io.imshow(canny_filter_image)
    # threshold_niblack_11 = filters.threshold_niblack(sobel_blurred, 201)
    # io.imshow(threshold_niblack_11)
    threshold_li = filters.threshold_li(image)
    mask_li = image > threshold_li
    # io.imshow(mask_li)
    sobel_h = filters.sobel_h(image)
    sobel_v = filters.sobel_v(image)
    laplace = filters.laplace(image)
    threshold_local_51 = filters.threshold_local(image, 51)
    mask_local_51 = image > threshold_local_51
    # io.imshow(mask_local_51)
    df = pd.DataFrame()
    df['l1_dist_y'] = abs(image_index[0] - 639.5) / 639.5
    df['l1_dist_x'] = abs(image_index[1] - 958.5) / 958.5
    df['l2_dist'] = np.sqrt(df.l1_dist_y**2 + df.l1_dist_x**2) / np.sqrt(2)
    df['grey_values'] = image.reshape((1, 1918 * 1280))[0] / 255.
    df['red_values'] = image_rgb.reshape((3, 1918 * 1280))[0] / 255.
    df['blue_values'] = image_rgb.reshape((3, 1918 * 1280))[1] / 255.
    df['green_values'] = image_rgb.reshape((3, 1918 * 1280))[2] / 255.
    df['red_float'] = image_float.reshape((3, 1918 * 1280))[0] / 255.
    df['blue_float'] = image_float.reshape((3, 1918 * 1280))[1] / 255.
    df['green_float'] = image_float.reshape((3, 1918 * 1280))[2] / 255.
    df['sobel_blurred'] = sobel_blurred.reshape((1, 1918 * 1280))[0] / 255.
    df['canny_filter_image'] = canny_filter_image.reshape((1, 1918 * 1280))[0].astype(int)
    df['sobel_h'] = sobel_h.reshape((1, 1918 * 1280))[0] / 255.
    df['sobel_v'] = sobel_v.reshape((1, 1918 * 1280))[0] / 255.
    df['laplace'] = laplace.reshape((1, 1918 * 1280))[0] / 511.
    df['threshold_local_51'] = mask_local_51.reshape((1, 1918 * 1280))[0].astype(int)
    # df['threshold_niblack_11'] = threshold_niblack_11.reshape((1, 1918 * 1280))[0]
    df['threshold_li'] = mask_li.reshape((1, 1918 * 1280))[0].astype(int)
    for i in range(1, 17):
        if i == angle_indicator:
            df['angle_indicator_' + str(i)] = 1
        else:
            df['angle_indicator_' + str(i)] = -1
    df['mask'] = image_mask.reshape((1, 1918 * 1280))[0]
    df['pred_mask'] = nnet_model.predict(
        X=df[[col for col in df.columns if col != 'mask']])
    z = skm.confusion_matrix(df['mask'], df['pred_mask'])
    accuracy = 100 * (z[0][0] + z[1][1]) / float(sum(sum(z)))
    print('Accuracy:', accuracy)
    precision = 100 * z[1][1] / float(z[0][1] + z[1][1])
    print('Precision:', precision)
    recall = 100 * z[1][1] / float(z[1][0] + z[1][1])
    print('Recall:', recall)
    act_mask = np.array(df['mask']).reshape((1280, 1918))
    pred_mask = np.array(df['pred_mask']).astype(float).reshape((1280, 1918))
    io.imshow_collection([image_rgb, act_mask, pred_mask])
    io.show()
    return df
Example #16
# RGB images can be converted to grayscale and vice versa
# Computational complexity is reduced when grayscale images are used

from skimage import data, color, io

# [height, width, channel] (a grayscale image has no channel axis, i.e. no color information)
image = data.astronaut()

gray = color.rgb2gray(image)  # conversion to grayscale
rgb = color.gray2rgb(gray)  # back to 3 channels (renamed so the `color` module is not shadowed)

print(gray.shape)
print(rgb.shape)

io.imshow_collection([gray, rgb, image])
io.show()
Example #17
# `sequence_frames_ids` here comes from an earlier block that is not shown; it is redefined for seq_2 below
frames_rgb_fileNames = ['data/test_videos_b/seq_1/frame_%d.jpg' % sequence_frame_id for sequence_frame_id in sequence_frames_ids]
frames_rgb_seq_end = io.imread_collection(frames_rgb_fileNames, conserve_memory=True)
# reading seq_link
SEQUENCE_NUM_FRAMES_LENGTH = 148
SEQUENCE_NUM_FRAMES_OFFSET = 0
SELECTED_FRAMES_IDS = [0, 147]
sequence_frames_ids = [id + SEQUENCE_NUM_FRAMES_OFFSET for id in list(range(SEQUENCE_NUM_FRAMES_LENGTH))]
frames_rgb_fileNames = ['data/test_videos_b/seq_2/frame_%d.jpg' % sequence_frame_id for sequence_frame_id in sequence_frames_ids]
frames_rgb_seq_link = io.imread_collection(frames_rgb_fileNames, conserve_memory=True)


a = sample_representative_frames(frames_rgb_seq_link, frames_rgb_seq_init[-1], frames_rgb_seq_end[0])
a = 0  # no-op; handy as a debugger breakpoint anchor
if True:
    selected_frames = [frames_rgb[selected_frame_id] for selected_frame_id in SELECTED_FRAMES_IDS]
    io.imshow_collection(selected_frames)
a = 0

frames = [rgb2gray(frame) for frame in frames_rgb]
# estimate correct frame order
astar_pl = astar_planner(frames)
f_heuristic = estimate_framesPair_OF
path = astar_pl.search(0, SEQUENCE_NUM_FRAMES_LENGTH-1, f_heuristic, aproximate_frames_number=10)
print(astar_pl.costMatrix_g_inc)
print(astar_pl.costMatrix_g)
print(astar_pl.costMatrix_h)
print(astar_pl.costMatrix_f_hat)
io.imshow_collection([frames_rgb[image_i] for image_i in path])
Example #18


    # skio.imshow_collection([img_left, img_right, ground_truth])
    # plt.show()
    # print("Image resolution: {}".format(img_left.shape[::-1]))
    # max_disp = np.max(ground_truth)
    # print("Max disparity: {}".format(max_disp))

    # Initialise a MRF and calculate some (possibly sub-optimal) disparity assignment
    img_res = img_left.shape
    # img_left = np.concatenate([img_left[0, 100] * np.ones((1, img_left.shape[1])), img_left[:-1, :]], axis=0)
    mrf = StereoMRF(img_res, n_levels=55 + 1)
    disp_map = mrf.lbp(img_left / 255., img_right / 255., n_iter=15)
    # disp_map[disp_map <= 5] = 0
    # plt.imshow(disp_map, cmap=cm.jet)
    # plt.colorbar()
    skio.imshow_collection([disp_map, img_right, img_left])
    plt.show()

    # Run the LBP algorithm again with the same image pair but provide a retina-like prior,
    # sampled from the ground truth + noise
    # prior_density = 0.01
    # edges_mask = skft.canny(ground_truth.astype('float'), sigma=2)
    # prior = ground_truth * (np.random.uniform(size=img_left.shape) <= prior_density)
    # prior = prior * edges_mask
    # prior[prior == 0] = max_disp + 2
    # skio.imshow(prior)
    # plt.show()

    # disp_map_with_prior = mrf.lbp(img_left, img_right, prior=None, prior_trust_factor=0, n_iter=10)
    # skio.imshow_collection([disp_map, disp_map_with_prior, prior])
    # plt.show()
Example #19
from skimage import data, io, filters


def getPixelColor(img, x, y):
    '''
        TODO: use threading;
        check for out-of-range accesses;
        compute the color;
        return the color;
    '''
    return (0, 0, 0)  # (R, G, B)


image = data.chelsea()
width, height, s = image.shape

for i in range(0, width):
    for j in range(0, height):
        image[i, j] = getPixelColor(image, i, j)

io.imshow_collection((data.chelsea(), image))
io.show()
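# The docstring above is a TODO list and the stub paints every pixel black.
# One possible completion (an assumption, not the original intent): a 3x3 box
# blur with the bounds check the TODO asks for, reading from an untouched copy:
import numpy as np

def getPixelColor(img, x, y):
    # Average the 3x3 neighborhood around (x, y), clipped at the image borders
    x0, x1 = max(0, x - 1), min(img.shape[0], x + 2)
    y0, y1 = max(0, y - 1), min(img.shape[1], y + 2)
    return img[x0:x1, y0:y1].mean(axis=(0, 1)).astype(img.dtype)

image = data.chelsea()
source = image.copy()  # read from an unmodified copy so written pixels don't feed back

for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        image[i, j] = getPixelColor(source, i, j)

io.imshow_collection((data.chelsea(), image))
io.show()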
Example #20
from skimage import io, external, img_as_ubyte
import os
import numpy as np
import matplotlib.pyplot as plt

path = "images/fluo-series"
list_L = []  #tworzy liste

for fn in os.listdir(path):
    obraz = io.ImageCollection(path + '/' + fn)
    io.imshow_collection(obraz)
    io.show()
    list_L.append(obraz)

for image in list_L:
    #print(image[0].shape)
    temp = np.transpose(image[0])
    #print(temp.shape)
    plt.imshow(temp)
    plt.show()

path1 = "images/input2/multipage_rgb.tif"
ndimage = external.tifffile.imread(path1)
print(ndimage.shape)
external.tifffile.imshow(ndimage)
plt.show()

print(img_as_ubyte(ndimage))
print(ndimage.dtype)
Example #21
# Adjusting brightness
# The exposure module analyzes and adjusts image light intensity, e.g. via histograms
from skimage import exposure, io, data

image = data.rocket()
image_bright = exposure.adjust_gamma(image, gamma=0.5)  # gamma < 1 brightens (default gamma is 1)
image_dark = exposure.adjust_gamma(image, gamma=2)  # gamma > 1 darkens

io.imshow_collection([image, image_bright, image_dark])
io.show()
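# Since the comment mentions histograms, a minimal sketch of histogram-based
# adjustment with the same module (standard histogram equalization):
equalized = exposure.equalize_hist(image)              # spread intensities over the full range
hist, bin_centers = exposure.histogram(image[..., 0])  # raw histogram of the red channel

io.imshow_collection([image, equalized])
io.show()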
Example #22
                Delta_3_array.append(delta_percent_3)
                abs_rel_array.append(abs_rel)
                sqr_rel_array.append(sqr_rel)
                rmse_lin_array.append(rmse_lin)
                rmse_l_array.append(rmse_l)

Delta_1_avg = sum(Delta_1_array) / len(Delta_1_array)
Delta_2_avg = sum(Delta_2_array) / len(Delta_2_array)
Delta_3_avg = sum(Delta_3_array) / len(Delta_3_array)
abs_rel_avg = sum(abs_rel_array) / len(abs_rel_array)
sqr_rel_avg = sum(sqr_rel_array) / len(sqr_rel_array)
rmse_lin_avg = sum(rmse_lin_array) / len(rmse_lin_array)
rmse_l_avg = sum(rmse_l_array) / len(rmse_l_array)

print("Delta_1_avg {0}".format(Delta_1_avg))
print("Delta_2_avg {0}".format(Delta_2_avg))
print("Delta_3_avg {0}".format(Delta_3_avg))
print("abs_rel_avg {0}".format(abs_rel_avg))
print("sqr_rel_avg {0}".format(sqr_rel_avg))
print("rmse_lin_avg {0}".format(rmse_lin_avg))
print("rmse_l_avg {0}".format(rmse_l_avg))
print("Number of images {0}".format(len(Delta_1_array)))

result_images = [
    "test_image.jpg", "input_image.jpg", "depth_pred_inter.jpg",
    "sliced_depth_gt.jpg"
]
result_col = io.imread_collection(result_images)
io.imshow_collection(result_col)
io.show()
Example #23
def test_collection():
    # 'test' is a stub plugin registered by the surrounding test suite
    ic = io.imread_collection('*.png', conserve_memory=False, plugin='test')
    io.imshow_collection(ic)
Example #24
import numpy as np
from sklearn.neighbors import NearestNeighbors
from glob import glob
import os
from skimage import io
import matplotlib.pyplot as plt

""" Use kNN as baseline algorithm for finidng nearest neighbors.  Validate results by observing classification error.  
The concept being that a model with less classification error will find nearest neighbors better as well."""

imgs = np.load(r'D:\pycharm_projects\AWSgeo\data.npy')
# imgs_labels = np.load(r'D:\pycharm_projects\AWSgeo\labels.npy')

logits = np.load(r'D:\pycharm_projects\AWSgeo\Tensorboard\model_2019-12-18-08-45-59\data_logits.npy')

############## sklearn ##############
rand_arrange = np.random.permutation(len(logits))
ind = -1
neigh = NearestNeighbors(n_neighbors=5)
neigh.fit(logits[rand_arrange[:-1000]])  # hold out the last 1000 shuffled samples
knns = neigh.kneighbors(logits[rand_arrange[ind]].reshape(1, -1), 6, return_distance=False)

plt.figure()
plt.imshow(imgs[rand_arrange[ind]])
io.imshow_collection(imgs[rand_arrange[knns[0]]])
io.show()
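# The docstring above mentions validating via classification error, which this
# snippet never computes; a sketch of that check, assuming the commented-out
# labels file exists and aligns with `imgs`:
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

imgs_labels = np.load(r'D:\pycharm_projects\AWSgeo\labels.npy')  # path taken from the comment above

X_train, X_test, y_train, y_test = train_test_split(
    logits, imgs_labels, test_size=0.2, random_state=0)
clf = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
error = 1.0 - clf.score(X_test, y_test)  # classification error of the kNN baseline
print("kNN classification error: {:.3f}".format(error))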




########## openCV ##################

Example #26
def view_slices(mri: str = 'brain', count: int = 12):
    subjs = choices(subjects.subjects, k=count)
    subjs = [sub.load_mri(mri)[:, 128, :] for sub in subjs]

    imshow_collection(subjs, cmap='gray')
    plt.show()