Example #1
def get_images(phrases, whitelist=None, withParts=False):
    # Get the list of matching filenames, import the images, and construct an
    # ADESubset object. imread_collection is used instead of the ADEImage
    # classmethod because it handles caching across many images better.
    img_paths, segmap_paths, folder_paths = get_filepaths(
        phrases, whitelist, withParts)
    imgs = imread_collection(img_paths)
    # Caveat: segmap_paths may contain sublists for images with _parts_* maps
    # -- make sure to test for this.
    segmaps = imread_collection(segmap_paths)

    if whitelist is None:
        return ADESubset(imgs, img_paths, segmaps, segmap_paths, folder_paths)

    # ImageCollection does not support item assignment, so work on a list.
    segmaps = list(segmaps)
    whitelisted_folder_paths = []
    for i, folder in enumerate(folder_paths):
        new_segmap_path, whitelisted_folder_path = _knockout_segmap(
            segmaps[i], folder, whitelist)
        segmaps[i] = imread(new_segmap_path)
        segmap_paths[i] = new_segmap_path
        whitelisted_folder_paths.append(whitelisted_folder_path)

    return ADESubset(imgs, img_paths, segmaps, segmap_paths, folder_paths,
                     whitelisted_folder_paths)
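The caching note in the comments refers to skimage's lazy ImageCollection. A minimal sketch of that behaviour (the glob pattern is hypothetical):

from skimage.io import imread_collection

# With conserve_memory=True (the default), the collection keeps at most one
# loaded image in memory at a time; files are read lazily on first access.
coll = imread_collection('ADE20K/images/*.jpg', conserve_memory=True)
first = coll[0]     # read from disk on first access, then cached
paths = coll.files  # the resolved list of matched filenames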
Example #2
def escolhe_base(base_escolhida):
    if base_escolhida == 0:
        # PH2 dataset
        imagens = imread_collection('IMAGENS/PH2PROPORCIONAL128/imagens/*')
        mascaras_medico = imread_collection('IMAGENS/PH2PROPORCIONAL128/mascaras/*')
    elif base_escolhida == 1:
        # DermIS dataset
        melanoma_imagens = imread_collection('IMAGENS/DERMIS128/melanoma/*orig*')
        melanoma_mascaras_medico = imread_collection('IMAGENS/DERMIS128/melanoma/*contour*')

        notmelanoma_imagens = imread_collection('IMAGENS/DERMIS128/notmelanoma/*orig*')
        notmelanoma_mascaras_medico = imread_collection('IMAGENS/DERMIS128/notmelanoma/*contour*')

        imagens = np.concatenate((melanoma_imagens, notmelanoma_imagens), axis=0)
        mascaras_medico = np.concatenate((melanoma_mascaras_medico, notmelanoma_mascaras_medico), axis=0)
    elif base_escolhida == 2:
        # ISIC 2018 dataset
        melanoma_imagens = imread_collection('IMAGENS/ISIC2018-128/MELANOMA/*')
        melanoma_mascaras_medico = imread_collection('IMAGENS/ISIC2018-128/MASKMELANOMA/*')

        notmelanoma_imagens = imread_collection('IMAGENS/ISIC2018-128/NMELANOMA/*')
        notmelanoma_mascaras_medico = imread_collection('IMAGENS/ISIC2018-128/MASKNMELANOMA/*')

        imagens = np.concatenate((melanoma_imagens, notmelanoma_imagens), axis=0)
        mascaras_medico = np.concatenate((melanoma_mascaras_medico, notmelanoma_mascaras_medico), axis=0)
    else:
        # Avoid returning undefined names for an unknown dataset index.
        raise ValueError("Choose an image dataset (0, 1 or 2)")

    return np.array(imagens), np.array(mascaras_medico)
Example #3
 def load_set(self, repim, repmask):
     """Load the images and masks for the train set.

     The image width and height must support the successive convolutions.
     """
     self.images = io.imread_collection(repim, plugin='tifffile')
     self.masks = io.imread_collection(repmask, plugin='tifffile')
Example #4
def load_modified_cifar10():
    class_regex = re.compile(r'.*\\(\d)\\.*')
    train_data = io.imread_collection('CIFAR10\\Train\\*\\*.png')
    test_data = io.imread_collection('CIFAR10\\Test\\*\\*.png')

    train_labels = np.array([
        int(class_regex.match(path).group(1)) for path in train_data.files
    ])[:, None]
    test_labels = np.array([
        int(class_regex.match(path).group(1)) for path in test_data.files
    ])[:, None]
    # To verify that the dataset looks correct
    # class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
    # plt.figure(figsize=(10, 4))
    # for i,j in product(range(10), range(1)):
    #     plt.subplot(2, 5, i+j + 1)
    #     plt.xticks([])
    #     plt.yticks([])
    #     plt.grid(False)
    #     plt.imshow(train_data[i*1000+j],cmap='gray', vmin=0, vmax=255)
    #     plt.xlabel(class_names[train_labels[i*1000+j][0]])
    # plt.show()

    train_data_processed = np.stack(train_data).astype(float) / 255
    train_data_processed = train_data_processed.reshape((-1, 28, 28, 1))
    test_data_processed = np.stack(test_data).astype(float) / 255
    test_data_processed = test_data_processed.reshape((-1, 28, 28, 1))
    return train_data_processed, train_labels, test_data_processed, test_labels
Example #5
def load_data():
    """
    Loads data from the directory path_hr and path_lr
    :return: x, y
    """
    temp_x = io.imread_collection(path_lr)
    print("loaded_x")
    temp_y = io.imread_collection(path_hr)
    print("loaded_y")

    xx = list(temp_x)
    yy = list(temp_y)

    # Keep every third image (this slices the image list, not the channels)
    xx = xx[0::3]
    yy = yy[0::3]

    x = np.array(xx)
    print("shape of x data", x.shape)

    y = np.array(yy)
    print("shape of y data", y.shape)

    print("data loaded")
    return x, y
Example #6
def load_images():
    '''
    input: None
    output: all_classes (image collection), mergers (image collection)
    '''
    # gets list of files for one example image for each class, and more merger examples
    new_names = [name.lower().replace(' ', '_') for name in names]
    all_class_files = [path_to_project_presentation+'{}0.jpg'.format(name) for name in new_names]
    merger_files = [path_to_project_presentation+'merger{}.jpg'.format(i+1) for i in range(6)]
    # loads image collections
    all_classes = io.imread_collection(all_class_files)
    mergers = io.imread_collection(merger_files)
    return all_classes, mergers
Example #7
def pose_category(dataset, dataset_csv):
    pose = info_angle(dataset_csv)
    pose_classes = {"Half Left" : list(imread_collection(itemgetter(*pose["Half Left"])(dataset)).files),
                    "Straight" : list(imread_collection(itemgetter(*pose["Straight"])(dataset)).files),
                    "Half Right" : list(imread_collection(itemgetter(*pose["Half Right"])(dataset)).files),
                    "Right" : list(imread_collection(itemgetter(*pose["Right"])(dataset)).files),
                    "Left" : list(imread_collection(itemgetter(*pose["Left"])(dataset)).files)}
    y_pose = {"Half Left" : dataset_csv.loc[dataset_csv.index.isin(pose["Half Left"])].Encoding_expression,
              "Straight" : dataset_csv.loc[dataset_csv.index.isin(pose['Straight'])].Encoding_expression,
              "Half Right" : dataset_csv.loc[dataset_csv.index.isin(pose["Half Right"])].Encoding_expression,
              "Right" : dataset_csv.loc[dataset_csv.index.isin(pose["Right"])].Encoding_expression,
              "Left" : dataset_csv.loc[dataset_csv.index.isin(pose["Left"])].Encoding_expression}
    return pose_classes, y_pose   
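The itemgetter(*indices)(seq) idiom above pulls several positions out of a sequence in one call; a minimal illustration:

from operator import itemgetter

paths = ['a.png', 'b.png', 'c.png', 'd.png']
picked = itemgetter(*[0, 2])(paths)  # -> ('a.png', 'c.png')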
Example #8
def displayGabors():
    collection1 = io.imread_collection("images/exercise3/base/*.jpg")
    collection2 = io.imread_collection("images/exercise3/query/*.jpg")
    images1 = list(collection1)
    images2 = list(collection2)
    images = images1 + images2
    for image in images:
        plt.figure(figsize=(10.0 * scale, 5.0 * scale), dpi=80)
        gabor_image = applyGaborsFilters(image)
        plt.subplot(1, 2, 1)
        plt.imshow(image)
        plt.subplot(1, 2, 2)
        plt.imshow(gabor_image, cmap='jet', vmin=0.0, vmax=0.5)
        plt.show()
Example #9
def contact_sheet(filename, df, sample_size=-1):

    size = len(df)

    if sample_size > 0:
        size = min(sample_size, size)
        rnd_idx = np.random.permutation(len(df))[:size]
        df = df.iloc[rnd_idx, :]

    df = df.reset_index(drop=True)

    x1 = np.array(io.imread_collection(df.loc[:size - 1, "X1"]))
    x2 = np.array(io.imread_collection(df.loc[:size - 1, "X2"]))

    x1_frame = "frame : " + df.X1.str.split("/").str[-1].str.split(
        "--").str[1].str.split(".").str[0]
    x1_id = "id : " + df.X1.str.split("/").str[-1].str.split(
        "--").str[0].str.split("bee").str[1]

    x2_frame = "frame : " + df.X2.str.split("/").str[-1].str.split(
        "--").str[1].str.split(".").str[0]
    x2_id = "id : " + df.X2.str.split("/").str[-1].str.split(
        "--").str[0].str.split("bee").str[1]

    y = df.y_pred.round(2)
    columns = 20
    rows = int(size / 10.0) + 1
    j = 0
    fig = plt.figure(figsize=(40, 4 * rows))
    for x in range(size):

        fig.add_subplot(rows, columns, j + 1)
        plt.imshow(x1[x, :, :, :])
        plt.xlabel(x1_id[x] + " | SC: " + str(y[x]))
        plt.title(x1_frame[x])
        plt.xticks([], [])
        plt.yticks([], [])

        fig.add_subplot(rows, columns, j + 2)
        plt.imshow(x2[x, :, :, :])
        plt.xlabel(x2_id[x] + " | SC: " + str(y[x]))
        plt.title(x2_frame[x])
        plt.xticks([], [])
        plt.yticks([], [])

        j += 2

    plt.show()
    fig.savefig(filename)
Example #10
def batch_mil_sampling(imagelist, region_result_npy, mil_data_save_dir,
                       class_name, class_dict, model_ft):
    images = io.imread_collection(imagelist)
    images = np.stack(images)
    #    images_torch = Variable(torch.from_numpy(images.copy().transpose((0,3, 1, 2))).float().div(255).cuda())
    model_ft.eval()
    with torch.no_grad():
        test_epoch = test.Test_epoch_from_array(model_ft, images, 256)
        output_predict = test_epoch.predict()
    output_order = np.argsort(output_predict[:, class_dict[class_name]])[::-1]
    if class_dict[class_name] == 0:
        # weight = 0.5
        output_select = output_order[:int(0.5 * len(output_order))]
    else:
        # weight = 0.2
        output_select = output_order[int(0.3 * len(output_order)):
                                     int(0.5 * len(output_order))]
    for i in range(len(output_select)):
        os.system('cp ' + imagelist[output_select[i]] + ' ' +
                  os.path.join(mil_data_save_dir, class_name))
        file_name = os.path.basename(imagelist[output_select[i]]).split('.')[0]
        # e.g. TCGA-5M-AAT6-01Z-00-DX1-98_6_3_0_M_norm.png to TCGA-5M-AAT6-01Z-00-DX1-98_6_3_0_M_norm
        x = int(file_name.split('-')[-1].split('_')[0])
        # e.g. TCGA-5M-AAT6-01Z-00-DX1-98_6_3_0_M_norm to 98
        y = int(file_name.split('-')[-1].split('_')[1])
        # e.g. TCGA-5M-AAT6-01Z-00-DX1-98_6_3_0_M_norm to 6
        y_nd = int(file_name.split('-')[-1].split('_')[2])
        # e.g. TCGA-5M-AAT6-01Z-00-DX1-98_6_3_0_M_norm to 3
        y_nd_i = int(file_name.split('-')[-1].split('_')[3])
        region_result_npy[y * y_nd + y_nd_i,
                          x] = int(class_dict[class_name]) + 1

    return region_result_npy
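A standalone sketch of the filename parsing documented in the comments above:

# The tile coordinates are encoded after the last '-' of the basename:
file_name = 'TCGA-5M-AAT6-01Z-00-DX1-98_6_3_0_M_norm'
tail = file_name.split('-')[-1]                             # '98_6_3_0_M_norm'
x, y, y_nd, y_nd_i = (int(v) for v in tail.split('_')[:4])  # 98, 6, 3, 0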
Example #11
    def __getitem__(self, vid_idx):
        """ Arguments:
            vid_idx(int): Video Index to be fetched form the video list
        """
        vid_name_path = os.path.join(self.root_dir,
                                     self.vot_list.iloc[vid_idx, 0], '*.jpg')

        gt = pd.read_csv(os.path.join(self.root_dir,
                                      self.vot_list.iloc[vid_idx, 0],
                                      'groundtruth.txt'),
                         header=None)

        im_seq = imread_collection(vid_name_path)

        # Image collection to np.array
        images = concatenate_images(im_seq)  # Shape(Nr. of images, h, w, RGB)

        # Also convert the gt to np.array
        gt = gt.values

        sample = {'Video': images, 'gt': gt}

        # Can't tell yet what this is for
        if self.transform:
            sample = self.transform(sample)
        return sample
Example #12
def transform_database(parameters):
    path_output = parameters.PATH_OUTPUT + '/label_images.bin'

    collection = io.imread_collection(parameters.NAME_IMAGES)

    res = parallel.apply_parallel(
        collection, collection.files, parameters.LABELS,
        transform_database_parallel, {
            'preprocessing': parameters.PREPROCESSING,
            'im_size1': parameters.NEW_IMAGE_SIZE1,
            'im_size2': parameters.NEW_IMAGE_SIZE2,
            'num_channels': parameters.NUM_CHANNELS
        })

    vector_images = []
    files = []
    for cont, e in enumerate(res):
        vector_images.append(e[0])
        files.append(e[1])
        parameters.LABELS[cont] = e[2]
    parameters.NAME_IMAGES = files
    parameters.IMAGE_SIZE1 = res[0][3][0]
    parameters.IMAGE_SIZE2 = res[0][3][1]

    vector_images = np.asarray(vector_images)
    vector_images.tofile(path_output)
Example #13
def generate_centers_image():
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # get image info
    image = imread_collection(image_sample, plugin='tifffile')
    im_size = image[0].shape[::-1]

    # read bbxs file
    assert os.path.isfile(feature_table), '{} not found!'.format(feature_table)
    # if file exist -> load
    table = pd.read_csv(feature_table, sep=',')
    centers = table[['centroid_x', 'centroid_y']].values
    # ICE file requires different format, restore to original format
    centers[:, 1] = im_size[1] - centers[:, 1]

    # generate image for all the cells
    center_image(os.path.join(save_dir, 'all.tif'),
                 centers,
                 im_size,
                 r=center_size)

    for name, thresh in biomarker_dict.items():
        bioM = table[table[name] >= thresh]
        centers = bioM[['centroid_x', 'centroid_y']].values
        # ICE file requires different format, restore to original format
        centers[:, 1] = im_size[1] - centers[:, 1]
        center_image(os.path.join(save_dir, name + '.tif'),
                     centers,
                     im_size,
                     r=center_size)
Example #14
def load_images_data(data_path, rescale_ratio=1):
    ic = imread_collection(data_path)
    imgs = []
    for f in ic.files:
        image = imread(f, as_gray=True)
        if rescale_ratio != 1:
            image = rescale(image,
                            rescale_ratio,
                            anti_aliasing=False,
                            preserve_range=True)
        imgs.append(image)

    sample_img = imgs[0]
    img_height, img_width = imgs[0].shape
    pxl_num = img_height * img_width
    channel_len = len(imgs)

    # prepare dataset
    channel = []
    location = []
    y_true = [-1] * pxl_num
    for h in range(img_height):
        for w in range(img_width):
            data = []
            for i in range(channel_len):
                data.append(imgs[i][h][w])
            channel.append(data)
            location.append((w + 1, h + 1))

    test_dataset = FolioDataset(location, channel, y_true)

    return test_dataset, sample_img
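For reference, the per-pixel channel matrix built by the nested loops above can be produced in one vectorized step (a sketch; imgs, img_height and img_width are as defined in the function):

import numpy as np

stack = np.stack(imgs)                    # (n_channels, H, W)
channel = stack.reshape(len(imgs), -1).T  # (H*W, n_channels), row-major order
location = [(w + 1, h + 1) for h in range(img_height) for w in range(img_width)]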
Example #15
def masks_merge(masks_files, label=False, flag_resize=False, new_shape=0):
    """
    Description: Merge all mask images into one.

    Args
    ----
    masks_files: Path (glob pattern) to the mask files.

    label: bool. If True, paint each mask with a distinct integer label.

    flag_resize: bool. If True and new_shape != 0, resize masks to new_shape.

    Returns
    -------
    image: A matrix of the merged image.
    """
    masks = imread_collection(masks_files)
    # check resize option.
    if flag_resize and new_shape != 0:
        masks = list(map(lambda x:resize(x, new_shape, mode='constant', preserve_range=True), masks))
    # print(masks[0].shape)
    if masks:
        shape = masks[0].shape
    else:
        raise IOError("Masks io error: no masks found for " + str(masks_files))
    image = np.zeros(shape, np.int8 if label else bool)
    sign = 0 if label else 1
    for mask in masks:
        sign = sign+1 if label else sign
        image[mask > 0] = sign
    return image
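A usage sketch, assuming one binary mask file per object instance (the glob pattern is hypothetical):

union = masks_merge('masks/case01/*.png')                  # boolean union of all masks
instances = masks_merge('masks/case01/*.png', label=True)  # masks painted 1, 2, 3, ...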
Example #16
def test_null_imread_collection():
    # Note that the null plugin doesn't define an `imread_collection` plugin
    # but this function is dynamically added by the plugin manager.
    path = os.path.join(data_dir, '*.png')
    with warnings_as_errors():
        collection = io.imread_collection(path, plugin='null')
        collection[0]
Example #17
 def load(self):
     images = io.imread_collection(self.root+'*.jpg')
     
     for (image, fn) in zip(images, images.files):
         name = self.getFileName(fn)
         print(name)
         self.resize1(image, name)
Example #18
 def convert_to_3D_png(self, in_path, out_path):
     print("reading.")
     imgs = io.imread_collection(in_path)
     print("concatenating")
     imgs = collection.concatenate_images(imgs)
     print("writing")
     np.save(out_path, imgs)
Example #19
 def __getitem__(self, vid_idx, T = 10):
     """ Arguments:
         vid_idx(int): Video index to be fetched from the video list
         T(int): Nr of images in sequence - default == 10
     """
     gt = pd.read_csv(os.path.join(self.root_dir, 
                                   self.vot_list.iloc[vid_idx,0],
                                   'groundtruth.txt'), header = None)

     vid_name_path = os.path.join(self.root_dir, 
                                  self.vot_list.iloc[vid_idx,0],
                                  '*.jpg')

     # Sort so the frames are in sequence order.
     file_names = sorted(glob.glob(vid_name_path))

     rand_start = np.random.randint(0, len(file_names)-T+1)

     # Slice exactly T frames.
     file_names = file_names[rand_start:rand_start+T]

     im_seq = imread_collection(file_names)

     # Image collection to np.array
     images = concatenate_images(im_seq)  # Shape(Nr. of images, h, w, RGB)

     # Also convert the gt to np.array
     gt = gt.values
     gt = gt[rand_start:rand_start+T, :]

     sample = {'Video': images, 'gt': gt}

     # Can't tell yet what this is for
     if self.transform:
         sample = self.transform(sample)
     return sample
Example #20
 def __init__(self, filename, transform):
     # read images to memory as np arrays
     file_spec = '*.jpg'
     file_path = os.path.join(filename, file_spec)
     self.images = io.imread_collection(file_path)
     self.transform = transform
     self.bw_transform = transforms.Grayscale(num_output_channels=1)
Example #21
def test_imread_collection_single_MEF():
    io.use_plugin('fits')
    testfile = os.path.join(data_dir, 'multi.fits')
    ic1 = io.imread_collection(testfile)
    ic2 = io.ImageCollection([(testfile, 1), (testfile, 2), (testfile, 3)],
                             load_func=fplug.FITSFactory)
    assert _same_ImageCollection(ic1, ic2)
Example #22
def readOneRing(ringindex, FILEPATH, FILENAME, TRANSPOSE):
    # Read in all images corresponding to a particular chemostat ring
    # Inputs are tiff images whose filenames are of the form '...ringindex_*_imagenumber.tiff'
    # where 'ringindex' and 'imagenumber' identify the ring and sequence number

    # Output is an image collection, max and min values, and the number of files
    # Output image collection shape is (sequence, x ,y)

    listfiles = glob.glob(FILEPATH + FILENAME + '_' + str(int(ringindex)) +
                          '_*.tiff')
    numberoffiles = len(listfiles)

    # Sort filenames by sequence number of images
    listfiles = sorted(listfiles,
                       key=lambda x: float(re.findall(r'\d\d\d\d\d\.', x)[0]))

    # For the new Hama camera the images must be transposed 90 degrees
    ic = np.transpose(skio.imread_collection(listfiles), axes=TRANSPOSE)

    max_ind = np.zeros(numberoffiles)  # Report max value
    min_ind = np.zeros(numberoffiles)  # Report min value

    for i in range(numberoffiles):
        max_ind[i] = np.max(ic[i, :, :])
        min_ind[i] = np.min(ic[i, :, :])

    return ic, max_ind, min_ind, numberoffiles, listfiles
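The sorted(..., key=...) call above orders the filenames by their five-digit sequence number; a minimal illustration with hypothetical names:

import re

files = ['ring_1_00010.tiff', 'ring_1_00002.tiff', 'ring_1_00001.tiff']
files = sorted(files, key=lambda x: float(re.findall(r'\d\d\d\d\d\.', x)[0]))
# -> ['ring_1_00001.tiff', 'ring_1_00002.tiff', 'ring_1_00010.tiff']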
Example #23
    def load_images(self, filenames):
        '''
        Loads microscope images of the sample as numpy arrays and saves the
        image metadata. Make sure filenames and image_pathes are set so that
        the images can be loaded.
        '''
        # Setting up variables
        self.image_pathes = filenames
        self.image_count = len(self.image_pathes)
        self.image_names = [None] * self.image_count
        self.image_extensions = [None] * self.image_count
        self.images = [None] * self.image_count

        # Determining names
        for i, img in enumerate(self.image_pathes):
            _, name = img.rsplit('/', 1)
            name, extension = name.rsplit('.', 1)
            self.image_names[i] = name
            self.image_extensions[i] = extension

        # Loading the images
        if self.image_count > 1:
            self.images = io.imread_collection(self.image_pathes)
        else:
            self.images = io.imread(self.image_pathes[0])
Example #25
def prepare_images(filename_list, original_path, preprocessed_path):
    # filter the images that is already preprocessed
    need_preprocessing_list = [os.path.join(original_path, filename) for filename in filename_list if
                               not os.path.isfile(os.path.join(preprocessed_path, filename))]
    image_dict = PreprocessingManager.pre_process(io.imread_collection(need_preprocessing_list, conserve_memory=True))

    # save the preprocessed images
    for name, img in image_dict.items():
        io.imsave(os.path.join(preprocessed_path, name), img)

    # load the preprocessed images
    has_preprocessed_list = [os.path.join(original_path, filename) for filename in filename_list if
                             os.path.isfile(os.path.join(preprocessed_path, filename))]
    image_dict.update(load_images(io.imread_collection(has_preprocessed_list, conserve_memory=True)))

    return image_dict
Example #26
def test_imread_collection_single_MEF():
    io.use_plugin('fits')
    testfile = os.path.join(data_dir, 'multi.fits')
    ic1 = io.imread_collection(testfile)
    ic2 = io.ImageCollection([(testfile, 1), (testfile, 2), (testfile, 3)],
                             load_func=fplug.FITSFactory)
    assert _same_ImageCollection(ic1, ic2)
Example #27
    def load_WISH_images(self, images_dir, annotation_type="symbol"):
        """Load image files of WISH pattern

        Parameters
        ----------
        images_dir : str
            PNG files are included in this directory.
            The file name should be the gene symbol + .png.

        annotation_type : str
            Type of gene annotation.
            Examples: symbol, uid (Unigene ID)

        Returns
        -------
        self : object
            Returns the instance itself.

        """
        png_path = os.path.join(images_dir, "*.png")
        self.wish_images_ = io.imread_collection(png_path)
        self.genes_ = [
            os.path.splitext(strings)[0].split("/")[-1]
            for strings in self.wish_images_.files
        ]

        self.annotation_type_ = annotation_type

        return self
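The gene names are recovered from the paths by string splitting on '/'; an equivalent, separator-agnostic sketch (the paths are hypothetical):

import os

paths = ['WISH/shh.png', 'WISH/pax6.png']  # as returned by ImageCollection.files
genes = [os.path.splitext(os.path.basename(p))[0] for p in paths]
# -> ['shh', 'pax6']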
Example #28
def load_modified_cifar10():
    class_regex = re.compile(r'.*/(\d)/.*')
    train_data = io.imread_collection('CIFAR10/Train/*/*.png')
    test_data = io.imread_collection('CIFAR10/Test/*/*.png')
    class_encoder = OneHotEncoder(10)
    train_classes = class_encoder.fit_transform(
        np.array([
            int(class_regex.match(path).group(1)) for path in train_data.files
        ])[:, None]).toarray()
    test_classes = class_encoder.transform(
        np.array([
            int(class_regex.match(path).group(1)) for path in test_data.files
        ])[:, None]).toarray()
    train_data_processed = np.stack(train_data).astype(float) / 255
    test_data_processed = np.stack(test_data).astype(float) / 255
    return train_data_processed, train_classes, test_data_processed, test_classes
Example #29
def read_dataset(data_txt_file, image_data_path):
    """Read data into a Python dictionary.
    Args:
        data_txt_file(str): path to the data txt file.
        image_data_path(str): path to the image directory.
    Returns:
        data(dict): A Python dictionary with keys 'image' and 'label'.
            The value of dict['image'] is a numpy array of dimension (N,28,28)
            containing the loaded images.

            The value of dict['label'] is a numpy array of dimension (N,)
            containing the loaded label.

            N is the number of examples in the data split; the examples should
            be stored in the same order as in the txt file.
    """
    image = []
    label = []
    with open(data_txt_file, 'r') as f:
        for line in f:
            items = line.strip('\n').split('\t')
            image.append(image_data_path + '/' + items[0])
            label.append(int(items[1]))

    # The second positional argument is conserve_memory.
    ic = io.imread_collection(image, conserve_memory=True)
    images = io.concatenate_images(ic)
    data = {'image': images, 'label': np.asarray(label)}

    return data
Example #30
def normalize_convert_mu_sd(path, image_channel_path, scale_path, target_std,
                            target_mean, max_num, min_num):
    # legacy renaming
    dir = path
    # join each channel filename onto the directory
    joined_list = []
    for i in image_channel_path.tolist():
        joined = os.path.join(dir, i)
        joined_list.append(joined)
    # collapse the list into the standard input format for image collections
    collapsed_list = ':'.join(joined_list)
    # load the data
    image_coll = io.imread_collection(collapsed_list)

    # scale and save the data
    image_scaled = []
    for i in range(len(image_coll)):
        #image_scaled.append(StandardScaler().fit_transform(image_coll[i]))
        tmp = image_coll[i]
        #tmp = tmp.astype(np.uint16)
        # rescale to the target mean and standard deviation
        mat_ms = target_mean + (tmp - tmp.mean()) * (target_std / tmp.std())
        # trim to the allowed range
        mat_ms[mat_ms > max_num] = max_num
        mat_ms[mat_ms < min_num] = min_num
        #mat_ms = mat_ms.astype(np.int16)
        mat_ms = mat_ms * 65535
        mat_ms = mat_ms.astype(np.uint16)
        # store
        imageio.imwrite(uri=scale_path[i], im=mat_ms)
        #imageio.imwrite(uri = collapsed_list, im = mat_ms)

        print("normalized files by mean and std in: {0}".format(scale_path[i]))
Example #31
def read_mul_img():
    ic = io.imread_collection('../source/*.jpg')
    f, axes = plt.subplots(nrows=1, ncols=len(ic), figsize=(15, 10))
    for i, image in enumerate(ic):
        axes[i].imshow(image)
        axes[i].axis('off')
    plt.show()
Example #32
def imdict_fromdir(data_dir):
    """
    Load images from multiple subdirs in data_dir to dictionary

    Arguments
    ---------
    data_dir: string
        root directory containing subdirectories with images

    Returns
    ---------
    im_collection: dictionary
        dict containing sample:(dic, gfp, zoom)
    """
    data_dirs = glob.glob(data_dir + '*')
    im_collection = {}
    for d in data_dirs:
        strain = d.split('/')[-1].split('_')[0]
        im_dirs = glob.glob(d + '/*' + '.tif')
        ims = []
        for im_dir in im_dirs:
            im_name = im_dir.split('/')[-1]
            im_stack = io.imread_collection(im_dir)
            # Get channels, DIC is always first array
            dic = im_stack[0]
            gfp = im_stack[1]
            # Get zoom
            if 'x' in im_name:
                zoom = int(im_name.split('_')[-1].split('x')[0])
            else:
                zoom = 40
            ims.append((dic, gfp, zoom))
        im_collection[strain] = ims
    return im_collection
Example #33
def generate_data(dir):
    file_dir = os.path.join(DATASET_DIR, dir)
    cases = os.listdir(file_dir)
    images = np.zeros((len(cases), HEIGHT, WIDTH, 3), dtype=np.uint8)
    labels = np.zeros((len(cases), HEIGHT, WIDTH, 1), dtype=bool)

    print('Getting and resizing TRAIN images and masks ... ')

    for i, case in tqdm(enumerate(cases), total=len(cases)):
        # get and resize image
        img_path = file_dir+"\\{}\\images\\{}.png".format(case, case)
        # only need RGB, discard A channel
        img_array = imread(img_path)[:, :, :3]
        img_array = resize(img_array, (HEIGHT, WIDTH, 3),
                           mode='constant', preserve_range=True)
        images[i] = img_array

        # get and resize mask
        mask_path = file_dir + "\\{}\\masks\\*".format(case)
        mask_arrays = imread_collection(mask_path).concatenate()
        mask_arrays = np.expand_dims(mask_arrays, -1)

        label_array = np.zeros((HEIGHT, WIDTH, 1), dtype=bool)
        for j, mask_array in enumerate(mask_arrays):
            mask_array = resize(
                mask_array, (HEIGHT, WIDTH, 1), mode='constant')
            label_array = np.maximum(label_array, mask_array)
        labels[i] = label_array
    print('Done!')
    return images, labels
Example #34
def load_image(files, collection=False):
    '''
    input: files (str), collection (bool, optional, default=False)
    if collection=True, files is a list of filenames to be read in as a collection
    output: image (np array with dimensions (60,60,3) if collection=False, or dimensions (n_files, 60, 60, 3))
    '''
    if collection:
        images = io.imread_collection(files)
        # concatenate into a single (n_files, 60, 60, 3) array
        return io.concatenate_images(images)
    # assumed completion: read a single file directly
    return io.imread(files)
Example #35
 def load_data(self, dir_path):
     raw_image_collection = imread_collection(dir_path + '/*.jpg')
     for index, img in enumerate(raw_image_collection):
         image_case = ImageCases(raw_image_collection.files[index],
                                 img_as_float(img),
                                 self.feature_machines,
                                 )
         self.image_collection.append(image_case)
Example #36
def ensemble_average_images(dirin, pattern, filout):
     """A function to do an ensemble average of images and a spatial
     average of this ensemble average.

     Parameters
     ----------
     dirin: str
         Input directory
     pattern: str
         Input pattern of image names to average
     filout: str
         Output filename
     """
     #to be able to read high precision images (e.g. 16bits)
     io.use_plugin('freeimage')

     #im=io.imread("B00001_0.tif",as_grey=True)
     #plt.imshow(im)
     #plt.show()
     #pattern="B0*_0.tif"
     #dirin='/media/HDImage/SMARTEOLE/PIV-Nov2015-TIF/PIV_10ms_000lmin_05deg_z539mm_dt35us_1000im/'
     
     # automatically list the file names
     list_image=sorted( glob.glob( os.path.join( os.path.abspath(dirin), pattern ) ) )
     # Read this list
     imlist=io.imread_collection(list_image)

     # not possible to do the average like this: meanimg = sum(imlist)/len(imlist)
     #plt.imshow(imlist[0],clim=(0,1000),cmap=cm.gray)
     #plt.show() 

     # Accumulation of pixel value in float type (to avoid saturation)
     fimlist=imlist[0].astype(float)
     for i in range(len(imlist)-1):
          fimlist += imlist[i+1].astype(float)
     
     
     #plt.hist(fimlist[0].ravel(),bins=10000,range=(30,5500))
     #plt.show()     

     #mean value with float rounded with np.rint()                    
     meanimg_ens=np.rint(fimlist/len(imlist))
     #convert to integer
     meanimg_int=meanimg_ens.astype(np.uint16)
     #mean spatial value rounded with np.rint()
     meanimg_space=np.rint(np.mean(meanimg_ens))
     #convert to integer
     meanimg_space=meanimg_space.astype(np.uint16)

     # Dimensionless mean value 
     mean_out=(np.rint(meanimg_ens/np.mean(meanimg_ens))).astype(np.uint16)
     #plt.imshow(meanimg,clim=(0,100),cmap=cm.gray)
     #plt.show() 
     #io.imsave(filout,meanimg_int)
     
     # save in a file
     io.imsave(filout,mean_out)
     return mean_out
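The float accumulation above avoids integer wrap-around; a two-pixel illustration:

import numpy as np

a = np.array([200, 200], dtype=np.uint8)
b = np.array([100, 100], dtype=np.uint8)
print(a + b)                      # [44 44] -- uint8 wraps past 255
print((a.astype(float) + b) / 2)  # [150. 150.] -- the intended ensemble mean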
Example #37
def test_imread_collection_MEF_and_simple():
    io.use_plugin('fits')
    testfile1 = os.path.join(data_dir, 'multi.fits')
    testfile2 = os.path.join(data_dir, 'simple.fits')
    ic1 = io.imread_collection([testfile1, testfile2])
    ic2 = io.ImageCollection([(testfile1, 1), (testfile1, 2),
                             (testfile1, 3), (testfile2, 0)],
                             load_func=fplug.FITSFactory)
    assert _same_ImageCollection(ic1, ic2)
Example #38
def load_images(df, typ):
    '''
    input: df (pd DataFrame), typ (str) (galaxy type)
    output: list of n_images, each with dimensions (n_rows, n_cols, n_channels), filelist
    '''
    if typ == 'edge_on_disk' or typ == 'face_on_spiral':
        filelist = [path_to_project_data+'spiral_images/{}_{}.jpg'.format(ra, dec) for ra, dec in df[['RA', 'DEC']].itertuples(index=False)]
    else:
        filelist = [path_to_project_data+'{}_images/{}_{}.jpg'.format(typ, ra, dec) for ra, dec in df[['RA', 'DEC']].itertuples(index=False)]
    return io.imread_collection(filelist), filelist
Example #39
def main(argv):
    print('reading images')
    images = io.imread_collection(argv[1:],
                                  conserve_memory=False, plugin='tifffile')
    images = normalize_images(images)
    print('extracting data')
    table, df, weights = extract_properties_multi_image(images)

    print('preparing plots')
    bokeh_plot(df)
Example #40
def _main(argv=sys.argv):
    """Run trace on each of the given input files, save to profile.npz

    Parameters
    ----------
    argv : list of string, optional
        The argument vector. argv[1:] will be considered input
        filenames. Used mainly for testing.
    """
    from skimage import io
    images = io.imread_collection(argv[1:])
    profiles = list(map(trace_profile, images))
    np.savez_compressed('profiles.npz', profiles)
Example #41
def ab_load_im_fold(fold_name):
    """Load all images from a folder"""
    file_list = glob.glob(os.path.join(fold_name, '*.tif'))
    im_coll = imread_collection(file_list)
    dataset = np.ndarray(shape = (len(im_coll),img_size,img_size),
                         dtype = np.float32)    
    
    for im_idx,im in enumerate(im_coll):        
        dataset[im_idx, :, :] = skimage.img_as_float(im)
        
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
Example #42
def prepare_labeled(root='training', parallel=False):
    root = {'training': training_data_dir, 'testing': testing_data_dir}[root]

    # Create all destination directories.
    labels = product('ABC', '0123456789')
    for group, label in labels:
        path = root/group/label
        if not path.exists():
            path.mkdir(parents=True)

    ic = imread_collection('%s/*/*/*.jpg' % labeled_data_dir)
    images = ((ic[n], name, root) for n, name in enumerate(ic.files))

    if parallel:
        pool = mp.Pool(7)
        pool.map(prepare_labeled_inner, images)
    else:
        for img in images:
            prepare_labeled_inner(img)
Example #43
def generate_template(path):
    path += '/*'
    files = glob.glob(path)
    images = io.imread_collection(files)
    width = 0
    height = 0
    for img in images:
        width += img.shape[0]
        height += img.shape[1]
    width = int(round(width/len(images)))
    height = int(round(height/len(images)))
    template = zeros((width, height), dtype='float64')
    for i in range(0, len(images)):
        img = transform.resize(images[i], (width, height))
        template += img
    template /= len(images)
    thresh = threshold_otsu(template, 4)
    template = template > thresh+thresh*0.11
    return template
Example #44
 def test_collection(self):
     io.imread_collection('*.png', conserve_memory=False, plugin='test')
Example #45
    >>> image = np.vstack([edges, middle, edges])
    >>> trace_profile(image, sigma=0)
    array([ 18.,   0.,  18.])
    """
    if image.ndim > 2:
        image = image.sum(axis=0)
    if check_vertical:
        top_bottom_mean = np.mean(image[[0, image.shape[0] - 1], :])
        left_right_mean = np.mean(image[:, [0, image.shape[1] - 1]])
        if top_bottom_mean < left_right_mean:
            image = image.T
    top_distribution = nd.gaussian_filter1d(image[0], sigma)
    bottom_distribution = nd.gaussian_filter1d(image[-1], sigma)
    top_loc, top_whm = estimate_mode_width(top_distribution)
    bottom_loc, bottom_whm = estimate_mode_width(bottom_distribution)
    angle = np.arctan(np.abs(float(bottom_loc - top_loc)) / image.shape[0])
    width = int(np.ceil(max(top_whm, bottom_whm) * np.cos(angle)))
    profile = profile_line(image,
                           (0, top_loc), (image.shape[0] - 1, bottom_loc),
                           linewidth=width, mode='nearest')
    return profile


if __name__ == '__main__':
    import sys
    from skimage import io
    images = io.imread_collection(sys.argv[1:])
    profiles = list(map(trace_profile, images))
    np.savez_compressed('profiles.npz', profiles)

Example #46
import pandas as pd
from skimage import io
import trf1 as tr
import matplotlib as mpl

direct = "/Volumes/BLACK HOLE/201507 aukbgfp files/"
dnkd_ims = io.imread_collection(direct + "*huAUKBDNKD*.tif")
pegfp_ims = io.imread_collection(direct + "*pegfp*.tif")
wt_ims = io.imread_collection(direct + "*huAUKBDWT*.tif")
mpl.style.use("ggplot")
colnames = [
    "filename",
    "file-number",
    "aukb-kd",
    "size(pixels)",
    "raw-mean",
    "raw-total",
    "raw-max",
    "pre-mean",
    "pre-total",
    "pre-max",
    "post-mean",
    "post-total",
    "post-max",
    "eccentricity",
]
kds = map(tr.trf_quantify, dnkd_ims)
cos = map(tr.trf_quantify, pegfp_ims)
wts = map(tr.trf_quantify, wt_ims)
result = []
for i, (kd_fn, kd) in enumerate(zip(dnkd_ims.files, kds)):
Example #47
"""

import os
import sys

import numpy as np
from skimage import io
from skimage import transform
from tflearn.data_utils import shuffle

from model import setup_model

SIZE = (32, 32)

image_dir = os.path.abspath("images")
cat = io.imread_collection(os.path.join(image_dir, "cat/*"))
not_cat = io.imread_collection(os.path.join(image_dir, "not_cat/*"))


X_cat = np.asarray([transform.resize(im, SIZE) for im in cat])
X_not_cat = np.asarray([transform.resize(im, SIZE) for im in not_cat])

X = np.concatenate((X_cat, X_not_cat), axis=0)
# One-hot labels: column 1 = cat, column 0 = not_cat.
Y = np.zeros((X.shape[0], 2))
Y[:X_cat.shape[0], 1] = 1
Y[-X_not_cat.shape[0]:, 0] = 1

Example #48
import skimage.io as skio
import skimage.color as skcr
import skimage.transform as sktf
import numpy as np

import random as rd

data_npy = "../data/data_128_%d.npy"
label_npy = "../data/label_128_%d.npy"

cats_files = "../data/train/cat.*.jpg"
dogs_files = "../data/train/dog.*.jpg"

size = 128
shape = (size, size)

cats_all = skio.imread_collection(cats_files)
dogs_all = skio.imread_collection(dogs_files)

print "loaded all data"


def rgb2grey_reshape(img):
    img_resize = sktf.resize(img, shape)
    return skcr.rgb2grey(img_resize)


def rotate(img, angle):
    img_rotate = sktf.rotate(img, angle)
    return rgb2grey_reshape(img_rotate)

Example #49
 def test_collection(self):
     io.imread_collection("*.png", conserve_memory=False, plugin="test")
Example #50
import pandas as pd
from skimage import io
import trf1 as tr
import matplotlib as mpl

kd_ims = io.imread_collection("20150417-aukb-kd/*.tif")
con_ims = io.imread_collection("20150417-control/*.tif")
mpl.style.use("ggplot")
colnames = [
    "filename",
    "file-number",
    "aukb-kd",
    "size(pixels)",
    "raw-mean",
    "raw-total",
    "raw-max",
    "pre-mean",
    "pre-total",
    "pre-max",
    "post-mean",
    "post-total",
    "post-max",
    "eccentricity",
]
kds = map(tr.trf_quantify, kd_ims)
cos = map(tr.trf_quantify, con_ims)
result = []
for i, (kd_fn, kd) in enumerate(zip(kd_ims.files, kds)):
    for blob_data in kd:
        result.append([kd_fn, i, "kd"] + list(blob_data))
for j, (con_fn, co) in enumerate(zip(con_ims.files, cos), start=len(kd_ims)):
Example #51
def test_collection():
    ic = io.imread_collection('*.png', conserve_memory=False, plugin='test')
    io.imshow_collection(ic)
Example #52
from skimage.io import imread_collection
from skimage.filters import threshold_otsu
from skimage.transform import resize
from skimage.util import pad
import os
import numpy as np
import h5py
from tqdm import tqdm

folder = "{}/iam/**/**/*.png".format(os.getenv("DATA_PATH"))
collection = imread_collection(folder)
collection = list(collection)
w_crop = 64
h_crop = 64
c, w, h = 1, 64, 64


def gen(nb):
    X_out = np.empty((nb, c, w, h))
    for i in range(nb):
        while True:
            img = np.random.choice(collection)
            ch = min(img.shape[0], h_crop)
            cw = min(img.shape[1], w_crop)
            crop_pos_y = np.random.randint(0, img.shape[0] - ch + 1)
            crop_pos_x = np.random.randint(0, img.shape[1] - cw + 1)
            x = crop_pos_x
            y = crop_pos_y
            im = img[y:y+ch, x:x+cw]
            im = im / 255.
            im = 1 - im
Example #53
folders = os.listdir('data/train')
train = np.empty((0, 2))

for folder in folders:
    examples = [[folder, os.path.join('data/train', folder, example)] for
                example in os.listdir(os.path.join('data/train', folder))]
    train = np.concatenate((train, examples), axis=0)

np.random.seed(0)
np.random.shuffle(train)
train_y = train[:, 0]
train_x = train[:, 1]
classes = list(set(train_y))
classes.sort()
print("Reading train images...")
train_x = np.array(imread_collection(train_x, conserve_memory=False))

kf = StratifiedKFold(train_y, n_folds=3, shuffle=True)

pipeline = Pipeline([
    ('normalize_imgs', NormalizeImages(capture_percentage=.8)),
    ('resized_imgs', ResampleImages(32)),
    ('features', FeatureUnion([
        ('image', IdentityTransformer()),
        ('fft', FftTransformer())
    ])),
    ('classifier', SVC(probability=True, verbose=True))
])

print("Cross validating...")
scores = []
Example #54
import pandas as pd
from skimage import io
import trf1 as tr
import matplotlib as mpl
kd_ims = io.imread_collection('../0.2uM APH N5/*.tif')
con_ims = io.imread_collection('../control N5/*.tif')
mpl.style.use('ggplot')
colnames = ['filename', 'file-number', 'aukb-kd', 'size(pixels)',
            'raw-mean', 'raw-total', 'raw-max', 'pre-mean', 'pre-total',
            'pre-max', 'post-mean', 'post-total', 'post-max', 'eccentricity']
kds = map(tr.trf_quantify, kd_ims)
cos = map(tr.trf_quantify, con_ims)
result = []
for i, (kd_fn, kd) in enumerate(zip(kd_ims.files, kds)):
    for blob_data in kd:
        result.append([kd_fn, i, 'kd'] + list(blob_data))
for j, (con_fn, co) in enumerate(zip(con_ims.files, cos), start=len(kd_ims)):
    for blob_data in co:
        result.append([con_fn, j, 'con'] + list(blob_data))
df = pd.DataFrame(result, columns=colnames)
df.to_csv('full-dataset-aph vs control trf1 N5.csv')
Example #55
# Author : fcbruce <*****@*****.**>
#
# Time : Thu 15 Dec 2016 18:07:36
#
#

import numpy as np

import skimage.io as skio
import skimage.color as skcr
import skimage.transform as sktf

size = 128
shape = (size, size)

all_img = skio.imread_collection('../data/test/*.jpg')

print('all test images loaded')

def rgb2grey_reshape(img):
    img_resize = sktf.resize(img, shape)
    return skcr.rgb2grey(img_resize)

imgs = [rgb2grey_reshape(img).reshape(-1) for img in all_img]

data = np.array(imgs, dtype=np.float32)

print(data.shape)

np.save('../data/test_data.npy', data)
Example #56
        # sorted like 01.jpg, 02.jpg, 03.jpg and so on.
        current_category_filenames.sort()

        # Take the images from the beginning or from the end.
        if fetch_from_beginnig:
            images_filenames_to_add = current_category_filenames[:amount_of_images_to_take]
        else:
            images_filenames_to_add = current_category_filenames[-amount_of_images_to_take:]

        images_filenames_to_add = map(lambda x: os.path.join(current_category_folder_full_path, x), images_filenames_to_add)

        images_filenames_list.extend(images_filenames_to_add)

    return images_filenames_list



images_folder = 'images'
# image_categories_folders = ['buildings', 'cars', 'faces', 'food', 'people', 'trees']
# amount_of_first_images_to_take = 9

image_categories_folders = ['buildings', 'cars']
amount_of_first_images_to_take = 1

images_filenames = get_images_filenames(images_folder, image_categories_folders,
                                        amount_of_first_images_to_take)

print(images_filenames)
images = io.imread_collection(images_filenames)

centers, image_descriptors = train_classifier(images)