Example 1
import numpy as np
from skimage.measure import inertia_tensor, inertia_tensor_eigvals


def test_inertia_tensor_2d():
    image = np.zeros((40, 40))
    image[15:25, 5:35] = 1  # big horizontal rectangle (aligned with axis 1)
    T = inertia_tensor(image)
    assert T[0, 0] > T[1, 1]
    np.testing.assert_allclose(T[0, 1], 0)
    v0, v1 = inertia_tensor_eigvals(image, T=T)
    np.testing.assert_allclose(np.sqrt(v0/v1), 3, rtol=0.01, atol=0.05)
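The expected value of 3 follows from the second moments of a solid rectangle: for height h and width w the normalized central second moments are h**2 / 12 and w**2 / 12, so the square root of the eigenvalue ratio equals the side ratio w / h (the discrete pixel grid makes this only approximate, hence the rtol/atol above). A minimal arithmetic check of that relation, not part of the original test:

import numpy as np

# Solid h x w rectangle: variances along the two axes are h**2 / 12 and
# w**2 / 12, so sqrt(eigenvalue ratio) equals the side ratio w / h.
h, w = 10, 30
print(np.sqrt((w ** 2 / 12) / (h ** 2 / 12)))  # 3.0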
Example 2
import numpy as np
from skimage.measure import inertia_tensor_eigvals


def test_inertia_tensor_eigvals():
    # Floating point precision problems could make a positive
    # semidefinite matrix have an eigenvalue that is very slightly
    # negative.  Check that we have caught and fixed this problem.
    image = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
    # mu = np.array([[3, 0, 98], [0, 14, 0], [2, 0, 98]])
    eigvals = inertia_tensor_eigvals(image=image)
    assert (min(eigvals) >= 0)
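If client code cannot rely on that fix being present, the same round-off negatives can be clipped defensively before taking ratios or square roots; a minimal sketch (safe_eigvals is a hypothetical helper name):

import numpy as np
from skimage.measure import inertia_tensor_eigvals

def safe_eigvals(image):
    # Clip round-off negatives: a positive semidefinite tensor cannot have
    # truly negative eigenvalues, only numerical noise slightly below zero.
    return np.clip(inertia_tensor_eigvals(image=image), 0, None)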
Example 3
from skimage import measure


def get_inertia(mask, mu=None):
    """Compute the inertia tensor and its eigenvalues from a mask.

    Passing precomputed central moments via mu makes the function much
    faster because they do not have to be recomputed.
    """
    if mu is None:
        # inertia_tensor expects central moments, not raw moments
        mu = measure.moments_central(mask)

    inertia_tensor = measure.inertia_tensor(mask, mu)
    inertia_eigen = measure.inertia_tensor_eigvals(mask,
                                                   mu=mu,
                                                   T=inertia_tensor)
    return inertia_tensor, inertia_eigen
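A possible usage sketch for get_inertia; the rectangular mask below is made up for illustration:

import numpy as np
from skimage import measure

mask = np.zeros((64, 64), dtype=np.uint8)
mask[20:44, 10:54] = 1  # hypothetical rectangular mask

mu = measure.moments_central(mask)  # precompute the central moments ...
T, eigvals = get_inertia(mask, mu)  # ... so get_inertia can skip recomputing them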
Example 4
import numpy as np
from skimage._shared.utils import _supported_float_type  # private skimage helper
from skimage.measure import inertia_tensor, inertia_tensor_eigvals


def test_inertia_tensor_2d(dtype):
    image = np.zeros((40, 40), dtype=dtype)
    image[15:25, 5:35] = 1  # big horizontal rectangle (aligned with axis 1)
    expected_dtype = _supported_float_type(image.dtype)

    T = inertia_tensor(image)
    assert T.dtype == expected_dtype
    assert T[0, 0] > T[1, 1]
    np.testing.assert_allclose(T[0, 1], 0)

    v0, v1 = inertia_tensor_eigvals(image, T=T)
    assert v0.dtype == expected_dtype
    assert v1.dtype == expected_dtype
    np.testing.assert_allclose(np.sqrt(v0 / v1), 3, rtol=0.01, atol=0.05)
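In the source test module the dtype argument comes from a pytest parametrization; a minimal sketch of such a wrapper (the exact dtype list is an assumption):

import numpy as np
import pytest

@pytest.mark.parametrize("dtype", [np.uint8, np.float16, np.float32, np.float64])
def test_inertia_tensor_2d_dtypes(dtype):
    # Delegate to the dtype-aware test defined above.
    test_inertia_tensor_2d(dtype)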
Example 5
from skimage.measure import (inertia_tensor, inertia_tensor_eigvals, moments,
                             moments_hu)


def moments_features(image):
    """Calculate a set of moment-based features for each channel.

    Computes the Hu moments, the inertia tensor, the inertia tensor
    eigenvalues, and the raw image moments
    (https://en.wikipedia.org/wiki/Image_moment) of every channel.

    Parameters
    ----------
    image : 3D array, shape (M, N, C)
        The input image with multiple channels.

    Returns
    -------
    features : dict
        Dictionary of Hu moments, inertia tensor components, inertia
        tensor eigenvalues and raw moments, keyed per channel.

    """
    # storing the feature values
    features = dict()
    for ch in range(image.shape[2]):
        hu_moments = moments_hu(image[:, :, ch])
        for i in range(len(hu_moments)):
            features["moments_hu_" + str(i + 1) + "_Ch" +
                     str(ch + 1)] = hu_moments[i]

        inertia_tensor_calculated = inertia_tensor(image[:, :, ch]).ravel()
        features["inertia_tensor_1_ch" +
                 str(ch + 1)] = inertia_tensor_calculated[0]
        features["inertia_tensor_2_ch" +
                 str(ch + 1)] = inertia_tensor_calculated[1]
        features["inertia_tensor_3_ch" +
                 str(ch + 1)] = inertia_tensor_calculated[3]

        inertia_tensor_eigvalues = inertia_tensor_eigvals(image[:, :, ch])
        features["inertia_tensor_eigvalues_1_ch" +
                 str(ch + 1)] = inertia_tensor_eigvalues[0]
        features["inertia_tensor_eigvalues_2_ch" +
                 str(ch + 1)] = inertia_tensor_eigvalues[1]

        the_moments = moments(image[:, :, ch], order=5).ravel()

        for i in range(len(the_moments)):
            features["moments_" + str(i + 1) + "_Ch" +
                     str(ch + 1)] = the_moments[i]

    return features
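A usage sketch for moments_features; the random three-channel image is purely illustrative:

import numpy as np

rng = np.random.default_rng(0)
image = rng.random((32, 32, 3))  # hypothetical (M, N, C) input

features = moments_features(image)
print(len(features), "features")
print(features["inertia_tensor_eigvalues_1_ch1"])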
Example 6
    def calculate_features(self, feature_mode, debug=False):

        fruit_image = io.imread(self.path, as_gray=True)

        sigma = 0.005 * fruit_image.shape[0]
        filtered_fruit = filters.gaussian(fruit_image, sigma=sigma)

        # Apply triangle threshold to gaussian filtered image
        self.threshold = filters.threshold_triangle(filtered_fruit)
        thresholded_fruit = filtered_fruit < self.threshold

        fruit_central_moments = measure.moments_central(thresholded_fruit)
        hu_moments = measure.moments_hu(
            measure.moments_normalized(fruit_central_moments))
        # We only keep relevant hu moments, that is, components 1 and 3
        self.hu_moments = hu_moments[[1, 3]]

        # And we apply a log transform to them
        self.hu_moments = np.array([
            -1 * np.sign(j) * np.log10(np.abs(j)) for j in self.hu_moments[:]
        ])

        fruit_eigvalues = measure.inertia_tensor_eigvals(
            thresholded_fruit, mu=fruit_central_moments)
        self.moment_ratio = max(fruit_eigvalues) / min(fruit_eigvalues)

        if feature_mode == 'hu_plus_ratio':
            self.features = np.append(self.hu_moments, self.moment_ratio)
            self.feature_size = 3
        elif feature_mode == 'hu_only':
            self.features = np.array(self.hu_moments)
            self.feature_size = 2

        if debug:
            print("threshold: \n", self.threshold)
            print("Central moments: \n", fruit_central_moments)
            print("Hu moments:\n", hu_moments)
            print(
                "Normalized Hu moments:\n",
                [-1 * np.sign(j) * np.log10(np.abs(j)) for j in hu_moments[:]])
            print("Inertia tensor eigenvalues:\n", fruit_eigvalues)
            print("Moment ratio:\n", self.moment_ratio)
            print("Features: \n", self.features)
Example 7
def main():
    """Main function"""

    #===========================
    #==   PARSE ARGS
    #===========================
    logger.info("Get script args ...")
    try:
        args = get_args()
    except Exception as ex:
        logger.error("Failed to get and parse options (err=%s)", str(ex))
        return 1

    # - Input filelist
    datalist = args.datalist
    nmax = args.nmax

    # - Data process options
    nx = args.nx
    ny = args.ny
    normalize = args.normalize
    scale_to_abs_max = args.scale_to_abs_max
    scale_to_max = args.scale_to_max
    log_transform = args.log_transform
    resize = args.resize
    augment = args.augment
    shuffle = args.shuffle
    draw = args.draw
    dump_stats = args.dump_stats
    dump_sample_stats = args.dump_sample_stats
    dump_flags = args.dump_flags
    scale = args.scale
    scale_factors = []
    if args.scale_factors != "":
        scale_factors = [
            float(x.strip()) for x in args.scale_factors.split(',')
        ]
    standardize = args.standardize
    img_means = []
    img_sigmas = []
    if args.img_means != "":
        img_means = [float(x.strip()) for x in args.img_means.split(',')]
    if args.img_sigmas != "":
        img_sigmas = [float(x.strip()) for x in args.img_sigmas.split(',')]

    chan_divide = args.chan_divide
    chan_mins = []
    if args.chan_mins != "":
        chan_mins = [float(x.strip()) for x in args.chan_mins.split(',')]
    erode = args.erode
    erode_kernel = args.erode_kernel
    outfile_stats = "stats_info.dat"
    outfile_flags = "stats_flags.dat"
    outfile_sample_stats = "stats_sample_info.dat"
    exit_on_fault = args.exit_on_fault
    skip_on_fault = args.skip_on_fault
    save_fits = args.save_fits
    fthr_zeros = args.fthr_zeros

    #===========================
    #==   READ DATA
    #===========================
    # - Create data loader
    dl = DataLoader(filename=datalist)

    # - Read datalist
    logger.info("Reading datalist %s ..." % datalist)
    if dl.read_datalist() < 0:
        logger.error("Failed to read input datalist!")
        return 1

    source_labels = dl.snames
    nsamples = len(source_labels)
    if nmax > 0 and nmax < nsamples:
        nsamples = nmax

    logger.info("#%d samples to be read ..." % nsamples)

    # - Read data
    logger.info("Running data loader ...")
    data_generator = dl.data_generator(batch_size=1,
                                       shuffle=shuffle,
                                       resize=resize,
                                       nx=nx,
                                       ny=ny,
                                       normalize=normalize,
                                       scale_to_abs_max=scale_to_abs_max,
                                       scale_to_max=scale_to_max,
                                       augment=augment,
                                       log_transform=log_transform,
                                       scale=scale,
                                       scale_factors=scale_factors,
                                       standardize=standardize,
                                       means=img_means,
                                       sigmas=img_sigmas,
                                       chan_divide=chan_divide,
                                       chan_mins=chan_mins,
                                       erode=erode,
                                       erode_kernel=erode_kernel,
                                       retsdata=True)

    img_counter = 0
    img_stats_all = []
    img_flags_all = []
    pixel_values_per_channels = []

    while True:
        try:
            data, sdata = next(data_generator)
            img_counter += 1

            sname = sdata.sname
            label = sdata.label
            classid = sdata.id

            logger.info("Reading image no. %d (name=%s, label=%s) ..." %
                        (img_counter, sname, label))
            #print("data shape")
            #print(data.shape)

            nchannels = data.shape[3]

            # - Check for NANs
            has_naninf = np.any(~np.isfinite(data))
            if has_naninf:
                logger.warn(
                    "Image %d (name=%s, label=%s) has some nan/inf, check!" %
                    (img_counter, sname, label))
                if exit_on_fault:
                    return 1
                else:
                    if skip_on_fault:
                        break

            # - Check for fraction of zeros in radio mask
            cond = np.logical_and(data[0, :, :, 0] != 0,
                                  np.isfinite(data[0, :, :, 0]))
            for i in range(1, nchannels):
                data_2d = data[0, :, :, i]
                data_1d = data_2d[cond]
                n = data_1d.size
                n_zeros = np.count_nonzero(data_1d == 0)
                f = n_zeros / n
                if n_zeros > 0:
                    logger.info(
                        "Image %d chan %d (name=%s, label=%s): n=%d, n_zeros=%d, f=%f"
                        % (img_counter, i + 1, sname, label, n, n_zeros, f))

                if f >= fthr_zeros:
                    logger.warn(
                        "Image %d chan %d (name=%s, label=%s) has a zero fraction %f, check!"
                        % (img_counter, i + 1, sname, label, f))
                    if skip_on_fault:
                        break

            # - Check if channels have elements all equal
            for i in range(nchannels):
                data_min = np.min(data[0, :, :, i])
                data_max = np.max(data[0, :, :, i])
                same_values = (data_min == data_max)
                if same_values:
                    logger.error(
                        "Image %d chan %d (name=%s, label=%s) has all elements equal to %f, check!"
                        % (img_counter, i + 1, sname, label, data_min))
                    if exit_on_fault:
                        return 1
                    else:
                        if skip_on_fault:
                            break

            # - Check correct norm
            if normalize:
                data_min = np.min(data[0, :, :, :])
                data_max = np.max(data[0, :, :, :])
                if scale_to_max:
                    correct_norm = (data_max == 1)
                else:
                    correct_norm = (data_min == 0 and data_max == 1)
                if not correct_norm:
                    logger.error(
                        "Image %d (name=%s, label=%s) has invalid norm (%f,%f), check!"
                        % (img_counter, sname, label, data_min, data_max))
                    if exit_on_fault:
                        return 1
                    else:
                        if skip_on_fault:
                            break

            # - Dump image flags
            if dump_flags:
                img_flags = [sname]

                for i in range(nchannels):
                    ##cond_i= np.logical_and(data[0,:,:,i]!=0, np.isfinite(data[0,:,:,i]))
                    data_2d = data[0, :, :, i]
                    data_1d = data_2d[cond]  # pixel in radio mask
                    n = data_1d.size
                    n_bad = np.count_nonzero(
                        np.logical_or(~np.isfinite(data_1d), data_1d == 0))
                    n_neg = np.count_nonzero(data_1d < 0)
                    f_bad = float(n_bad) / float(n)
                    f_negative = float(n_neg) / float(n)
                    data_min = np.nanmin(data_1d)
                    data_max = np.nanmax(data_1d)
                    same_values = int(data_min == data_max)

                    img_flags.append(same_values)
                    img_flags.append(f_bad)
                    img_flags.append(f_negative)

                # - Compute peaks & aspect ratio of the first channel
                #   (the loop variable i points at the last channel here)
                chan_first = data[0, :, :, 0]
                kernsize = 7
                footprint = np.ones((kernsize, ) * chan_first.ndim,
                                    dtype=bool)
                peaks = peak_local_max(np.copy(chan_first),
                                       footprint=footprint,
                                       min_distance=4,
                                       exclude_border=True)

                bmap = cond.astype(np.uint8)
                polygon = None
                try:
                    contours = cv2.findContours(np.copy(bmap),
                                                cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
                    contours = imutils.grab_contours(contours)
                    if len(contours) > 0:
                        contour = np.squeeze(contours[0])
                        polygon = Polygon(contour)
                except Exception as e:
                    logger.warn("Failed to compute mask contour (err=%s)!" %
                                (str(e)))

                if polygon is None:
                    peaks_sel = peaks
                else:
                    peaks_sel = []
                    for peak in peaks:
                        point = Point(peak[1], peak[0])
                        has_peak = polygon.contains(point)
                        if has_peak:
                            peaks_sel.append(peak)

                npeaks = len(peaks_sel)

                eigvals = inertia_tensor_eigvals(image=chan_first)
                aspect_ratio = eigvals[0] / eigvals[1]

                img_flags.append(npeaks)
                img_flags.append(aspect_ratio)

                img_flags.append(classid)
                img_flags_all.append(img_flags)

            # - Dump image stats
            if dump_stats:
                img_stats = [sname]

                for i in range(nchannels):
                    data_masked = np.ma.masked_equal(data[0, :, :, i],
                                                     0.0,
                                                     copy=False)
                    data_min = data_masked.min()
                    data_max = data_masked.max()
                    data_mean = data_masked.mean()
                    data_std = data_masked.std()

                    img_stats.append(data_min)
                    img_stats.append(data_max)
                    img_stats.append(data_mean)
                    img_stats.append(data_std)

                img_stats.append(classid)
                img_stats_all.append(img_stats)

            # - Dump sample image stats
            if dump_sample_stats:
                if not pixel_values_per_channels:
                    pixel_values_per_channels = [[] for i in range(nchannels)]

                for i in range(nchannels):
                    cond = np.logical_and(data[0, :, :, i] != 0,
                                          np.isfinite(data[0, :, :, i]))

                    data_masked_1d = data[0, :, :, i][cond]
                    data_masked_list = list(data_masked_1d)
                    #data_masked= np.ma.masked_equal(data[0,:,:,i], 0.0, copy=False)
                    #data_masked_list= data_masked[~data_masked.mask].tolist() # Extract non-masked values and put to list
                    #print("type(data_masked_list)")
                    #print(type(data_masked_list))
                    #print(data_masked_list)

                    if type(data_masked_list) != list:
                        logger.error(
                            "Collection of non-masked pixels in image %d chan %d (name=%s, label=%s) is not a list!"
                            % (img_counter, i + 1, sname, label))
                        #print(type(data_masked_list))
                        return 1
                    else:
                        for item in data_masked_list:
                            item_type = type(item)
                            if item_type != float and item_type != np.float64 and item_type != np.float32:
                                logger.error(
                                    "Current pixel in collection of non-masked pixels in image %d chan %d (name=%s, label=%s) is not a float!"
                                    % (img_counter, i + 1, sname, label))
                                #print("item")
                                #print(item)
                                #print("item_type")
                                #print(item_type)
                                #print(data_masked_list)
                                return 1

                    if not data_masked_list:
                        logger.error(
                            "Image %d chan %d (name=%s, label=%s) has no unmasked pixels!"
                            % (img_counter, i + 1, sname, label))
                        if exit_on_fault:
                            return 1
                        else:
                            if skip_on_fault:
                                break
                    pixel_values_per_channels[i].extend(data_masked_list)

            # - Draw data
            if draw:
                logger.info("Drawing data ...")
                fig = plt.figure(figsize=(20, 10))
                for i in range(nchannels):
                    data_ch = data[0, :, :, i]
                    data_masked = np.ma.masked_equal(data_ch, 0.0, copy=False)
                    data_min = data_masked.min()
                    data_max = data_masked.max()
                    data_ch[data_ch == 0] = data_min

                    #logger.info("Reading nchan %d ..." % i+1)
                    plt.subplot(1, nchannels, i + 1)
                    plt.imshow(data_ch, origin='lower')

                plt.tight_layout()
                plt.show()

            # - Dump fits
            if save_fits:
                logger.info("Writing FITS ...")
                for i in range(nchannels):
                    outfile_fits = sname + '_id' + str(classid) + '_ch' + str(
                        i + 1) + '.fits'
                    Utils.write_fits(data[0, :, :, i], outfile_fits)

            # - Stop generator
            if img_counter >= nsamples:
                logger.info("Sample size (%d) reached, stop generation..." %
                            nsamples)
                break

        except (GeneratorExit, KeyboardInterrupt):
            logger.info("Stop loop (keyboard interrupt) ...")
            break
        except Exception as e:
            logger.warn("Stop loop (exception catched %s) ..." % str(e))
            break

    # - Dump img flags
    if dump_flags:
        logger.info("Dumping img flag info to file %s ..." % (outfile_flags))

        head = "# sname "

        for i in range(nchannels):
            ch = i + 1
            s = 'equalPixValues_ch{i} badPixFract_ch{i} negativePixFract_ch{i} '.format(
                i=ch)
            head = head + s
        head = head + "npeaks_ch1 aspectRatio_ch1 "
        head = head + "id"
        logger.info("Flag file head: %s" % (head))

        # - Dump to file
        Utils.write_ascii(np.array(img_flags_all), outfile_flags, head)

    # - Dump img stats
    if dump_stats:
        logger.info("Dumping img stats info to file %s ..." % (outfile_stats))

        head = "# sname "
        for i in range(nchannels):
            ch = i + 1
            s = 'min_ch{i} max_ch{i} mean_ch{i} std_ch{i} '.format(i=ch)
            head = head + s
        head = head + "id"
        logger.info("Stats file head: %s" % (head))

        # - Dump to file
        Utils.write_ascii(np.array(img_stats_all), outfile_stats, head)

    # - Dump sample pixel stats
    if dump_sample_stats:
        logger.info("Computing sample pixel stats ...")
        img_sample_stats = [[]]

        for i in range(len(pixel_values_per_channels)):
            #print("type(pixel_values_per_channels)")
            #print(type(pixel_values_per_channels))
            #print("type(pixel_values_per_channels[i])")
            #print(type(pixel_values_per_channels[i]))
            #print(pixel_values_per_channels[i])
            #print("len(pixel_values_per_channels[i])")
            #print(len(pixel_values_per_channels[i]))

            for j in range(len(pixel_values_per_channels[i])):
                item = pixel_values_per_channels[i][j]
                item_type = type(item)
                if item_type != np.float32 and item_type != np.float64 and item_type != float:
                    logger.error("Pixel no. %d not float (ch=%d)!" %
                                 (j + 1, i + 1))
                    #print("item_type")
                    #print(item_type)
                    #print("item")
                    #print(item)
                    return 1
            data = np.array(pixel_values_per_channels[i], dtype=float)
            #print("type(data)")
            #print(type(data))
            data_min = data.min()
            data_max = data.max()
            data_mean = data.mean()
            data_std = data.std()
            data_median = np.median(data)
            data_q3, data_q1 = np.percentile(data, [75, 25])
            data_iqr = data_q3 - data_q1

            img_sample_stats[0].append(data_min)
            img_sample_stats[0].append(data_max)
            img_sample_stats[0].append(data_mean)
            img_sample_stats[0].append(data_std)
            img_sample_stats[0].append(data_median)
            img_sample_stats[0].append(data_iqr)

        logger.info("Dumping pixel sample stats info to file %s ..." %
                    (outfile_sample_stats))

        head = "# "
        for i in range(len(pixel_values_per_channels)):
            ch = i + 1
            s = 'min_ch{i} max_ch{i} mean_ch{i} std_ch{i} median_ch{i} iqr_ch{i} '.format(
                i=ch)
            head = head + s
        logger.info("Sample stats file head: %s" % (head))

        Utils.write_ascii(np.array(img_sample_stats), outfile_sample_stats,
                          head)

    return 0
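The peak-count and aspect-ratio flags computed inside the loop above can be factored into a small helper; a sketch under the assumption that a single 2D channel is passed in (image_shape_flags is a hypothetical name):

import numpy as np
from skimage.feature import peak_local_max
from skimage.measure import inertia_tensor_eigvals

def image_shape_flags(chan_2d, kernsize=7, min_distance=4):
    # Count local peaks and compute the inertia-tensor aspect ratio of a
    # single 2D channel, mirroring the flags dumped by main() above.
    footprint = np.ones((kernsize,) * chan_2d.ndim, dtype=bool)
    peaks = peak_local_max(chan_2d, footprint=footprint,
                           min_distance=min_distance, exclude_border=True)
    eigvals = inertia_tensor_eigvals(image=chan_2d)
    aspect_ratio = eigvals[0] / eigvals[1]
    return len(peaks), aspect_ratio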