def contour_embryo(self,img,init=None,sigma=3):
        '''
        Fit a contour to the embryo to separate the background
        
        Parameters
        ----------
        img : 2D np.array
            2D image from a single timepoint to mask
        init : 400x2 ellipse array, optional
            Starting ellipse array that is bigger than the embryo
        sigma : int, optional 
            Standard deviation for the Gaussian smoothing step
        
        Returns
        -------
        mask : 2D boolean np.array
            True inside the fitted embryo contour, False in the background
        '''

        # Fall back to the stored starting ellipse if none was given
        if init is None:
            init = self.fell

        # Fit contour based on starting ellipse
        snake = active_contour(gaussian(img, sigma),
                           init, alpha=0.015, beta=10, gamma=0.001)

        # Create boolean mask based on contour
        mask = grid_points_in_poly(img.shape, snake).T

        return mask
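A minimal, self-contained sketch of the same masking idea (not the author's exact pipeline; the synthetic blob, circle radius, and snake parameters below are illustrative assumptions):

import numpy as np
from skimage.draw import disk
from skimage.filters import gaussian
from skimage.measure import grid_points_in_poly
from skimage.segmentation import active_contour

img = np.zeros((200, 200))
rr, cc = disk((100, 100), 40)
img[rr, cc] = 1.0                          # synthetic bright "embryo"

s = np.linspace(0, 2 * np.pi, 400)
init = np.column_stack([100 + 80 * np.sin(s), 100 + 80 * np.cos(s)])   # starting circle bigger than the blob

snake = active_contour(gaussian(img, 3), init, alpha=0.015, beta=10, gamma=0.001)
mask = grid_points_in_poly(img.shape, snake)
masked = np.where(mask, img, 0)            # background set to 0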
Example #2
    def fill_in_contour(contour, label_data, value=1):
        ymin, xmin = (np.floor(contour.min(axis=0)) - 1).astype('int')
        ymax, xmax = (np.ceil(contour.max(axis=0)) + 1).astype('int')
        # possibly roll the data to deal with periodicity
        roll_x, roll_y = 0, 0
        if ymin < 0:
            roll_y = -ymin
        if ymax > ny:
            roll_y = ny - ymax
        if xmin < 0:
            roll_x = -xmin
        if xmax > nx:
            roll_x = nx - xmax

        contour_rel = contour - np.array([ymin, xmin])

        ymax += roll_y
        ymin += roll_y
        xmax += roll_x
        xmin += roll_x

        data = np.roll(np.roll(label_data, roll_x, axis=1), roll_y, axis=0)
        region_slice = (slice(ymin, ymax), slice(xmin, xmax))
        region_data = data[region_slice]
        data[region_slice] = value * grid_points_in_poly(
            region_data.shape, contour_rel)

        return np.roll(np.roll(data, -roll_x, axis=1), -roll_y, axis=0)
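For reference, a minimal self-contained sketch of the core rasterization step the helper above performs (without the periodic rolling, which relies on nx/ny and label_data conventions from the enclosing scope):

import numpy as np
from skimage.measure import grid_points_in_poly

label_data = np.zeros((8, 8), dtype=int)
contour = np.array([[1.5, 1.5], [1.5, 5.5], [5.5, 5.5], [5.5, 1.5]])  # closed square in (row, col) coordinates

label_data[grid_points_in_poly(label_data.shape, contour)] = 1        # fill the interior with the label value
print(label_data)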
Example #3
def fill_in_contour(contour, value=1):
    ymin, xmin = (np.floor(contour.min(axis=0)) - 1).astype('int')
    ymax, xmax = (np.ceil(contour.max(axis=0)) + 1).astype('int')
    region_slice = (slice(ymin, ymax), slice(xmin, xmax))
    region_data = data[region_slice]
    contour_rel = contour - np.array([ymin, xmin])
    data[region_slice] = value * grid_points_in_poly(
        region_data.shape, contour_rel)
Example #4
def convert1roi(myroi, layershape, verbose=False):
    points = np.array(myroi['points'])
    shift = np.array(myroi['pos'])
    shift -= np.array([0.5, 0.5])
    if verbose:
        print(type(shift))
        print(myroi)
        print(points + shift)
    return grid_points_in_poly(layershape, points + shift)
Example #5
def crop(image, polygon):
    new_image = np.copy(image)
    xmin, xmax, ymin, ymax = polygon_border(polygon)
    included = grid_points_in_poly(new_image.shape, polygon)
    # set values outside the polygon to white
    new_image[~included] = 255
    cropped_image = new_image[xmin:xmax, ymin:ymax]

    return cropped_image
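A hedged usage sketch for crop; polygon_border is not defined in this snippet, so a plausible stand-in that returns the polygon's bounding box in array coordinates is assumed here, along with the crop definition above and its grid_points_in_poly import:

import numpy as np

def polygon_border(polygon):
    # Assumed helper: integer bounding box of the polygon, (xmin, xmax, ymin, ymax) in array coordinates.
    (xmin, ymin), (xmax, ymax) = polygon.min(axis=0), polygon.max(axis=0)
    return int(np.floor(xmin)), int(np.ceil(xmax)), int(np.floor(ymin)), int(np.ceil(ymax))

image = np.full((20, 20), 128, dtype=np.uint8)
polygon = np.array([[3, 3], [3, 15], [15, 15], [15, 3]])
cropped = crop(image, polygon)       # pixels outside the polygon become white (255)
print(cropped.shape)                 # (12, 12) with the bounding box above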
def test_grid_points_in_poly():
    v = np.array([[0, 0],
                  [5, 0],
                  [5, 5]])

    expected = np.tril(np.ones((5, 5), dtype=bool))

    assert_array_equal(grid_points_in_poly((5, 5), v),
                       expected)
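The test above also documents the coordinate convention: vertices are (row, col) pairs, so the triangle (0, 0) -> (5, 0) -> (5, 5) fills the lower triangle of the grid. A quick sanity sketch with a rectangle:

import numpy as np
from skimage.measure import grid_points_in_poly

rect = np.array([[1, 1], [1, 4], [4, 4], [4, 1]])   # (row, col) vertices
mask = grid_points_in_poly((6, 6), rect)
print(mask.astype(int))   # interior points (rows/cols 2..3) are 1; behavior exactly on the polygon edge is unspecified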
def points_inside_contour(cnt, num_samples=None):
    xmin, ymin = cnt.min(axis=0)
    xmax, ymax = cnt.max(axis=0)
    h, w = (ymax-ymin+1, xmax-xmin+1)
    inside_ys, inside_xs = np.where(grid_points_in_poly((h, w), cnt[:, ::-1]-(ymin,xmin)))

    if num_samples is None:
        inside_points = np.c_[inside_xs, inside_ys] + (xmin, ymin)
    else:
        n = inside_ys.size
        random_indices = np.random.choice(range(n), min(num_samples, n), replace=False)
        inside_points = np.c_[inside_xs[random_indices], inside_ys[random_indices]]

    return inside_points
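A hedged usage sketch (assuming the points_inside_contour above, numpy, and grid_points_in_poly are in scope); the contour is given as (x, y) pairs, which the function flips to (row, col) internally:

import numpy as np

square = np.array([[2, 3], [8, 3], [8, 9], [2, 9]])   # (x, y) vertices of a square
pts = points_inside_contour(square)                   # integer (x, y) points inside the square
print(pts.shape)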
Example #10
    def region_stats(self, img, region, sat_threshold=None):
        """Provide regional statistics for an image over a region
        Inputs: img is any image ndarray, region is a skimage shape
        Outputs: mean, std, count, and saturated count tuple for the region"""
        rev_panel_pts = np.fliplr(region)  # skimage and opencv coords are reversed
        h, w = img.shape
        mask = measure.grid_points_in_poly((h, w), rev_panel_pts)
        num_pixels = mask.sum()
        panel_pixels = img[mask]
        stdev = panel_pixels.std()
        mean_value = panel_pixels.mean()
        saturated_count = 0
        if sat_threshold is not None:
            # count saturated pixels rather than summing their indices
            saturated_count = np.count_nonzero(panel_pixels > sat_threshold)
        return mean_value, stdev, num_pixels, saturated_count
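The same statistics can be reproduced outside the class; a self-contained sketch (a standalone rewrite, not the author's class) with a rectangular region given as (x, y) points, as region_stats expects:

import numpy as np
from skimage import measure

img = np.random.default_rng(0).normal(1000.0, 50.0, size=(100, 120))
region_xy = np.array([[10, 20], [60, 20], [60, 80], [10, 80]])        # (x, y) corners of the panel
mask = measure.grid_points_in_poly(img.shape, np.fliplr(region_xy))   # flip to (row, col) for skimage
panel_pixels = img[mask]
print(panel_pixels.mean(), panel_pixels.std(), mask.sum())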
def read_data_from_dicom(rs_file, roi_pattern, spacing, masks, start_point):
    """
    :param rs_file: rtstructure 文件
    :param roi_pattern:  器官名称
    :param spacing: 像素间空隙
    :param masks: 原图大小的全1矩阵
    :param start_point: 序列的起始位置
    :return:
    """
    rois = [roi.ROIName for roi in rs_file.StructureSetROISequence]
    print(rois)
    print(roi_pattern)
    id = find_roi_id(rois, roi_pattern)
    print(id)
    count = 0
    if id > -1:
        # check whether rs_file.ROIContourSequence[id] has a ContourSequence attribute
        if hasattr(rs_file.ROIContourSequence[id], 'ContourSequence'):
            if masks.shape[0] < len(
                    rs_file.ROIContourSequence[id].ContourSequence):
                print('-----------')
                return masks
            for contour in rs_file.ROIContourSequence[id].ContourSequence:
                contour_data = contour.ContourData
                if len(contour_data) < 9:
                    return masks
                contour_data = np.array(contour_data).reshape(-1, 3)
                X = (np.round((contour_data[:, 0] - start_point[0]) /
                              spacing[2]).astype(int))
                Y = (np.round((contour_data[:, 1] - start_point[1]) /
                              spacing[1]).astype(int))
                z = (np.round((contour_data[0, 2] - start_point[2]) /
                              spacing[0]).astype(int))
                V_poly = np.stack([Y, X], axis=1)
                masks[z, :, :] = measure.grid_points_in_poly([512, 512],
                                                             V_poly)
                count = count + 1
        else:
            print('no this organ')
    else:
        print('no this organ')
    print(count)
    print((np.where(masks == 1))[0])
    return masks
Example #12
def points_in_poly(poly):
    """
    Return polygon as grid of points inside polygon. Only works for polygons
    defined with points which are all integers

    Parameters
    ----------
    poly : `list` or `numpy.ndarray`
        n x 2 list, defines all points at the edge of a polygon

    Returns
    -------
    `list`
        n x 2 list, all integer points within the polygon

    """
    if np.shape(poly)[1] != 2:
        raise ValueError("Polygon must be defined as a n x 2 array!")

    # convert to integers
    poly = np.array(poly, dtype=int).tolist()

    xs, ys = zip(*poly)
    minx, maxx = min(xs), max(xs)
    miny, maxy = min(ys), max(ys)
    # New polygon with the starting point as [0, 0]
    newPoly = [(int(x - minx), int(y - miny)) for (x, y) in poly]
    mask = measure.grid_points_in_poly(
        (round(maxx - minx) + 1, round(maxy - miny) + 1), newPoly)
    # all points in polygon
    points = [[x + minx, y + miny] for (x, y) in zip(*np.nonzero(mask))]

    # add edge points if missing
    for p in poly:
        if p not in points:
            points.append(p)

    return points
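A quick usage sketch (assuming the points_in_poly above, numpy, and skimage.measure are imported):

square = [[0, 0], [3, 0], [3, 3], [0, 3]]
pts = points_in_poly(square)
print(pts)   # integer points found inside the square, plus any vertices the raster test missed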
Example #13
def contourToSeg(contour, origin, dims, spacing):
    '''
    Converts an ordered set of points to a segmentation
    (i.e. fills the inside of the contour), uses the point in polygon method

    args:
    	@a contour: numpy array, shape = (num points, 2), ordered list of points
    	forming a closed contour
    	@a origin: The origin of the image, corresponds to top left corner of image
    	@a dims: (xdims,ydims) dimensions of the image corresponding to the segmentation
    	@a spacing: the physical size of each pixel
    '''
    #print contour
    dims_ = (int(dims[0]), int(dims[1]))
    d = np.asarray([float(dims[0]) / 2, float(dims[1]) / 2])

    seg = np.zeros(dims_)

    origin_ = np.asarray([origin[0], origin[1]])
    spacing_ = np.asarray([spacing[0], spacing[1]])
    a = grid_points_in_poly(dims_, (contour[:, :2] - origin_) / spacing_ + d)
    seg[a] = 1.0
    return np.flipud(seg.T)
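A hedged usage sketch for contourToSeg (assuming the definition above plus numpy and grid_points_in_poly are in scope), using a circular contour in physical coordinates:

import numpy as np

theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
contour = np.column_stack([10 * np.cos(theta), 10 * np.sin(theta)])   # radius-10 circle around the origin
seg = contourToSeg(contour, origin=(0.0, 0.0), dims=(64, 64), spacing=(0.5, 0.5))
print(seg.sum())   # roughly pi * (10 / 0.5)**2, about 1257 filled pixels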
def mask_from_contours(contours: List[np.ndarray],
                       shape: Tuple[int, int]) -> np.ndarray:
    """ Make a mask from a set of contours

    :param list[ndarray] contours:
        The list of contours for the image
    :param tuple[int] shape:
        The (rows, cols) shape for the new boolean image
    :returns:
        A 2D ndarray with the specified shape
    """

    rows, cols = shape

    final_mask = np.zeros((rows, cols), dtype=bool)
    for contour in contours:
        contour[contour < 0] = 0
        contour[contour[:, 0] > cols, 0] = cols
        contour[contour[:, 1] > rows, 1] = rows

        contour_mask = measure.grid_points_in_poly((rows, cols),
                                                   contour[:, [1, 0]])
        final_mask = np.logical_or(contour_mask, final_mask)
    return final_mask
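A hedged usage sketch (assuming the definition above and its numpy / skimage.measure imports are in scope); contours are (x, y) points and are flipped to (row, col) inside the function:

import numpy as np

rect = np.array([[2.0, 3.0], [12.0, 3.0], [12.0, 9.0], [2.0, 9.0]])   # (x, y) vertices
mask = mask_from_contours([rect], (16, 16))
print(mask.sum())   # number of pixels inside the rectangle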
Example #15
skinMoleFillHoles4 = binary_fill_holes(skinMoleOpening4, structure=data['se']['se4'][0][0])
#skinMoleFillHoles6 = binary_fill_holes(skinMoleOpening6, structure=data['se']['se6'][0][0])

#Step 8

'''
contours0 = find_contours(skinMoleFillHoles0, 0.5)[0]
contours0 = contours0[0:-1, :]
skinMoleSnakes0 = active_contour(skinMoleC, contours0, bc='free')
skinMoleSnakes0 = grid_points_in_poly(I.shape, skinMoleSnakes0)
'''

contours4 = find_contours(skinMoleFillHoles4, 0.5)[0]
contours4 = contours4[0:-1, :]
skinMoleSnakes4 = active_contour(skinMoleC, contours4, bc='free')
skinMoleSnakes4 = grid_points_in_poly(I.shape, skinMoleSnakes4)

'''
contours6 = find_contours(skinMoleFillHoles6, 0.5)[0]
contours6 = contours6[0:-1, :]
skinMoleSnakes6 = active_contour(skinMoleC, contours6, bc='free')
skinMoleSnakes6 = grid_points_in_poly(I.shape, skinMoleSnakes6)
'''

# Step 9

'''
ILabel0 = label(skinMoleSnakes0)
skinMoleOpenArea0 = np.zeros((I.shape[0:2]), dtype=np.uint8)

if ILabel0.max() > 1:
'''
Example #16
    tiff_fn, boundaries_fn, out_fn = parse_command_line_args()


    ### Load data
    print('Loading %s...' % tiff_fn, end=' ')
    sys.stdout.flush()
    frames = tiff_to_ndarray(tiff_fn)
    all_boundary_pts = np.load(boundaries_fn)
    print('done.')
    sys.stdout.flush()


    ### visualize cell motion to check the results ###
    boundary_pts = all_boundary_pts[0]
    boundary_x, boundary_y = interpolate_boundary_pts(boundary_pts)
    mask = measure.grid_points_in_poly(frames[0].shape, boundary_pts)
    center = measure.regionprops(mask)[0].centroid

    plt.ion()
    fig = plt.figure()
    implot = plt.imshow(frames[0], cmap='gray')
    boundary_plot, = plt.plot(boundary_y, boundary_x, 'bo', markeredgecolor='none', markersize=1)
    boundary_pts_plot, = plt.plot(boundary_pts[:,1], boundary_pts[:,0], 'ro', markeredgecolor='none', markersize=4)
    center_point, = plt.plot(center[1], center[0], 'ro', markeredgecolor='r', markersize=7)
    plt.axis('off')
    fig.canvas.draw()

    for frame_num, (frame, boundary_pts) in enumerate(zip(frames, all_boundary_pts)):

        boundary_x, boundary_y = interpolate_boundary_pts(boundary_pts)
        mask = measure.grid_points_in_poly(frame.shape, boundary_pts)
Example #17
    ### Parse command line arguments
    boundaries_fn, tif_fn = parse_command_line_args()

    ### Load data
    all_boundary_pts = np.load(boundaries_fn)
    try:
        cell_label = re.findall(r'\d+', boundaries_fn)[0]
    except IndexError:
        cell_label = -1

    frames = pims.TiffStack(tif_fn)
    frame = frames[0]

    ### smooth and plot cell trajectory ###
    masks = [grid_points_in_poly(frame.shape, pts) for pts in all_boundary_pts]
    centers = np.array([regionprops(mask)[0].centroid for mask in masks])

    # estimate initial velocity via regression on first few timepoints
    init_pts = 5
    vx0, _, _, _, _ = linregress(range(init_pts), centers[:init_pts, 0])
    vy0, _, _, _, _ = linregress(range(init_pts), centers[:init_pts, 1])
    initial_state = np.array([centers[0, 0], centers[0, 1], vx0, vy0])

    # smooth the cell center positions
    position_noise = 15.0  # higher noise -> heavier smoothing
    smoother = KalmanSmoother2D(position_noise, position_noise)
    smoother.set_initial_state(initial_state)
    smoother.set_measurements(centers)
    smooth_cell_centers = smoother.get_smoothed_measurements()
    cell_velocities = smoother.get_velocities()
Example #18
def delete_unnecessary_obj(img):
    bit_img = measure.grid_points_in_poly((512, 512), img['Contour'])
    matrix = np.zeros((512, 512))
    if bit_img.any():
        matrix[bit_img] = img['oldData'].astype(np.float64)[bit_img]
    return np.rint(matrix).astype(np.uint16)
def fill_shape(labelled, label=1, tol=0):
    in_shape = grid_points_in_poly(labelled.shape[:2],
                                   make_poly(labelled, label, tol=tol))
    return np.where(in_shape, label, labelled)
Example #20
    def validate(self, outfile, low, corner=None, align_frames=True):
        """
        Validate the model on a single observation set
        """

        # Set model in training mode
        self.model.eval()

        # Mask in Fourier plane
        x = np.linspace(-1, 1, self.npix_image)
        xx, yy = np.meshgrid(x, x)
        rho = np.sqrt(xx ** 2 + yy ** 2)
        mask = rho <= 0.5
        mask_simple = np.fft.fftshift(mask.astype('float32'))
        
        tmp = self.get_obs(outfile, low, corner, align_frames=align_frames)

        start = time.time()

        images = torch.tensor(tmp[0].astype('float32')[None, :, :])
        images_ft = torch.tensor(tmp[1].astype('float32')[None, :, :])
        variance = 1e-6*torch.ones(self.batch_size)
        images, images_ft, variance = images.to(self.device), images_ft.to(self.device), variance.to(self.device)

        with torch.no_grad():
            
            coeff, numerator, denominator, psf, psf_ft, loss = self.model(images, images_ft, variance)

        tmp = complex_multiply_astar_b(numerator, numerator)
        filt = 1.0 - complex_division(denominator, tmp)[..., 0]
        filt[filt < 0.2] = 0.0
        filt[filt > 1.0] = 1.0
        tmp = np.fft.fftshift(filt.cpu().numpy())
        all_contours = measure.find_contours(tmp[0,:,:], 0.01)
        origin = 128/2 * np.ones((1,2))
        index_contour = -1
        for i in range(len(all_contours)):
            is_inside = measure.points_in_poly(origin, all_contours[i])
            if (is_inside[0]):
                index_contour = i
                break

        if (index_contour != -1):
            mask = np.fft.fftshift(tmp[0, :, :] * measure.grid_points_in_poly((128, 128), all_contours[index_contour]))
        else:
            mask = np.copy(mask_simple)
        mask_torch = torch.tensor(mask).to(self.device)

        F = complex_division(numerator, denominator) * mask_torch[None, :, :, None]
        im = torch.ifft(F, 2)[:, :, :, 0]

        im = im.detach().cpu().numpy()

        if (np.isnan(im).sum() != 0):
            print("NaN detected in image. Defaulting to standard mask")
            mask_torch = torch.tensor(mask_simple).to(self.device)

            F = complex_division(numerator, denominator) * mask_torch[None, :, :, None]
            im = torch.ifft(F, 2)[:, :, :, 0]

            im = im.detach().cpu().numpy()

        coeff = coeff.detach().cpu().numpy()
        im[im < 0] = 0.0
        psf = psf[..., 0].detach().cpu().numpy()
        images = images[0, :, :, :].detach().cpu().numpy()

        final = time.time()

        print(f"Elapsed time : {final-start} s")

        im_ft = np.fft.fft2(im[0,:,:])
        psf_ft = np.fft.fft2(psf, axes=(1,2))
        im_degraded = np.fft.ifft2(im_ft[None, :, :] * psf_ft).real

        psf = np.fft.fftshift(psf[:,:,:])

        return images, im_degraded, psf, im, coeff, loss
Example #22
ax.set_title('Pioneer Burn Scar and Fire Contour', fontsize=12)
#==============================================================================

# Recopy the original DeltaNBR as the previous operations altered the values
deltanbrcat = deepcopy(deltanbr)
fire_dnbr = deltanbrcat
print(fire_dnbr.min(), fire_dnbr.max())

# Get extent of a pixel grid for the fire scar
nx, ny = (fire_dnbr.shape[0], fire_dnbr.shape[1])

x = np.arange(-100, 100, 1)
y = np.arange(0, 32000, 1)

fire_mask = measure.grid_points_in_poly(
    (nx, ny), the_contour)  # This seems to run slow or cause some problems

burned_pixels = fire_dnbr[fire_mask] * 1000  #scaled by 1000 to get dNBR

fig, ax = plt.subplots(figsize=(10, 6))
counts, bins, patches = ax.hist(burned_pixels,
                                bins=80,
                                facecolor='blue',
                                edgecolor='gray')

# Make matplotlib work for you and not against you
ax.set_xlim([-500, 1400])
ax.set_ylim([0, 80000])

ax.set_ylabel('Severity frequency (pixels)', fontsize=12)
ax.set_xlabel('dNBR', fontsize=12)
Example #23
fill_exterior_pixels(final, cur_x, cur_y)

#final = np.logical_not(final)  # display _walkable_ pixels
contours = find_contours(final, 0.5, 'low', 'low')
contours = [approximate_polygon(contour, 3.0) for contour in contours]
contours = np.array(contours, dtype=object)  # contours have different lengths, so use an object array

# pick most large and/or detailed contour containing our interior point
interior_contours = \
    np.nonzero([bool(points_in_poly([[cur_y, cur_x]], contour))
                for contour in contours])[0]

# %% GRID FROM PIXELS

inside_contour_masks = [
    grid_points_in_poly(final.shape, contour) for contour in contours
]
walkable_masks = np.dstack(inside_contour_masks)
for index in range(len(contours)):
    if index not in interior_contours:
        walkable_masks[:, :, index] = \
            np.logical_not(walkable_masks[:, :, index])
walkable_grid = np.logical_and.reduce(walkable_masks, axis=2)

walkable_grid_small = downscale_local_mean(walkable_grid, (4, 4))

plt.imshow(walkable_grid_small == 1.0)

# %% CONTOUR MESHING

#plt.subplot(2,3,1)
Example #24
        out_fn = os.path.join('.', fn)
        print('Output movie name unspecified, movie will be saved to %s.' % out_fn)


    ### Load data
    print('Loading %s...' % tiff_fn, end=' ')
    sys.stdout.flush()
    frames = tiff_to_ndarray(tiff_fn)
    all_boundary_pts = np.load(boundaries_fn)
    print('done.')
    sys.stdout.flush()


    ### compute masks from boundaries
    frame_sz = frames[0].shape
    masks = [grid_points_in_poly(frame_sz , pts) for pts in all_boundary_pts]


    ### compute window size (max mask size + padding)
    padding = 10
    bboxes = [regionprops(mask)[0].bbox for mask in masks]
    bbox_widths = [(bbox[2]-bbox[0]) for bbox in bboxes]
    bbox_heights = [(bbox[3]-bbox[1]) for bbox in bboxes]
    win_size = (max(bbox_widths)+2*padding, max(bbox_heights)+2*padding)

    # make it square (apparently avconv doesn't work correctly with arbitrary frame size...)
    win_size = (max(win_size), max(win_size))


    ### centroids
    cell_centers = np.array([regionprops(mask)[0].centroid for mask in masks])
Example #25
    # Frame-by-frame snake fit. This is fairly slow; takes ~80 sec on my laptop.
    tsta = time.perf_counter()
    print('Tracking cell %i across %i frames...' % (selected_label, len(frames)))
    for frame_num, frame in enumerate(frames):

        # print progress
        if frame_num%10 == 0:
            print('Frame %i of %i' % (frame_num, len(frames)))
            sys.stdout.flush()

        # compute distance transforms and fit snake
        edge_dist, corner_dist = frame_to_distance_images(frame)
        boundary_pts = fit_snake(boundary_pts, edge_dist, alpha=alpha, beta=beta, nits=40)

        # check if the cell went off the edge (i.e., out of view)
        single_cell_mask = measure.grid_points_in_poly(frame.shape, boundary_pts)
        if np.any(np.logical_and(~mask, single_cell_mask)):
            print('cell went off edge on frame %i' % frame_num)
            all_boundary_pts = np.delete(all_boundary_pts, np.s_[frame_num:], 0)
            break

        # TODO: resample the points along the curve to maintain constant spacing?
        # store results in big array
        all_boundary_pts[frame_num] = boundary_pts

    print('elapsed time:', time.perf_counter() - tsta)

    ### write boundary points to file
    if np_fn is None:
        outdir = '.'
        out_fn = 'cell%i_boundary_points.npy' % selected_label
Example #26
    ### Parse command line arguments
    tiff_fn, boundaries_fn, out_fn = parse_command_line_args()

    ### Load data
    print('Loading %s...' % tiff_fn, end=' ')
    sys.stdout.flush()
    frames = tiff_to_ndarray(tiff_fn)
    all_boundary_pts = np.load(boundaries_fn)
    print('done.')
    sys.stdout.flush()

    ### visualize cell motion to check the results ###
    boundary_pts = all_boundary_pts[0]
    boundary_x, boundary_y = interpolate_boundary_pts(boundary_pts)
    mask = measure.grid_points_in_poly(frames[0].shape, boundary_pts)
    center = measure.regionprops(mask)[0].centroid

    plt.ion()
    fig = plt.figure()
    implot = plt.imshow(frames[0], cmap='gray')
    boundary_plot, = plt.plot(boundary_y,
                              boundary_x,
                              'bo',
                              markeredgecolor='none',
                              markersize=1)
    boundary_pts_plot, = plt.plot(boundary_pts[:, 1],
                                  boundary_pts[:, 0],
                                  'ro',
                                  markeredgecolor='none',
                                  markersize=4)
Example #27
    # Frame-by-frame snake fit. This is fairly slow; takes ~80 sec on my laptop.
    tsta = time.perf_counter()
    print('Tracking cell %i across %i frames...' % (selected_label, len(frames)))
    for frame_num, frame in enumerate(frames):

        # print progress
        if frame_num%10 == 0:
            print('Frame %i of %i' % (frame_num, len(frames)))
            sys.stdout.flush()

        # compute distance transforms and fit snake
        edge_dist, corner_dist = frame_to_distance_images(frame)
        boundary_pts = fit_snake(boundary_pts, edge_dist, alpha=alpha, beta=beta, nits=40)

        # check if the cell went off the edge (i.e., out of view)
        single_cell_mask = measure.grid_points_in_poly(frame.shape, boundary_pts)
        if np.any(np.logical_and(~mask, single_cell_mask)):
            print('cell went off edge on frame %i' % frame_num)
            all_boundary_pts = np.delete(all_boundary_pts, np.s_[frame_num:], 0)
            break

        # TODO: resample the points along the curve to maintain constant spacing?
        # store results in big array
        all_boundary_pts[frame_num,:,:] = boundary_pts

    print('elapsed time:', time.perf_counter() - tsta)

    ### write boundary points to file
    if np_fn is None:
        outdir = '.'
        out_fn = 'cell%i_boundary_points.npy' % selected_label
Example #28
def get_morph_single(im,exp,morph,hwidth=10,hwidthS=3,noerrors=False):
    """ Measure morphological parameters of a source in a small image
    """
    
    # Get rectangular footprint
    #boundary = get_rec_footprint(im,[morph['y0'],morph['x0']])

    # Get small sub-image just to compute the flux center
    #  just with the pixels near the peak
    if noerrors is False:
        subexpS = get_subim(im,morph['x0'],morph['y0'],hwidthS,mask=exp.mask,noise=exp.noise)
    else:
        subexpS = get_subim(im,morph['x0'],morph['y0'],hwidthS,mask=exp.mask)

    # Getting flux center from small subimage
    if noerrors is False:
        xcenS, ycenS, xcenerrS, ycenerrS = get_fluxcenter(subexpS.flux,mask=subexpS.mask,noise=subexpS.noise)
    else:
        xcenS, ycenS, xcenerrS, ycenerrS = get_fluxcenter(subexpS.flux,mask=subexpS.mask)
    morph['x'] = xcenS + morph['x0'] - hwidthS
    morph['y'] = ycenS + morph['y0'] - hwidthS
    morph['xerr'] = xcenerrS
    morph['yerr'] = ycenerrS
    
    # Getting maximum from small subimage
    maxim = np.max(subexpS.flux*(1-subexpS.mask))
    morph['max'] = maxim
    
    # Get larger sub-image
    # If bbox exists then use that to get the width/subimage
    if 'bbox_x0' in morph.dtype.names:
        hwidth1 = np.max([  np.abs([morph['bbox_x0'],morph['bbox_x1']]-morph['x0']),
                            np.abs([morph['bbox_y0'],morph['bbox_y1']]-morph['y0']) ])
        hwidth1 = hwidth1 if (hwidth1>hwidth) else 5
    # Using preset subimage size
    else:
        hwidth1 = hwidth
    # Get the large subexposure
    if noerrors is False:
        subexp = get_subim(im,morph['x0'],morph['y0'],hwidth1,mask=exp.mask,noise=exp.noise)
    else:
        subexp = get_subim(im,morph['x0'],morph['y0'],hwidth1,mask=exp.mask)
        
    # Get the mask of the "good" pixels
    goodmask = (subexp.flux > 0.0) & (subexp.mask == False)

    # Make xcen/ycen for the larger subimage
    xcen = xcenS + (hwidth1-hwidthS)
    ycen = ycenS + (hwidth1-hwidthS)
    
    # Construct X- and Y-arrays
    #xx = np.zeros([2*hwidth+1,2*hwidth+1],'f')
    #for j in xrange(2*hwidth+1):
    #    xx[:,j] = j
    #yy = np.zeros([2*hwidth+1,2*hwidth+1],'f')
    #for j in xrange(2*hwidth+1):
    #    yy[j,:] = j   
    yy, xx = np.indices([2*hwidth1+1,2*hwidth1+1],'f')
    #print "need to test this np.indices code"
    
    # Calculate the flux in the subimage
    morph['flux'] = np.sum(subexp.flux * goodmask)

    # Getting the contour at 1/2 maximum flux
    #  add a perimeter of zeros to ensure that
    #  we get back a contour even if it hits the edge
    ny1, nx1 = subexp.flux.shape
    tflux = np.zeros([ny1+2,nx1+2],'f')
    tflux[1:ny1+1,1:nx1+1] = subexp.flux
    contour = get_contour(tflux,maxim*0.5,[ycen+1,xcen+1])
    # Offset the coordinates to the original image
    if len(contour) > 0:
        contour -= 1
            
    # Good contour, make the measurements
    if len(contour) > 0:
        # Getting the path
        xpath = contour[:,1]
        ypath = contour[:,0]
        xmnpath = np.mean(xpath)
        ymnpath = np.mean(ypath)
    
        # Calculating the FWHM
        dist = np.sqrt((xpath-xmnpath)**2.0 + (ypath-ymnpath)**2.0)  
        fwhm1 = 2.0 * np.mean(dist)
        morph['contour_fwhm'] = fwhm1
            
        # Measuring "ellipticity", (1-a/b)
        elip = 2*np.std(dist-fwhm1)/fwhm1
        morph['contour_elip'] = elip
    
        # Calculate the position angle
        #  angle for point where dist is maximum
        #  angle from positive x-axis
        maxind = np.where(dist == dist.max())[0]
        theta = np.rad2deg( np.arctan2(ypath[maxind]-ymnpath,xpath[maxind]-xmnpath) )
        theta = theta[0] % 360
        # want values between -90 and +90
        if theta > 180:
            theta -= 180
        if theta > 90:
            theta -= 180
        morph['contour_theta'] = theta
            
    else:
        morph['contour_fwhm'] = np.nan
        morph['contour_elip'] = np.nan
        morph['contour_theta'] = np.nan
        # THIS CAN HAPPEN IF THE CONTOUR HITS THE EDGE

    # Computing the "round" factor
    # round = difference of the heights of the two 1D Gaussians
    #         -------------------------------------------------
    #                 average of the two 1D Gaussians
    #
    # Where the 1D Gaussians are of the marginal sums, i.e. sum
    # along either the x or y dimensions
    # round~0 is good
    # round<0 object elongated in x-direction
    # round>0 object elongated in y-direction
    htx = np.max(np.sum(subexp.flux*(1-subexp.mask),axis=0))
    hty = np.max(np.sum(subexp.flux*(1-subexp.mask),axis=1))
    morph['round'] = (hty-htx)/np.mean([htx,hty])

    # 2D Gaussian fitting???

    # Create a "window" mask, only including pixels within 1/2 maximum contour
    if len(contour) > 0:
        contmask = measure.grid_points_in_poly(subexp.flux.shape,contour)
        cgoodmask = goodmask*contmask
    else:
        cgoodmask = np.copy(goodmask)
            
    # Second MOMENTS of the windowed image
    #  The factor of 3.33 corrects for the fact that we are missing
    #  a decent chunk of the 2D Gaussian.  I derived this empirically
    #  using simulations.
    posflux = np.sum( subexp.flux*cgoodmask )
    ixx = np.sum( subexp.flux*cgoodmask * (xx-xcen)**2 ) / posflux * 3.33
    iyy = np.sum( subexp.flux*cgoodmask * (yy-ycen)**2 ) / posflux * 3.33
    ixy = np.sum( subexp.flux*cgoodmask * (xx-xcen) * (yy-ycen) ) / posflux * 3.33
    morph['ixx'] = ixx
    morph['iyy'] = iyy
    morph['ixy'] = ixy
        
    # Computing semi-major, semi-minor and theta from the moments
    #  The SExtractor manual has the solutions to this on pg.31
    # semi-major axis = (ixx+iyy)/2 + sqrt( ((ixx-iyy)/2)^2 + ixy^2 ) = sigx^2
    # semi-minor axis = (ixx+iyy)/2 - sqrt( ((ixx-iyy)/2)^2 + ixy^2 ) = sigy^2
    # tan(2*theta) = 2*ixy/(ixx-iyy)
    # two solutions between -90 and +90 with opposite signs
    # by definition, theta is the position angle for which ixx_theta is maximized
    # (in rotated frame), counter-clockwise from positive x-axis.
    # so theta is the solution the tan equation that has the same sign as ixy
    siga = np.sqrt( (ixx+iyy)/2 + np.sqrt( ((ixx-iyy)/2)**2 + ixy**2 ) )
    sigb = np.sqrt( (ixx+iyy)/2 - np.sqrt( ((ixx-iyy)/2)**2 + ixy**2 ) ) \
           if np.sqrt( ((ixx-iyy)/2)**2 + ixy**2 ) <= (ixx+iyy)/2 else 0.1
    if ixx != iyy:
        theta = np.rad2deg( np.arctan2(2*ixy,ixx-iyy) / 2 )
        theta = np.abs(theta)*np.sign(ixy)
    else:
        theta = 0.0
    morph['siga'] = siga
    morph['sigb'] = sigb
    morph['theta'] = theta
    # THETA is more accurate than CONTOUR_THETA
    
    # Construct Gaussian model for Gaussian weighted photometry
    #  https://en.wikipedia.org/wiki/Gaussian_function
    #  theta in the wikipedia equation is CLOCKWISE so add - before theta
    thetarad = np.deg2rad(theta)
    a = ((np.cos(-thetarad)**2) / (2*siga**2)) + ((np.sin(-thetarad)**2) / (2*sigb**2))
    b = -((np.sin(-2*thetarad)) / (4*siga**2)) + ((np.sin(-2*thetarad)) / (4*sigb**2))
    c = ((np.sin(-thetarad)**2) / (2*siga**2)) + ((np.cos(-thetarad)**2) / (2*sigb**2))
    g = np.exp(-(a*(xx-xcen)**2 + 2*b*(xx-xcen)*(yy-ycen) + c*(yy-ycen)**2))
    g /= np.sum(g)

    # Gaussian weighted flux
    #  from Valdes (2007)
    gausswtflux = np.sum( g*subexp.flux*cgoodmask/subexp.noise**2 )
    gausswtflux /= np.sum( g**2 * cgoodmask/subexp.noise**2 )
    morph['gausswtflux'] = gausswtflux
    
    # Compute gaussian scaling factor using a 
    # weighted mean of the fraction flux/gaussian
    #  weight by (S/N)^2
    wt = np.zeros(subexp.flux.shape,'f')
    wt[:,:] = (subexp.flux/subexp.noise)**2
    # only use values where the gaussian is large enough
    # and the image isn't masked out
    gmask = (g > np.max(g)*0.05) & (cgoodmask == True)
    wt *= gmask
    wt /= np.sum(wt)
    # set values of the gaussian in the denominator
    #   to 1.0 where they are too low
    gdenom = np.copy(g)
    gdenom[gmask == False] = 1
    gauss_scale = np.sum( subexp.flux*wt / gdenom )
    morph['gaussflux'] = gauss_scale
        
    # compute chi-squared
    chisq = np.sum( ((subexp.flux-g*gausswtflux)*gmask / subexp.noise)**2 ) / np.sum(gmask)
    morph['chisq'] = chisq
        
    return morph
Example #29
def get_pixels_in_contour(image_shape, contour):
  '''Uses grid_points_in_poly to get the boolean for pixels inside the contour
      Returns an nxm array of True and False'''

  return measure.grid_points_in_poly(image_shape, contour)
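A usage sketch mirroring the triangle test earlier on this page (assuming the function above, numpy, and skimage.measure are imported):

import numpy as np

triangle = np.array([[0, 0], [5, 0], [5, 5]])          # (row, col) vertices
inside = get_pixels_in_contour((5, 5), triangle)
print(inside.astype(int))                              # lower-triangular pattern of ones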
Example #30
    inpath = args.inputpath

if args.roi:
    myroifilename = args.roi

if args.outfile:
    outfname = args.outfile    

out_file = open(outfname,"w")
out_file.write("#layer \t max \t meaninroi \n")
    
freader = FileReader(inpath, False, args.verbose)
data, unusedROI = freader.read(True)

roireader = roiFileHandler()
rois, roisSetted = roireader.read(myroifilename)

nFette = len(data)
if nFette != len(rois):
    print("error: len rois = ",len(rois)," but len dicom=",nFette)

    
for layer in range(0, nFette):
    thisroi = grid_points_in_poly(data[layer].shape, rois[layer]['points'])
    masked = data[layer]*thisroi
    maximum = data[layer].max()
    meaninroi = masked.mean()
    out_file.write(str(layer)+"\t"+str(maximum)+"\t"+str(meaninroi)+"\n")

out_file.close()