Example #1
    def scale_keypoint_maps(self, keypoint_maps, image_size):
        scale_keypoint0_maps = []
        scale_keypoint1_maps = []
        scale_keypoint2_maps = []
        scale_keypoint3_maps = []

        for i in self.scale:
            s = 2 ** i
            xkp0 = pyramid.pyr_down(keypoint_maps[0], downscale=s)
            xkp0 = numpy.expand_dims(xkp0, axis=0)
            
            xkp1 = pyramid.pyr_down(keypoint_maps[1], downscale=s)
            xkp1 = numpy.expand_dims(xkp1, axis=0)

            xkp2 = pyramid.pyr_down(keypoint_maps[2], downscale=s)
            xkp2 = numpy.expand_dims(xkp2, axis=0)

            xkp3 = pyramid.pyr_down(keypoint_maps[3], downscale=s)
            xkp3 = numpy.expand_dims(xkp3, axis=0)
            
            scale_keypoint0_maps.append(xkp0.astype(numpy.float32))
            scale_keypoint1_maps.append(xkp1.astype(numpy.float32))
            scale_keypoint2_maps.append(xkp2.astype(numpy.float32))
            scale_keypoint3_maps.append(xkp3.astype(numpy.float32))

        return scale_keypoint0_maps, scale_keypoint1_maps, scale_keypoint2_maps, scale_keypoint3_maps
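
A minimal sketch (not from the original source) of what one loop iteration above computes for a single keypoint map. It only assumes that pyramid.pyr_down(image, downscale=s) shrinks each axis by roughly a factor of s, as in the examples on this page; the pyramid module itself is not imported here to avoid guessing its package path.

# Illustrative only: 'pyramid' is assumed to be the same module imported by the
# excerpted code above; the dummy keypoint map and scale level are placeholders.
import numpy

keypoint_map = numpy.random.rand(256, 256).astype(numpy.float32)  # dummy keypoint map
s = 2 ** 1                                                        # scale level i = 1
level = pyramid.pyr_down(keypoint_map, downscale=s)               # roughly 128 x 128
level = numpy.expand_dims(level, axis=0)                          # add a leading axis -> (1, H, W)
print(level.shape, level.dtype)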
Example #2
    def preprocess_image(self, i):
        downscale = self.downscale
        lab_frame_image = freeimage.read(self.timepoint_list[i].image_path('bf'))
        lab_frame_image = lab_frame_image.astype(numpy.float32)
        height, width = lab_frame_image.shape[:2]

        try:
            metadata = self.timepoint_list[i].position.experiment.metadata
            optocoupler = metadata['optocoupler']
        except KeyError:
            optocoupler = 1
        mode = process_images.get_image_mode(lab_frame_image, optocoupler=optocoupler)

        # Downsample the image
        if downscale > 0 and downscale != 1:
            shrink_image = pyramid.pyr_down(lab_frame_image, downscale=downscale)
        else:
            shrink_image = lab_frame_image

        shrink_image = shrink_image.astype(numpy.float32)

        # Scale the image pixel values into a trainable range:
        # map image intensities in the range (100, 2*mode) to the range (0, 2)
        bf = colorize.scale(shrink_image, min=100, max=2*mode, output_max=2)
        # now shift range to (-1, 1)
        bf -= 1
        return bf
Example #3
def get_cost_image(image, optocoupler, image_gamma, center_tck, width_tck,
                   downscale, gradient_sigma, sigmoid_midpoint,
                   sigmoid_growth_rate, edge_weight):
    """Trace the edges of a worm and return a new center_tck and width_tck.

    Parameters:
        image: ndarray of the brightfield image
        optocoupler: optocoupler magnification (to correctly calculate the image
            vignette)
        center_tck: spline defining the pose of the worm.
        width_tck: spline defining the distance from centerline to worm edges.
        image_gamma: gamma value for intensity transform to highlight worm edges
        downscale: factor by which to downsample the image
        gradient_sigma: sigma for gaussian gradient to find worm edges
        sigmoid_midpoint: midpoint of edge_highlighting sigmoid function for
            gradient values, expressed as a percentile of the gradient value
            over the whole image.
        sigmoid_growth_rate: steepness of the sigmoid function.
        edge_weight: how much to weight image edge strength vs. distance from
            the average widths in the cost function.

    Returns: image defining the cost function for edge tracing
    """
    # normalize, warp, and downsample image
    image = process_images.pin_image_mode(image, optocoupler=optocoupler)
    image = colorize.scale(image,
                           min=600,
                           max=26000,
                           gamma=image_gamma,
                           output_max=1)
    warped = worm_spline.to_worm_frame(image,
                                       center_tck,
                                       width_tck,
                                       width_margin=40)
    warped = pyramid.pyr_down(warped, downscale=downscale)

    # calculate the edge costs
    gradient = ndimage.gaussian_gradient_magnitude(warped, gradient_sigma)
    gradient = sigmoid(gradient, numpy.percentile(gradient, sigmoid_midpoint),
                       sigmoid_growth_rate)
    gradient = gradient.max() - abs(gradient)

    # penalize finding edges away from the width along the worm
    widths = (interpolate.spline_interpolate(width_tck,
                                             warped.shape[0])) / downscale
    centerline_index = (warped.shape[1] - 1) / 2
    distance_from_centerline = abs(
        numpy.arange(0, warped.shape[1]) - centerline_index)
    distance_from_average = abs(
        numpy.subtract.outer(widths, distance_from_centerline))
    return edge_weight * gradient + distance_from_average
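
A hedged usage sketch (not part of the original source): the file path and parameter values below are illustrative placeholders, and center_tck/width_tck are assumed to come from a prior pose annotation.

import freeimage  # assumed available, as in the other examples on this page

image = freeimage.read('worm_bf.png')        # hypothetical brightfield image path
# center_tck and width_tck: assumed to come from an existing pose annotation
cost_image = get_cost_image(image,
                            optocoupler=1,
                            image_gamma=0.72,        # illustrative values from here on
                            center_tck=center_tck,
                            width_tck=width_tck,
                            downscale=2,
                            gradient_sigma=1,
                            sigmoid_midpoint=75,
                            sigmoid_growth_rate=2,
                            edge_weight=5)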
Example #4
    def forward(self, Keypoint0, Output):
            
        K0loss = 0
        for i in self.scale:
            s = 2 ** i
            N, C, H, W = Keypoint0[i].size()
            scaled_mask = pyramid.pyr_down(self.mask, downscale=s)
            m = numpy.array([[scaled_mask] * C] * N)  # replicate the mask to the keypoint shape (N, C, H, W)
            tensor_mask = torch.tensor(m)  # convert the mask into a tensor for boolean indexing
            l = self.reglLoss(Output[('Keypoint0', i)][tensor_mask > 0],
                              Keypoint0[i][tensor_mask > 0]) / (N * C * H * W)
            print('Loss: {}, scale: {}'.format(l, i))
            K0loss += l

        return K0loss
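
For reference, a minimal sketch (not from the original source) of the masked-loss indexing used above, assuming reglLoss behaves like an L1-style regression loss; shapes and values are illustrative.

import torch

pred = torch.rand(2, 3, 8, 8)                # stands in for Output[('Keypoint0', i)]
target = torch.rand(2, 3, 8, 8)              # stands in for Keypoint0[i]
mask = torch.rand(2, 3, 8, 8) > 0.5          # stands in for tensor_mask
loss_fn = torch.nn.L1Loss(reduction='sum')   # assumption: reglLoss is an L1-style loss
loss = loss_fn(pred[mask], target[mask]) / pred.numel()
print(loss.item())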
Example #5
def get_well_mask(image):
    small_image = pyramid.pyr_down(image, 4)
    smoothed, gradient, sobel = canny.prepare_canny(small_image, 2)
    local_maxima = canny.canny_local_maxima(gradient, sobel)
    # well outline has ~6000 px full-size = 1500 px at 4x downsampled
    # So find the intensity value of the 2000th-brightest pixel, via percentile:
    highp = 100 * (1-2000/local_maxima.sum())
    highp = max(highp, 90)
    low_edge, high_edge = numpy.percentile(gradient[local_maxima], [90, highp])
    # Do canny edge-finding starting with gradient pixels as bright or brighter
    # than the 2000th-brightest pixel, and spread out to the 90th percentile
    # intensity:
    well_edge = canny.canny_hysteresis(local_maxima, gradient, low_edge, high_edge)
    # connect nearby edges and remove small unconnected bits
    well_edge = ndimage.binary_closing(well_edge, structure=S)
    well_edge = mask.remove_small_area_objects(well_edge, 300, structure=S)
    # Get map of distances and directions to nearest edge to use for contour-fitting
    distances, nearest_edge = active_contour.edge_direction(well_edge)
    # initial curve is the whole image less one pixel on the outside
    initial = numpy.ones(well_edge.shape, dtype=bool)
    initial[:,[0,-1]] = 0
    initial[[0,-1],:] = 0
    # Now evolve the curve inward until it contacts the canny well edges.
    gac = active_contour.GAC(initial, nearest_edge, advection_mask=(distances < 10), balloon_direction=-1)
    stopper = active_contour.StoppingCondition(gac, max_iterations=200)
    while stopper.should_continue():
        # otherwise evolve the curve by shrinking, advecting toward edges, and smoothing
        gac.balloon_force(iters=3)
        gac.advect(iters=2)
        gac.smooth()
    gac.smooth(depth=3)
    # now erode everywhere the contour edge is right on a canny edge:
    gac.move_to_outside(well_edge[tuple(gac.inside_border_indices.T)])
    gac.smooth()
    well_mask = gac.mask
    if well_mask.sum() / well_mask.size < 0.25:
        # if the well mask is too small, something went very wrong
        well_mask = None
    return small_image, well_mask
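
A brief usage sketch (not from the original source): the image path is a hypothetical placeholder, and freeimage.read is assumed to return a 2-D array as in the other examples on this page.

import freeimage

image = freeimage.read('well_A01.png')       # hypothetical well image path
small_image, well_mask = get_well_mask(image)
if well_mask is None:
    print('Well mask rejected: it covered less than 25% of the frame')
else:
    print('Well pixels at 4x downsampling:', well_mask.sum())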
Example #6
def preprocess_image(timepoint, downscale):
    img_path = timepoint.image_path('bf')
    lab_frame_image = freeimage.read(img_path)
    lab_frame_image = lab_frame_image.astype(numpy.float32)
    height, width = lab_frame_image.shape[:2]
    objective, optocoupler, magnification, temp = get_metadata(timepoint)

    mode = process_images.get_image_mode(lab_frame_image,
                                         optocoupler=optocoupler)
    # Downsample the image
    if downscale > 0 and downscale != 1:
        shrink_image = pyramid.pyr_down(lab_frame_image, downscale=downscale)
    else:
        shrink_image = lab_frame_image

    shrink_image = shrink_image.astype(numpy.float32)

    # Scale the image pixel values into a trainable range:
    # map image intensities in the range (100, 2*mode) to the range (0, 2)
    bf = colorize.scale(shrink_image, min=100, max=2 * mode, output_max=2)
    # now shift range to (-1, 1)
    bf -= 1
    return bf
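
Usage sketch (an assumption, not from the original source): timepoint is assumed to be an experiment-framework object providing image_path('bf') and the metadata consumed by get_metadata, as used in the function above; the downscale value is illustrative.

bf = preprocess_image(timepoint, downscale=2)   # timepoint: assumed experiment object
print(bf.shape, bf.dtype)                       # downsampled brightfield image, roughly in (-1, 1)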
Example #7
def width_finding(image,
                  ggm_sigma=1.03907545,
                  sig_per=61.3435119,
                  sig_growth_rate=2.42541565,
                  alpha=1.00167702,
                  mcp_alpha=1.02251872):
    """From the image, find the widths
    NOTE: assume the image is the warped image
    with the width of the image = int(center_tck[0][-1]//5)
    This is the same width as the spline view of the pose annotator

    Parameters:
        image: image of a straightened worm to get the widths from
        width_tck: tcks that give the widths for the worm
        params: list of the parameter values in the order [ggm sigma, sigmoid percentile, 
                sigmoid growth rate, alpha for the penalties]
    """
    # Downsample the image; the centerline is half of the width
    image_down = pyramid.pyr_down(image, downscale=2)

    #get the gradient
    gradient = ndimage.filters.gaussian_gradient_magnitude(
        image_down, ggm_sigma)
    y_grad = ndimage.filters.gaussian_gradient_magnitude(image_down, ggm_sigma)
    top_ten = np.percentile(gradient, sig_per)
    gradient = sigmoid(gradient, gradient.min(), top_ten, gradient.max(),
                       sig_growth_rate)
    sig = gradient
    gradient = gradient.max() - abs(gradient)

    # Get the widths from avg_width_tck and build the distance matrix from the
    # widths to the centerline of the worm.
    # NOTE: widths are the average widths from the PCA; halve them to account
    # for the downscale factor.
    widths = interpolate.spline_interpolate(avg_width_tck, image_down.shape[0])
    widths = widths / 2

    # Calculate the penalty: positions farther from the average widths cost
    # more, in proportion to their absolute distance from those widths.
    distance_matrix = np.flip(
        abs(np.subtract.outer(widths, np.arange(0, image_down.shape[1]))), 1)
    penalty = alpha * distance_matrix
    new_costs = gradient + penalty
    # Set start and end points for the traceback
    start = (0, int((image_down.shape[1] - 1) - widths[0]))
    end = (len(widths) - 1, int((image_down.shape[1] - 1) - widths[-1]))

    # Trace the minimum-cost path from start to end
    offsets = [(1, -1), (1, 0), (1, 1)]
    mcp = Smooth_MCP(new_costs, mcp_alpha, offsets=offsets)
    costs, _ = mcp.find_costs([start], [end])
    route = mcp.traceback(end)
    # Build an image visualizing the traced widths
    new_widths_image = make_traceback_image(route, image_down.shape)
    return (image_down, route, new_widths_image, new_costs, y_grad, sig)
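
A hedged call sketch (not from the original source): warped is assumed to be a worm-frame image as described in the docstring, and avg_width_tck, Smooth_MCP, sigmoid, and make_traceback_image are assumed to be defined at module level in the original source.

image_down, route, widths_image, costs, y_grad, sig = width_finding(warped)
print(len(route), 'traceback steps; cost image shape:', costs.shape)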
Example #8
def find_edges(image,
               avg_width_tck,
               ggm_sigma=1,
               sig_per=61,
               sig_growth_rate=2,
               alpha=1,
               mcp_alpha=1):
    """Find the edges of one side of the worm and return the x,y positions of the new widths
    NOTE: This function assumes that the image is only half of the worm (ie. from the centerline
    to the edges of the worm)

    Parameters:
        image: ndarray of the straightened worm image (typically either top or bottom half)
        avg_width_tck: width spline defining the average distance from the centerline
            to the worm edges (taken from the earlier PCA analysis)
        ggm_sigma, sig_per, sig_growth_rate, alpha, mcp_alpha: hyperparameters for 
            the edge-detection scheme
    
    Returns:
        route: tuple of x,y positions of the identified edges
    """

    #down sample the image
    image_down = pyramid.pyr_down(image, downscale=2)

    #get the gradient
    gradient = ndimage.filters.gaussian_gradient_magnitude(
        image_down, ggm_sigma)
    top_ten = np.percentile(gradient, sig_per)
    gradient = sigmoid(gradient, gradient.min(), top_ten, gradient.max(),
                       sig_growth_rate)
    gradient = gradient.max() - abs(gradient)

    # Penalize finding edges near the centerline or outside of avg_width_tck,
    # since the typical worm is fatter than the centerline but not huge.
    pen_widths = interpolate.spline_interpolate(avg_width_tck,
                                                image_down.shape[0])
    distance_matrix = abs(
        np.subtract.outer(pen_widths, np.arange(0, image_down.shape[1])))
    penalty = alpha * distance_matrix
    new_costs = gradient + penalty

    # Set start and end points for the traceback
    start = (0, int(pen_widths[0]))
    end = (len(pen_widths) - 1, int(pen_widths[-1]))

    #begin edge detection
    offsets = [(1, -1), (1, 0), (1, 1)]
    mcp = Smooth_MCP(new_costs, mcp_alpha, offsets=offsets)
    mcp.find_costs([start], [end])
    route = mcp.traceback(end)

    return image_down, route
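
A hedged usage sketch (not from the original source): half_image is assumed to be one half of a straightened worm image (centerline to edge), and avg_width_tck an average-width spline such as the one from the PCA analysis mentioned in the docstring; the hyperparameter values simply restate the defaults above.

image_down, route = find_edges(half_image, avg_width_tck, ggm_sigma=1, sig_per=61,
                               sig_growth_rate=2, alpha=1, mcp_alpha=1)
# route holds (row, column) indices of the traced edge in the downsampled image
edge_columns = [col for row, col in route]
print(len(edge_columns), 'edge points found')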