import torch
from matplotlib import cm

def jet_heatmap(heatmap, norm=False):
    if norm:
        # Min-max normalize to [0, 1] so the colormap covers the full range
        heatmap = (heatmap - torch.min(heatmap)) / (torch.max(heatmap) - torch.min(heatmap))
    # cm.jet returns an (H, W, 4) RGBA float array
    heatmap = cm.jet(heatmap.squeeze().data.cpu().numpy())
    heatmap = torch.from_numpy(heatmap)
    # Rearrange (H, W, C) -> (C, H, W), then add a batch dimension -> (1, C, H, W)
    heatmap = heatmap.transpose(1, 2).transpose(0, 1).unsqueeze(0)
    return heatmap
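A minimal usage sketch (the 1x1x64x64 input shape is just an illustrative assumption; any single-channel tensor works after squeeze):

import torch
raw = torch.rand(1, 1, 64, 64)         # hypothetical activation map
colored = jet_heatmap(raw, norm=True)  # -> (1, 4, 64, 64), RGBA channels first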
Example No. 2
def transparent_jet(x):
    '''
    Maps a scalar x in [0, 1] to a jet color whose opacity also scales with x.
    '''
    cmap = list(cm.jet(x))  # (r, g, b, a) values from the jet colormap
    cmap[3] = x  # override the alpha channel so low values fade out
    return tuple_to_rgba(cmap)  # tuple_to_rgba is a helper defined elsewhere
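A hedged usage sketch, assuming cm comes from matplotlib and that the module's tuple_to_rgba helper returns a matplotlib-compatible RGBA value:

from matplotlib import cm
# opacity tracks the mapped value, so small values fade toward transparent
colors = [transparent_jet(v) for v in (0.1, 0.5, 0.9)]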
Example No. 3
    def post(self, request):
        print(request.data['image'])
        img = tf.image.decode_png(request.data['image'].read(), channels=3)
        new_model = tf.keras.models.load_model('./../mymodel.h5', custom_objects={'KerasLayer': hub.KerasLayer})
        img_resized = tf.image.resize(img, [224, 224])
        # Normalize pixel values to [0, 1] and add a batch dimension
        gray_image_3ch = np.array([img_resized / 255.0])
        print(img_resized.shape)
        # Weights of the final dense layer; each column scores one class
        W = new_model.get_layer('predictions').get_weights()[0]

        # Outputs of the last conv layer and of the following dense layer
        conv_output = new_model.layers[-3].output
        fcc_output = new_model.layers[-2].output

        intermediate_model1 = tf.keras.Model(inputs=new_model.input, outputs=fcc_output)
        intermediate_prediction1 = intermediate_model1.predict(gray_image_3ch)

        intermediate_model2 = tf.keras.models.Model(inputs=new_model.input, outputs=conv_output)
        intermediate_prediction2 = intermediate_model2.predict(gray_image_3ch)

        predictions = new_model.predict(gray_image_3ch)
        winner = np.argmax(predictions, axis=-1)
        # Keep only the weights of the winning class and rescale them
        W = np.transpose(W[:, winner])
        updated_weights = ((W * intermediate_prediction1) + 10) / 10

        print(predictions)
        # Weight each convolutional feature map by the class weights
        mask = np.multiply(intermediate_prediction2, updated_weights)
        mask = np.mean(mask[0, :, :, :], axis=-1)
        mask = cv2.resize(mask, (224, 224))

        # Scale the mask into the uint8 range before applying the colormap
        m = np.uint8(mask * 5000)
        m = cm.jet(m) * 255  # RGBA floats in [0, 255]
        m = np.uint8(m)
        m = cv2.cvtColor(m, cv2.COLOR_RGBA2RGB)
        img_resized = np.uint8(img_resized)
        print(type(img_resized))
        print(type(m))
        fin = cv2.addWeighted(m, 0.5, img_resized, 0.5, 0)
        image = Image.fromarray(fin)

        response = HttpResponse(content_type="image/png")
        image.save(response, "PNG")
        
        return response
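A minimal wiring sketch for the view above, assuming the post method belongs to a DRF APIView subclass here called CamView (a hypothetical name) living in views.py:

# urls.py (hypothetical module layout)
from django.urls import path
from .views import CamView

urlpatterns = [
    path('cam/', CamView.as_view(), name='cam'),
]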
Example No. 4
def post_treatment(image, rel_goal, arrowpoint, min_val, max_val):
    # Scale the raw depth image (assumed range 0-6 m) into the uint8 range
    image = ((image - image.min()) * (1 / (6 - 0) * 255)).astype('uint8')
    image = np.uint8(cm.jet(image) * 255)
    image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
    image = cv2.resize(image, (640, 360))
    # Arrow from the image center toward the goal
    image = cv2.arrowedLine(image, (320, 320), arrowpoint, (0, 0, 255), 2,
                            cv2.LINE_AA)
    text = "%.3f" % min_val + "m." + "                        " + "%.3f" % max_val + "m."
    # Euclidean distance to the goal, drawn at the top of the frame
    image = cv2.putText(
        image, "%.3f" % np.sqrt(rel_goal[0]**2 + rel_goal[1]**2) + 'm.',
        (250, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    image = cv2.putText(image, text, (0, 320), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255))
    return image
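A usage sketch with made-up inputs (a synthetic 0-6 m depth map; rel_goal as (x, y) offsets in meters; the display bounds are assumptions):

import numpy as np
depth_map = np.random.uniform(0.0, 6.0, (90, 160))  # hypothetical raw depth
frame = post_treatment(depth_map, rel_goal=(1.0, 2.0), arrowpoint=(320, 280),
                       min_val=0.0, max_val=6.0)
cv2.imshow('post_treatment', frame); cv2.waitKey(0)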
Example No. 5
def display_trajectory(image, rel_goal, rel_orientation):

    display_angle = rel_orientation + (np.pi / 2)
    arrowpoint = (int(320 + 40 * np.cos(display_angle)),
                  int(320 - 40 * np.sin(display_angle)))

    # image = ((image - image.min()) * (1 / (6 - 0) * 255)).astype('uint8') # to be used with raw dataset
    image = image * 6.0  # to be used with the normalized dataset (adjust scale)
    image = np.uint8(cm.jet(image) * 255.0)
    image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
    image = cv2.resize(image, (640, 360))
    image = cv2.arrowedLine(image, (320, 320), arrowpoint, (0, 0, 255), 2,
                            cv2.LINE_AA)
    image = cv2.putText(
        image, "%.3f" % (np.sqrt(rel_goal[0]**2 + rel_goal[1]**2)) + 'm.',
        (250, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    image = cv2.putText(image,
                        str(rel_orientation) + " rad", (0, 320),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    return image
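A matching sketch for the normalized-dataset path, assuming the depth values are already scaled so that multiplying by 6.0 lands in [0, 1] for the colormap:

import numpy as np
depth_norm = np.random.uniform(0.0, 1.0 / 6.0, (90, 160))  # hypothetical input
frame = display_trajectory(depth_norm, rel_goal=(1.0, 2.0), rel_orientation=0.3)
cv2.imshow('trajectory', frame); cv2.waitKey(0)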
Example No. 6
# Assumes new_model, W, fcc_output, conv_output, gray_image_3ch, and
# img_resized are already defined as in Example No. 3 above.
intermediate_model1 = tf.keras.Model(inputs=new_model.input,
                                     outputs=fcc_output)
intermediate_prediction1 = intermediate_model1.predict(gray_image_3ch)

intermediate_model2 = tf.keras.models.Model(inputs=new_model.input,
                                            outputs=conv_output)
intermediate_prediction2 = intermediate_model2.predict(gray_image_3ch)

predictions = new_model.predict(gray_image_3ch)
winner = np.argmax(predictions, axis=-1)
W = np.transpose(W[:, winner])
updated_weights = ((W * intermediate_prediction1) + 10) / 10

print(predictions)
mask = np.multiply(intermediate_prediction2, updated_weights)

mask = np.mean(mask[0, :, :, :], axis=-1)

mask = cv2.resize(mask, (224, 224))

m = np.uint8(mask * 5000)

m = cm.jet(m) * 255  # RGBA floats in [0, 255]
m = np.uint8(m)
m = cv2.cvtColor(m, cv2.COLOR_RGBA2RGB)
fin = cv2.addWeighted(m, 0.5, img_resized, 0.5, 0)
plt.imshow(fin)
plt.show()
Example No. 7
# Assumes img_stokes is a per-pixel Stokes-vector image (e.g. produced
# with the polanalyser library, imported as pa)
img_S0, img_S1, img_S2 = cv2.split(img_stokes)

# Convert the Stokes vector to Intensity, DoLP and AoLP
img_intensity = pa.cvtStokesToIntensity(img_stokes)
img_DoLP = pa.cvtStokesToDoLP(img_stokes)
img_AoLP = pa.cvtStokesToAoLP(img_stokes)

plt.imshow(img_intensity)
plt.imshow(img_DoLP, cmap='jet', vmin=0, vmax=1)
plt.imshow(img_AoLP, cmap='hsv')

img_AoLP_cmapped = pa.applyColorToAoLP(img_AoLP, value=img_DoLP)

plt.imshow(img_AoLP_cmapped)

img_DoLP_col = cm.jet(img_DoLP)
plt.imshow(img_DoLP_col.astype(np.float64))

img_AoLP_col = cm.hsv(img_AoLP / np.pi)
plt.imshow(img_AoLP_col.astype(np.float64))

# OpenCV expects BGR order; the colormapped images are 4-channel RGBA
img_DoLP_col_inv = cv2.cvtColor(img_DoLP_col.astype(np.float32),
                                cv2.COLOR_RGBA2BGR)
img_AoLP_col_inv = cv2.cvtColor(img_AoLP_col.astype(np.float32),
                                cv2.COLOR_RGBA2BGR)

fln = os.path.basename(os.path.dirname(imfile))  # name of the containing directory
# cv2.imwrite(os.path.dirname(imfile) + '/HDR_Int_' + fln + ".png",
#             255 * img_intensity.astype(np.float64) / np.max(img_intensity))
cv2.imwrite(
Example No. 8
def tracking(rgb, depth, depth_scale, cam_matrix, ball_radius, verbosity=0, use_hsv=False):
    """
    computes the position of the center of the ball in the camera frame
    :param rgb: rgb image numpy array
    :param depth: depth image numpy array. Must be aligned with the rgb
    image; on RealSense, set align_depth:=true when starting the ROS node,
    or use rs.align with pyrealsense2
    :param depth_scale: conversion factor from depth value to distance
    :param cam_matrix: intrinsic matrix of the depth camera
    :param ball_radius: radius of ball
    :param verbosity: Determines how much to print, display and save. Higher
    verbosity is better for debugging but slower. If set to 0 will only
    print error messages. If set to 1 will also print pixel location of ball
    center. If set to 2 will also display RGB and depth images with ball
    detection. If set to 3 will also save RGB, depth, blurred images and mask.
    :param use_hsv: if true converts image to hsv and uses hsv bounds
    :return: location of center of ball in camera frame
    """

    # convert from RGB to BGR because cv2 expects BGR
    rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

    # Gaussian blur to reduce noise
    blurred = cv2.GaussianBlur(rgb, (11, 11), 0)

    if use_hsv:
        image = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # hsv_lower/hsv_upper (and rgb_lower/rgb_upper below) are assumed
        # module-level color-threshold bounds defined elsewhere
        lower = hsv_lower
        upper = hsv_upper
    else:
        image = blurred
        lower = rgb_lower
        upper = rgb_upper

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(image, lower, upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    center = None
    
    # only proceed if at least one contour was found
    if len(contours) > 0:
        # find the largest contour in the mask, then use it to compute the
        # minimum enclosing circle and centroid
        c = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        moments = cv2.moments(c)
        center = (int(moments["m10"] / moments["m00"]),
                  int(moments["m01"] / moments["m00"]))

        # get distance to center and find location of point in camera frame
        distance = depth[int(y), int(x)] * depth_scale            
        
        # depth sensor reads 0.0 when it can't compute depth for pixel
        if distance == 0.0:
            point = None
        else:
            point = pixel_to_point(x, y, distance, cam_matrix, ball_radius)

        if verbosity >= 1:
            print(int(x), int(y))
            print(center)
            if distance != 0.0:
                print('center of detected object is ', distance, 'meters away')

        # convert depth to rgb so it will display colors well need to convert
        # before drawing circle and center below
        depth = np.uint8(cm.jet(depth)*255)
        depth = cv2.cvtColor(depth, cv2.COLOR_RGBA2BGR)

        # only proceed if the radius meets a minimum size
        if radius > 5:
            # draw the circle and centroid on the frame, then update the list
            # of tracked points
            cv2.circle(rgb, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(rgb, center, 5, (0, 0, 255), -1)
            cv2.circle(depth, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(depth, center, 5, (0, 0, 255), -1)
    else:
        point = None
    # update the points queue (pts is an assumed module-level deque of
    # recent centers)
    pts.appendleft(center)

    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the line thickness (n is the assumed
        # module-level queue size) and draw the connecting lines
        thickness = int(np.sqrt(n / float(i + 1)) * 2.5)
        cv2.line(rgb, pts[i - 1], pts[i], (0, 0, 255), thickness)
        cv2.line(depth, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # convert depth to rgb so it will display colors well
    if len(contours) == 0:
        depth = np.uint8(cm.jet(depth)*255)
        depth = cv2.cvtColor(depth, cv2.COLOR_RGBA2BGR)

    if verbosity >= 2:
        cv2.imshow("RGB", rgb)
        cv2.imshow("Depth", depth)
        cv2.imshow("Mask", mask)
        cv2.waitKey(1)
    if verbosity >= 3:
        cv2.imwrite('rgb.png', rgb)
        cv2.imwrite('blurred.png', blurred)
        cv2.imwrite('image.png', image)
        cv2.imwrite('depth.png', depth)
    return point
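A hedged calling sketch with synthetic frames; a real caller would pull aligned rgb/depth frames from a RealSense pipeline, the intrinsics below are made-up numbers, and the module-level pts, n, color bounds, and pixel_to_point must already be defined:

import numpy as np
rgb = np.zeros((480, 640, 3), dtype=np.uint8)       # hypothetical color frame
depth = np.full((480, 640), 1000, dtype=np.uint16)  # hypothetical depth frame
K = np.array([[615.0, 0.0, 320.0],
              [0.0, 615.0, 240.0],
              [0.0, 0.0, 1.0]])                     # made-up intrinsics
point = tracking(rgb, depth, depth_scale=0.001, cam_matrix=K,
                 ball_radius=0.03, verbosity=0)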