Example #1
    def linkEbsdMap(self, ebsdMap, transformType="affine", order=2):
        """Calculates the transformation required to align EBSD dataset to DIC

        Args:
            ebsdMap (ebsd.Map): EBSD map object to link
            transformType (string, optional): affine, piecewiseAffine or polynomial
            order (int, optional): Order of polynomial transform to apply
        """

        self.ebsdMap = ebsdMap
        if transformType == "piecewiseAffine":
            self.ebsdTransform = tf.PiecewiseAffineTransform()
            self.ebsdTransformInv = self.ebsdTransform.inverse
        elif transformType == "polynomial":
            self.ebsdTransform = tf.PolynomialTransform()
            # You can't calculate the inverse of a polynomial transform
            # so have to estimate by swapping source and destination
            # homog points
            self.ebsdTransformInv = tf.PolynomialTransform()
            self.ebsdTransformInv.estimate(np.array(self.ebsdMap.homogPoints),
                                           np.array(self.homogPoints),
                                           order=order)
            # calculate transform from EBSD to DIC frame
            self.ebsdTransform.estimate(np.array(self.homogPoints),
                                        np.array(self.ebsdMap.homogPoints),
                                        order=order)
            return
        else:
            self.ebsdTransform = tf.AffineTransform()
            self.ebsdTransformInv = self.ebsdTransform.inverse

        # calculate transform from EBSD to DIC frame
        self.ebsdTransform.estimate(np.array(self.homogPoints),
                                    np.array(self.ebsdMap.homogPoints))
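A minimal sketch of the swapped-points trick in the polynomial branch above: skimage's PolynomialTransform has no analytic inverse, so the "inverse" is simply a second polynomial fitted with source and destination exchanged. The point sets below are made up for illustration.

# Sketch of the swapped-points inverse estimation (made-up homologous points).
import numpy as np
import skimage.transform as tf

dic_points = np.array([[10, 10], [200, 15], [15, 190],
                       [205, 200], [100, 100], [60, 160]], dtype=float)
ebsd_points = np.array([[12, 11], [198, 18], [18, 188],
                        [201, 203], [102, 101], [63, 158]], dtype=float)

forward = tf.PolynomialTransform()
forward.estimate(dic_points, ebsd_points, order=2)   # DIC -> EBSD

inverse = tf.PolynomialTransform()
inverse.estimate(ebsd_points, dic_points, order=2)   # EBSD -> DIC

# The round trip is only approximate, because the two fits are independent:
print(inverse(forward(dic_points)) - dic_points)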
Example #2
def transform_img(img, t2, t1=None):
    # Out-of-bounds sentinel: far above the image range, so warped border
    # pixels can be detected and set to NaN below.
    cval = 100 * np.nanmax(img)

    rez = [img]
    if t1 is not None:
        at = skt.AffineTransform(t1.T)
        rez += [skt.warp(img, at.inverse, mode='constant', cval=cval)]

    A, B = t2
    pt = skt.PolynomialTransform(np.array([A, B]))
    polyRez = skt.warp(rez[-1], pt, mode='constant', cval=cval)
    polyRez = polyRez[::-1].T
    polyRez[polyRez > cval / 10] = np.nan
    rez += [polyRez]

    if t1 is not None:
        rez[-2][rez[-2] > cval / 10] = np.nan


    return rez[1:]
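One detail worth spelling out in transform_img: skt.warp takes the *inverse* map, i.e. a mapping from output coordinates to input coordinates. That is why the affine is passed as at.inverse while the polynomial transform is passed directly (it was estimated in the destination-to-source direction). A small standalone sketch with made-up data:

import numpy as np
import skimage.transform as skt

img = np.zeros((50, 50))
img[20:30, 20:30] = 1.0

shift = skt.AffineTransform(translation=(5, 0))
# warp() applies the given transform as output -> input, so passing
# shift.inverse moves the square by +5 in x; passing shift would move it -5.
moved = skt.warp(img, shift.inverse)
print(moved[25, 20:40].round(1))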
Example #3
File: hrdic.py Project: ApexBurger/DefDAP
    def linkEbsdMap(self, ebsdMap, transformType="affine", order=2):
        """Calculates the transformation required to align EBSD dataset to DIC.

        Parameters
        ----------
        ebsdMap : defdap.ebsd.Map
            EBSD map object to link.
        transformType : str, optional
            affine, piecewiseAffine or polynomial.
        order : int, optional
            Order of polynomial transform to apply.

        """
        self.ebsdMap = ebsdMap
        if transformType.lower() == "piecewiseaffine":
            self.ebsdTransform = tf.PiecewiseAffineTransform()
            self.ebsdTransformInv = self.ebsdTransform.inverse
        elif transformType.lower() == "projective":
            self.ebsdTransform = tf.ProjectiveTransform()
            self.ebsdTransformInv = self.ebsdTransform.inverse
        elif transformType.lower() == "polynomial":
            self.ebsdTransform = tf.PolynomialTransform()
            # You can't calculate the inverse of a polynomial transform
            # so have to estimate by swapping source and destination
            # homog points
            self.ebsdTransformInv = tf.PolynomialTransform()
            self.ebsdTransformInv.estimate(np.array(self.ebsdMap.homogPoints),
                                           np.array(self.homogPoints),
                                           order=order)
            # calculate transform from EBSD to DIC frame
            self.ebsdTransform.estimate(np.array(self.homogPoints),
                                        np.array(self.ebsdMap.homogPoints),
                                        order=order)
            return
        else:
            # default to using affine
            self.ebsdTransform = tf.AffineTransform()
            self.ebsdTransformInv = self.ebsdTransform.inverse

        # calculate transform from EBSD to DIC frame
        self.ebsdTransform.estimate(np.array(self.homogPoints),
                                    np.array(self.ebsdMap.homogPoints))
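One difference from Example #1 is the projective branch: unlike the polynomial case, ProjectiveTransform has an exact analytic inverse, so no second estimation is needed. A quick standalone check with made-up points:

import numpy as np
import skimage.transform as tf

src = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], dtype=float)
dst = np.array([[5, 3], [108, 2], [104, 110], [-2, 97]], dtype=float)

proj = tf.ProjectiveTransform()
proj.estimate(src, dst)
# The analytic inverse recovers the source points to machine precision:
print(proj.inverse(proj(src)) - src)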
Example #4
    def face_registration(self, image_rgb, shape, face_size=225, scale=1.0):
        standard_model = TEMPLATE * 500
        if (len(image_rgb.shape) > 2):
            image_rgb = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2RGB)
        npLandmarks = self.shape_to_np(shape)
        t = transform.PolynomialTransform()
        t.estimate(standard_model, npLandmarks, 3)
        img_warped = transform.warp(image_rgb,
                                    t,
                                    order=2,
                                    mode='constant',
                                    cval=float('nan'))
        # Note: the cropped face is computed but unused; the full warped image
        # is returned below.
        cropped_registered_face = self.crop_face(img_warped, standard_model)

        return img_warped
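The cval=float('nan') in the warp call above is a common way to mark output pixels that had no source data; Example #2 achieves the same thing with a large sentinel value and a threshold. A minimal standalone sketch of the NaN approach:

import numpy as np
import skimage.transform as transform

img = np.ones((40, 40))
t = transform.AffineTransform(translation=(-10, 0))
warped = transform.warp(img, t, mode='constant', cval=np.nan)

mask = np.isnan(warped)          # True where the warp fell outside the input
print(mask[:, :12].all(axis=0))  # the first 10 columns have no source pixel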
Example #5
    def _tform(self):
        """Return a skimage transform object."""
        xcoef = self.xcoef
        ycoef = self.ycoef
        dim = self.dim
        if not xcoef or not ycoef or dim is None:
            return None
        a = xcoef
        b = ycoef
        # Affine transform
        if dim in range(0, 4):
            if dim == 0:
                tmatrix = np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape((3, 3))
            elif dim == 1:
                tmatrix = np.array([1, 0, a[0], 0, 1, b[0], 0, 0, 1]).reshape(
                    (3, 3))
            elif dim == 2:  # Special case, swap b[1] and b[2] (look at original Reconstruct code: nform.cpp)
                tmatrix = np.array([a[1], 0, a[0], 0, b[1], b[0], 0, 0,
                                    1]).reshape((3, 3))
            elif dim == 3:
                tmatrix = np.array(
                    [a[1], a[2], a[0], b[1], b[2], b[0], 0, 0, 1]).reshape(
                        (3, 3))
            return tf.AffineTransform(tmatrix)
        # Polynomial transform
        elif dim in range(4, 7):
            tmatrix = np.array([
                a[0], a[1], a[2], a[4], a[3], a[5], b[0], b[1], b[2], b[4],
                b[3], b[5]
            ]).reshape((2, 6))
            # create matrix of coefficients
            tforward = tf.PolynomialTransform(tmatrix)

            def getrevt(pts):  # pts are a np.array
                newpts = []  # list of final estimates of (x,y)
                for i in range(len(pts)):
                    # (u,v) for which we want (x,y)
                    u, v = pts[i, 0], pts[i, 1]  # input pts
                    # initial guess of (x,y)
                    x0, y0 = 0.0, 0.0
                    # get forward tform of initial guess
                    uv0 = tforward(np.array([x0, y0]).reshape([1, 2]))[0]
                    u0 = uv0[0]
                    v0 = uv0[1]
                    e = 1.0  # current error; iterate until it drops below epsilon
                    epsilon = 5e-10
                    it = 0  # Newton iteration counter
                    while e > epsilon and it < 100:  # NOTE: 10 -> 100
                        it += 1
                        # compute Jacobian
                        l = a[1] + a[3] * y0 + 2.0 * a[4] * x0
                        m = a[2] + a[3] * x0 + 2.0 * a[5] * y0
                        n = b[1] + b[3] * y0 + 2.0 * b[4] * x0
                        o = b[2] + b[3] * x0 + 2.0 * b[5] * y0
                        p = l * o - m * n  # determinant for inverse
                        if abs(p) > epsilon:
                            # increment x0,y0 by inverse of Jacobian
                            x0 = x0 + ((o * (u - u0) - m * (v - v0)) / p)
                            y0 = y0 + ((l * (v - v0) - n * (u - u0)) / p)
                        else:
                            # try Jacobian transpose instead
                            x0 = x0 + (l * (u - u0) + n * (v - v0))
                            y0 = y0 + (m * (u - u0) + o * (v - v0))
                        # get forward tform of current guess
                        uv0 = tforward(np.array([x0, y0]).reshape([1, 2]))[0]
                        u0 = uv0[0]
                        v0 = uv0[1]
                        # compute closeness to goal
                        e = abs(u - u0) + abs(v - v0)
                    # append final estimate of (x,y) to newpts list
                    newpts.append((x0, y0))
                newpts = np.asarray(newpts)
                return newpts

            tforward.inverse = getrevt
            return tforward
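A quick consistency check for the Newton-iteration inverse above, assuming a hypothetical section object sec that exposes this _tform method with a polynomial dim (4 to 6), and assuming the iteration converges:

import numpy as np

tform = sec._tform()  # sec is a hypothetical Reconstruct section object
pts = np.array([[1.0, 2.0], [30.0, 40.0], [-5.0, 7.5]])
roundtrip = tform.inverse(tform(pts))
assert np.allclose(roundtrip, pts, atol=1e-6)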
Example #6
    keypoints = exercise1Lib.shape2points(shape)

    # 1. Load the landmark position of the standard face model
    standardModel = np.zeros((68, 2))
    tmp = 0
    with open('mean.csv', 'rt') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            standardModel[tmp, 0] = float(row[0])
            standardModel[tmp, 1] = float(row[1])
            tmp += 1
    standardModel = standardModel * 500

    # 2. Calculating the transformation between the two sets of keypoints
    # 2.1 Instantiating a PolynomialTransform() transform function
    tform = transform.PolynomialTransform()

    # 2.2 Calculating the transformation by calling the estimate() method.
    #     You do not need to return any value from this call, because the
    #     transformation parameters are stored in the object you instantiated.
    tform.estimate(standardModel, keypoints)

    # 3. Warping your example image using the transform.warp() function
    warped = transform.warp(img, tform)

    # 4. Crop the face from the registered image using the provided cropFace function.
    cropedExampleFace = exercise1Lib.cropFace(warped, standardModel)

    # 5. Cropping the face from the example image using the detected landmarks
    cropedExampleFace2 = exercise1Lib.cropFace(img, keypoints)
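Step 1's csv loop can be collapsed into a single call; a sketch, assuming the same 'mean.csv' of 68 comma-separated x,y rows:

import numpy as np

standardModel = np.loadtxt('mean.csv', delimiter=',') * 500
assert standardModel.shape == (68, 2)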
Example #7
if (frame_RGB.shape[0] / max_height > 1):
    frame_RGB_res = transform.pyramid_reduce(frame_RGB,
                                             sigma=0,
                                             downscale=frame_RGB.shape[0] /
                                             max_height)
else:
    frame_RGB_res = frame_RGB
frame_thermal_res = transform.pyramid_expand(frame_thermal / 255,
                                             sigma=0,
                                             upscale=scale_factor_of_thermal)

transform_matrix = calculate_transform_matrix(frame_RGB_res,
                                              frame_thermal_res,
                                              division_depth=8)

trans = transform.PolynomialTransform(transform_matrix)

print(','.join([str(a) for a in trans.params.flatten()]))

warped = transform.pyramid_expand(frame_thermal.astype(float),
                                  sigma=0,
                                  upscale=scale_factor_of_thermal)
warped = transform.warp(warped, trans)

plt.figure()
plt.imshow(warped)

scaled_aligned_thermal = cv2.applyColorMap((warped).astype('uint8'),
                                           cv2.COLORMAP_JET)[..., ::-1]

# Blend with the resolution-matched RGB frame (as in the plot branch below).
overlay = cv2.addWeighted(scaled_aligned_thermal, 0.3,
                          frame_RGB_res.astype('uint8'), 0.7, 0)
Example #8
def _calculate_transform_matrix(frame_RGB, frame_thermal,
                                thermal_canny_percentage=4,
                                rgb_canny_percentage=4,
                                division_depth=6,
                                desired_thermal_scale=1,
                                denoise_weight_rgb=0.3, denoise_weight_thermal=0.1,
                                degree=2,
                                plot=False):
    '''
    Calculate the second degree polynomial transformation matrix to map the
    thermal frame on the RGB frame.

    Parameters
    ----------
    frame_RGB : ndarray
        RGB frame without alpha.
    frame_thermal : ndarray
        2D Thermal frame resized to have the same size with RGB frame.
    thermal_canny_percentage : int, optional
        Coverage of the edges for the canny output. Recommended: 2 to 6, default: 4.
    rgb_canny_percentage : int, optional
        Coverage of the edges for the canny output. Recommended: 3 to 8, default: 4.
    division_depth : int, optional
        Maximum region count for the vertical division. Needs to be chosen
        proportionally to the frames' quality and information density. The
        smallest division should not be narrower than the expected shift,
        though outliers are mostly handled. Default: 6.

    Returns
    -------
    ndarray
        2x6 second degree polynomial transformation matrix.
    '''

    rgb_edge = _canny_with_TV(frame_RGB, denoise_weight_rgb, rgb_canny_percentage)
    therm_edge = _canny_with_TV(frame_thermal, denoise_weight_thermal, thermal_canny_percentage)
    
    # NOTE: rgb_edge.shape is (rows, cols); this function's "width" names the
    # row count and "height" the column count throughout.
    orig_width, orig_height = rgb_edge.shape
    half_width, half_height = int(orig_width / 2), int(orig_height / 2)
    
    rgb_proc = np.zeros((orig_width*2, orig_height*2))
    rgb_proc[half_width:half_width*3,half_height:half_height*3] = rgb_edge
    therm_proc = np.zeros((orig_width*2, orig_height*2))
    therm_proc[half_width:half_width*3,half_height:half_height*3] = therm_edge
    max_width, max_height = rgb_proc.shape[:2]
    
    # Divide image into vertical areas and save the centers before a possible shift.
    points_x = []
    points_y = []
    weights = []
    for region_count in (np.logspace(0,division_depth,division_depth, base = 2)).astype(int):

        # Determine division limits
        region_divisions_with_zero = np.linspace(0, max_width, num = region_count,
                                                 endpoint = False, dtype = int)
        region_divisions = region_divisions_with_zero[1:]
        all_region_bounds = np.append(region_divisions_with_zero, max_width)
        # Divide the frames into the regions
        lum_regions = np.hsplit(rgb_proc,region_divisions)
        therm_regions = np.hsplit(therm_proc,region_divisions)
        
        region_divisions_with_zero = np.insert(region_divisions, 0, 0)
        # Calculate the shifts for each region and save the points. Weight of a point
        # is proportional with its size ( thus, amount of information) and its
        # closeness to the center of the image ( which is the expected location
        # of the baby)
        for ind, (lumreg, thermreg) in enumerate(zip(lum_regions, therm_regions)):
            
            shifts, error, _ = feature.register_translation(thermreg.astype(int), lumreg.astype(int), 10)
            min_h, min_w = shifts
    
            reg_width = all_region_bounds[ind+1] - region_divisions_with_zero[ind]
            point_y = max_height/2-min_h
            point_x = region_divisions_with_zero[ind] + reg_width/2 - min_w
            
            points_y.append(point_y)
            points_x.append(point_x)

            sum_t = np.count_nonzero(thermreg)
            sum_r = np.count_nonzero(lumreg)
            try:
                weights.append(sum_t*sum_r/(sum_t+sum_r))
            except ZeroDivisionError:
                weights.append(0)
    
    # Remove the points that are certainly miscalculations: first filter by
    # the location of the cameras, then remove outliers (i.e. points more than
    # 1 IQR away from the closest percentile).

    # The location filter (y > max_height * 11 / 20) is disabled here; all
    # points pass through to the outlier stage.
    clean_mask_1 = np.ones(len(points_y), dtype=bool)
    semiclean_points_x = np.array(points_x)[clean_mask_1]
    semiclean_points_y = np.array(points_y)[clean_mask_1]
    semiclean_weights = np.array(weights)[clean_mask_1]
    
    from collections import Counter
    # weighted percentiles: repeat each y value by its integer weight
    q1, q3 = np.percentile(
        list(Counter(dict(zip(semiclean_points_y,
                              semiclean_weights.astype(int)))).elements()),
        [25, 75])
    iqr_y = q3 - q1
    clean_mask_2 = np.array([q1 - iqr_y < y < q3 + iqr_y
                             for y in semiclean_points_y])
    clean_points_x = np.array(semiclean_points_x)[clean_mask_2]
    clean_points_y = np.array(semiclean_points_y)[clean_mask_2]
    clean_weights = np.array(semiclean_weights)[clean_mask_2]

    # Create the polynomial features and fit the regression.
    poly = PolynomialFeatures(degree=degree)
    X_t = poly.fit_transform(np.array(clean_points_x).reshape((-1,1)))
    
    clf = LinearRegression()
    clf.fit(X_t, clean_points_y, sample_weight = clean_weights)
    
    points = np.linspace(0,max_width,10)
    data = poly.fit_transform(points.reshape((-1,1)))
    line = clf.predict(data)
    
    # Create a grid of values from the regression to estimate the transformation matrix.
    x_points_grid = np.array([points , points, points, points, points])
    y_points_grid = np.array([line-20, line-10, line, line+10, line+20])
    src = np.array([(x-half_width,y-half_height) for x,y in zip(x_points_grid.flatten(), y_points_grid.flatten())])
    cent = max_height/2
    y_points_truegrid = np.broadcast_to(np.array([[cent-20], [cent-10], [cent], [cent+10], [cent+20]]), y_points_grid.shape)
    dest = np.array([(x-half_width,y-half_height) for x,y in zip(x_points_grid.flatten(), y_points_truegrid.flatten())])
    
    trans = transform.PolynomialTransform()
    trans.estimate(src*desired_thermal_scale,dest*desired_thermal_scale,degree)
    
    if plot:
        
        import cv2
        fig, ax = plt.subplots(nrows=1, ncols=5, figsize = (20,5))
        
        ax[0].imshow(frame_thermal)
        ax[0].set_title('thermal frame. Initial res: 80x60')
        ax[1].imshow(frame_RGB)
        ax[1].set_title('RGB frame. Initial res: 640x480')
        ax[2].imshow(frame_thermal)
        ax[2].scatter(points_x, points_y,color = 'r')
        ax[2].scatter(clean_points_x, clean_points_y,color = 'g')
        ax[2].plot(points, line,scalex = False, scaley= False)
        ax[2].set_xlim(0,max_height)
        ax[2].set_ylim(max_width,0)
        ax[2].set_title('Corr. points and the quadratic fit. Red: outliers.')
        ax[3].imshow(therm_proc)
        ax[3].set_title('Edges of thermal frame')
        
        warped = transform.warp(frame_thermal,trans)
        
        scaled_aligned_thermal = cv2.applyColorMap((warped*256).astype('uint8'), cv2.COLORMAP_JET)[...,::-1]
        
        overlay = cv2.addWeighted(scaled_aligned_thermal, 0.3, (frame_RGB).astype('uint8'), 0.7, 0)
        ax[4].imshow(overlay)
        ax[4].set_title('final overlay')
    
    return trans.params
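A hedged usage sketch for the function above, assuming frame_RGB and frame_thermal are pre-loaded, equally sized frames as the docstring requires:

params = _calculate_transform_matrix(frame_RGB, frame_thermal,
                                     division_depth=8, plot=False)
trans = transform.PolynomialTransform(params)  # rebuild from the (2, 6) matrix
aligned_thermal = transform.warp(frame_thermal.astype(float), trans)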
Example #9
def record_baby_temperature(bgr_frame, thermal_frame, baby_temp, face_detector, shared_transform_matrix,
                            aux_temperature, baby_temp_deque, baby_feverish):

    # First, try to detect faces. If one is found, take the maximum value on
    # the face. If no face can be detected, fall back to a blob/ROI estimate.

    # Acquire the shared transform matrix and build the transform object.
    with shared_transform_matrix.get_lock():
        transform_matrix = np.array(shared_transform_matrix).reshape((2, 6))
    transform_obj = transform.PolynomialTransform(transform_matrix)
    
    # Rotate for demo purposes
    rotat = cv2.rotate(bgr_frame, cv2.ROTATE_90_CLOCKWISE)
    faces = face_detector.detectMultiScale(rotat,1.05,1, minSize = (10,10),  maxSize = (150,150))

    if not isinstance(faces,tuple):  # if faces are returned
        
        rotated_back_faces = []
        for x,y,w,h in faces:
            zeros = np.zeros_like(rotat)
            zeros[y,x] = 1
            rotat_zeros = cv2.rotate(zeros, cv2.ROTATE_90_COUNTERCLOCKWISE)
            y_rotat,x_rotat,_ = np.unravel_index(rotat_zeros.argmax(), rotat_zeros.shape)
            rotated_back_faces.append((x_rotat, y_rotat, h, w))
        
        faces = rotated_back_faces
        # filter by location
        faces = [(x,y,w,h) for x,y,w,h in faces if (180 >= x >= 60) and (135 >= y >= 45)]
        
        if faces:
            # Take the first face.
            x,y,w,h = faces[0]
            x,y = (transform_obj(np.array([x,y]).reshape((1,2))).astype(int))[0]
            w,h = int(w*thermal_frame.shape[1]/bgr_frame.shape[1]), int(h*thermal_frame.shape[0]/bgr_frame.shape[0])
            
            face = thermal_frame[y-h:y,x:x+w]
            if face.size > 0:
                new_temp = np.max(face) * 0.0403 + aux_temperature.value * 1.2803 - 322.895
                if 70 >= new_temp >= 20:
                    baby_temp.value = new_temp
            
            print('******************************')
            print('Face detected. Max Temp: {}'.format(baby_temp.value))
            print('******************************')
            sys.stdout.flush()
        else:
            new_temp = np.max(thermal_frame[15:45,20:60]) * 0.0403 + aux_temperature.value * 1.2803 - 322.895
            if 70 >= new_temp >= 20:
                baby_temp.value = new_temp
            
            print('******************************')
            print('Face not detected. Max Temp: {}'.format(baby_temp.value))
            print('******************************')
            sys.stdout.flush()
            
    else:
        new_temp = np.max(thermal_frame[15:45,20:60]) * 0.0403 + aux_temperature.value * 1.2803 - 322.895
        if 70 >= new_temp >= 20:
            baby_temp.value = new_temp
        
        print('******************************')
        print('Face not detected. Max Temp: {}'.format(baby_temp.value))
        print('******************************')
        sys.stdout.flush()
    
    baby_temp_deque.append(baby_temp.value)
    peak_count = np.count_nonzero( np.array(baby_temp_deque) > 33.5)
            
    if( peak_count > 16):
        baby_feverish.value = True
    else:
        baby_feverish.value = False
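The deque at the end implements a simple debounce: the fever flag flips only when enough recent readings exceed the threshold, so a single noisy spike cannot trigger it. A standalone sketch (history length and count threshold are assumptions):

from collections import deque
import numpy as np

readings = deque(maxlen=32)  # assumed history length
for t in [33.0, 33.6, 33.7, 33.2, 33.9, 34.0, 33.8]:
    readings.append(t)

feverish = np.count_nonzero(np.array(readings) > 33.5) > 4  # assumed threshold
print(feverish)  # True: five of the recent readings exceed 33.5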
Example #10
def video_routine(frame_queue, bgr_thermal_queue, aux_temp, temp_dict, shared_transform_matrix,
                  baby_is_crying, faces_queue, baby_temp):

    '''
    Routine that:
        + Acquires BGR and thermal video frames,
        + Aligns them using the alignment vector provided by the control process,
        + Overlays them according to a predetermined colormap.

    It also feeds the frames into the control thread, but this action does not
    take much time (tested) and does not need to be modified in the future.

    Beware that this routine takes the initial thermal frame first for better
    alignment, since the RGB camera is much faster. That is the reason for the
    thermal capture before the main capture loop.
    '''
        
    # IMPORTANT CONSTANTS
    RESOLUTION = (240,180) # Beware that this is the inverse of numpy ordering!
    NP_COMPAT_RES = (180,240)
    THERMAL_RES = (60,80)
    CHIP_DESELECT = 0.25  # Deselect duration after a corrupted frame, in seconds
    
    # Initialize necessary variables
    transform_matrix = np.array([[ 2.81291628e-11, 1.00000000e+00, -5.06955742e-13,  8.35398829e-16, -1.56637280e-15,  2.92389590e-15],
                                 [-3.00482974e+01, 1.20000000e-01,  1.00000000e+00, -5.00000000e-04, 6.40729980e-16,  6.20157957e-16]])
    transform_obj = transform.PolynomialTransform(transform_matrix)
    
    # Initialize the BGR camera, set the resolution, create the empty memory 
    # array to get continuous frames
    bgr_camera = PiCamera()
    bgr_camera.resolution = RESOLUTION
    bgr_camera.sensor_mode = 4 # 4:3 aspect ratio, high frame rate, large FoV
    bgr_camera.framerate = 30
    bgr_output_array = PiRGBArray(bgr_camera, size=RESOLUTION)
    
    # detected faces initialization
    detected_faces = None
    max_temp = 45
    
    # Initialize the thermal camera, create the handle. 
    # DO NOT FORGET TO CLOSE IT MANUALLY!
    thermal_camera = Lepton("/dev/spidev0.1")
    
    print('yes')
    sys.stdout.flush()
    
    # Wrap the function into a try/finally block to handle the exit
    try:
        
        # Take the first thermal frame to couple with the first BGR frame.
        # Wait if the frame is bad, until you get a good one.
        # Lepton sometimes return the same frame, so if the frame id is 
        # identical, no processing is necessary.
        # Walrus operator would be so nice to use here...
            
        raw_thermal_frame = False
        while not isinstance(raw_thermal_frame, np.ndarray):
            time.sleep(CHIP_DESELECT)
            with thermal_camera as tcam:
                raw_thermal_frame, thermal_id = tcam.capture(retry_reset = False,
                                                             return_false_if_error = True)
                print('Waiting for correct frame')
            sys.stdout.flush() 
        
        start_time = time.perf_counter()
        # Flag for the unique thermal frame and corrupted frame.
        # Corrupted frame flag carries a time value to leave
        # the chip deselected for CHIP_DESELECT duration
        thermal_frame_is_unique = True
        thermal_frame_is_corrupted = (False, time.perf_counter())
        
        # Start the loop! 
        # capture_continuous method just spits out the  BGR frames continuously. 
        for frame in bgr_camera.capture_continuous(bgr_output_array, format = 'bgr',
                                                   use_video_port = True):
            
            # Acquire the BGR frame
            # There was a copy() here. WAS IT NECESSARY, REALLY? CHECK.
            bgr_frame = np.flip(frame.array,0).astype(np.uint8)
            #raw_thermal_frame = np.flip(raw_thermal_frame,0)
            rotated_thermal_frame = np.flip(cv2.rotate(raw_thermal_frame, cv2.ROTATE_90_CLOCKWISE),1)
            raw_thermal_frame = np.zeros(THERMAL_RES)
            raw_thermal_frame[:-7,8:-12] = rotated_thermal_frame[27:,:]
            
            
            # Do the processing if the thermal frame is unique. If not,
            # nothing much to do! 
            # BEWARE THAT raw_thermal_frame IS NOT USABLE! IF YOU WANT TO USE IT,
            # THEN A COPY IS NECESSARY!
            if( thermal_frame_is_unique):
                
                # Put them into the queue
                bgr_thermal_queue.put((bgr_frame, raw_thermal_frame))
                
                # Preprocess the thermal frame to clip the values into some
                # predetermined thresholds
                aux_temperature = aux_temp.value
                    
                min_thresh = (20 + 322.895 - aux_temperature * 1.2803) / 0.0403  # 20 degrees C
                max_thresh = (35 + 322.895 - aux_temperature * 1.2803) / 0.0403  # 35 degrees C
                dummy_added_thermal_frame = raw_thermal_frame.copy()
                dummy_added_thermal_frame[0,1] = min_thresh
                dummy_added_thermal_frame[0,0] = max_thresh
                corrected_thermal_frame = np.clip(dummy_added_thermal_frame, min_thresh, max_thresh)
        
                # Normalize thermal frame to 0-255
                cv2.normalize(corrected_thermal_frame, 
                              corrected_thermal_frame, 0, 255, cv2.NORM_MINMAX)
                
                # Acquire the transform matrix and if it is new, create transform object
                with shared_transform_matrix.get_lock(): 
                    new_transform_matrix = np.array(shared_transform_matrix).reshape((2,6))
                if not np.array_equal(new_transform_matrix, transform_matrix):
                    transform_matrix = new_transform_matrix
                    transform_obj = transform.PolynomialTransform(transform_matrix)
                
                # Warp the thermal image according to the transform object.
                # TODO: Correct the division by 255 thing, it is weird.
                warped_thermal_frame = transform.warp(corrected_thermal_frame.astype(float), transform_obj).astype(np.uint8)
                                
                # Scale the thermal frame to the same size as the BGR frame.
                # skimage's pyramid_expand gives a smoother output, but it is
                # more than 20x slower, so we use cv2.resize instead.
                scale = NP_COMPAT_RES[0] / THERMAL_RES[0]
                scaled_thermal_frame = cv2.resize(warped_thermal_frame, None,
                                                  fx = scale, fy = scale,
                                                  interpolation = cv2.INTER_LINEAR)
                
                # Apply color map to the scaled frame
                # Beware that the output is also BGR.
                colored_thermal_frame = cv2.applyColorMap(scaled_thermal_frame,
                                                          cv2.COLORMAP_JET)
            
            # Sum the thermal and BGR frames (even if it is not unique)
            overlay = cv2.addWeighted(colored_thermal_frame, 0.25, bgr_frame, 0.75, 0)
            
            # Write temperature on overlay
            max_temp = baby_temp.value

            if(70 >= max_temp >= 15):
            
                cv2.putText(overlay, 'Temp-in-range: {}'.format(round(max_temp,2)),
                            (10,NP_COMPAT_RES[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.25, (255,255,255), 1)
            
            cv2.putText(overlay, (':(' if baby_is_crying.value else ':)'),
                        (NP_COMPAT_RES[1] - 30, NP_COMPAT_RES[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255,255,255), 1)
            
            try:
                detected_faces = faces_queue.get( block = False)
            except Empty:
                pass
            
            if( detected_faces is not None):
                for (x,y,w,h) in detected_faces:
                    cv2.rectangle(overlay, (x,y), (x+w,y-h), (255,255,255), 1)
            
            # Video processed!
            print('MAX TEMP: {}'.format(max_temp))
            start_time = time.perf_counter()
            
            # Send the frame to queue
            frame_queue.put(overlay[45:,:]) # cut 45 from above
            
            # Now, get the next thermal frame. First, check if the last thermal 
            # frame was corrupted. If not, just capture as usual.
            if( thermal_frame_is_corrupted[0]): 
                
                # If that is the case, check if CHIP_DESELECT time has passed since
                # the corruption. If so, take a new frame and turn the flag to False.
                # If not, just use the old frames as the new ones. The id's will 
                # be checked to prevent reprocessing in the next if block.
                if(time.perf_counter() - thermal_frame_is_corrupted[1] > CHIP_DESELECT):
                    with thermal_camera as tcam:
                        new_raw_thermal_frame, new_thermal_id = tcam.capture(retry_reset = False,
                                                                                       return_false_if_error = True)
                    thermal_frame_is_corrupted = (False, time.perf_counter())
                
                else:
                    new_raw_thermal_frame, new_thermal_id = raw_thermal_frame, thermal_id
            
            else:
                with thermal_camera as tcam:
                    new_raw_thermal_frame, new_thermal_id = tcam.capture(retry_reset = False,
                                                                                   return_false_if_error = True)
            
            # If the capture was successful or the necessary time for the
            # corruption flag to be removed has not passed, check the uniqueness and 
            # continue. 
            if type(new_raw_thermal_frame) is np.ndarray:
                
                if thermal_id != new_thermal_id:
                    raw_thermal_frame = new_raw_thermal_frame
                    thermal_frame_is_unique = True
                else:
                    thermal_frame_is_unique = False
            
            # If the frame was corrupted, just use the old frames and set the 
            # corruption flag.
            else:
                thermal_frame_is_unique = False
                thermal_frame_is_corrupted = (True, time.perf_counter())
            
            # truncate the output array
            bgr_output_array.truncate(0)
    
    finally:
        
        bgr_camera.close()
        print('video routine stopped.')
        sys.stdout.flush()
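For completeness, a sketch of how the (2, 6) polynomial matrix travels between the routines above: a flat multiprocessing.Array of 12 doubles, written under its lock by the producer and reshaped by the consumers exactly as in record_baby_temperature and video_routine.

import multiprocessing as mp
import numpy as np
from skimage import transform

shared_transform_matrix = mp.Array('d', 12)  # 2 x 6 coefficients, flattened

# Producer side: publish new coefficients under the lock. These two entries
# give the identity mapping (x' = x, y' = y) in skimage's polynomial
# coefficient ordering [1, x, y, x^2, xy, y^2].
new_params = np.zeros((2, 6))
new_params[0, 1] = 1.0
new_params[1, 2] = 1.0
with shared_transform_matrix.get_lock():
    shared_transform_matrix[:] = new_params.ravel().tolist()

# Consumer side: rebuild the transform object, as the routines above do.
with shared_transform_matrix.get_lock():
    params = np.array(shared_transform_matrix).reshape((2, 6))
transform_obj = transform.PolynomialTransform(params)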