Example #1
    def validation_shift(self, original_IMG, Shifted_IMG, path, Image_ID):
        Costmatrix, shift_used = COSTMtrix.matrix_cal_corre_full_version3_2GPU(
            original_IMG, Shifted_IMG, 0)
        if Clip_matrix_flag == True:
            #Costmatrix = np.clip(Costmatrix, 20,254)
            Costmatrix = self.random_min_clip_by_row(5, 30, Costmatrix)
        Shifted_IMG2 = Shifted_IMG
        shift = self.shift_predictor.predict(original_IMG, Shifted_IMG,
                                             Shifted_IMG2)
        path_deep = shift + path * 0  # predicted shift broadcast to the length of path

        ##middle_point  =  PATH.calculate_ave_mid(mat)
        #path1,path_cost1=PATH.search_a_path(mat,start_point) # get the path and average cost of the path
        show1 = np.zeros((Costmatrix.shape[0], Costmatrix.shape[1], 3))
        show1[:, :, 0] = Costmatrix
        show1[:, :, 1] = Costmatrix
        show1[:, :, 2] = Costmatrix
        # save the raw (unannotated) cost matrix first
        cv2.imwrite(self.data_mat_root_origin + str(Image_ID) + ".jpg", Costmatrix)

        for i in range(len(path)):
            painter = min(path[i], Window_LEN - 1)  # clamp to the window range
            #painter2= min(path_tradition[i],Window_LEN-1)
            painter3 = min(path_deep[i], Window_LEN - 1)
            show1[int(painter), i, :] = [255, 255, 255]  # ground-truth path in white
            #show1[int(painter2),i,:]=[254,0,0]
            show1[int(painter3), i, :] = [0, 0, 254]  # predicted path in red (BGR)
        # save the annotated matrix to the output directory
        cv2.imwrite(self.data_mat_root + str(Image_ID) + ".jpg", show1)
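
The visualization pattern above (grey cost matrix copied into all three channels, path rows painted in colour) can be reproduced in isolation. The following is a minimal, self-contained sketch with placeholder data; Window_LEN, the matrix, and the path are made-up values, not the project's real inputs.

import numpy as np
import cv2

WINDOW_LEN = 60                                    # placeholder for Window_LEN
cost = np.random.randint(0, 255, (WINDOW_LEN, 400)).astype(np.float32)
path = np.full(400, WINDOW_LEN // 2)               # dummy path: one row index per column

show = np.dstack([cost, cost, cost])               # grey matrix as a 3-channel image
for col, row in enumerate(path):
    row = int(min(row, WINDOW_LEN - 1))            # clamp to the window range
    show[row, col, :] = [0, 0, 254]                # paint the path in red (BGR)
cv2.imwrite("cost_matrix_overlay.jpg", show)
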
Example #2
    def validation(self, original_IMG, Shifted_IMG, path, Image_ID):
        #Costmatrix,shift_used = COSTMtrix.matrix_cal_corre_full_version3_2GPU(original_IMG,Shifted_IMG,0)
        Costmatrix, shift_used = COSTMtrix.matrix_cal_corre_block_version3_3GPU(
            original_IMG, Shifted_IMG, 0)

        # Costmatrix = myfilter.gauss_filter_s(Costmatrix) # smooth matrix
        # traditional way to find the path
        start_point = PATH.find_the_starting(
            Costmatrix)  # starting point for path searching

        #path_tradition,pathcost1 = PATH.search_a_path(Costmatrix,start_point) # get the path and average cost of the path
        #path_deep,path_cost2 = PATH.search_a_path_Deep_Mat2longpath(Costmatrix) # get the path and average cost of the path
        path_deep, path_cost2 = PATH.search_a_path_deep_multiscal_small_window(
            Costmatrix)  # get the path and its average cost

        path_deep = gaussian_filter1d(path_deep, 3)  # smooth the path

        ##middle_point = PATH.calculate_ave_mid(mat)
        #path1,path_cost1 = PATH.search_a_path(mat,start_point) # get the path and average cost of the path
        show1 = Costmatrix
        # save the raw (unannotated) cost matrix
        cv2.imwrite(self.data_mat_root_origin + str(Image_ID) + ".jpg", show1)

        for i in range(len(path)):
            painter = min(path[i], Window_LEN - 1)  # ground-truth path row
            #painter2 = min(path_tradition[i], Window_LEN - 1)
            painter3 = min(path_deep[i], Window_LEN - 1)  # predicted path row
            show1[int(painter), i] = 128
            #show1[int(painter2), i] = 128
            show1[int(painter3), i] = 254

        # save the annotated matrix
        cv2.imwrite(self.data_mat_root + str(Image_ID) + ".jpg", show1)
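
Example #2 relies on scipy.ndimage.gaussian_filter1d to smooth the searched path before drawing it. Below is a minimal sketch of just that step, with a synthetic path standing in for the PATH.search_a_path_* output.

import numpy as np
from scipy.ndimage import gaussian_filter1d

raw_path = np.random.randint(0, 60, size=400).astype(np.float64)  # stand-in path
smooth_path = gaussian_filter1d(raw_path, sigma=3)                # same sigma as above
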
Example #3
    def func2(self):
        start_time = time()
        print('NURD start')
        image1 = self.stream2[self.strmlen - 1, :, :]
        h, w = image1.shape
        window_wid = self.path_predictor.Original_window_Len
        self.costmatrix = np.zeros((window_wid, w))

        self.costmatrix, self.shift_used2 = COSTMtrix.matrix_cal_corre_block_version3_3GPU(
            self.stream2[self.strmlen - 1, :, :],
            self.stream2[self.strmlen - 2, :, :],
            0,
            block_wid=3,
            Down_sample_F=1,
            Down_sample_F2=2)

        #self.costmatrix2,self.shift_used2= COSTMtrix.matrix_cal_corre_block_version3_3GPU  (
        #                                                        self.stream2[self.strmlen-1,50:211,:] ,
        #                                                        self.stream2[self.strmlen-2,50:211,:], 0,
        #                                                        block_wid = 3,Down_sample_F = 5,Down_sample_F2 = 5)
        ##self.costmatrix = self.costmatrix1
        #self.costmatrix = 0.6*self.costmatrix1+ 0.4*self.costmatrix2
        Hm, Wm = self.costmatrix.shape
        self.costmatrix = cv2.resize(self.costmatrix, (Wm, Standard_LEN),
                                     interpolation=cv2.INTER_AREA)

        self.costmatrix = myfilter.gauss_filter_s(
            self.costmatrix)  # smooth matrix
        #self.costmatrix  = cv2.GaussianBlur(self.costmatrix,(3,3),0)
        #self.costmatrix = self.costmatrix*1.5 +30
        # down sample the matrix and up sample
        #Hm,Wm= self.costmatrix.shape
        #self.costmatrix = cv2.resize(self.costmatrix, (int(Wm/2),int(Hm/2)), interpolation=cv2.INTER_AREA)
        #self.costmatrix = cv2.resize(self.costmatrix, (Wm,Hm), interpolation=cv2.INTER_AREA)

        # computing the cost matrix takes ~0.24 s
        if Graph_searching_flag == True:
            start_point = PATH.find_the_starting(
                self.costmatrix)  # starting point for path searching
            self.path, pathcost1 = PATH.search_a_path(self.costmatrix,
                                                      start_point)
        else:
            self.path = PATH.get_warping_vextor(
                self.costmatrix)  # this takes ~0.03 s
        self.path = self.path * Window_LEN / Standard_LEN  # rescale rows back to the window range
        #self.path = self.path_predictor.predict(self.stream2[self.strmlen-1,:,:],  self.stream2[self.strmlen-2,:,:])
        end_time = time()

        print('NURD end  ')
        print(" A time is [%f] " % (end_time - start_time))
Example #4
    def validation(self, original_IMG, Shifted_IMG, path, Image_ID):
        #Costmatrix,shift_used = COSTMtrix.matrix_cal_corre_full_version3_2GPU(original_IMG,Shifted_IMG,0)
        Costmatrix, shift_used = COSTMtrix.matrix_cal_corre_full_version3_2GPU(
            original_IMG, Shifted_IMG, 0)

        #Costmatrix=cv2.blur(Costmatrix,(5,5))
        Costmatrix = myfilter.gauss_filter_s(Costmatrix)  # smooth matrix
        # Costmatrix =cv2.GaussianBlur(Costmatrix,(5,5),0)
        # down sample the matrix and up sample
        #Hm,Wm= Costmatrix.shape
        #Costmatrix = cv2.resize(Costmatrix, (int(Wm/2),int(Hm/2)), interpolation=cv2.INTER_LINEAR)
        #Costmatrix = cv2.resize(Costmatrix, (Wm,Hm), interpolation=cv2.INTER_LINEAR)

        if Clip_matrix_flag == True:
            Costmatrix = np.clip(Costmatrix, 20, 254)
            #Costmatrix=self.random_min_clip_by_row(5,30,Costmatrix)
        #Costmatrix = self.add_lines_to_matrix(Costmatrix)
        #Costmatrix=np.clip(Costmatrix, 20, 255)
        # Costmatrix  = myfilter.gauss_filter_s(Costmatrix) # smooth matrix
        # traditional way to find the path

        ##middle_point  =  PATH.calculate_ave_mid(mat)
        #path1,path_cost1=PATH.search_a_path(mat,start_point) # get the path and average cost of the path
        show1 = np.zeros((Costmatrix.shape[0], Costmatrix.shape[1], 3))
        show1[:, :, 0] = Costmatrix
        show1[:, :, 1] = Costmatrix
        show1[:, :, 2] = Costmatrix

        for i in range(len(path)):
            painter = np.clip(path[i], 1, Window_LEN - 2)

            # ground-truth path drawn in white, two pixels thick
            show1[int(painter), i, :] = show1[int(painter) - 1,
                                              i, :] = [254, 254, 254]
        if Show_nurd_compare == True:
            start_point = PATH.find_the_starting(
                Costmatrix)  # starting point for path searching

            path_tradition, pathcost1 = PATH.search_a_path(
                Costmatrix,
                start_point)  # get the path and average cost of the path
            #path_tradition=(path_tradition -Window_LEN/2)*  Down_sample_F2 +Window_LEN/2
            #path_deep,path_cost2=PATH.search_a_path_Deep_Mat2longpath(Costmatrix) # get the path and average cost of the path
            path_deep, path_cost2 = PATH.search_a_path_GPU(
                Costmatrix)  # get the path and average cost of the path
            #path_deep=(path_deep -Window_LEN/2)*  Down_sample_F2 +Window_LEN/2
            path_deep = gaussian_filter1d(path_deep, 3)  # smooth the path
            show1 = np.clip(show1, 1, 190)  # darken the background so the overlays stand out

            for i in range(len(path)):
                painter = np.clip(path[i], 1, Window_LEN - 2)

                show1[int(painter), i, :] = show1[int(painter) - 1,
                                                  i, :] = [254, 254, 254]  # ground truth: white
                painter2 = np.clip(path_tradition[i], 1, Window_LEN - 2)
                painter3 = np.clip(path_deep[i], 1, Window_LEN - 2)
                show1[int(painter2), i, :] = show1[int(painter2) - 1,
                                                   i, :] = [0, 254, 0]  # traditional search: green
                show1[int(painter3), i, :] = show1[int(painter3) - 1,
                                                   i, :] = [0, 0, 254]  # deep model: red

        # save the raw matrix and the annotated overlay to the output directories
        cv2.imwrite(self.data_mat_root_origin + str(Image_ID) + ".jpg",
                    Costmatrix)

        cv2.imwrite(self.data_mat_root + str(Image_ID) + ".jpg", show1)
        # show the signal comparison in visdom
        # (requires Show_nurd_compare so that path_deep / path_tradition exist)
        if visdom_show_flag == True:
            x = np.arange(0, len(path))
            self.vis_ploter.plot_multi_arrays_append(x,
                                                     path,
                                                     title_name=str(Image_ID),
                                                     legend='truth')
            self.vis_ploter.plot_multi_arrays_append(x,
                                                     path_deep,
                                                     title_name=str(Image_ID),
                                                     legend='Deep Learning')
            self.vis_ploter.plot_multi_arrays_append(x,
                                                     path_tradition,
                                                     title_name=str(Image_ID),
                                                     legend='Traditional')
        # save the comparison signals to a MATLAB .mat file
        if Save_matlab_flag == True:
            self.matlab.buffer_4(Image_ID, path, path_deep, path_tradition)
            self.matlab.save_mat()
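
The comparison overlay in Example #4 (ground truth in white, traditional search in green, deep prediction in red, each two pixels thick) can be sketched standalone. The matrix and the three paths below are synthetic placeholders, and WINDOW_LEN is an assumed value.

import numpy as np
import cv2

WINDOW_LEN = 60
cost = np.random.randint(0, 255, (WINDOW_LEN, 400)).astype(np.float32)
truth = np.full(400, 20.0)
traditional = np.full(400, 30.0)
deep = np.full(400, 40.0)

show = np.clip(np.dstack([cost, cost, cost]), 1, 190)   # darken so the overlays stand out
for i in range(show.shape[1]):
    for p, color in ((truth[i], [254, 254, 254]),
                     (traditional[i], [0, 254, 0]),
                     (deep[i], [0, 0, 254])):
        r = int(np.clip(p, 1, WINDOW_LEN - 2))
        show[r, i, :] = show[r - 1, i, :] = color        # two-pixel-thick line
cv2.imwrite("nurd_comparison.jpg", show)
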
Example #5
    def func1(self):
        start_time2 = time()

        print('shift start')
        img1 = self.stream1[self.strmlen - 1, :, :]
        self.shift_used1 = self.add_shift

        img2 = self.stream1[self.strmlen - 2, :, :]
        img3 = self.stream1[0, :, :]
        img1 = cv2.GaussianBlur(img1, (5, 5), 0)
        img2 = cv2.GaussianBlur(img2, (5, 5), 0)
        img3 = cv2.GaussianBlur(img3, (5, 5), 0)

        H, W = img1.shape
        #self.overall_shifting2 = self.shift_predictor.predict_shaking(img1,self.stream2[self.strmlen-2,:,:])

        #self.overall_shifting,shift_used1 = COSTMtrix.Img_fully_shifting_correlation (img1[200:H,:],
        #                                                       img3[200:H,:],  self.shift_used1 )
        #self.shift_used1 += self.overall_shifting
        #self.overall_shifting,shift_used1 = COSTMtrix.Img_fully_shifting_correlation (img1[0:210,:],
        #                                                       img3[0:210,:],  self.shift_used1)
        #self.overall_shifting,shift_used1 = COSTMtrix.stack_fully_shifting_correlation (self.stream1[:,0:210,:],
        #                                                       self.stream2[:,0:210,:],  self.shift_used1)
        img1 = np.roll(img1, int(self.shift_used1),
                       axis=1)  # Positive x rolls right
        crop1 = img1[27:83, :]
        crop3 = img3[27:83, :]
        crop1 = cv2.resize(crop1, (int(W / 3), 30),
                           interpolation=cv2.INTER_LINEAR)
        crop3 = cv2.resize(crop3, (int(W / 3), 30),
                           interpolation=cv2.INTER_LINEAR)
        crop1 = cv2.resize(crop1, (int(W), 90), interpolation=cv2.INTER_LINEAR)
        crop3 = cv2.resize(crop3, (int(W), 90), interpolation=cv2.INTER_LINEAR)
        self.overall_shifting, _, matrix = COSTMtrix.Img_fully_shifting_correlation(
            crop1, crop3, 0)

        #img1_c =  cv2.rotate(img1[30:90,:],rotateCode = cv2.ROTATE_90_CLOCKWISE)
        #img3_c =  cv2.rotate(img3[30:90,:],rotateCode = cv2.ROTATE_90_CLOCKWISE)

        ##img3_c = img3[27:200,:]

        #img1c = cv2.cvtColor(img1_c.astype(np.uint8), cv2.COLOR_GRAY2RGB)
        #img3c = cv2.cvtColor(img3_c.astype(np.uint8), cv2.COLOR_GRAY2RGB)
        #img1c=cv2.resize(img1c, (30,int(W/3)), interpolation=cv2.INTER_LINEAR)
        #img3c=cv2.resize(img3c, (30,int(W/3)), interpolation=cv2.INTER_LINEAR)
        #img1c=cv2.resize(img1c, (60,int(W )), interpolation=cv2.INTER_LINEAR)
        #img3c=cv2.resize(img3c, (60,int(W )), interpolation=cv2.INTER_LINEAR)
        ## find the keypoints and descriptors with ORB
        #kp1, des1 = self.sift.detectAndCompute( img1c.astype(np.uint8) ,None)
        #kp2, des2 = self.sift.detectAndCompute( img3c.astype(np.uint8) ,None)
        ## create BFMatcher object
        ##bf = cv2.BFMatcher()
        #bf = cv2.BFMatcher( )

        #matches = bf.knnMatch(des1,des2,k=2)
        # # Sort them in the order of their distance.
        #good = []
        #for m,n in matches:
        #     if m.distance < 0.75*n.distance:
        #         good.append(m)

        #res = cv2.drawMatches(img1c,kp1,img3c,kp2,good,None)
        #cv2.imshow("Result", res)
        #cv2.waitKey(1)
        #plt.imshow(result),plt.show()
        #cv2.wait(1)
        self.shift_used1 += self.overall_shifting
        ###self.overall_shifting = 0
        img1 = np.roll(img1, int(self.shift_used1),
                       axis=1)  # Positive x rolls right

        self.overall_shifting = self.shift_predictor.predict(
            img1, img2, img3)  # this takes ~0.01 s
        self.overall_shifting2 = self.shift_predictor.predict(
            img2, img1, img1)  # this takes ~0.01 s
        # average the forward estimate with the reverse-direction estimate
        self.overall_shifting = 0.5 * self.overall_shifting + 0.5 * (
            Standard_LEN - self.overall_shifting2)
        # self.costmatrix_o,_= COSTMtrix.matrix_cal_corre_block_version3_3GPU  (
        #                                                        img1[0:200,:]  ,
        #                                                        img3 [0:200,:], 0,
        #                                                        block_wid = 3,Down_sample_F = 1,Down_sample_F2 = 2)

        ##self.costmatrix2,self.shift_used2= COSTMtrix.matrix_cal_corre_block_version3_3GPU  (
        ##                                                        self.stream2[self.strmlen-1,50:211,:] ,
        ##                                                        self.stream2[self.strmlen-2,50:211,:], 0,
        ##                                                        block_wid = 3,Down_sample_F = 5,Down_sample_F2 = 5)
        ###self.costmatrix = self.costmatrix1
        ##self.costmatrix = 0.6*self.costmatrix1+ 0.4*self.costmatrix2
        # Hm,Wm= self.costmatrix_o.shape
        # self.costmatrix_o = cv2.resize(self.costmatrix_o, (Wm,Standard_LEN), interpolation=cv2.INTER_AREA)
        # self.path_o  =  PATH.get_warping_vextor(self.costmatrix_o)  # THIS COST 0.03S
        # self.path_o = self.path_o * Window_LEN/Standard_LEN
        # self.overall_shifting = np.mean(self.path_o)
        #self.overall_shifting = 0.7* self.overall_shifting + 0.3*self.overall_shifting2
        #self.overall_shifting =  self.overall_shifting

        #self.overall_shifting = 0.5*self.overall_shifting + 0.5 * self.last_overall_shift
        #self.last_overall_shift = self.overall_shifting

        #self.overall_shifting,shift_used1 = COSTMtrix.Img_fully_shifting_correlation (img1[0:200,:],
        #                                                       img3[0:200,:],  self.shift_used1 )

        print('shift end')
        end_time2 = time()

        print(" B time is [%f] " % (end_time2 - start_time2))
Example #6
    def main():
        read_sequence = os.listdir(operatedir_video)  # read all file names
        seqence_Len = len(read_sequence)  # get the number of files
        img_path = operatedir_video + "5.jpg"
        video = cv2.imread(img_path)  #read the first one to get the image size
        gray_video = cv2.cvtColor(video, cv2.COLOR_BGR2GRAY)
        Len_steam = 5
        H, W = gray_video.shape  #get size of image
        H_start = 80
        H_end = 200
        steam = np.zeros((Len_steam, H_end - H_start, W))
        steam2 = np.zeros((Len_steam, H, W))
        save_sequence_num = 0  # initialize the processing iteration counter
        addition_window_shift = 0  # initial shifting parameter
        Window_ki_error = 0
        Window_kp_error = 0
        Kp = 0  # initial shifting parameter
        for sequence_num in range(seqence_Len):
            #for i in os.listdir("E:/estimagine/vs_project/PythonApplication_data_au/pic/"):
            start_time = time()
            # read the image to process
            img_path = operatedir_video + str(sequence_num +
                                              0) + ".jpg"  # optional starting-frame offset
            video = cv2.imread(img_path)
            gray_video = cv2.cvtColor(video, cv2.COLOR_BGR2GRAY)
            if (sequence_num < 10):
                # buffer a cropped copy to compute the path and cost matrix
                steam = np.append(steam, [gray_video[H_start:H_end, :]],
                                  axis=0)  # save sequence
                # normal buffer process: drop the oldest frame
                steam = np.delete(steam, 0, axis=0)

                steam2 = np.append(steam2, [gray_video],
                                   axis=0)  # save sequence
                steam2 = np.delete(steam2, 0, axis=0)
            else:
                steam = np.append(steam, [gray_video[H_start:H_end, :]],
                                  axis=0)  # save sequence
                # keep the first frame as a reference: delete index 1 instead of 0
                steam = np.delete(steam, 1, axis=0)
                steam2 = np.append(steam2, [gray_video],
                                   axis=0)  # save sequence
                steam2 = np.delete(steam2, 0, axis=0)
                # shift used is zero in the cost-matrix calculation
                #Costmatrix,shift_used = COSTMtrix.matrix_cal_corre_full_version_2(steam,0)
                overall_shifting, shift_used1 = COSTMtrix.Img_fully_shifting_distance(
                    steam[Len_steam - 1, :, :], steam[Len_steam - 2, :, :],
                    addition_window_shift)
                #overall_shifting0,shift_used0 = COSTMtrix.Img_fully_shifting_correlation(steam[Len_steam-1,:,:],
                #                                          steam[Len_steam-2,:,:],  addition_window_shift)
                #overall_shifting =  overall_shifting
                #Corrected_img,path,path_cost=   VIDEO_PEOCESS.correct_video_with_shifting(gray_video,overall_shifting,int(sequence_num),shift_used1 )

                Costmatrix = np.zeros((Window_LEN, W))
                #test_show = steam2[Len_steam-2,:,:]
                #cv2.imshow('correcr video',test_show.astype(np.uint8))
                Costmatrix, shift_used2 = COSTMtrix.matrix_cal_corre_full_version3_2GPU(
                    steam2[Len_steam - 1, :, :], steam2[Len_steam - 2, :, :],
                    0)
                ###Costmatrix = Costmatrix2
                #Costmatrix = cv2.blur(Costmatrix,(5,5))
                Costmatrix = myfilter.gauss_filter_s(
                    Costmatrix)  # smooth matrix

                ###get path and correct image
                ###Corrected_img,path,path_cost=   VIDEO_PEOCESS.correct_video(gray_video,Costmatrix,int(i),addition_window_shift +Kp )
                Corrected_img, path, path_cost = VIDEO_PEOCESS.correct_video(
                    gray_video, overall_shifting, Costmatrix,
                    int(sequence_num),
                    shift_used2 + Window_ki_error + Window_kp_error)
                #overall_shifting3,shift_used3 = COSTMtrix.Img_fully_shifting_correlation(Corrected_img[H_start:H_end,:],
                #                                          steam[0,:,:],  0)
                #Corrected_img,path,path_cost=   VIDEO_PEOCESS.correct_video_with_shifting(Corrected_img,overall_shifting3,int(sequence_num),shift_used3 )

                # remove the central shifting
                #addition_window_shift = -0.00055*(np.mean(path)- int(Window_LEN/2))+addition_window_shift
                path_mean_error = (np.mean(path) - int(Window_LEN / 2))
                shift_mean_error = int(overall_shifting -
                                       int(Overall_shiftting_WinLen / 2))

                addition_window_shift = shift_mean_error + addition_window_shift
                #addition_window_shift = 0
                #Window_kp_error =  - 0.1* path_mean_error
                #Window_ki_error = -0.000005*path_mean_error+Window_ki_error
                # TODO: next time remember to remove the un-corrected image from the stream
                steam = np.append(steam, [Corrected_img[H_start:H_end, :]],
                                  axis=0)  # save sequence
                # keep the first frame as a reference: delete index 1 instead of 0
                steam = np.delete(steam, 1, axis=0)

                steam2 = np.append(steam2, [Corrected_img],
                                   axis=0)  # save sequence
                # steam2 is a normal rolling buffer: drop the oldest frame
                steam2 = np.delete(steam2, 0, axis=0)

                if (Save_signal_flag == True):

                    new = np.zeros((signal_saved.DIM, 1))
                    new[Save_signal_enum.image_iD.value] = sequence_num
                    new[Save_signal_enum.additional_kp.value] = Kp
                    new[Save_signal_enum.additional_ki.
                        value] = addition_window_shift
                    new[Save_signal_enum.path_cost.value] = path_cost
                    new[Save_signal_enum.mean_path_error.
                        value] = path_mean_error
                    signal_saved.add_new_iteration_result(new, path)
                    signal_saved.display_and_save2(sequence_num, new)
                test_time_point = time()
                show1 = Costmatrix
                new_frame = cv2.rotate(Corrected_img, rotateCode=2)
                circular = cv2.linearPolar(
                    new_frame,
                    (new_frame.shape[1] / 2, new_frame.shape[0] / 2), 200,
                    cv2.WARP_INVERSE_MAP)
                for i in range(len(path)):
                    show1[int(path[i]), i] = 254
                cv2.imwrite(savedir_path + str(sequence_num) + ".jpg",
                            circular)
                cv2.imwrite(
                    operatedir_matrix_unprocessed + str(sequence_num) + ".jpg",
                    Costmatrix)
                cv2.imwrite(operatedir_matrix + str(sequence_num) + ".jpg",
                            show1)
                cv2.imwrite(savedir_rectan_ + str(sequence_num) + ".jpg",
                            Corrected_img)

                print("[%s]   is processed. test point time is [%f] " %
                      (sequence_num, test_time_point - start_time))
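
At the end of main() the corrected rectangular frame is rotated and unwrapped into a circular view with cv2.linearPolar. Below is a self-contained sketch of just that display step, with a random frame standing in for Corrected_img and the same radius of 200 used above.

import numpy as np
import cv2

frame = (np.random.rand(256, 512) * 255).astype(np.uint8)   # placeholder for Corrected_img
frame = cv2.rotate(frame, rotateCode=2)                     # 2 == cv2.ROTATE_90_COUNTERCLOCKWISE
center = (frame.shape[1] / 2, frame.shape[0] / 2)
circular = cv2.linearPolar(frame, center, 200, cv2.WARP_INVERSE_MAP)
cv2.imwrite("circular_view.jpg", circular)
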