Example #1
    def get_hmap_one_video(self, video_name, model_name):
        """
        get hmap of one video with step = 10, note that should frame step should
        be 20 as the sample rate is 2 * frame rate
        """
        FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
            video_name)
        step_size = 10
        frame_step = 20
        step_total = int(FRAMESCOUNT / step_size)

        one_video_lon, one_video_lat = self.get_raw_data(video_name)
        one_video_lon = np.array(one_video_lon)
        one_video_lat = np.array(one_video_lat)

        print('>>>>>>>>>>>>>>>>>>>>>>>step_total: ', step_total)
        for i_step in range(0, step_total):

            if i_step >= 0:  # for debug; always true, processes every step
                i_frame = i_step * frame_step

                one_frame_lon = [
                    np.round(float(i)) for i in one_video_lon[:, i_frame]
                ]
                one_frame_lat = [
                    np.round(float(i)) for i in one_video_lat[:, i_frame]
                ]

                fixation_data = []
                for i in range(len(one_frame_lon)):
                    if one_frame_lon[i] >= 360:
                        one_frame_lon[i] = 359
                    if one_frame_lat[i] >= 180:
                        one_frame_lat[i] = 179  # fix: clamp latitude, not longitude
                    fixation_data.append([one_frame_lon[i], one_frame_lat[i]])

                hmap = self.fixation2salmap_sp_my_sigma(
                    fixation_data, 360, 180)
                # plt.subplot(211)
                # plt.scatter(one_frame_lon, one_frame_lat)
                # plt.xlim( 180, -180 )
                # plt.ylim( -90, 90 )
                # plt.subplot(212)
                # plt.imshow(hmap)
                # plt.show()

                self.save_heatmap(heatmap=hmap,
                                  path='/Data/Hmaps_0120/' + model_name,
                                  name=video_name + '_' + str(i_step))

                print(
                    '>>>>>>>>>>>>>>>>>>>>>>> processing finished: step%d/%d' %
                    (i_step, step_total))
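
The fixation2salmap_sp_my_sigma helper is not shown in these snippets. A minimal sketch of the general technique, assuming a fixed Gaussian sigma and the same 360x180 lon/lat grid used above (the function name and sigma value are illustrative):

import numpy as np
from scipy.ndimage import gaussian_filter

def fixation2salmap_sketch(fixation_data, map_width=360, map_height=180,
                           sigma=10.0):
    """Accumulate fixation points on a lon/lat grid and blur them."""
    salmap = np.zeros((map_height, map_width), dtype=np.float64)
    for lon, lat in fixation_data:
        # The caller has already clamped points to [0, 359] x [0, 179].
        salmap[int(lat), int(lon)] += 1.0
    salmap = gaussian_filter(salmap, sigma=sigma)
    if salmap.max() > 0:
        salmap /= salmap.max()  # normalize to [0, 1]
    return salmap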
Example #2
    def run(self):
        print('>>>>>>>>>>>>test_sauc')
        from config import video_dic, video_test, f_dic_train, f_dic_for_sauc
        from support import get_video_config, save_txt, read_txt

        frame = 100   # debug leftover; unused below
        i_splits = 1  # shadowed by the loop variable later

        # self.get_shuffed_map(1)

        for i_video in range(len(video_test)):

            if i_video == 14:
                FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
                    video_test[i_video])
                print('>>>>>>>>>>>>>>>>> video, frames', video_test[i_video],
                      FRAMESCOUNT)

                frame_per_step = 10
                total_step = int(FRAMESCOUNT / frame_per_step)

                for i_step in range(total_step):

                    if i_step == 82:
                        hmap_path = '/home/ml/Data/Hmaps/ours/ff_best_heatmaps_ours_without_fcb/' + video_test[
                            i_video] + '_' + str(i_step) + '.jpg'
                        print('>>>>>>>>>>>>>>: ', hmap_path)
                        predict_map = self.get_predict_hmap(hmap_path)
                        ground_fixation = self.get_ground_fixation(
                            video_test[i_video], i_step)

                        plt.subplot(211)
                        plt.imshow(predict_map)
                        plt.subplot(212)
                        plt.imshow(ground_fixation)
                        plt.show()

                        'cal the sauc'
                        sauc = []
                        for i_splits in range(self.Nsplits):
                            shuffed_map = self.get_shuffed_map(i_splits)
                            sauc_0 = self.calc_score(ground_fixation,
                                                     predict_map, shuffed_map)
                            sauc.append(sauc_0)

                            print('>>>>>sauc_splits:%d, score: %f' %
                                  (i_splits, sauc_0))

                        ave_sauc = np.mean(sauc)
                        print('>>>>>>>>>>>. ave_sauc: ', ave_sauc)
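
calc_score is not shown in these snippets. A hedged sketch of the usual shuffled-AUC (sAUC) recipe, where positives are the predicted-map values at ground-truth fixations and negatives are the values at fixation locations taken from the shuffle map (all names below are assumptions):

import numpy as np

def sauc_sketch(ground_fixation, predict_map, shuffled_map):
    # Normalize the prediction to [0, 1].
    pred = predict_map.astype(np.float64)
    pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-12)

    pos = pred[ground_fixation > 0]  # values at true fixation pixels
    neg = pred[shuffled_map > 0]     # values at shuffled fixation pixels

    # Sweep thresholds over the positive values to trace the ROC curve.
    thresholds = np.sort(pos)[::-1]
    tp = np.concatenate([[0.0], [np.mean(pos >= t) for t in thresholds], [1.0]])
    fp = np.concatenate([[0.0], [np.mean(neg >= t) for t in thresholds], [1.0]])
    return np.trapz(tp, fp)          # area under the ROC curve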
Example #3
    def get_total_config(self, video_name):
        from support import get_video_config
        '''load in mat data of head movement'''
        # matfn = '../../'+self.data_base+'/FULLdata_per_video_frame.mat'
        matfn = '/home/ml/video_data_mat.mat'
        data_all = sio.loadmat(matfn)
        # print('>>>>>>>>>>>>>: ', np.shape(data_all))
        self.env_id = video_name
        data = data_all[self.env_id]

        self.subjects_total, self.data_total, self.subjects, _ = get_subjects(
            data, 0)
        print('>>>>>>>>>>>>>>debug1: ', self.subjects_total, self.data_total,
              self.subjects)

        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>subjects_total: " +
              str(self.subjects_total))
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>data_total: " +
              str(self.data_total))
        '''init video and get parameters'''
        # video = cv2.VideoCapture('../../'+self.data_base+'/' + self.env_id + '.mp4')
        FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
            video_name)

        # video = cv2.VideoCapture('/home/minglang/vr_new/'+self.env_id + '.mp4')
        # print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>video: "+video)
        # video = cv2.VideoCapture('/home/minglang/vr_new/A380.mp4')
        self.frame_per_second = FRAMERATE
        self.frame_total = FRAMESCOUNT
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>frame_total: " +
              str(self.frame_total))
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>self.env_id: " +
              str(self.env_id))
        self.video_size_width = int(IMAGEWIDTH)
        self.video_size_heigth = int(IMAGEHEIGHT)
        self.second_total = self.frame_total / self.frame_per_second
        self.data_per_frame = self.data_total / self.frame_total
        '''compute step length from data_tensity'''
        data_tensity = 10
        self.second_per_step = max(
            data_tensity / self.frame_per_second,
            data_tensity / self.data_per_frame / self.frame_per_second)
        self.frame_per_step = self.frame_per_second * self.second_per_step
        self.data_per_step = self.data_per_frame * self.frame_per_step
        '''compute step_total'''
        self.step_total = int(self.data_total / self.data_per_step) + 1

        print(">>>>>>>>>>>>>>>>>step_total: ", str(self.step_total))
Example #4
def get_one_video_predictions():

    for i_video in range(len(video_dic)):
        if i_video == 70:

            'get video config'
            FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
                video_path, video_dic[i_video])

            for i_frame in range(1, FRAMESCOUNT + 1):
                if i_frame >= 1:

                    image_path = '/media/ml/Data1/2_0925_全景视频的眼动研究/Salience_of_FOV/程序/Finding_Content/' + video_dic[
                        i_video] + '_raw_frames/'
                    read_image_path = image_path + '%03d' % i_frame + '.png'

                    get_frame_txt_rename(read_image_path, i_frame,
                                         detec_threshold[i_video])

                    print('>>>>>> processing: video_%d, %s, frame_%03d' %
                          (i_video, video_dic[i_video], i_frame))
Example #5
def get_one_video_predictions_faster(i_video):

    'get video config'
    video_path = '/media/ml/Data1/2_0925_全景视频的眼动研究/VR_杨燕丹/Video_All/'
    read_video_path = video_path + video_dic[i_video] + '.mp4'

    FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
        video_path, video_dic[i_video])
    'start the detect'
    # command_detect_video(read_video_path, detect_threshold[i_video])

    for i_frame in range(1, FRAMESCOUNT + 1):
        if i_frame >= 1:

            find_flag = True

            while find_flag:
                find_flag = get_frame_txt_rename_faster(
                    read_video_path, video_dic[i_video], i_frame,
                    detect_threshold[i_video])

            print('>>>>>> processing: video_%d, %s, frame_%03d' %
                  (i_video, video_dic[i_video], i_frame))
Example #6
    def get_fixation_map(self, video_name):

        FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
            video_name)
        step_size = 10
        frame_step = 20
        step_total = int(FRAMESCOUNT / step_size)

        one_video_lon, one_video_lat = self.get_raw_data(video_name)
        one_video_lon = np.array(one_video_lon)
        one_video_lat = np.array(one_video_lat)
        read_image_path0 = '/home/ml/Pami_reponse_SAUC/Data/Hmaps_0120/groundtruth_0120/_' + video_name + '_'
        save_image_path0 = '/home/ml/OpenSALICON/Data/ml/fixation_images/' + video_name

        ave_sauc_one_video = []
        for i_step in range(step_total):
            if i_step > 0:  # for debug

                read_image_path = read_image_path0 + str(i_step) + '.png'
                save_image_path = save_image_path0 + '_' + '%03d' % (
                    i_step) + '.jpg'
                im = imageio.imread(read_image_path)
                imageio.imwrite(save_image_path, im)
Example #7
    def get_fine_image(self, video_name):

        FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
            video_name)
        step_size = 10
        frame_step = 20
        step_total = int(FRAMESCOUNT / step_size)

        one_video_lon, one_video_lat = self.get_raw_data(video_name)
        one_video_lon = np.array(one_video_lon)
        one_video_lat = np.array(one_video_lat)
        read_image_path0 = '/media/ml/Data1/2_0925_全景视频的眼动研究/Salience_of_FOV/程序/Finding_Content/' + video_name + '_raw_frames/'
        save_image_path0 = '/home/ml/OpenSALICON/Data/ml/fine_images/' + video_name

        ave_sauc_one_video = []
        for i_step in range(step_total):
            if i_step > 0:  # for debug

                read_image_path = read_image_path0 + '%03d' % (10 *
                                                               i_step) + '.png'
                save_image_path = save_image_path0 + '_' + '%03d' % (
                    i_step) + '.jpg'
                im = imageio.imread(read_image_path)
                imageio.imwrite(save_image_path, im)
Example #8
def run():
    print('hello!')

    'config'
    # ----------------------------------------------
    mode_dic = {
        1: 'get_prediction_box',
        2: 'get_fov',
        3: 'get_yuv',
        4: 'test_yuv_import',  # just for debug
        5: 'get_fov_object'
    }

    mode = mode_dic[1]

    if mode == 'get_prediction_box':

        sub_mode = 'get_box'  # 'get_box', 'get_sta'

        if sub_mode == 'get_box':

            for i_video in range(len(video_dic)):
                if i_video == 70:

                    video_path = '/home/ml/learn_darknet/Data/Video_All/'
                    read_video_path = video_path + video_dic[i_video] + '.mp4'

                    p_rename = Process(target=get_one_video_predictions_faster,
                                       args=(i_video, ))
                    p_video_detection = Process(
                        target=command_detect_video,
                        args=(read_video_path, detect_threshold[i_video]))

                    p_rename.start()
                    p_video_detection.start()

                    p_video_detection.join()
                    p_rename.join()

                    p_rename.terminate()

        if sub_mode == 'get_sta':
            """
                statistic the proportion of fixations fall into the box
                """
            from config import video_path
            from read_yuv import read_YUV420, merge_YUV2RGB_v1
            import subprocess

            for i_video in range(len(video_dic)):
                if i_video == 70:
                    'config'
                    FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
                        video_path, video_dic[i_video])

                    for i_frame in range(1, FRAMESCOUNT + 1):
                        if i_frame == 1:
                            read_box_path = '/home/ml/darknet/results/' + video_dic[
                                i_video] + '/test_' + '%03d' % i_video + '.txt'

                            f = open(read_box_path)
                            lines = f.readlines()
                            temp_file = []

                            for line in lines:
                                line = line.split()
                                temp_file.append(line)

                            print(temp_file)

    if mode == 'get_prediction_multi_frame':
        for i_video in range(len(video_dic)):
            if i_video == 70:
                pass

    if mode == 'get_yuv':

        import subprocess
        for i_video in range(len(video_dic)):
            if i_video == 37:
                video_path = '/media/ml/Data1/2_0925_全景视频的眼动研究/VR_杨燕丹/Video_All/'
                read_video_path = video_path + video_dic[i_video] + '.mp4'

                out_put_path = '/media/ml/Data0/yuv/'
                yuv_path = out_put_path + video_dic[i_video] + '.yuv'

                get_yuv(read_video_path, yuv_path)

    if mode == 'get_fov':
        print('>>>>>>>>>>>>>>>>>>>>>>>>mode change to: %s.' % mode)

        from vrplayer import get_view
        from config import video_path
        from read_yuv import read_YUV420, merge_YUV2RGB_v1
        import cv2
        import subprocess

        for i_video in range(len(video_dic)):
            if i_video == 70:
                'config'
                FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
                    video_path, video_dic[i_video])
                'config part1'
                video_size_width = IMAGEWIDTH
                video_size_heigth = IMAGEHEIGHT
                output_height = 1200
                output_width = 1080

                view_range_lon = 110
                view_range_lat = 113

                for i_frame in range(1, FRAMESCOUNT + 1):
                    if i_frame == 400:
                        'do not need png'
                        # image_path = '/media/ml/Data1/2_0925_全景视频的眼动研究/Salience_of_FOV/程序/Finding_Content/' + video_dic[i_video] + '_raw_frames/'
                        # read_image_path = image_path + '%03d'%i_frame + '.png'

                        'config part2'
                        cur_lon = 90
                        cur_lat = 0
                        temp_dir = "test_object/"
                        '''
                        The input is video/frame YUV, and the output is also YUV.
                        For more about YUV and the meaning of the command, refer to:
                            http://blog.csdn.net/beyond_cn/article/details/12998247
                        '''
                        cur_observation = get_view(
                            input_width=video_size_width,
                            input_height=video_size_heigth,
                            view_fov_x=view_range_lon,
                            view_fov_y=view_range_lat,
                            cur_frame=i_frame,
                            is_save=False,
                            output_width=output_width,
                            output_height=output_height,
                            view_center_lon=cur_lon,
                            view_center_lat=cur_lat,
                            video_name=video_dic[i_video],
                            save_dir=temp_dir,
                            file_='/media/ml/Data0/yuv/' + video_dic[i_video] +
                            '.yuv')

    if mode == 'test_yuv_import':
        print('>>>>>>>>>>>>>>>>>>>>>>>>mode change to: %s.' % mode)
        from read_yuv import yuv_import
        import cv2
        from vrplayer import get_view
        from config import video_path
        from PIL import Image

        sub_mode = 'test_yuv2'  # 'convert', 'get_yuv_image', 'test_yuv2'(works)

        if sub_mode == 'convert':

            for i_video in range(len(video_dic)):
                if i_video == 0:
                    'config'
                    FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
                        video_path, video_dic[i_video])

                    for i_frame in range(1, FRAMESCOUNT + 1):
                        if i_frame == 2:

                            yuv_file = '/media/ml/Data0/yuv/' + video_dic[
                                i_video] + '.yuv'

                            print('>>>>>>: ', video_dic[i_video])
                            data = yuv_import(yuv_file,
                                              (IMAGEWIDTH, IMAGEHEIGHT), 1, 0)
                            data = np.array(data)
                            print(data)
                            y = cv2.cvtColor(
                                data, cv2.COLOR_GRAY2BGR)  # expand the gray Y plane to BGR
                            result = np.vstack([y])
                            cv2.imwrite('test_yuv.png', result)
                            print(data)

        if sub_mode == 'get_yuv_image':
            pass
            # img_in = cv2.imread('006.png')
            #
            # img_out = cv2.cvtColor(img_in, cv2.COLOR_BGR2YUV)
            # print(img_out)

        if sub_mode == 'test_yuv':
            'this path still has problems'
            from read_yuv import yuv_import_3, yuv2rgb
            from PIL import Image

            temp_1 = 'frame_100.yuv'
            output_height = 1200
            output_width = 1080
            size = (output_width, output_height)
            data = yuv_import_3(temp_1, (output_height, output_width), 1, 0)
            print('>>>>>>>>>>>>>>>>> data: ', data)

            R_ = data[0][0]
            G_ = data[1][0]
            B_ = data[2][0]
            RGB = yuv2rgb(R_, G_, B_, size[0], size[1])
            im_r = Image.frombytes('L', size, RGB[0].tobytes())
            im_g = Image.frombytes('L', size, RGB[1].tobytes())
            im_b = Image.frombytes('L', size, RGB[2].tobytes())
            # im_r.show()
            # for m in range(2):
            #     print m,': ', R_[m,:]
            co = Image.merge('RGB', (im_r, im_g, im_b))
            # co.show()
            savePath = 'tett_yuv.png'
            print(savePath)
            co.save(savePath)

        if sub_mode == 'test_yuv2':
            'works for yuv 420P'
            from read_yuv import read_YUV420, merge_YUV2RGB_v1

            image_path = 'frame_100.yuv'
            output_height = 1200
            output_width = 1080

            Y, U, V = read_YUV420(image_path, output_height, output_width)

            dst = merge_YUV2RGB_v1(Y, U, V)

            cv2.imshow("dst", dst)
            cv2.imwrite('test_yuv.png', dst)
            cv2.waitKey(0)  # press a key in the image window (not the terminal) to close

    if mode == 'get_fov_object':
        print('>>>>>>>>>>>>>>>>>>>>>>>>mode change to: %s.' % mode)
        from config import subject_dic, video_path
        from support import get_raw_data, fov_detection, fov_center
        from vrplayer import get_view

        source_path = 'filtered_Data'
        for i_video in range(len(video_dic)):
            if i_video == 70:

                'config'
                FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
                    video_path, video_dic[i_video])
                leas_num = round(FRAMERATE * 2)

                'config part1: for get fov image'
                video_size_width = IMAGEWIDTH
                video_size_heigth = IMAGEHEIGHT
                output_height = 1200
                output_width = 1080

                view_range_lon = 110
                view_range_lat = 113

                'get the raw data'
                one_video_lon, one_video_lat, one_video_eye_x, one_video_eye_y, one_video_eye_lon, one_video_eye_lat = get_raw_data(
                    subject_dic, source_path, video_dic[i_video])
                # print('>>>>>>>>>>>>>>>: ',np.shape(one_video_lon))

                'detect the fov for each subject'
                one_video_all_subjects_fov_zone = []
                one_video_all_subjects_fov_center = []
                subplot_mode = 'no_detection_fov'  # 'detect_fov', 'no_detection_fov'

                for i_subject in range(len(subject_dic)):
                    if i_subject == 14:

                        if subplot_mode == 'detect_fov':
                            '>>>>>>>>>>>>>>>>>.step1: detect the fov zone'
                            print('>>>>>>>>>>>>> video:%s_%s subject:%s_%s' %
                                  (video_dic[i_video], str(i_video),
                                   subject_dic[i_subject], str(i_subject)))

                            fov_zone = fov_detection(
                                one_video_lon[i_subject][:],
                                one_video_lat[i_subject][:], 5, leas_num)
                            print('>>>>>>>>>>>>>>>debug1: ', fov_zone)

                            fov_center_location = (fov_center(
                                fov_zone, one_video_lon[i_subject][:],
                                one_video_lat[i_subject][:]))
                            fov_center_location = np.round(fov_center_location)
                            print('>>>>>>>>>>>>>>>debug2: ',
                                  np.round(fov_center_location))

                            one_video_all_subjects_fov_zone.append(fov_zone)
                            one_video_all_subjects_fov_center.append(
                                fov_center_location)
                            # print('>>>>>>>>>>>>>>>:debug3 ', one_video_all_subjects_fov_zone)
                            # print('>>>>>>>>>>>>>>>:debug3.1 ', one_video_all_subjects_fov_zone)

                            '>>>>>>>>>>>>>>>>>.step2: get the fov image'
                            save_fov_image_dir = "test_object/"
                            'for each fov zone'
                            for i_fov in range(len(fov_center_location)):

                                if i_fov >= 0:
                                    # cur_lon = fov_center_location[i_fov][0]
                                    # cur_lat = fov_center_location[i_fov][1]

                                    # i_frame = fov_zone[i_fov][0]

                                    'for each fov frame'
                                    for i in range(fov_zone[i_fov][0],
                                                   fov_zone[i_fov][1]):
                                        i_frame = i
                                        cur_lon = one_video_lon[i_subject][i]
                                        cur_lat = one_video_lat[i_subject][i]

                                        cur_observation = get_view(
                                            input_width=video_size_width,
                                            input_height=video_size_heigth,
                                            view_fov_x=view_range_lon,
                                            view_fov_y=view_range_lat,
                                            cur_frame=i_frame,
                                            is_save=True,
                                            output_width=output_width,
                                            output_height=output_height,
                                            view_center_lon=cur_lon,
                                            view_center_lat=cur_lat,
                                            video_name=video_dic[i_video] +
                                            '_subject_' + str(i_subject) +
                                            '_fov_' + str(i_fov),
                                            save_dir=save_fov_image_dir,
                                            file_='/media/ml/Data0/yuv/' +
                                            video_dic[i_video] + '.yuv')

                        if subplot_mode == 'no_detection_fov':
                            import subprocess

                            # save_fov_image_dir = "test_object/"
                            # for i in range(FRAMESCOUNT):
                            #
                            #     i_frame = i
                            #     cur_lon = one_video_lon[i_subject][i]
                            #     cur_lat = one_video_lat[i_subject][i]
                            #
                            #     cur_observation = get_view(input_width=video_size_width,
                            #                                input_height=video_size_heigth,
                            #                                view_fov_x=view_range_lon,
                            #                                view_fov_y=view_range_lat,
                            #                                cur_frame=i_frame,
                            #                                is_save=True,
                            #                                output_width=output_width,
                            #                                output_height=output_height,
                            #                                view_center_lon=cur_lon,
                            #                                view_center_lat=cur_lat,
                            #                                video_name = video_dic[i_video] + '_subject_' + str(i_subject),
                            #                                save_dir=save_fov_image_dir,
                            #                                file_='/media/ml/Data0/yuv/' + video_dic[i_video]  + '.yuv')

                            subprocess.call([
                                'ffmpeg', '-r',
                                str(FRAMERATE), '-i',
                                'test_object/VRBasketball_subject_1_frame_' +
                                '%03d' + '.png', '-b', '4000k', '-codec',
                                'mpeg4', 'test_object/' + video_dic[i_video] +
                                'subject_' + str(i_subject) + '_fov.mp4'
                            ])
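
read_YUV420 and merge_YUV2RGB_v1 are project helpers that are not shown here. For a single planar YUV 4:2:0 frame, the same result can be sketched with NumPy plus OpenCV's built-in conversion (the function name is illustrative; the 1080x1200 size follows the output dimensions used above):

import numpy as np
import cv2

def read_yuv420_frame_sketch(path, width, height, frame_index=0):
    frame_size = width * height * 3 // 2  # full Y plane + quarter-size U and V
    with open(path, 'rb') as f:
        f.seek(frame_index * frame_size)
        raw = np.frombuffer(f.read(frame_size), dtype=np.uint8)
    yuv = raw.reshape(height * 3 // 2, width)
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)

# bgr = read_yuv420_frame_sketch('frame_100.yuv', 1080, 1200)
# cv2.imwrite('test_yuv.png', bgr)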
Example #9
    def cal_sauc_one_video(self, video_name, model_name):
        """
        cal sauc score of one video in on model
        """
        FRAMERATE, FRAMESCOUNT, IMAGEWIDTH, IMAGEHEIGHT = get_video_config(
            video_name)
        step_size = 10
        frame_step = 20
        step_total = int(FRAMESCOUNT / step_size)

        one_video_lon, one_video_lat = self.get_raw_data(video_name)
        one_video_lon = np.array(one_video_lon)
        one_video_lat = np.array(one_video_lat)

        ave_sauc_one_video = []
        for i_step in range(step_total):
            if i_step >= 0:  # for debug
                'get predicted map'
                hmap_path = 'Data/Hmaps_0120/' + model_name + '/' + video_name + '_' + str(
                    i_step) + '.jpg'
                if model_name == model_name_without_fcb[5]:
                    hmap_path = 'Data/Hmaps_0120/' + model_name + '/_' + video_name + '_' + str(
                        i_step) + '.png'
                predict_map = self.get_predict_hmap(hmap_path)

                if model_name == model_name_without_fcb[
                        3] or model_name == model_name_with_fcb[3]:
                    predict_map = self.FZ(predict_map)

                'get ground fixation list'
                ground_fixation, ground_binary_map = self.get_ground_fixation(
                    video_name, i_step)

                sauc = []
                for i_splits in range(self.Nsplits):
                    shuffed_map = self.get_shuffed_map(i_splits)
                    sauc_0 = self.calc_score(ground_fixation, predict_map,
                                             shuffed_map)
                    sauc.append(sauc_0)
                    'for debug'
                    # plt.subplot(312)
                    # plt.imshow(predict_map)
                    # plt.subplot(313)
                    # plt.imshow(ground_binary_map)
                    # 'test rmap coordinate bug'
                    # hmap_path_4_path = 'Data/Hmaps_0120/' + model_name_without_fcb[4] + '/' + video_name + '_' + str(i_step) + '.jpg'
                    # hmap_path_4 = self.get_predict_hmap(hmap_path_4_path)
                    # hmap_path_4_1 = self.FZ(hmap_path_4)
                    # print(hmap_path_4)
                    # plt.subplot(311)
                    # plt.imshow(hmap_path_4_1)
                    # plt.show()
                    print('>>>>>sauc_splits:%d, score: %f: ' %
                          (i_splits, sauc_0))
                    # print(t)

                ave_sauc = np.mean(sauc)
                ave_sauc_one_video.append(ave_sauc)
                print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.>ave_sauc: ', ave_sauc)

        ave_sauc_one_video = np.mean(ave_sauc_one_video)
        print(
            '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>final: ave_sauc_one_video: ',
            ave_sauc_one_video)

        return ave_sauc_one_video
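
A hedged usage sketch for the method above, averaging the per-video sAUC over a test set (the evaluator instance and the video_test / model_name names are assumptions based on the surrounding snippets):

import numpy as np

# Hypothetical driver: 'evaluator' is an instance of the class above;
# video_test and model_name would come from the project's config module.
scores = [evaluator.cal_sauc_one_video(v, model_name) for v in video_test]
print('model %s: mean sAUC over %d videos = %.4f' %
      (model_name, len(video_test), np.mean(scores)))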