Example #1
def magicAnalysis(self):

    cv2.ocl.setUseOpenCL(True)
    self.w.analyzeVideoBar.set(0)
    self.w.analyzeVideoBar.show(True)

    data_source = self.settingsDict['recordingFolder']
    showVideo = self.settingsDict['showVideoAnalysis']

    export_source = join(data_source, "exports", "000")

    # The video resolution is automatically read from the info.csv file if available
    video_w = 1280
    video_h = 720

    # Start the video capture from file
    video_source = join(data_source, "world.mp4")

    if not os.path.isfile(video_source):
        print(f"Video not found at {video_source}")
        return None

    cap = cv2.VideoCapture(video_source)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    ##### read record info.csv #####
    info = readInfo(data_source)

    try:
        # this data is not always available
        video = info["World Camera Resolution"].split("x")
        video_w, video_h = int(video[0]), int(video[1])
        print("The video resolution is={video_w}x{video_h}")

    except Exception as ee:
        print("Unable to automatically read the video resolution.")
        print(ee)

    ##### read pupil_positions.csv #####
    # Unpacking the gaze data
    gaze_positions, gaze_positions_x, gaze_positions_y = readGaze(export_source)

    prev_frame_index = 0
    gaze_pix_positions = []
    gaze_frame_list_x = []
    gaze_frame_list_y = []
    gaze_frame_list_time = []

    prev_frame_x = 0
    prev_frame_y = 0

    # let's see how fast this thing can go
    start_time = t.time()
    index = -1

    # going through all the gaze samples

    for gaze_sample in gaze_positions:
        index += 1

        frame_index = int(gaze_sample[1])
        frame_time = float(gaze_sample[0])

        if frame_index != prev_frame_index:

            # making sure the sample is within the frame
            gaze_frame_list_x = np.clip(gaze_frame_list_x, 0, video_w-1)
            gaze_frame_list_y = np.clip(gaze_frame_list_y, 0, video_h-1)

            gaze_pix_positions.append((frame_index, gaze_frame_list_x, gaze_frame_list_y, gaze_frame_list_time))
            gaze_frame_list_x = []
            gaze_frame_list_y = []
            gaze_frame_list_time = []

        if float(gaze_sample[2]) > 0.6:   # making sure the sample is good enough

            # scaling it to a pixel value from the normalized coordinates (0-1)
            gaze_frame_list_x.append(int(float(gaze_positions_x[index]) * video_w))
            gaze_frame_list_y.append(int((1-float(gaze_positions_y[index])) * video_h))

            # store the last good gaze position, used to replace low-confidence samples
            prev_frame_x = int(float(gaze_sample[3]) * video_w)
            prev_frame_y = int((1-float(gaze_sample[4])) * video_h)

        else:     # replace low confidence values
            gaze_frame_list_x.append(prev_frame_x)
            gaze_frame_list_y.append(prev_frame_y)

        gaze_frame_list_time.append(float(frame_time))
        prev_frame_index = frame_index

    ##### end read pupil_positions.csv #####

    frame_index = -1
    frame_index_alt = 0

    gaze_positions_x = []
    gaze_positions_y = []

    row = ["frame_index", "time", "AVGlum", "SpotLum"]

    # Check if the video capture opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    with open(join(data_source, 'outputFromVideo.csv'), 'w', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(row)  # header row

        # Read until video is completed
        while cap.isOpened():

            # Capture frame-by-frame
            ret, frame = cap.read()
            frame_index += 1

            if ret:
                gaze_frame_n = gaze_pix_positions[frame_index_alt][0]

                if frame_index+1 > gaze_frame_n and frame_index_alt+2 < len(gaze_pix_positions):
                    frame_index_alt = frame_index_alt+1
                    gaze_frame_n = gaze_pix_positions[frame_index_alt][0]

                if gaze_frame_n == frame_index+1:
                    gaze_frame_n = gaze_pix_positions[frame_index_alt][0]

                    gaze_positions_x = gaze_pix_positions[frame_index_alt][1]
                    gaze_positions_y = gaze_pix_positions[frame_index_alt][2]
                    gaze_positions_time = gaze_pix_positions[frame_index_alt][3]

                    # mean and standard deviation of the rgb values
                    lumMean, lumStddev = cv2.meanStdDev(frame)

                    lum = relativeLuminanceClac(lumMean[0], lumMean[1], lumMean[2])            # mean relative luminance
                    avgStd = (float(lumStddev[0])+float(lumStddev[1])+float(lumStddev[2])) / 3  # mean sd across rgb

                    for i in range(0, len(gaze_positions_time)):
                        # for each gaze position relative to this frame
                        # frame=cv2.GaussianBlur(frame,(11,11),cv2.BORDER_DEFAULT)

                        # select the area with similar color and luminance around the gaze point within the specified tolerance

                        selection = magicSelection(frame,
                                                   gaze_positions_x[i],
                                                   gaze_positions_y[i],
                                                   avgStd*1.5,
                                                   connectivity=8)
                        stats = selection.return_stats()    # read the mean rgb of the selection

                        if showVideo:
                            selection.show()

                        R_pixval = float(stats[0])
                        G_pixval = float(stats[1])
                        B_pixval = float(stats[2])

                        pixval = relativeLuminanceClac(R_pixval, G_pixval, B_pixval)   # mean relative luminance of the selection
                        row = [frame_index, gaze_positions_time[i], float(lum), pixval]
                        writer.writerow(row)

                    # print and update the progress percentage every 100 frames
                    if frame_index % 100 == 0:
                        print(round((frame_index / length) * 100), "%")
                        self.w.analyzeVideoBar.set(round((frame_index/length)*100))

                # Press Q on the keyboard to exit
                if showVideo:
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

            else:
                # end of the video; the with-block closes the CSV file
                break

    cap.release()

    # Close all OpenCV windows
    cv2.destroyAllWindows()
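
The helper relativeLuminanceClac used above is not shown in this listing. A minimal sketch of what such a helper presumably computes, assuming 8-bit sRGB channel values and the ITU-R BT.709 luminance weights; the name relative_luminance and the linearization step are assumptions, not the original implementation. Note that cv2.meanStdDev returns channels in BGR order, so the caller is responsible for passing them in the order the helper expects.

def relative_luminance(r, g, b):
    # Hypothetical stand-in for relativeLuminanceClac: linearize 8-bit
    # sRGB channels, then apply the ITU-R BT.709 luminance weights.
    def linearize(c):
        c = float(c) / 255.0
        return c / 12.92 if c <= 0.04045 else ((c + 0.055) / 1.055) ** 2.4
    return (0.2126 * linearize(r) +
            0.7152 * linearize(g) +
            0.0722 * linearize(b))

print(relative_luminance(128, 128, 128))  # mid grey -> ~0.22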
Example #2
def lumAnalysis(self):
    # self.plot.close()
    data_source = self.settingsDict['recordingFolder']
    lux_data_source = self.settingsDict['luxFolder']
    print(lux_data_source)
    recording_name = data_source.split("/")[-1]

    recording_source = os.path.dirname(data_source)

    # export inside the recording
    export_source = join(data_source, "exports", "000")
    # export all in a separate folder
    export_source_alt = self.settingsDict['exportFolder']

    # PlotSize
    fig, ax = self.plot.subplots(figsize=(10, 5))
    ax.set_ylim(-5, 10)

    ##### unified pupil size #####
    age = self.settingsDict['partAge']
    referenceAge = 28.58
    nOfEye = 2
    fieldAngle = 167

    ##### analysis settings #####
    useCamera = self.settingsDict['useCamera']

    confidence_treshold = 0.6
    filterForConf = True

    ##### end config #####
    timelag = self.settingsDict['timelag']

    sampleFreq = 120
    distSampleLenght = 1 * sampleFreq  # one second of eye frames at 120 fps

    pupilFiltering = int(self.settingsDict['pupilFiltering']) * 2

    sampleFreqCamera = 30

    export = self.settingsDict['exportData']
    showPlot = self.settingsDict['showPlot']

    ##### read record info #####
    pupil_coulmn = 6  # column 13 is the diameter in mm, column 6 in px
    pupil_offset = 0

    pupilData = readPupil(export_source)
    recordingInfo = readInfo(data_source)

    # get Time from the info file
    recStartTime = datetime.fromtimestamp(
        float(recordingInfo["Start Time (System)"]))
    recStartTimeAlt = float(recordingInfo["Start Time (Synced)"])
    bootTime = datetime.fromtimestamp(
        float(recordingInfo["Start Time (System)"]) - recStartTimeAlt)
    timeFromBoot = recStartTime - bootTime
    recDuration = recordingInfo["Duration Time"].split(":")
    recDurationSeconds = timedelta(
        seconds=(int(recDuration[0]) * 60 + int(recDuration[1])) * 60 +
        int(recDuration[2]))
    recEndTime = recStartTime + recDurationSeconds

    print("Reconding started at :", recStartTime)
    print("Computer booted  at :", bootTime)
    print("It was on for :", timeFromBoot)
    print("The recording lasted :", recDuration)

    pupilValues = processPupil(pupilData, pupil_coulmn, recStartTimeAlt,
                               filterForConf, confidence_treshold)
    recPupilValues, recTimeStamps, recFrames, recSimpleTimeStamps, recConfidence = pupilValues

    # remove NaNs from the pupil array
    recPupilValues = interpnan(recPupilValues)

    recPupilValues_filter = signal.savgol_filter(recPupilValues,
                                                 1 * sampleFreq + 1, 2)

    recPupilValues = signal.savgol_filter(recPupilValues,
                                          int(sampleFreq / 10) + 1, 6)
    recConfidence = signal.savgol_filter(recConfidence,
                                         int(sampleFreq / 10) + 1, 6)

    luxTimeStamps, luxValues = readLux(lux_data_source, data_source,
                                       recStartTime, recEndTime)
    luxTimeStamps = [x - timelag for x in luxTimeStamps]
    # filtered set of lux (10fps)
    luxValues = signal.savgol_filter(interpnan(luxValues), 10 + 1, 6)

    luxValues = upsampleLux(luxTimeStamps, luxValues, recTimeStamps,
                            recordingInfo, True)

    pupilValue = calcPupil(luxValues, age, referenceAge, nOfEye, fieldAngle)
    luxPupilValues = interpnan(pupilValue)

    meanLux = np.nanmean(luxPupilValues, axis=0)
    meanRec = np.nanmean(recPupilValues_filter, axis=0)

    stdLux = np.nanstd(luxPupilValues)
    stdRec = np.nanstd(recPupilValues_filter)

    pupil_coeff = meanLux / meanRec

    # pupil_coeff = ( meanLux-stdLux )/ (meanRec - stdRec )
    print(f"calculated pupil_coeff={pupil_coeff}")

    recPupilValues_scaled = [x * pupil_coeff for x in recPupilValues]
    recPupilValues_filter_scaled = [
        x * pupil_coeff for x in recPupilValues_filter
    ]

    graphPlot(self.plot, recSimpleTimeStamps, luxPupilValues, "blue", 0.8,
              "Sensor Calculated Pupil")

    if not useCamera:
        graphPlot(self.plot, recSimpleTimeStamps, recPupilValues_scaled,
                  "gray", 0.5, "Raw EyeTracker Pupil")
        graphPlot(self.plot, recSimpleTimeStamps, recPupilValues_filter_scaled,
                  "black", 0.8, "Smoothed EyeTracker Pupil")

    if useCamera:
        indexLum, timeStampsLum, avgLum, spotLum = readCamera(data_source)

        avgLum = upsampleLux(timeStampsLum, avgLum, recTimeStamps,
                             recordingInfo, False)
        spotLum = upsampleLux(timeStampsLum, spotLum, recTimeStamps,
                              recordingInfo, False)

        scaledSpotLum = []
        for i in range(0, len(recTimeStamps)):

            sensorLux = luxValues[i]
            cameraALum = avgLum[i]
            cameraSLum = spotLum[i]

            cameraLum_min = sensorLux / (cameraALum * 10 + 1)
            cameraLum_max = cameraLum_min * 11

            # linear interpolation method
            scaledSpot = ((cameraLum_max * cameraSLum) +
                          (cameraLum_min * (1 - cameraSLum))) / 2
            scaledSpotLum.append(scaledSpot)

        scaledSpotLum = signal.savgol_filter(
            interpnan(interpzero(scaledSpotLum)), sampleFreq * 3 + 1, 1)

        spotPupilValues = calcPupil(scaledSpotLum, age, referenceAge, nOfEye,
                                    fieldAngle)

        meanLum = np.nanmean(spotPupilValues, axis=0)
        meanRec = np.nanmean(recPupilValues_filter, axis=0)

        stdLum = np.nanstd(spotPupilValues)
        stdRec = np.nanstd(recPupilValues_filter)

        pupilLum_coeff = meanLum / meanRec

        print(f"pupilLum_coeff={pupilLum_coeff}")

        recPupilValues_filter_scaled_Lum = [
            x * pupilLum_coeff for x in recPupilValues_filter
        ]

        graphPlot(self.plot, recSimpleTimeStamps, spotPupilValues, "orange", 1,
                  "Camera Calculated Pupil")

        graphPlot(self.plot, recSimpleTimeStamps,
                  recPupilValues_filter_scaled_Lum, "black", 0.8,
                  "Smoothed EyeTracker Pupil")

    if useCamera:
        distanceVal, distanceTime = drawDistance(
            self.plot, recPupilValues_filter_scaled_Lum, spotPupilValues,
            recSimpleTimeStamps, distSampleLenght, pupilFiltering)
    else:
        distanceVal, distanceTime = drawDistance(
            self.plot, recPupilValues_filter_scaled, luxPupilValues,
            recSimpleTimeStamps, distSampleLenght, pupilFiltering)

    handles, labels = self.plot.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))

    self.plot.legend(by_label.values(), by_label.keys())

    self.plot.xlabel('Time [s]')
    self.plot.ylabel('Pupil diameter [mm]')
    self.plot.title(f"CW{recording_name}")
    if showPlot:
        self.plot.savefig(join(export_source, f'plot{recording_name}.pdf'),
                          bbox_inches='tight')
        self.plot.savefig(join(export_source_alt,
                               f'plot_{recording_name}.pdf'),
                          bbox_inches='tight')

    if export:
        csv_header = [
            "timestamp_unix", "timestamp_relative", "frame_n", "confidence",
            "mm_pupil_diameter_scaled", "mm_pupil_diameter_calc_lux",
            "px_pupil_diameter_raw", "recording_name", "age"
        ]

        csv_rows = [
            recTimeStamps, recSimpleTimeStamps, recFrames, recConfidence,
            recPupilValues_filter_scaled, luxPupilValues, recPupilValues,
            recording_name, age
        ]

        if useCamera:
            csv_header.append("mm_pupil_diameter_calc_camera")
            csv_rows.append(spotPupilValues)

        saveCsv(export_source, "pupilOutput.csv", csv_header, csv_rows)
        saveCsv(export_source_alt, f"{recording_name}_pupilOutput.csv",
                csv_header, csv_rows)

        csv_header = [
            "drelative_wl", "timestamp_relative", "recording_name", "age",
            "timestamp_unix"
        ]
        distanceTimeEpoch = [
            x + float(recordingInfo["Start Time (System)"])
            for x in distanceTime
        ]
        csv_rows = [
            distanceVal, distanceTime, recording_name, age, distanceTimeEpoch
        ]

        saveCsv(export_source_alt, f"{recording_name}_pupilOutputDistance.csv",
                csv_header, csv_rows)
        saveCsv(export_source, "pupilOutputDistance.csv", csv_header, csv_rows)

    if showPlot:
        self.plot.show(block=False)
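
calcPupil is not defined in this listing either, but its parameters (lux values, participant age, a reference age of 28.58, number of eyes, and a 167° field angle) match the Watson & Yellott (2012) unified formula for light-adapted pupil diameter. A sketch under that assumption, for a single luminance sample; the names and constants here come from the published formula, not from the original helper, which evidently also vectorizes over an array of lux values:

import math

def calc_pupil(lux, age, reference_age=28.58, n_of_eye=2, field_angle=167):
    # Hypothetical reconstruction of calcPupil via the Watson & Yellott (2012)
    # unified formula; returns the pupil diameter in mm for one lux sample.
    field_area = math.pi * (field_angle / 2.0) ** 2  # circular field, deg^2
    monocular = 0.1 if n_of_eye == 1 else 1.0        # one eye sees ~1/10 the flux
    flux = lux * field_area * monocular              # effective corneal flux
    term = (flux / 846.0) ** 0.41
    d_sd = 7.75 - 5.75 * (term / (term + 2.0))       # Stanley-Davies diameter
    # age correction relative to the reference observer
    return d_sd + (age - reference_age) * (0.02132 - 0.009562 * d_sd)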
Example #3
def magicAnalysis(self):

    print("Your cpu has ", multitasking.config["CPU_CORES"], " cores")

    cv2.ocl.setUseOpenCL(True)

    # start with an empty progress bar
    self.w.analyzeVideoBar.set(0)
    self.w.analyzeVideoBar.show(True)

    # read initial parameters from the interface

    data_source = self.settingsDict['recordingFolder']
    showVideo = self.settingsDict['showVideoAnalysis']
    export_source = join(data_source, "exports", "000")

    cv_threads = int(multitasking.config["CPU_CORES"]) * 2

    # if showVideo:
    #     cv_threads = int(multitasking.config["CPU_CORES"])

    # The video resolution is automatically read from the info.csv file if available
    video_w = 1280
    video_h = 720

    # Start the video capture from file
    video_source = join(data_source, "world.mp4")
    #video_source = "/Users/giovannipignoni/Downloads/file_example_MP4_1920_18MG.mp4"

    if os.path.isfile(video_source) is False:
        print(f"Video not found at {video_source}")
        return None

    # count the frames in the video
    cap = cv2.VideoCapture(video_source)
    frames_n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print("frames_n=", frames_n)
    cap.release()

    ##### read record info.csv #####
    info = readInfo(data_source)

    try:
        # this data is not always available
        video = info["World Camera Resolution"].split("x")
        video_w, video_h = int(video[0]), int(video[1])
        print("The video resolution is={}x{}".format(video_w, video_h))

    except Exception as ee:
        print("Unable to automatically read the video resolution.")
        print(ee)

    ##### read pupil_positions.csv #####
    # Unpacking the gaze data
    gaze_positions, gaze_positions_x, gaze_positions_y = readGaze(export_source)

    prev_frame_index = 0

    gaze_list_max_frame = int(gaze_positions[-1][1])
    gaze_list_min_frame = int(gaze_positions[0][1])

    gaze_pix_size = frames_n

    if gaze_list_max_frame > gaze_pix_size:
        gaze_pix_size = gaze_list_max_frame

    gaze_pix_positions = [None] * int(gaze_pix_size + 1)

    gaze_frame_list_x = []
    gaze_frame_list_y = []
    gaze_frame_list_time = []

    prev_frame_x = 0
    prev_frame_y = 0

    index = 0

    # Reading all the gaze samples

    for gaze_sample in gaze_positions:

        frame_index = int(gaze_sample[1])
        frame_time = float(gaze_sample[0])

        if frame_index != prev_frame_index:

            # making sure the sample is within the frame
            gaze_frame_list_x = np.clip(gaze_frame_list_x, 0, video_w - 1)
            gaze_frame_list_y = np.clip(gaze_frame_list_y, 0, video_h - 1)

            gaze_pix_positions[frame_index] = (frame_index, gaze_frame_list_x,
                                               gaze_frame_list_y,
                                               gaze_frame_list_time)

            gaze_frame_list_x = []
            gaze_frame_list_y = []
            gaze_frame_list_time = []

        if float(gaze_sample[2]) > 0.6:  # making sure the sample is good enough

            # scaling it to a pixel value from the normalized coordinates (0-1)
            gaze_frame_list_x.append(
                int(float(gaze_positions_x[index]) * video_w))
            gaze_frame_list_y.append(
                int((1 - float(gaze_positions_y[index])) * video_h))

            # store the last good gaze position, used to replace low-confidence samples
            prev_frame_x = int(float(gaze_sample[3]) * video_w)
            prev_frame_y = int((1 - float(gaze_sample[4])) * video_h)

        else:  # replace low confidence values
            gaze_frame_list_x.append(prev_frame_x)
            gaze_frame_list_y.append(prev_frame_y)

        gaze_frame_list_time.append(float(frame_time))
        prev_frame_index = frame_index

        index += 1

    ##### end read pupil_positions.csv #####

    # create a list as long as the video to store the results of the analysis
    analised_list = [None] * int(gaze_pix_size + 1)

    # List of the main OpenCV worker threads
    frame_grabbers = []

    # Number of frames to analyse
    print("We will analyse from", gaze_list_min_frame, "to",
          gaze_list_max_frame)

    frames_to_analise_n = gaze_list_max_frame - gaze_list_min_frame

    # Number of frames for each OpenCV thread
    frame_range = int(frames_to_analise_n / cv_threads)

    # Time counter to calculate fps
    start = t.time()

    # List storing the last frame analysed by each thread (used only for visualisation)
    last_selection = [None] * int(cv_threads)

    for cv_thread in range(cv_threads):

        grabber_id = cv_thread

        first_frame = gaze_list_min_frame + (frame_range * cv_thread)

        frame_grabbers.append(
            frameGrabber(grabber_id, video_source, first_frame, frame_range,
                         gaze_pix_positions, analised_list, last_selection,
                         showVideo))

    # The main thread regularly checks whether all the OpenCV threads have finished
    grabbing = True
    if showVideo:
        mosaic_col_n = int(math.sqrt(cv_threads))
        mosaic_row_n = int(cv_threads / mosaic_col_n)

        scale = 1 / mosaic_col_n

        mosaic_empty = np.zeros((video_h, video_w, 3), dtype="uint8")
        mosaic_empty = cv2.resize(mosaic_empty, None, fx=scale, fy=scale)

    while grabbing:
        tempGrabbing = False

        for grabber in frame_grabbers:
            if (grabber.is_alive()):
                tempGrabbing = True
        grabbing = tempGrabbing

        # Count how many frames have been analysed by checking how many list elements are populated
        anzl_frames = 0
        for row in analised_list:
            if row:
                anzl_frames += 1

        fps = int(anzl_frames / (t.time() - start))

        print("The analisis is at",
              round((anzl_frames / frames_to_analise_n) * 100), "%", "@", fps,
              "fps")

        self.w.analyzeVideoBar.set(
            round((anzl_frames / frames_to_analise_n) * 100))

        # The process can be visualised as a mosaic
        if showVideo:
            mosaic_list = []
            mosaic_row = []
            try:
                i = 0
                for selection in last_selection:
                    selection = cv2.resize(selection, None, fx=scale, fy=scale)

                    if i < mosaic_row_n:
                        mosaic_row.append(selection)
                        i += 1

                    else:
                        im_h = cv2.hconcat(mosaic_row)
                        mosaic_list.append(im_h)
                        mosaic_row = []
                        mosaic_row.append(selection)
                        i = 1

                if len(mosaic_row) < mosaic_row_n:
                    diff = mosaic_row_n - len(mosaic_row)
                    for a in range(diff):
                        mosaic_row.append(mosaic_empty)

                im_h = cv2.hconcat(mosaic_row)
                mosaic_list.append(im_h)

                cv2.imshow("Mosaic", cv2.vconcat(mosaic_list))

            except Exception as ee:
                print("No frames to show so far")

        t.sleep(1)

    t.sleep(2)
    print("Final analisis time is", int(t.time() - start), "s")
    print("saving to CSV...")

    row = ["frame_index", "time", "AVGlum", "SpotLum"]

    with open(join(data_source, 'outputFromVideo.csv'), 'w', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(row)  # header row

        for frame_rows in analised_list:

            if frame_rows:
                for gaze_row in frame_rows:
                    writer.writerow(gaze_row)

    print("saved to CSV!")

    self.w.analyzeVideoBar.set(100)

    # Close all OpenCV windows
    cv2.destroyAllWindows()
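
The frameGrabber class that does the per-frame work is not included in this listing. A minimal sketch of the worker it appears to be, assuming each instance owns its own cv2.VideoCapture, seeks to the start of its slice, and fills its share of analised_list; since the caller above never invokes start(), the real class presumably starts its own thread on construction. The per-gaze magicSelection / luminance analysis is elided and replaced by placeholder rows:

import threading
import cv2

class frameGrabber(threading.Thread):
    # Hypothetical sketch: one worker per contiguous range of video frames.
    def __init__(self, grabber_id, video_source, first_frame, frame_range,
                 gaze_pix_positions, analised_list, last_selection, show_video):
        super().__init__(daemon=True)
        self.grabber_id = grabber_id
        self.cap = cv2.VideoCapture(video_source)
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, first_frame)  # seek to our slice
        self.first_frame = first_frame
        self.frame_range = frame_range
        self.gaze_pix_positions = gaze_pix_positions
        self.analised_list = analised_list
        self.last_selection = last_selection
        self.show_video = show_video
        self.start()  # the caller only polls is_alive(), so start immediately

    def run(self):
        for frame_index in range(self.first_frame,
                                 self.first_frame + self.frame_range):
            ret, frame = self.cap.read()
            if not ret:
                break
            gaze = self.gaze_pix_positions[frame_index]
            if gaze is None:
                continue  # no gaze samples were mapped to this frame
            # ... the magicSelection / luminance analysis per gaze point goes
            # here; placeholder rows keep the shape [frame, time, AVGlum, SpotLum]
            self.analised_list[frame_index] = [
                [frame_index, tm, 0.0, 0.0] for tm in gaze[3]
            ]
            self.last_selection[self.grabber_id] = frame  # for the mosaic view
        self.cap.release()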