Code Example #1
File: meanvel.py  Project: lewisfish/Triton-dolphin
from pathlib import Path
from typing import List, Tuple

import cv2
import numpy as np

# getFramesCV2, getAltitude, getMagnification, cropFrames, preprocessFrame,
# processContours and _getFullFileName are helpers defined elsewhere in the
# Triton-dolphin project.


def train(info: Tuple[str, int, List[float], int],
          root="/data/lm959/data/",
          crop=False):
    """Training function for HDBSCAN. Actually does the optical flow and
       returns the data needed for training.

    Parameters
    ----------
    info : Tuple[str, int, List[float], int]
        Tuple of video filename, frame number, bounding box of the object,
        and label of the object.

    root : str, optional
        Root of file system location where videos are stored.

    crop : bool, optional
        If true then crop frames to bounding box of object.

    Returns
    -------
    velocitymeterPerSecond : np.ndarray
        Array of point velocities in m/s.
    """

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    fgbg = cv2.createBackgroundSubtractorMOG2()
    root = Path(root)
    videoFiles = list(root.glob("**/*.mp4"))

    vidname, fnumber, box, label = info
    fullname = _getFullFileName(videoFiles, vidname)

    frames, framenums, fps = getFramesCV2(fullname, fnumber, offset=15)
    contourpoints = []

    fpsPerFrame = 1. / fps  # time between consecutive frames, in seconds
    alt = getAltitude(fullname, framenums[1], gpsdataPath="../data/gps/")
    magn = getMagnification(frames[1])

    # empirical fit: dolphin length in pixels from magnification and altitude
    dolphLength = 1714 * (magn / alt) + 16.5
    # 2 m is the estimated dolphin length, so this is pixels per metre
    dolphPixelPerSecond = dolphLength / 2.
    if crop:
        frames = cropFrames(frames, box)

    velocity = np.array([0.])  # fallback if optical flow fails on both passes
    frame = frames[0]
    for i in range(0, 2):
        dilated, gray1 = preprocessFrame(frame, fgbg)
        contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        contourpoints, frame = processContours(contours, contourpoints, frame)
        p0 = np.array(contourpoints, np.float32)
        gray2 = cv2.cvtColor(frames[i + 1], cv2.COLOR_RGB2GRAY)

        try:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None,
                                                **lk_params)
            diff = np.array(p1) - np.array(p0)
            velocity = diff / fpsPerFrame  # pixels per second
            velocity = [np.sqrt(item[0]**2 + item[1]**2) for item in velocity]
            frame = frames[1].copy()
            contourpoints = []
        except Exception:
            # optical flow failed (e.g. no trackable points); skip this pass
            continue

    velocitymeterPerSecond = np.array(velocity) / dolphPixelPerSecond  # px/s to m/s
    return velocitymeterPerSecond
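
For context, a minimal sketch of how train might be invoked; the video name, frame number, bounding box and label below are hypothetical values, not taken from the project:

# Hypothetical example values; a real info tuple would be parsed from the
# labels CSV used elsewhere in the project.
info = ("2017_08_10_1030.mp4",         # video filename (hypothetical)
        250,                           # frame number
        [100.0, 150.0, 300.0, 400.0],  # bounding box x0, y0, x1, y1
        1)                             # object label
velocities = train(info, crop=True)
print(velocities.mean())  # mean tracked speed in m/s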
Code Example #2
import math as maths  # the code below calls maths.sqrt

import cv2 as cv
import numpy as np
import pandas as pd

# getMagnification and getAltitude are project helpers defined elsewhere.


def calculate_velocity(kmeans, label_frequency):
    file = "LabelsdatabyCREEMTEAM.csv"
    rowcounter = 0
    with open(file, "r", encoding='utf-8-sig') as f:
        lines = f.readlines()
        for line in lines:
            split = line.split(",")
            videoname = split[0]
            framenumber = int(split[1])
            x0 = int(split[2])
            y0 = int(split[3])
            x1 = int(split[4])
            y1 = int(split[5])
            labels = split[6].strip()  # strip the trailing newline from the last CSV field
            print(videoname, framenumber, x0, y0, x1, y1)
            rowcounter = rowcounter + 1
            print("row counter------------------", rowcounter)
            lk_params = dict(winSize=(15, 15),
                             maxLevel=2,
                             criteria=(cv.TERM_CRITERIA_EPS
                                       | cv.TERM_CRITERIA_COUNT, 10, 0.03))
            # performing background subtraction to remove noise
            fgbg = cv.createBackgroundSubtractorMOG2()
            contourpoints = []
            box = []
            sheet = []
            cap = cv.VideoCapture(videoname)
            fps = cap.get(cv.CAP_PROP_FPS)
            fpsperframe = 1 / fps
            print(fpsperframe)
            setnumber = framenumber - 15
            print("first frame first iteration", setnumber)
            cap.set(cv.CAP_PROP_POS_FRAMES, setnumber)
            ret, frame = cap.read()
            # finding magnification and altitude
            magnification = getMagnification(frame)
            alt = getAltitude(videoname, framenumber, gpsdataPath="gpsdata/")
            dolpLength = 1714 * (magnification / alt) + 16.5
            # converting the dolphin's length to pixels per metre
            # (2 m is the estimated length of a dolphin)
            dolpPixelpersecond = dolpLength / 2

            # cropping the frame to the given region of interest
            # (NumPy indexes rows (y) before columns (x))
            frame = frame[y0:y1, x0:x1]
            count = 0
            while cap.isOpened():
                # taking a difference of 15 frames within one iteration to see a change in velocity
                setnumber = setnumber + 15
                print("first frame within iteration", setnumber)
                cap.set(cv.CAP_PROP_POS_FRAMES, setnumber)
                ret, frame2 = cap.read()
                frame2 = frame2[y0:y1, x0:x1]
                frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                fgmask = fgbg.apply(frame_gray)
                vis3 = frame2.copy()
                # applying Gaussian blur and dilation
                blur = cv.GaussianBlur(fgmask, (5, 5), 0)
                _, threshglobal = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)

                dilated = cv.dilate(threshglobal, None, iterations=3)
                # OpenCV 3.x findContours returns (image, contours, hierarchy)
                _, contours, _ = cv.findContours(dilated, cv.RETR_EXTERNAL,
                                                 cv.CHAIN_APPROX_SIMPLE)
                # extracting a centre point and bounding box from each contour
                for i in range(0, len(contours)):
                    cnt = contours[i]
                    box.append(cv.boundingRect(cnt))
                    x, y, w, h = box[i]
                    cx = x + (w / 2)
                    cy = y + (h / 2)
                    contourpoints.append([cx, cy])
                    cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                new_gray = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)
                p0 = np.array(contourpoints, np.float32)
                count = count + 1
                if len(p0) > 0:
                    p1, _st, _err = cv.calcOpticalFlowPyrLK(
                        frame_gray, new_gray, p0, None, **lk_params)
                    p1array = np.array(p1)
                    p0array = np.array(p0)

                    difference = p1array - p0array
                    # dividing difference by fps:
                    velocity = np.divide(difference, fpsperframe)

                    velocity_ = [
                        maths.sqrt(each[0]**2 + each[1]**2)
                        for each in velocity
                    ]
                    cv.rectangle(vis3, (x0, y0), (x1, y1), (0, 255, 0), 2)
                elif len(p0) == 0:
                    # no motion detected in the region of interest, so there
                    # are no contour points and hence zero velocity
                    velocity_ = 0
                if count == 2:
                    # taking the mean velocity of all detected contours in the region of interest
                    meanVelocity = np.mean(velocity_)
                    #converting velocity to meters per second
                    # ms ^ -1 = (pixels s ^ -1) / (dolphLength / 2)
                    velocityMeterPerSecond = meanVelocity / dolpPixelpersecond

                    # assigning a cluster to each region of interest on the basis of mean velocity
                    Kmean_label = kmeans.predict(
                        np.array(velocityMeterPerSecond).reshape(-1, 1))
                    # the KMeans label is already the cluster index
                    x = int(Kmean_label[0])
                    data = [
                        videoname, framenumber, x0, y0, x1, y1, labels,
                        meanVelocity, velocityMeterPerSecond, x, rowcounter
                    ]
                    print(data)
                    sheet.append(data)
                    df = pd.DataFrame(sheet)
                    df.to_csv('KMeansResult.csv',
                              mode='a',
                              header=False,
                              index=None,
                              encoding='utf_8_sig')
                    cap.release()
                cv.imshow("global thresh", threshglobal)
                cv.imshow("frame", frame)
                cv.imshow("vis3", vis3)
                frame = frame2
                del contourpoints[:]
                del box[:]
                cv.waitKey(50)
        cv.destroyAllWindows()
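
calculate_velocity expects an already-fitted scikit-learn KMeans model with six clusters, one per label branch above. A minimal sketch of building one, assuming mean velocities in m/s were collected beforehand; the sample values are hypothetical:

import numpy as np
from sklearn.cluster import KMeans

# Hypothetical mean velocities (m/s) gathered from earlier runs.
speeds = np.array([0.1, 0.3, 0.5, 1.2, 1.4, 2.8, 3.1, 4.9, 5.2])

# Six clusters to match the six label branches in calculate_velocity.
kmeans = KMeans(n_clusters=6, random_state=0).fit(speeds.reshape(-1, 1))

# label_frequency is not used in the excerpt shown above.
calculate_velocity(kmeans, label_frequency=None)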
Code Example #3
File: dolphins.py  Project: lewisfish/Triton-dolphin
import time
from copy import copy

import cv2
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import AnchoredText
from scipy import stats
from skimage.color import rgb2ycbcr
from skimage.measure import regionprops

# getAltitude, getMagnification, method1, method2, supressAxs and
# getNPercentileConnectedPixels are project helpers defined elsewhere.


def main(filename, debug: int, noplot: bool, saveplot: bool):
    '''Count dolphins in a single video frame and optionally plot the result.

    Parameters
    ----------

    filename : tuple
        Tuple of (frame number, path to video file).

    debug : int
        Debug level; higher values show extra diagnostic figures.

    noplot : bool
        If True, skip all plotting.

    saveplot : bool
        If True, save the figure rather than displaying it.

    Returns
    -------

    None

    '''

    if filename[0] is None:
        raise IOError("No file provided!!")

    start = time.time()
    # use magnification given by image to remove false positives
    videofile = filename[1]
    alt = getAltitude(videofile, filename[0], gpsdataPath="gps-data/")

    cap = cv2.VideoCapture(videofile)  # OpenCV returns frames in BGR order
    cap.set(cv2.CAP_PROP_POS_FRAMES, filename[0])
    _, frame = cap.read()
    cap.release()

    magn = getMagnification(frame)
    dolpLength = 1714 * (magn / alt) + 16.5  # empirical fit; old fit: 22.38*magn + 4.05

    dolpWidth = dolpLength / 2.195
    dolpArea = np.pi * dolpLength * dolpWidth

    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0

    img = img[130:1030, 0:1990]
    # convert to YCbCr space and take the Y (luma) channel,
    # as this appears to work better than converting to grayscale directly
    data = rgb2ycbcr(img)[:, :, 0]

    if magn >= 4.0:
        labs, dax = method2(img, data, dolpArea, debug=debug)
    else:
        bkgsub, imgmask, dax = method1(img, data, debug=debug)
        labs, dax = getNPercentileConnectedPixels(bkgsub,
                                                  imgmask,
                                                  dolpArea,
                                                  debug=debug)
        # perform watershed segmentation (currently disabled)
        # labs = gradient_watershed(bkgsub, imgmask, magn, debug=debug)

    if not noplot:
        fig, ax = plt.subplots(1, 1)
        fig.canvas.manager.window.move(0, 0)
        ax = supressAxs(ax)
        ax.imshow(img, aspect="auto")

    dcount = 0

    areas = []
    for r in regionprops(labs):
        a = r.major_axis_length
        b = r.minor_axis_length
        ecc = r.eccentricity
        # remove false positives
        if (a > .25 * dolpLength and b > 0
                and 0.7 < ecc < 0.99 and a < 2. * dolpLength):
            areas.append(r.area)

    areas, *_ = stats.sigmaclip(areas, low=2.5, high=2.5)

    for region in regionprops(labs):
        a = region.major_axis_length
        b = region.minor_axis_length
        ecc = region.eccentricity
        # remove false positives
        if (a > .25 * dolpLength and b > 0
                and 0.7 < ecc < 0.99 and a < 2. * dolpLength):
            if region.area in areas:
                dcount += 1
                theta = region.orientation
                centre = region.centroid[::-1]
                ellipse = mpatches.Ellipse(centre,
                                           2. * b,
                                           2. * a,
                                           angle=-np.rad2deg(theta),
                                           fill=False,
                                           color="red",
                                           linewidth=2.)

                if debug > 2:
                    # need copy() as the same artist can't be added to two different figures
                    ellipsecopy = copy(ellipse)
                    dax[0].add_patch(ellipsecopy)
                if not noplot:
                    ax.add_patch(ellipse)
                # with open("output-new-test.dat", "a") as f:
                #     p = str(filename.name).rfind("_")
                #     f.write(f"{str(filename.name)[p+1:-4]}, {region.bbox}" + "\n")

    finish = time.time()
    if not noplot:
        text = f"Total dolphins:{dcount}\n"
        text += f"Total time:{finish-start:.03f}\n"
        text += f"Magnification:{magn}"
        textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5)
        ax.add_artist(textbox)

    print(filename[0], dcount)

    if not noplot:
        if saveplot:
            fig.set_figheight(11.25)
            fig.set_figwidth(20)
            plt.subplots_adjust(top=1,
                                bottom=0,
                                right=1,
                                left=0,
                                hspace=0,
                                wspace=0)
            # plt.savefig(f"output-harder/{str(filename.name)[:-4]}_output_004.png", dpi=96)
            fig.clear()
            plt.close(fig)
        else:
            plt.show()
            fig.clear()
            plt.close(fig)
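
Both regionprops passes in main apply the same geometric filter; as a sketch, that test could be factored into a helper (the function name is illustrative, not part of the project):

def is_dolphin_candidate(region, dolpLength):
    # same thresholds as the inline test in main()
    a = region.major_axis_length
    b = region.minor_axis_length
    ecc = region.eccentricity
    return (0.25 * dolpLength < a < 2.0 * dolpLength
            and b > 0 and 0.7 < ecc < 0.99)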
Code Example #4
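        # fragment of a per-row loop nearly identical to calculate_velocity in
        # Code Example #2; videoname and framenumber come from the enclosing scope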
        fgbg = cv.createBackgroundSubtractorMOG2()
        contourpoints = []
        box = []
        sheet = []
        fileIssue = []
        cap = cv.VideoCapture(videoname)
        fps = cap.get(cv.CAP_PROP_FPS)
        fpsperframe = 1 / fps
        print(fpsperframe)
        setnumber = framenumber - 15
        print("first frame first iteration", setnumber)
        cap.set(cv.CAP_PROP_POS_FRAMES, setnumber)
        ret, frame = cap.read()
        # finding magnification and altitude
        magnification = getMagnification(frame)
        alt = getAltitude(videoname, framenumber, gpsdataPath="gpsdata/")
        dolpLength = 1714 * (magnification / alt) + 16.5
        # converting the dolphin's length to pixels per metre
        # (2 m is the estimated length of a dolphin)
        dolpPixelpersecond = dolpLength / 2

        print("magnification and altitude values-----", magnification, alt)
        count = 0
        while cap.isOpened():
            # taking a difference of 15 frames within one iteration to see a change in velocity
            setnumber = setnumber + 15
            print("first frame within iteration", setnumber)
            cap.set(cv.CAP_PROP_POS_FRAMES, setnumber)
            ret, frame2 = cap.read()
            frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            fgmask = fgbg.apply(frame_gray)
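
All four snippets share the same pixels-to-metres conversion; a worked example under the stated 2 m dolphin-length assumption (the magnification, altitude and speed values below are hypothetical):

# Hypothetical values, for illustration only.
magnification = 4.0
alt = 50.0  # altitude in metres

dolpLength = 1714 * (magnification / alt) + 16.5  # dolphin length in pixels, ~153.6
pixels_per_metre = dolpLength / 2                 # assumes a 2 m dolphin, ~76.8

velocity_px_per_s = 200.0  # hypothetical tracked speed in pixels per second
print(velocity_px_per_s / pixels_per_metre)       # ~2.6 m/s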