Example #1
import errno
import os
import pickle

import numpy as np

from zoidberg import utils

# pipe_name is assumed to be a module-level path to an existing FIFO
# (named pipe); it is not defined in this snippet.


def parent():
    if not os.path.exists(pipe_name):
        print("No pipe exists, exiting")
        return

    # generate some fake data
    stamped_data = [utils.timestamp(), np.random.randn(3, 3)]
    outobj = pickle.dumps(stamped_data)

    # Make sure that the pipe is cleared before putting new data out
    pipe = os.open(pipe_name, os.O_RDONLY | os.O_NONBLOCK)
    try:
        os.read(pipe, 2 ** 16)  # drain any stale payload still sitting in the FIFO
    except OSError as err:
        print(err)
        if err.errno == errno.EAGAIN:
            # EAGAIN means the pipe currently has no data in it
            pass
        else:
            raise
    finally:
        os.close(pipe)

    # Dump latest data to pipe
    try:
        pipe = os.open(pipe_name, os.O_WRONLY | os.O_NONBLOCK)
        os.write(pipe, outobj)
        os.close(pipe)
    except OSError as err:
        if err.errno == errno.ENXIO:
            # ENXIO means no consumer has the read end of the pipe open
            pass
        else:
            raise
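
For context, a matching consumer for this pipe would open the same FIFO for reading and unpickle whatever the producer last wrote. The sketch below is not part of the original listing; the function name and the assumption that the whole payload arrives in a single read are illustrative only.

import errno
import os
import pickle


def read_latest(pipe_name):
    """Return the most recent pickled payload from the FIFO, or None."""
    if not os.path.exists(pipe_name):
        print("No pipe exists, exiting")
        return None

    pipe = os.open(pipe_name, os.O_RDONLY | os.O_NONBLOCK)
    try:
        # small payloads (a timestamp plus a 3x3 array) fit in one read
        raw = os.read(pipe, 2 ** 16)
    except OSError as err:
        if err.errno == errno.EAGAIN:
            # a writer is attached but has not sent any data yet
            return None
        raise
    finally:
        os.close(pipe)

    if not raw:
        # no writer currently has the pipe open for writing
        return None
    return pickle.loads(raw)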
Example #2
    def find_rect(self, image, depth):
        """Single rectangular buoy detection"""
        markers = 0
        threshold = 10

        # find buoy by raising the blue channel threshold until a component appears
        while np.amax(markers) == 0:
            threshold += 5
            _, img = cv2.threshold(image[:, :, 0], threshold, 255,
                                   cv2.THRESH_BINARY_INV)
            # debug view; a cv2.waitKey() call elsewhere is needed to render it
            cv2.imshow('img', img)
            _, markers = cv2.connectedComponents(img)

        # define kernel and remove noise
        kernel = np.ones((5, 5), np.uint8)
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img = cv2.morphologyEx(img, cv2.MORPH_DILATE, kernel)

        # get pixels of buoy and relay information
        nonzero = cv2.findNonZero(img)
        x, y, w, h = 0, 0, 0, 0
        if nonzero is not None:
            x, y, w, h = cv2.boundingRect(nonzero)

        # determine if detected object is ideal
        if (x != 0 and y != 0) or (h > 20 and w > 20):
            # ideal detected object was found
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            x_c = int(x)
            y_c = int(y)
            h_c = int(h)
            w_c = int(w)
            bb_ul = (x_c - w_c, y_c - h_c)
            bb_lr = (x_c + w_c, y_c + h_c)
            self.detections = []
            self.frame_num += 1
            self.buoy_detection(timestamp(), bb_ul, bb_lr)

        else:
            self.detections = None
Example #3
def standard_output(detection_in):
    """Return up to three objects"""
    processing_timestamp = utils.timestamp()
    # image_timestamp is expected to be defined at module scope (see the next example)
    tstring = [image_timestamp] + detection_in + [processing_timestamp]
    detect_string = ", ".join(tstring)
    return detect_string
Example #4

timestamp format: 19_067_11_50_05_561
year_JulianDay_Hour_Min_Sec_MS
"""


import time
import datetime
from zoidberg import utils

object_001 = ['001', '463', '0432', '034', '056']
object_002 = ['002', '023', '0123', '347', '021']
object_003 = ['003', '765', '1002', '745', '102']

# Each image gets a time-based identifier
image_timestamp = 'img_' + utils.timestamp()

def standard_output(detection_in):
    """Return up to three objects"""
    processing_timestamp = utils.timestamp()
    tstring = [image_timestamp] + detection_in + [processing_timestamp]
    detect_string = ", ".join(tstring)
    return detect_string

for ob in [object_001, object_002, object_003]:
    print(standard_output(ob))
    time.sleep(1)
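
utils.timestamp() is imported from the zoidberg package and is not reproduced in this listing. Going by the format documented in the docstring above (year_JulianDay_Hour_Min_Sec_MS, e.g. 19_067_11_50_05_561), a minimal stand-in could look like the sketch below; it is an approximation for readers without the package, not the package's own implementation.

import datetime


def timestamp():
    """Return the current time formatted as YY_JJJ_HH_MM_SS_mmm."""
    now = datetime.datetime.now()
    millis = now.microsecond // 1000
    return now.strftime('%y_%j_%H_%M_%S_') + '{:03d}'.format(millis)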
Example #5
    def gate_detection(self, gate_legs):
        """Gate detection object creation"""
        r_g_ul_x = None
        r_g_ul_y = None
        r_g_br_x = None
        r_g_br_y = None
        r_g_w = None
        r_g_h = None
        l_g_ul_x = None
        l_g_ul_y = None
        l_g_br_x = None
        l_g_br_y = None
        l_g_w = None
        l_g_h = None
        gate_ul_x = None
        gate_ul_y = None
        gate_br_x = None
        gate_br_y = None
        gate_w = None
        gate_h = None

        r = Detection()
        l = Detection()
        g = Detection()

        if gate_legs[0].x > gate_legs[1].x:
            # gate_legs[0] is the right gate leg, gate_legs[1] is the left
            r_g_ul_x = gate_legs[0].x
            r_g_ul_y = gate_legs[0].y
            r_g_w = gate_legs[0].w
            r_g_h = gate_legs[0].h
            r_g_br_x = r_g_ul_x + r_g_w
            r_g_br_y = r_g_ul_y + r_g_h
            l_g_ul_x = gate_legs[1].x
            l_g_ul_y = gate_legs[1].y
            l_g_w = gate_legs[1].w
            l_g_h = gate_legs[1].h
            l_g_br_x = l_g_ul_x + l_g_w
            l_g_br_y = l_g_ul_y + l_g_h

        else:
            # gate_legs[1] is the right gate leg, gate_legs[0] is the left
            l_g_ul_x = gate_legs[0].x
            l_g_ul_y = gate_legs[0].y
            l_g_w = gate_legs[0].w
            l_g_h = gate_legs[0].h
            l_g_br_x = l_g_ul_x + l_g_w
            l_g_br_y = l_g_ul_y + l_g_h
            r_g_ul_x = gate_legs[1].x
            r_g_ul_y = gate_legs[1].y
            r_g_w = gate_legs[1].w
            r_g_h = gate_legs[1].h
            r_g_br_x = r_g_ul_x + r_g_w
            r_g_br_y = r_g_ul_y + r_g_h

        gate_ul_x = l_g_ul_x
        gate_ul_y = l_g_ul_y
        gate_br_x = r_g_br_x
        gate_br_y = r_g_br_y
        gate_w = int(abs(gate_ul_x - gate_br_x))
        gate_h = int(abs(gate_ul_y - gate_br_y))

        """ 
        cv2.rectangle(origin, (gate_ul_x, gate_ul_y), (gate_br_x, gate_br_y), \
                     (255, 255, 255), 10)
        cv2.imshow('origin', origin)
        """

        r.write_gate(self.frame_num, self.r_gate_ID, timestamp(),
                     r_g_ul_x, r_g_ul_y, r_g_br_x, r_g_br_y,
                     r_g_w, r_g_h)

        l.write_gate(self.frame_num, self.l_gate_ID, timestamp(),
                     l_g_ul_x, l_g_ul_y, l_g_br_x, l_g_br_y,
                     l_g_w, l_g_h)

        g.write_gate(self.frame_num, self.g_gate_ID, timestamp(),
                     gate_ul_x, gate_ul_y, gate_br_x, gate_br_y,
                     gate_w, gate_h)
        
        self.frame_num += 1
        self.detections.append(r)
        self.detections.append(l)
        self.detections.append(g)
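
Detection and its write_gate() method are defined elsewhere in the project and are not shown in this listing. The stub below only sketches the interface implied by the calls above, with guessed attribute names, so the example can be read on its own.

class Detection:
    """Hypothetical stand-in for the project's Detection container."""

    def write_gate(self, frame_num, obj_id, stamp,
                   ul_x, ul_y, br_x, br_y, width, height):
        # frame and object bookkeeping
        self.frame_num = frame_num
        self.obj_id = obj_id
        self.stamp = stamp
        # bounding box corners and size
        self.upper_left = (ul_x, ul_y)
        self.bottom_right = (br_x, br_y)
        self.width = width
        self.height = height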
Example #6
    def find_buoy(self, img):
        """Find objects by contour"""
        # set the bounds on the height-to-width shape ratio accepted for valid buoys
        ratioMax = 0.5
        ratioMin = 0.2

        # Step the frame number
        self.frame_num += 1

        img_color = img.copy()
        scan_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # initial function scan to find all contours
        scan_img = cv2.Sobel(scan_img, cv2.CV_8U, 0, 1, ksize=5)
        scan_img = cv2.threshold(scan_img, 200, 255, cv2.THRESH_BINARY)[1]
        scan_img = cv2.erode(scan_img, None, iterations=1)
        scan_img = cv2.dilate(scan_img, None, iterations=1)


        # find contours; [-2] picks the contour list whether cv2.findContours
        # returns two values (OpenCV 2/4) or three (OpenCV 3)
        contours = cv2.findContours(scan_img,
                                    cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]

        # initialize the list of candidate buoy contours
        buoys = []

        for contour in contours:
            # if the length of the contour is long enough to be a potential buoy
            if (len(contour) > 45):

                # find extreme points in the contour for buoy validation
                max_Pt = [9999999, 9999999]  # topmost point (smallest y); start huge so any point replaces it
                endPt1 = [9999999999, 0]  # leftmost point (smallest x)
                endPt2 = [-1, 0]  # rightmost point (largest x)

                for Point in contour:
                    # temporary coordinates for comparison
                    temp_y = Point[0][1]
                    temp_x = Point[0][0]

                    # make decision based on location of temporary x and y coords
                    if (temp_y < max_Pt[1]):
                        max_Pt = Point[0]
                    if (temp_x < endPt1[0]):
                        endPt1 = Point[0]
                    if (temp_x > endPt2[0]):
                        endPt2 = Point[0]

                # keep whichever end point sits higher in the frame (smaller y)
                finalEndPt = endPt1
                if endPt1[1] > endPt2[1]:
                    finalEndPt = endPt2

                # calculate width and height of contour
                w = abs(max_Pt[0] - finalEndPt[0])
                h = abs(max_Pt[1] - finalEndPt[1])

                # compute the shape ratio used to decide whether the contour
                # looks like a buoy; noise can still pass this test
                if (w != 0):
                    ratio = (h / (2 * w))
                else:
                    ratio = 0

                if ratioMin <= ratio <= ratioMax:
                    buoys.append(contour)


        # reset the detection list for this frame
        self.detections = []

        for buoy in buoys:
            # find the initial (x, y) center coordinates and radius of the contour
            (x, y), radius = cv2.minEnclosingCircle(buoy)
            x_coord = int(x)
            y_coord = int(y)
            center = (x_coord, y_coord)
            radius = int(radius)

            # *** remove contours that are just noise ***
            # find arbitrary distance away from the center
            dist = radius / 4

            # for storing pixels that could potentially be noise
            outliers = 0

            for point in buoy:
                point = point[0]
                # a pixel below the center and within +/- dist of its x counts as an outlier
                if (point[1] > center[1]
                        and center[0] - dist < point[0] < center[0] + dist):
                    outliers += 1

            # if too many outliers were found, it's noise
            if (outliers > 1):
                continue

            # compute bounding boxes
            bb_ul = (x_coord - radius, y_coord - radius)
            bb_lr = (x_coord + radius, y_coord + radius)

            # relay information to detection creator
            self.buoy_detection(timestamp(), bb_ul, bb_lr)
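
buoy_detection() is not included in this listing. By analogy with the gate_detection() example above, a plausible sketch is that it wraps the bounding-box corners in a Detection object and appends it to self.detections; the method body and attribute names below are assumptions, not the project's actual code.

    def buoy_detection(self, stamp, bb_ul, bb_lr):
        """Sketch: record one buoy detection from its bounding-box corners."""
        det = Detection()
        det.frame_num = self.frame_num
        det.stamp = stamp
        det.upper_left = bb_ul       # (x, y) of the upper-left corner
        det.lower_right = bb_lr      # (x, y) of the lower-right corner
        det.width = bb_lr[0] - bb_ul[0]
        det.height = bb_lr[1] - bb_ul[1]
        self.detections.append(det)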