Example #1
    def __init__(self, x_length=10, y_length=10, origin=(0, 0), resolution=1):
        """ Initialize our interest grid.

            @param x_length:  int, length of the grid along x (columns) [m]
            @param y_length:  int, length of the grid along y (rows) [m]
            @param origin: tuple (x, y), location of the lower-left corner of the image with respect to the grid.
            @param resolution: float, resolution of the grid [meters/cell]
        """
        """ MEMBERS """
        self.height = np.int(
            y_length /
            resolution)  # @var self.height:  y_length of the grid [cells]
        self.width = np.int(
            x_length /
            resolution)  # @var self.width:  x_length of the grid [cells]
        self.origin = origin  # @var self.origin: tuple (x, y), location of the upper-left corner of the image with respect to the grid.
        self.resolution = resolution  # @var self.resolution: resolution of the grid in meters/cell
        self.grid = cv.CreateMat(
            self.height, self.width,
            cv.CV_32FC1)  # @var self.grid:  OpenCV matrix; the actual grid
        self.__temp_grid__ = cv.CreateMat(
            self.height, self.width, cv.CV_32FC1
        )  # @var self.__temp_grid__:  OpenCV matrix; a temporary array for intermediate calculations
        self.typical_dest_list = [
        ]  # [meters] This member contains a list of tuples [(x1,y1),(x2,y2) ...] with the locations of interest points in the /map reference frame
        """ INIT """
        cv.Set(self.grid, 0)
        cv.Set(self.__temp_grid__, 0)
        print "An instance of InterestGrid has been created"
Example #2
 def _mixImageAlphaMask(self, wipeSettings, level, image1, image2, image2mask, mixMat):
     if(level < 0.99):
         wipeMode, wipePostMix, wipeConfig = wipeSettings
         if((wipeMode == WipeMode.Fade) or (wipeMode == WipeMode.Default)):
             valueCalc = int(256 * (1.0 - level))
             rgbColor = cv.CV_RGB(valueCalc, valueCalc, valueCalc)
             whiteColor = cv.CV_RGB(255, 255, 255)
             cv.Set(mixMat, whiteColor)
             cv.Set(mixMat, rgbColor, image2mask)
             cv.Mul(image1, mixMat, image1, 0.004)
             valueCalc = int(256 * level)
             rgbColor = cv.CV_RGB(valueCalc, valueCalc, valueCalc)
             cv.Zero(mixMat)
             cv.Set(mixMat, rgbColor, image2mask)
             cv.Mul(image2, mixMat, image2, 0.004)
             cv.Add(image1, image2, image1)
             return image1
         else:
             if(wipePostMix == False):
                 image2, image2mask = self._wipeImage(wipeMode, wipeConfig, level, image2, image2mask, mixMat, False)
                 cv.Copy(image2, image1, image2mask)
                 return image1
             else:
                 cv.Copy(image1, mixMat)
                 cv.Copy(image2, mixMat, image2mask)
                 return self._wipeMix(wipeMode, wipeConfig, level, image1, mixMat, image2)
     cv.Copy(image2, image1, image2mask)
     return image1
Example #3
def redraw_grid(self):
    if not self.gui:
        return
    cv.Set(self.img_grid, cv.Scalar(0, 0, 0))
    cv.Set(self.img_peephole, cv.Scalar(0, 0, 0))
    self.grid_intersections = []
    self.grid_points_x.sort()
    self.grid_points_y.sort()

    for x in self.grid_points_x:
        cv.Line(self.img_grid, (x, 0), (x, self.img_target.height),
                cv.Scalar(0xff, 0x00, 0x00), 1)
        for y in self.grid_points_y:
            self.grid_intersections.append((x, y))
    self.grid_intersections.sort()
    for y in self.grid_points_y:
        cv.Line(self.img_grid, (0, y), (self.img_target.width, y),
                cv.Scalar(0xff, 0x00, 0x00), 1)
    for x, y in self.grid_intersections:
        cv.Circle(self.img_grid, (x, y),
                  self.config.radius,
                  cv.Scalar(0x00, 0x00, 0x00),
                  thickness=-1)
        cv.Circle(self.img_grid, (x, y),
                  self.config.radius,
                  cv.Scalar(0xff, 0x00, 0x00),
                  thickness=1)
        cv.Circle(self.img_peephole, (x, y),
                  self.config.radius + 1,
                  cv.Scalar(0xff, 0xff, 0xff),
                  thickness=-1)
Example #4
    def update_brightcont(self):
        # The algorithm is by Werner D. Streidt
        # (http://visca.com/ffactory/archives/5-99/msg00021.html)

        if self.contrast > 0:
            delta = 127. * self.contrast / 100
            a = 255. / (255. - delta * 2)
            b = a * (self.brightness - delta)
        else:
            delta = -128. * self.contrast / 100
            a = (256. - delta * 2) / 255.
            b = a * self.brightness + delta

        cv.ConvertScale(self.src_image, self.dst_image, a, b)
        cv.ShowImage("image", self.dst_image)

        cv.CalcArrHist([self.dst_image], self.hist)
        (min_value, max_value, _, _) = cv.GetMinMaxHistValue(self.hist)
        cv.Scale(self.hist.bins, self.hist.bins, float(self.hist_image.height) / max_value, 0)

        cv.Set(self.hist_image, cv.ScalarAll(255))
        bin_w = round(float(self.hist_image.width) / hist_size)

        for i in range(hist_size):
            cv.Rectangle(self.hist_image, (int(i * bin_w), self.hist_image.height),
                         (int((i + 1) * bin_w), self.hist_image.height - cv.Round(self.hist.bins[i])),
                         cv.ScalarAll(0), -1, 8, 0)
       
        cv.ShowImage("histogram", self.hist_image)
Example #5
 def find_lines_in_map_probabilistic(self, map_img):
     #Finds lines in the image using the probabilistic hough transform
     lines = cv.CreateMemStorage(0)
     line_img = cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
     cv.Set(line_img, 255)
     lines = cv.HoughLines2(map_img, cv.CreateMemStorage(), cv.CV_HOUGH_PROBABILISTIC,
                            self.hough_rho, np.pi / 2, self.hough_threshold)
     np_line = np.asarray(lines)
     print "list of probabilistic lines: ", np_line
Example #6
 def replace_color(self,col=None):
     print self.hue[0][0]
     self.hue_val=col
     #cv2.imshow("hue",self.hue)
     if col is not None:
         cv.Set(cv.fromarray(self.hue), (self.hue_val), cv.fromarray(self.mask))
         
     self.scratch=cv2.merge([self.hue,self.sat,self.val])
     self.scratch=cv2.cvtColor(self.scratch,cv2.cv.CV_HSV2BGR)
     print 'replaced'
     return self.scratch
Example #7
 def test_40_tostringRepeat(self):
     cnt = 0
     image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
     cv.Set(image, cv.Scalar(0, 0, 0, 0))
     for i in range(self.repeat * 100):
         image.tostring()
     cnt = cv.CountNonZero(image)
     self.assertEqual(cnt,
                      0,
                      msg="Repeating tostring(): Mean CountNonZero=%.3f" %
                      (1. * cnt / self.repeat))
Example #8
 def get_corners_img(self):
     """
     Gets an image of the salient points of the grid.
     @return: corners_img, cvMat type CV_8UC1, useful to be displayed
     """
     features_x_y_vector = self.get_corners()
     corners_img = cv.CreateMat(self.grid.rows, self.grid.cols, cv.CV_8UC1)
     cv.Set(corners_img, 0)
     for (x, y) in features_x_y_vector:
         corners_img[y, x] = 255
     return corners_img
Example #9
 def test_2686307(self):
     lena = cv.LoadImage(find_sample("lena.jpg"), 1)
     dst = cv.CreateImage((512,512), 8, 3)
     cv.Set(dst, (128,192,255))
     mask = cv.CreateImage((512,512), 8, 1)
     cv.Zero(mask)
     cv.Rectangle(mask, (10,10), (300,100), 255, -1)
     cv.Copy(lena, dst, mask)
     self.snapL([lena, dst, mask])
     m = cv.CreateMat(480, 640, cv.CV_8UC1)
     print "ji", m
     print m.rows, m.cols, m.type, m.step
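For comparison, a rough cv2/NumPy sketch of the fill-plus-masked-copy pattern used in this test (assumes lena.jpg exists and is 512x512; array assignment plays the role of cv.Set and cv.Copy):

import cv2
import numpy as np

lena = cv2.imread("lena.jpg")          # assumed 512x512 BGR image
dst = np.zeros((512, 512, 3), np.uint8)
dst[:] = (128, 192, 255)               # cv.Set equivalent
mask = np.zeros((512, 512), np.uint8)
cv2.rectangle(mask, (10, 10), (300, 100), 255, -1)
dst[mask > 0] = lena[mask > 0]         # masked cv.Copy equivalent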
Example #10
 def test_4a_MemCreatedToString(self):
     cnt = 0
     v = []
     for i in range(self.repeat):
         image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
         cv.Set(image, cv.Scalar(0, 0, 0, 0))
         image.tostring()
         cnt += cv.CountNonZero(image)
         v.append(image)
     self.assertEqual(
         cnt,
         0,
         msg=
         "Repeating and memorizing after tostring(): Mean CountNonZero=%.3f"
         % (1. * cnt / self.repeat))
Example #11
def redraw_grid():
    global Grid
    global Peephole
    global Radius
    global Grid_Points_x
    global Grid_Points_y
    global Grid_Intersections

    cv.Set(Grid, cv.Scalar(0, 0, 0))
    cv.Set(Peephole, cv.Scalar(0, 0, 0))
    Grid_Intersections = []
    Grid_Points_x.sort()
    Grid_Points_y.sort()

    for x in Grid_Points_x:
        cv.Line(Grid, (x, 0), (x, Target.height), cv.Scalar(0xff, 0x00, 0x00),
                1)
        for y in Grid_Points_y:
            Grid_Intersections.append((x, y))
    Grid_Intersections.sort()
    for y in Grid_Points_y:
        cv.Line(Grid, (0, y), (Target.width, y), cv.Scalar(0xff, 0x00, 0x00),
                1)
    for x, y in Grid_Intersections:
        cv.Circle(Grid, (x, y),
                  Radius,
                  cv.Scalar(0x00, 0x00, 0x00),
                  thickness=-1)
        cv.Circle(Grid, (x, y),
                  Radius,
                  cv.Scalar(0xff, 0x00, 0x00),
                  thickness=1)
        cv.Circle(Peephole, (x, y),
                  Radius + 1,
                  cv.Scalar(0xff, 0xff, 0xff),
                  thickness=-1)
Example #12
	def addText(self, frame, textTop, textBottom):
		s = cv.GetSize(frame)
		offset = 8
		
		## add space for text notations
		textSize = cv.GetTextSize(textTop, self._font)
		textframe = cv.CreateImage( (s[0], s[1] + 4*textSize[1] + 2*offset), frame.depth, frame.channels)
		cv.Set(textframe, 0)
		cv.SetImageROI(textframe, (0, 2*textSize[1] + offset, s[0], s[1]))
		cv.Copy(frame, textframe)
		cv.ResetImageROI(textframe)
				
		## write text
		cv.PutText(textframe, textTop, (5, 2*textSize[1] + offset/2), self._font, self._fontcolor)
		cv.PutText(textframe, textBottom, (5, int(s[1] + 4*textSize[1] + 1.5 * offset)), self._font, self._fontcolor)
		
		return textframe
Example #13
def show_data():
    global Hex
    global Display_Data
    global Display_Binary
    global Search_HEX
    global Grid_Points_x
    global Grid_Points_y
    global Font
    global Data_Read
    global Radius

    if not Data_Read:
        return

    cv.Set(Hex, cv.Scalar(0, 0, 0))
    print
    dat = get_all_data()
    for row in range(Grid_Entries_y):
        out = ''
        outbin = ''
        for column in range(Grid_Entries_x / Bits):
            thisbyte = ord(dat[column * Grid_Entries_y + row])
            hexbyte = '%02X ' % thisbyte
            out += hexbyte
            outbin += to_bin(thisbyte) + ' '
            if Display_Binary:
                dispdata = to_bin(thisbyte)
            else:
                dispdata = hexbyte
            if Display_Data:
                if Search_HEX and Search_HEX.count(thisbyte):
                    cv.PutText(Hex, dispdata,
                               (Grid_Points_x[column * Bits],
                                Grid_Points_y[row] + Radius / 2 + 1), Font,
                               cv.Scalar(0x00, 0xff, 0xff))
                else:
                    cv.PutText(Hex, dispdata,
                               (Grid_Points_x[column * Bits],
                                Grid_Points_y[row] + Radius / 2 + 1), Font,
                               cv.Scalar(0xff, 0xff, 0xff))
        print outbin
        print
        print out
    print
Example #14
 def __init__(self, iplimage):
     # Rough-n-ready but it works dammit
     alpha = cv.CreateMat(iplimage.height, iplimage.width, cv.CV_8UC1)
     cv.Rectangle(alpha, (0, 0), (iplimage.width, iplimage.height),
                  cv.ScalarAll(255), -1)
     rgba = cv.CreateMat(iplimage.height, iplimage.width, cv.CV_8UC4)
     cv.Set(rgba, (1, 2, 3, 4))
     cv.MixChannels(
         [iplimage, alpha],
         [rgba],
         [
             (0, 0),  # bgr[0] -> rgba[0]
             (1, 1),  # bgr[1] -> rgba[1]
             (2, 2),  # bgr[2] -> rgba[2]
             (3, 3)  # alpha[0] -> rgba[3]
         ])
     self.__imagedata = rgba.tostring()
     super(IplQImage, self).__init__(self.__imagedata, iplimage.width,
                                     iplimage.height, QImage.Format_RGB32)
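In the newer cv2 API the same BGR-to-BGRA packing can be written as a single conversion; a sketch assuming the input is already a NumPy BGR array rather than an IplImage:

import cv2
import numpy as np

bgr = np.zeros((480, 640, 3), np.uint8)       # stand-in for the input image
bgra = cv2.cvtColor(bgr, cv2.COLOR_BGR2BGRA)  # alpha channel is filled with 255 (opaque)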
Example #15
    def __init__(self, map_name="hall_inria"):
        """ Initialize our interest grid.

            @param y_length:  int, y_length of the grid [m]
            @param x_length:  int, Columns of the grid [m]
            @param origin: tuple (x,y), where it is going to be located the lower-left corner of the image with respect to the grid.
            @param resolution: float, resolution of the grid in [meters/cells]
        """
        """ MEMBERS """

        """ INIT """
        self.__read_map__(map_name) #reads the map and the map metadata

        #The image with the detected goal points
        self.goal_map=cv.CreateMat(self.map_img.height, self.map_img.width, cv.CV_8UC1)
        cv.Set(self.goal_map, 255)



        self.goal_list = []
        self.hough_rho = 1  #- self.map_img.height/600  # Bigger == more lines
        self.hough_threshold = 10  # Bigger == fewer lines
        print "An instance of InterestGrid has been created"
Example #16
def show_data(self):
    if not self.data_read:
        return

    cv.Set(self.img_hex, cv.Scalar(0, 0, 0))
    print
    dat = get_all_data(self)
    for row in range(len(self.grid_points_y)):
        out = ''
        outbin = ''
        for column in range(len(self.grid_points_x) / self.group_cols):
            thisbyte = ord(dat[column * len(self.grid_points_y) + row])
            hexbyte = '%02X ' % thisbyte
            out += hexbyte
            outbin += to_bin(thisbyte) + ' '
            if self.config.img_display_binary:
                disp_data = to_bin(thisbyte)
            else:
                disp_data = hexbyte
            if self.config.img_display_data:
                if self.Search_HEX and self.Search_HEX.count(thisbyte):
                    cv.PutText(
                        self.img_hex, disp_data,
                        (self.grid_points_x[column * self.group_cols],
                         self.grid_points_y[row] + self.config.radius / 2 + 1),
                        self.font, cv.Scalar(0x00, 0xff, 0xff))
                else:
                    cv.PutText(
                        self.img_hex, disp_data,
                        (self.grid_points_x[column * self.group_cols],
                         self.grid_points_y[row] + self.config.radius / 2 + 1),
                        self.font, cv.Scalar(0xff, 0xff, 0xff))
        #print outbin
        #print
        #print out
    print
if __name__ == "__main__":

    cv.NamedWindow("mi grid", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("mi grid2", cv.CV_WINDOW_NORMAL)
    float_img = cv.CreateMat(10, 10, cv.CV_32FC1)
    float_img = cv.Load("interest_grid.xml", cv.CreateMemStorage())
    py_img = 1.0 * np.asarray(float_img)
    py_img = 255 * (py_img / np.max(py_img))
    img = cv.fromarray(py_img.astype(np.uint8))
    rgb_img = cv.CreateImage((img.cols, img.rows), 8, 4)
    """Creating RGB img"""
    img_r = cv.CloneMat(img)
    img_g = cv.CreateImage((img.cols, img.rows), 8, 1)
    img_b = cv.CreateImage((img.cols, img.rows), 8, 1)
    img_a = cv.CreateImage((img.cols, img.rows), 8, 1)
    cv.Set(img_g, 10)
    cv.Set(img_b, 100)
    cv.Set(img_a, 100)

    cv.Merge(img_b, img_g, img_r, img_a, rgb_img)
    """Precorner detect"""
    corners = cv.CreateMat(float_img.rows, float_img.cols, float_img.type)
    cv.PreCornerDetect(float_img, corners, 3)
    """Canny"""
    edges = cv.CreateImage((img.cols, img.rows), 8, 1)
    print img.rows, img.cols, edges.height
    cv.Canny(img, edges, 20.0, 160.0)
    disp2 = edges
    """Good features to track"""
    eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
Example #18
                    #min_edge = 6,
                    #max_edge = int(edge)      # Units of 2 pixels
                )
                if sym:
                    onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y)
                                for (x, y) in self.dm.stats(1)[1]]
                    found[sym] = onscreen
                else:
                    print "FAILED"
        t_brute = time.time() - started
        print "cull took", t_cull, "brute", t_brute
        return found


bg = cv.CreateMat(1024, 1024, cv.CV_8UC3)
cv.Set(bg, cv.RGB(0, 0, 0))
df = DmtxFinder()

cv.NamedWindow("camera", 1)


def mkdmtx(msg):
    dm_write = DataMatrix()
    dm_write.encode(msg)
    pi = dm_write.image  # .resize((14, 14))
    cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, pi.tostring())
    return cv_im


# test = [('WIL', (100,100))]: # , ('LOW', (250,100)), ('GAR', (300, 300)), ('AGE', (500, 300))]:
Example #19
        cv.Circle(blink, hands[id]['current_position'], 10, hands[id]['color']['cv'], -1, cv.CV_AA, 0)
        if hands[id]['drawing']:
          cv.Line(drawing, hands[id]['previous_position'], hands[id]['current_position'], hands[id]['color']['cv'], 10, cv.CV_AA, 0) 
    cv.ShowImage('Drawing', blink)

########################### MAIN ##################################

cv.NamedWindow('Video',1)
cv.MoveWindow('Video',0,0)
cv.NamedWindow('Drawing',1)
cv.MoveWindow('Drawing',720,0)

text_font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.6, 0.6, 0, 1, 4)

drawing = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 3)
cv.Set(drawing, (255.0,255.0,255.0))

cv_image = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 3)
hands = {}
buttons_size = (100, 60)
buttons = {'White': {'color': cv.CV_RGB(255,255,255), 'start': (500, 30), 'end': (500 + buttons_size[0], 30 + buttons_size[1])},
          'Black': {'color': cv.CV_RGB(0,0,0), 'start': (500, 100), 'end': (500 + buttons_size[0], 100 + buttons_size[1])},
          'Red': {'color': cv.CV_RGB(255,0,0), 'start': (500, 170), 'end': (500 + buttons_size[0], 170 + buttons_size[1])},
          'Green': {'color': cv.CV_RGB(0,255,0), 'start': (500, 240), 'end': (500 + buttons_size[0], 240 + buttons_size[1])},
          'Blue': {'color': cv.CV_RGB(0,0,255), 'start': (500, 310), 'end': (500 + buttons_size[0], 310 + buttons_size[1])},
          }

ni = Context()
ni.init()
ni.init_from_xml_file("OpenniConfig.xml")
video = ni.find_existing_node(NODE_TYPE_IMAGE)
Example #20
        # right
        draw_subdiv_facet(img, cv.Subdiv2DRotateEdge(edge, 3))


if __name__ == '__main__':
    win = "source"
    rect = (0, 0, 600, 600)

    active_facet_color = cv.RGB(255, 0, 0)
    delaunay_color = cv.RGB(0, 0, 0)
    voronoi_color = cv.RGB(0, 180, 0)
    bkgnd_color = cv.RGB(255, 255, 255)

    img = cv.CreateImage((rect[2], rect[3]), 8, 3)
    cv.Set(img, bkgnd_color)

    cv.NamedWindow(win, 1)

    storage = cv.CreateMemStorage(0)
    subdiv = cv.CreateSubdivDelaunay2D(rect, storage)

    print "Delaunay triangulation will be build now interactively."
    print "To stop the process, press any key\n"

    for i in range(200):
        fp = (random.random() * (rect[2] - 10) + 5,
              random.random() * (rect[3] - 10) + 5)

        locate_point(subdiv, fp, img, active_facet_color)
        cv.ShowImage(win, img)
Example #21
def run(selfl, img_fn=None, grid_file=None):
    global self
    self = selfl

    self.img_fn = img_fn
    grid_json = None
    if grid_file:
        with open(grid_file, 'rb') as gridfile:
            grid_json = json.load(gridfile)
        if self.img_fn is None:
            self.img_fn = grid_json.get('img_fn')
        if self.group_cols is None:
            self.group_cols = grid_json.get('group_cols')
            self.group_rows = grid_json.get('group_rows')
    else:
        # Then need critical args
        if not self.img_fn:
            raise Exception("Filename required")
        if not self.group_cols:
            raise Exception("cols required")
        if not self.group_rows:
            raise Exception("rows required")

    if self.img_fn is None:
        raise Exception("Image required")

    #self.img_original= cv.LoadImage(img_fn, iscolor=cv.CV_LOAD_IMAGE_GRAYSCALE)
    #self.img_original= cv.LoadImage(img_fn, iscolor=cv.CV_LOAD_IMAGE_COLOR)
    self.img_original = cv.LoadImage(self.img_fn)
    print 'Image is %dx%d' % (self.img_original.width,
                              self.img_original.height)

    self.basename = self.img_fn[:self.img_fn.find('.')]

    # image buffers
    self.img_target = cv.CreateImage(cv.GetSize(self.img_original),
                                     cv.IPL_DEPTH_8U, 3)
    self.img_grid = cv.CreateImage(cv.GetSize(self.img_original),
                                   cv.IPL_DEPTH_8U, 3)
    self.img_mask = cv.CreateImage(cv.GetSize(self.img_original),
                                   cv.IPL_DEPTH_8U, 3)
    self.img_peephole = cv.CreateImage(cv.GetSize(self.img_original),
                                       cv.IPL_DEPTH_8U, 3)
    cv.Set(self.img_mask, cv.Scalar(0x00, 0x00, 0xff))
    self.img_display = cv.CreateImage(cv.GetSize(self.img_original),
                                      cv.IPL_DEPTH_8U, 3)
    cv.Set(self.img_grid, cv.Scalar(0, 0, 0))
    self.img_blank = cv.CreateImage(cv.GetSize(self.img_original),
                                    cv.IPL_DEPTH_8U, 3)
    cv.Set(self.img_blank, cv.Scalar(0, 0, 0))
    self.img_hex = cv.CreateImage(cv.GetSize(self.img_original),
                                  cv.IPL_DEPTH_8U, 3)
    cv.Set(self.img_hex, cv.Scalar(0, 0, 0))

    self.config.font_size = 1.0
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,
                            hscale=self.config.font_size,
                            vscale=1.0,
                            shear=0,
                            thickness=1,
                            lineType=8)

    self.title = "rompar %s" % img_fn
    cv.NamedWindow(self.title, 1)
    cv.SetMouseCallback(self.title, on_mouse, self)

    self.img_target = cv.CloneImage(self.img_original)

    if grid_json:
        load_grid(self, grid_json)

    cmd_help()
    cmd_help2()

    # main loop
    while self.running:
        try:
            do_loop(self)
        except Exception:
            if self.debug:
                raise
            print 'WARNING: exception'
            traceback.print_exc()

    print 'Exiting'
Example #22
    def find_lines_in_map(self, map_img):
        #Finds lines in the image
        lines = cv.CreateMemStorage(0)
        line_img = cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Set(line_img, 255)
        lines = cv.HoughLines2(map_img, cv.CreateMemStorage(), cv.CV_HOUGH_MULTI_SCALE,
                               self.hough_rho, np.pi / 2, self.hough_threshold)
        np_line = np.asarray(lines)
        print
        x0=[]#A vector of all the X0
        y0=[]#A vector of all the Y0
        theta0=[]
        n=0
        #Print the lines so that we can see what is happening
        for rho,theta in np_line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0.append(a*rho)
            y0.append(b*rho)
            theta0.append(theta)
            x1 = int(x0[n] + 3000*(-b))
            y1 = int(y0[n] + 3000*(a))
            x2 = int(x0[n] - 3000*(-b))
            y2 = int(y0[n] - 3000*(a))
            cv.Line(map_img,(x1,y1),(x2,y2),0,1)
            cv.Line(line_img,(x1,y1),(x2,y2),0,1)
            n = n+1

        #create two lists with the x coordinates of vertical lines and the y coordinates of horizontal lines.
        theta_rounded = np.round(theta0, 1)
        y_sorted = np.sort(np.round(y0,0))
        ylist=[]
        xlist=[]
        n=0
        for element in theta_rounded:
            if element == 0:  # vertical line (theta == 0)
                xlist.append(x0[n])
            else:
                ylist.append(y0[n])
            n=n+1

        Ym=[]
        Xm=[]
        ordered_y = np.sort(ylist)
        ordered_x = np.sort(xlist)
        print ordered_x
        last_element =0

        #Find middle points between lines
        for element in ordered_y:
            delta_element = element - last_element
            Ym.append(last_element+(delta_element)/2)
            last_element = deepcopy(element)
        last_element =0
        for element in ordered_x:
            delta_element = element - last_element
            Xm.append(last_element+(delta_element)/2)
            last_element = deepcopy(element)

        #Printing the points in a map
        for yi in Ym:
            for xi in Xm:
                if self.map_img[yi,xi] >= 250: #If free space
                    self.goal_list.append([yi,xi])
                    map_img[yi,xi]=255
                    self.goal_map[yi,xi]=0


        return map_img
Example #23
    def _wipeMix(self, wipeMode, wipeConfig, level, image1, image2, mixMat):
        if((wipeMode == WipeMode.Push)):
            wipeDirection = wipeConfig
            if(wipeDirection < 0.25):
                wipePosX = int(self._internalResolutionX * level)
                sourceLeft = self._internalResolutionX-wipePosX
                sourceTop = 0
                sourceWidth = wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = 0
                destTop = 0
            elif(wipeDirection < 0.5):
                wipePosX = self._internalResolutionX - int(self._internalResolutionX * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = self._internalResolutionX-wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = self._internalResolutionX-(self._internalResolutionX-wipePosX)
                destTop = 0
            elif(wipeDirection < 0.75):
                wipePosY = int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = self._internalResolutionY-wipePosY
                sourceWidth = self._internalResolutionX
                sourceHeight = wipePosY
                destLeft = 0
                destTop = 0
            else:
                wipePosY = self._internalResolutionY - int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = self._internalResolutionX
                sourceHeight = self._internalResolutionY-wipePosY
                destLeft = 0
                destTop = self._internalResolutionY-(self._internalResolutionY-wipePosY)
            destWidth = sourceWidth
            destHeight = sourceHeight
            src_region = cv.GetSubRect(image2, (sourceLeft, sourceTop, sourceWidth, sourceHeight))
            if(image1 == None):
                cv.SetZero(mixMat)
                dst_region = cv.GetSubRect(mixMat, (destLeft, destTop, destWidth, destHeight))
                return mixMat
            else:
                dst_region = cv.GetSubRect(mixMat, (destLeft, destTop, destWidth, destHeight))
            cv.Copy(src_region, dst_region)
            if(wipeDirection < 0.25):
                wipePosX = int(self._internalResolutionX * level)
                sourceLeft = wipePosX
                sourceTop = 0
                sourceWidth = self._internalResolutionX-wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = wipePosX
                destTop = 0
            elif(wipeDirection < 0.5):
                wipePosX = self._internalResolutionX - int(self._internalResolutionX * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = wipePosX
                sourceHeight = self._internalResolutionY
                destLeft = 0
                destTop = 0
            elif(wipeDirection < 0.75):
                wipePosY = int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = wipePosY
                sourceWidth = self._internalResolutionX
                sourceHeight = self._internalResolutionY-wipePosY
                destLeft = 0
                destTop = wipePosY
            else:
                wipePosY = self._internalResolutionY - int(self._internalResolutionY * level)
                sourceLeft = 0
                sourceTop = 0
                sourceWidth = self._internalResolutionX
                sourceHeight = wipePosY
                destLeft = 0
                destTop = 0
            destWidth = sourceWidth
            destHeight = sourceHeight
            src_region = cv.GetSubRect(image1, (sourceLeft, sourceTop, sourceWidth, sourceHeight))
            dst_region = cv.GetSubRect(mixMat, (destLeft, destTop, destWidth, destHeight))
            cv.Copy(src_region, dst_region)
            return mixMat
        if(wipeMode == WipeMode.Noize):
            scaleArg = wipeConfig
            noizeMask = getNoizeMask(level, self._internalResolutionX, self._internalResolutionY, 1.0 + (19.0 * scaleArg))
            if(image1 == None):
                cv.SetZero(mixMat)
                cv.Copy(image2, mixMat, noizeMask)
                return mixMat
            cv.Copy(image2, image1, noizeMask)
            return image1
        if(wipeMode == WipeMode.Zoom):
            xMove, yMove = wipeConfig
            xSize = int(self._internalResolutionX * level)
            ySize = int(self._internalResolutionY * level)
            xPos = int((self._internalResolutionX - xSize) * xMove)
            yPos = int((self._internalResolutionY - ySize) * (1.0 - yMove))
            cv.SetZero(mixMat)
            dst_region = cv.GetSubRect(mixMat, (xPos, yPos, xSize, ySize))
            cv.Resize(image2, dst_region,cv.CV_INTER_CUBIC)
            if(image1 == None):
                return mixMat
            cv.SetZero(self._mixMixMask1)
            dst_region = cv.GetSubRect(self._mixMixMask1, (xPos, yPos, xSize, ySize))
            cv.Set(dst_region, 256)
            cv.Copy(mixMat, image1, self._mixMixMask1)
            return image1
        if(wipeMode == WipeMode.Flip):
            flipRotation = wipeConfig
            rotation = 1.0 - level
            srcPoints = ((0.0, 0.0),(0.0,self._internalResolutionY),(self._internalResolutionX, 0.0))
            destPoint1 = (0.0, 0.0)
            destPoint2 = (0.0, self._internalResolutionY)
            destPoint3 = (self._internalResolutionX, 0.0)
            if(image1 == None):
                rotation = rotation / 2
            if(rotation < 0.5):
                flipAngle = rotation / 2
            else:
                flipAngle = level / 2
            destPoint1 = rotatePoint(flipRotation, destPoint1[0], destPoint1[1], self._halfResolutionX, self._halfResolutionY, flipAngle)
            destPoint2 = rotatePoint(flipRotation, destPoint2[0], destPoint2[1], self._halfResolutionX, self._halfResolutionY, flipAngle)
            destPoint3 = rotatePoint(flipRotation, destPoint3[0], destPoint3[1], self._halfResolutionX, self._halfResolutionY, flipAngle)
            dstPoints = ((destPoint1[0], destPoint1[1]),(destPoint2[0], destPoint2[1]),(destPoint3[0],destPoint3[1]))
            zoomMatrix = cv.CreateMat(2,3,cv.CV_32F)
#            print "DEBUG pcn: trasform points source: " + str(srcPoints) + " dest: " + str(dstPoints) 
            cv.GetAffineTransform(srcPoints, dstPoints, zoomMatrix)
            if(rotation < 0.5):
                cv.WarpAffine(image2, mixMat, zoomMatrix)
            else:
                cv.WarpAffine(image1, mixMat, zoomMatrix)
            cv.Set(self._mixMixMask2, (255,255,255))
            cv.WarpAffine(self._mixMixMask2, self._mixMixMask1, zoomMatrix)
            return mixMat
        return image2
Example #24
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from openni import *
import cv2.cv as cv

cv.NamedWindow('Video', 1)
cv.MoveWindow('Video', 0, 0)
cv.NamedWindow('Quadro', 1)
cv.MoveWindow('Quadro', 720, 0)

fonte_do_texto = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.6, 0.6, 0, 1, 4)

quadro = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
cv.Set(quadro, (255.0, 255.0, 255.0))

imagem_cv = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
maos = {}
traduz = {'Wave': 'Apagador', 'Click': 'Caneta'}
efeito = 'Nenhum'

ni = Context()
ni.init()
ni.open_file_recording("BothGestures.oni")
video = ni.find_existing_node(NODE_TYPE_IMAGE)
depth = ni.find_existing_node(NODE_TYPE_DEPTH)

gesture_generator = GestureGenerator()
gesture_generator.create(ni)
gesture_generator.add_gesture('Wave')
gesture_generator.add_gesture('Click')
Example #25
def main():

    face_detected_count = 0
    speech_detected_count = 0
    loop_count = 0
    listening = False

    main_data = []
    #init_speech_time        = 0
    mouth_data = {}  # current (temp) mouth data

    pause = False  # LOCAL only
    mouth_color = (255, 255, 255, 0)  # LOCAL only
    image = None  # LOCAL only

    if LOCAL:
        cv.NamedWindow(WINDOW_NAME, cv.CV_WINDOW_AUTOSIZE)
        image = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 3)

    if DEBUG: mouth_data["Frames"] = []

    while True:
        # if running locally and the window is paused
        if LOCAL and pause: continue

        loop_count += 1

        FaceDetected = memoryProxy.getData("FaceDetected")
        SpeechDetected = memoryProxy.getData("SpeechDetected")
        WordRecognized = memoryProxy.getData("WordRecognized")
        FrontTactilTouched = memoryProxy.getData("FrontTactilTouched")

        # Exit (unsubscribe)
        if FrontTactilTouched:
            if LOCAL: cv.DestroyWindow(WINDOW_NAME)
            break

        if SpeechDetected:  # listening starts
            if not listening:
                mouth_data['InitSpeechTime'] = time.time()
                mouth_data['Frames'] = []
                # NOTE: this timestamp lags by a few milliseconds!
                log("- SpeechDetected: Begin Listening @ " +
                    str(mouth_data['InitSpeechTime']))
                listening = True
                speech_detected_count += 1
                if LOCAL: mouth_color = (0, 0, 255, 0)
        else:  # listening ends
            if listening:
                mouth_data['EndSpeechTime'] = time.time()
                log("- SpeechDetected: Stop listening @ " +
                    str(mouth_data['EndSpeechTime']))
                listening = False
                main_data.append(mouth_data)
                mouth_data = {}
                if LOCAL: mouth_color = (255, 255, 255, 0)
        # compute elapsed time in ms: 1000 * (end - init)

        # Face Detection

        FaceDetectedData = []

        if (  # listening and
                FaceDetected and isinstance(FaceDetected, list)
                and len(FaceDetected) >= 2):
            face_detected_count += 1
            log(str(face_detected_count) + "FD " +
                str(FaceDetected[0]))  # time.time

            FaceDetectedData = FaceDetected
            memoryProxy.insertData("FaceDetected", [])
            # collect data
            current_frame = {
                'FaceDetectionTimestamp': FaceDetectedData[0]  #,
                # 'MouthPoints': FaceDetectedData[1][0][1][8]
            }
            mouth_data['Frames'].append(current_frame)

        # note: WordRecognized certainly arrives after the end of SpeechDetected,
        # so we can act on the last mouth_data, which was appended to main_data
        # TODO: fix synchronization problems here
        # (this code should be reworked) [what if the last mouth_data has not yet
        # been put into main_data?]
        if WordRecognized[0] != '':
            log(WordRecognized)
            main_data[-1]['WordRecognized'] = WordRecognized
            memoryProxy.insertData("WordRecognized", [''])

        # CV window etc

        if LOCAL:  # local sleep (cv)
            k = cv.WaitKey(FacePeriod)
            if k == 13:
                cv.DestroyWindow(WINDOW_NAME)
                break
            elif k != -1:
                pause = not pause

            cv.Set(image, cv.CV_RGB(0, 0, 0))
            if FaceDetectedData != []:
                DrawPoints(FaceDetectedData, image, mouth_color)
            cv.ShowImage(WINDOW_NAME, image)

        else:  # NAO sleep
            time.sleep(0.1)

    # end of loop

    #saveData(main_data)  # TODO: fix the missing-folder problem when running remotely
    #log(main_data)
    if DEBUG: main_data = mouth_data
    global md
    md = main_data
    time.sleep(1)
    log("-- Main DONE")
    pass
Example #26
    Img = cv.LoadImage(sys.argv[1])
    print 'Image is %dx%d' % (Img.width, Img.height)
else:
    print 'usage: %s <IMAGE> <BITS PER GROUP> <ROWS PER GROUP> [GRID FILE]' % sys.argv[
        0]
    print
    print "  hit 'h' when image has focus to print help text"
    print
    exit()

# image buffers
Target = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
Grid = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
Mask = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
Peephole = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
cv.Set(Mask, cv.Scalar(0x00, 0x00, 0xff))
Display = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
cv.Set(Grid, cv.Scalar(0, 0, 0))
Blank = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
cv.Set(Blank, cv.Scalar(0, 0, 0))
Hex = cv.CreateImage(cv.GetSize(Img), cv.IPL_DEPTH_8U, 3)
cv.Set(Hex, cv.Scalar(0, 0, 0))

FontSize = 1.0
Font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,
                   hscale=FontSize,
                   vscale=1.0,
                   shear=0,
                   thickness=1,
                   lineType=8)
Example #27
            def contour_iterator(contour):
                while contour:
                    yield contour
                    contour = contour.h_next()

            cv.Zero(markers)
            comp_count = 0
            for c in contour_iterator(contours):
                cv.DrawContours(markers, c, cv.ScalarAll(comp_count + 1),
                                cv.ScalarAll(comp_count + 1), -1, -1, 8)
                comp_count += 1

            cv.Watershed(img0, markers)

            cv.Set(wshed, cv.ScalarAll(255))

            # paint the watershed image
            color_tab = [
                (cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50,
                 cv.RandInt(rng) % 180 + 50) for i in range(comp_count)
            ]
            for j in range(markers.height):
                for i in range(markers.width):
                    idx = markers[j, i]
                    if idx != -1:
                        wshed[j, i] = color_tab[int(idx - 1)]

            cv.AddWeighted(wshed, 0.5, img_gray, 0.5, 0, wshed)
            cv.ShowImage("watershed transform", wshed)
Example #28
def segmentAndMask(img0, imgDepth0, staticMap0, mask0, thresh=10):
    debug = True

    staticMap = np.array(staticMap0)
    imgDepth = np.array(imgDepth0)
    img = np.array(img0)
    mask = np.array(mask0)
    #imshow('indepth', imgDepth*10)

    staticMap = np.int16(staticMap)
    imgDepth = np.int16(imgDepth)

    noDepthMask = imgDepth == 0
    #imshow('no depth mask', np.uint8(noDepthMask)*255)

    diff = (imgDepth - staticMap)
    adiff = np.abs(diff)

    tableMask = adiff < thresh
    if debug:
        #imshow('adiff', adiff*15)
        print imgDepth.max(), staticMap.max()
        #imshow('table mask', np.uint8(tableMask)*255)

    # numpy setting is really slow
    maskout = (tableMask) | (mask <= 0)
    maskin = (tableMask <= 0) & (mask)

    cvMask = util.array2cv(np.uint8(maskout) * 255)
    cvImg = util.array2cv(img)
    cv.Set(cvImg, (0, 0, 0), cvMask)
    img = util.cv2array(cvImg)
    #img[tableMask] = 0
    #img[mask <= 0] = 0

    cvDep = util.array2cv(imgDepth)
    depthBw = imgDepth > 0
    depthBw = depthBw | noDepthMask
    depthBw = depthBw & maskin
    depthBw = np.uint8(depthBw) * 255

    cv.Set(cvDep, 0, cvMask)
    imgDepth = util.cv2array(cvDep)
    imgDepth = np.uint16(imgDepth)

    # Find contours. Only keep the large ones to reduce noise.
    param = 2
    for d in range(param):
        depthBw = cv2.dilate(depthBw, None)
    for d in range(param):
        depthBw = cv2.erode(depthBw, None)

    #imshow('depthbw', depthBw)

    blobs = []
    blobs = traceContoursCV2(depthBw)
    blobs = [b for b in blobs if len(b) > 2]
    blobs = [b for b in blobs if util.blobsize(b, ignoreLessThan=150) > 150]

    foregroundMask = np.zeros(imgDepth.shape, 'uint8')
    mat = util.array2cv(foregroundMask)
    cv.FillPoly(mat, blobs, 255)
    foregroundMask = util.cv2array(mat)
    #imshow('foreground', foregroundMask)

    bgMask = util.array2cv(np.uint8(foregroundMask < 0) * 255)

    cv.Set(cvImg, (0, 0, 0), bgMask)
    cv.Set(cvDep, 0, bgMask)
    img = util.cv2array(cvImg)
    imgDepth = util.cv2array(cvDep)
    imgDepth = np.uint16(imgDepth)

    if debug:
        #imshow('seg img', .5*np.float32(img0) + .5*np.float32(img0[foregroundMask]))
        img1 = img0.copy()
        img1[foregroundMask <= 0] = .5 * img1[foregroundMask <= 0]
        #imshow('seg img', img1)
        #imshow('seg dep', imgDepth*10)
        #imshow('smoothed foreground mask', foregroundMask)

    return img, imgDepth, foregroundMask
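The masked cv.Set calls above work around the "numpy setting is really slow" note; in current NumPy the masked assignment hinted at by the commented-out lines is normally fine. A minimal sketch with stand-in arrays:

import numpy as np

img = np.ones((4, 4, 3), np.uint8)     # stand-in for the colour image
img_depth = np.ones((4, 4), np.int16)  # stand-in for the depth image
maskout = np.zeros((4, 4), bool)       # stand-in for the background mask
maskout[0, :] = True
img[maskout] = 0                       # like cv.Set(cvImg, (0, 0, 0), cvMask)
img_depth[maskout] = 0                 # like cv.Set(cvDep, 0, cvMask)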