def callback(data):
  global zero, prev

  #cv2.namedWindow("flow",1)
  #cv2.moveWindow("flow",500,00)

  #cv2.namedWindow("zero_image",1)
  #cv2.moveWindow("zero_image",600,0)

  bridge = CvBridge()

  if zero:
    # On the first frame, store a flipped copy as the previous frame so it
    # matches the flipped current frame passed to motion_detector below.
    prev = bridge.imgmsg_to_cv2(data, "mono8")
    prev = cv2.flip(prev, 1)
    #ret1,prev = cv2.threshold(prev, 0, 0,cv2.THRESH_BINARY)
    zero = False

  #cv2.imshow("zero_image", prev)
#  cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
  cv_image = bridge.imgmsg_to_cv2(data, "mono8")
  #gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
  
  cv_image = cv2.flip(cv_image,1)
  #cv2.imshow("motion_detector", cv_image)
  
  motion_detector(cv_image, prev)
Example #2
def imageFlipMirror(im, mirrored, flipped):
    ''' Flip and/or mirror the given image.

    Parameters
    -----------
    im : np.array
        2D array depicting an image as a numpy array
    mirrored : bool
        whether to mirror the image (left - right)
    flipped : bool
        whether to flip the image (top - bottom)

    Returns
    -----------
    im : np.array
        image array processed accordingly.
    '''

    if mirrored == 1 and flipped == 0:
        im = cv2.flip(im, 1)
    elif mirrored == 0 and flipped == 1:
        im = cv2.flip(im, 0)
    elif mirrored == 1 and flipped == 1:
        im = cv2.flip(im, -1)
    return im
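A minimal usage sketch for the helper above (the test array and assertions are illustrative, assuming imageFlipMirror and cv2 are in scope): the mirrored/flipped flags map onto cv2.flip codes 1, 0 and -1.

import numpy as np

im = np.arange(16, dtype=np.uint8).reshape(4, 4)
# mirrored only -> left-right flip (cv2.flip code 1)
assert np.array_equal(imageFlipMirror(im, 1, 0), im[:, ::-1])
# flipped only -> top-bottom flip (cv2.flip code 0)
assert np.array_equal(imageFlipMirror(im, 0, 1), im[::-1, :])
# both -> 180 degree rotation (cv2.flip code -1)
assert np.array_equal(imageFlipMirror(im, 1, 1), im[::-1, ::-1])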
Example #3
def data_augmentation(img0):
    global imgid
    mirror_idx = [0,1] #0: no mirroring, 1: mirroring
    scaling_vals = [1.0,1.2,1.4] #scaling factors
    blur_vals = [0.0,1.2,1.5] #blur sigmas; 0.0 means no blurring
    h0 = img0.shape[0]
    w0 = img0.shape[1]
    imgs = []
    pathes = []
    for m_idx in mirror_idx:     

        for scl in scaling_vals:
            
            x1 = int((w0 * scl - w0) / 2.0)
            y1 = int((h0 * scl - h0) / 2.0)
            x2 = x1 + w0
            y2 = y1 + h0
           

            for blur_val in blur_vals:
                img = img0.copy()

                if m_idx == 1:
                    cv2.flip(img,1,img) 
                img = cv2.resize(img,(int(w0*scl), int(h0*scl))) 
                img = img[y1:y2, x1:x2] 
                if blur_val > 0.0:                    
                    img = cv2.GaussianBlur(img, (0,0), blur_val)

                imgs.append(img)

                path = 'pos%d.png' % (imgid)
                imgid += 1
                pathes.append(path)
    return imgs, pathes
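A brief usage sketch (the test image and the imgid initialization are illustrative, assuming data_augmentation and cv2 are defined as above): two mirror settings, three scales and three blur values yield 18 variants per input, each cropped back to the original size.

import numpy as np

imgid = 0  # data_augmentation relies on this module-level counter for file names
test_img = np.random.randint(0, 255, (64, 64), dtype=np.uint8)
aug_imgs, aug_paths = data_augmentation(test_img)
assert len(aug_imgs) == 2 * 3 * 3                          # mirror x scale x blur combinations
assert all(im.shape == test_img.shape for im in aug_imgs)  # every variant keeps the input size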
Example #4
 def getImage(self):
     img = self.image
     # Rotate image if it was recorded upside down.
     if self.upside_down:
         img = cv2.flip(img, 0)
         img = cv2.flip(img, 1)
     return img
Example #5
def cv2rotateimage(image, angle):
  """Efficient rotation if 90 degrees rotations, slow otherwise.

  Not a tensorflow function, using cv2 and scipy on numpy arrays.

  Args:
    image: a numpy array with shape [height, width, channels].
    angle: the rotation angle in degrees in the range [-180, 180].
  Returns:
    The rotated image.
  """
  # Limit angle to [-180, 180] degrees.
  assert angle <= 180 and angle >= -180
  if angle == 0:
    return image
  # Efficient rotations.
  if angle == -90:
    image = cv2.transpose(image)
    image = cv2.flip(image, 0)
  elif angle == 90:
    image = cv2.transpose(image)
    image = cv2.flip(image, 1)
  elif angle == 180 or angle == -180:
    image = cv2.flip(image, 0)
    image = cv2.flip(image, 1)
  else:  # Slow rotation.
    image = ndimage.rotate(image, angle)
  return image
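As a quick equivalence check (illustrative, not from the original source), the transpose-plus-flip shortcuts above agree with numpy's rot90 for the fast 90/180 degree paths:

import cv2
import numpy as np

img = np.random.randint(0, 255, (4, 6, 3), dtype=np.uint8)

# transpose + flip(1) is a 90-degree clockwise rotation (np.rot90 with k=-1)
assert np.array_equal(cv2.flip(cv2.transpose(img), 1), np.rot90(img, k=-1))
# transpose + flip(0) is a 90-degree counter-clockwise rotation (np.rot90 with k=1)
assert np.array_equal(cv2.flip(cv2.transpose(img), 0), np.rot90(img, k=1))
# flipping both axes is a 180-degree rotation
assert np.array_equal(cv2.flip(img, -1), np.rot90(img, k=2))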
Example #6
def FindWand():
    global rval,old_frame,old_gray,p0,mask,color,ig,img,frame
    try:
        rval, old_frame = cam.read()
        cv2.flip(old_frame,1,old_frame)
        old_gray = cv2.cvtColor(old_frame,cv2.COLOR_BGR2GRAY)
        old_gray = cv2.equalizeHist(old_gray)
        old_gray = cv2.GaussianBlur(old_gray,(9,9),1.5)
        dilate_kernel = np.ones(dilation_params, np.uint8)
        old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
        old_gray = clahe.apply(old_gray)
        #TODO: trained image recognition
        p0 = cv2.HoughCircles(old_gray,cv2.HOUGH_GRADIENT,3,50,param1=240,param2=8,minRadius=4,maxRadius=15)
        if p0 is not None:
            p0.shape = (p0.shape[1], 1, p0.shape[2])
            p0 = p0[:,:,0:2]
            mask = np.zeros_like(old_frame)
            ig = [[0] for x in range(20)]
        print "finding..."
        threading.Timer(3, FindWand).start()
    except:
        e = sys.exc_info()[1]
        print "Error: %s" % e
        sys.exit()
Example #7
    def trackRobot(self, imagePath):
        '''this function track the robot and return its coordinates'''
        img = cv2.imread(imagePath)
        img = cv2.flip(img, 1)
        img = cv2.flip(img, 0)

        # convert into hsv 
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # Find mask that matches 
        green_mask = cv2.inRange(hsv, np.array((50., 30., 0.)), np.array((100., 255., 255.)))
        green_mask = cv2.erode(green_mask, None, iterations=2)
        green_mask = cv2.dilate(green_mask, None, iterations=2)

        green_cnts = cv2.findContours(green_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        green_c = max(green_cnts, key=cv2.contourArea)

        # fit an ellipse and use its orientation to gain info about the robot
        green_ellipse = cv2.fitEllipse(green_c)

        # This is the position of the robot
        green_center = (int(green_ellipse[0][0]), int(green_ellipse[0][1]))

        red_mask = cv2.inRange(hsv, np.array((0., 100., 100.)), np.array((80., 255., 255.)))
        red_mask = cv2.erode(red_mask, None, iterations=2)
        red_mask = cv2.erode(red_mask, None, iterations=2)

        red_cnts = cv2.findContours(red_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        red_c = max(red_cnts, key=cv2.contourArea)

        red_ellipse = cv2.fitEllipse(red_c)
        red_center = (int(red_ellipse[0][0]), int(red_ellipse[0][1]))


        return green_center, red_center   
Example #8
 def start_anim(self):
     self.style_texture.blit_buffer(cv2.flip(root.art_style, 0).tostring(), colorfmt='bgr', bufferfmt='ubyte')
     self.photo_texture.blit_buffer(cv2.flip(root.photo_content, 0).tostring(), colorfmt='bgr', bufferfmt='ubyte')
     w, h = Window.width, Window.height
     rh = int(h *0.8)
     rw = rh*4//3
     self.output_rect.pos =((w-rw)//2,0)
     self.output_rect.size = (rw, rh)
     
     rh2 = int(h *0.3)
     rw2 = rh2*4//3
     self.photo_rect.pos =( -rw2,rh2+int(0.15*h))
     self.style_rect.pos =( -rw2,int(0.05*h))
     self.style_rect.size = (rw2, rh2)
     self.photo_rect.size = (rw2, rh2)
     self.canvas.ask_update()
     
     rh = int(h *0.6)
     rw = rh*4//3
     xpos2 = int((w-(rw+rw2))/3)
     idle1 = Animation(duration=1.)
     idle2 = Animation(duration=2.)
     idle3 = Animation(duration=3.)
     move1 = Animation(pos=(xpos2, rh2+int(0.15*h)), duration=2.)
     move2 = Animation(pos=(xpos2, int(0.05*h)) ,duration=2.)
     move3 = Animation(pos=(2*xpos2+rw2,int(0.1*h)), size=(rw, rh), duration=2.)
     (idle3 + move1).start(self.photo_rect)
     (idle2 + move2).start(self.style_rect)
     (idle1 + move3).start(self.output_rect)
Example #9
    def detect(self, img):
        """
        :param img:{numpy}
        :return:
        """
        # vis为img副本
        vis = img.copy()

        # 转换为灰度图
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 直方图均衡处理
        gray = cv2.equalizeHist(gray)

        # run the frontal-face classifier to get detection rects
        rects = detect(gray, self.frontCascade)
        if len(rects) == 0:
            # profile (side) face detection
            rects = detect(gray, self.profileCascade)
            if len(rects) == 0:
                # mirror the image and retry profile detection
                gray = cv2.flip(gray, 1)
                rects = detect(gray, self.profileCascade)
                vis = cv2.flip(vis, 1)

        result = []
        # draw rectangles
        draw_rects(vis, rects, (0, 255, 0))
        if len(rects) != 0:
            for x1, y1, x2, y2 in rects:
                result.append(vis[y1:y2, x1:x2])
        else:
            result.append(vis)
        return result
Example #10
def capture(flip_v = False, device = "/dev/spidev0.0"):

  with Lepton(device) as l:
    a,s = l.capture()
  if flip_v:
    # flip flag: 0 flips around the x-axis, a positive value (e.g. 1) around the
    # y-axis, and a negative value (e.g. -1) around both axes (180 degree rotation).
    cv2.flip(a,-1,a)
  return a,s
Example #11
 def _tick(self, *args):
     
     ret, img = self.cap.read()
     img = cv2.flip(img, 1)
     img2 = cv2.addWeighted(img, 0.7, self.art_style, 0.3, 0)
     """
     mask = None
     for bg in self.bgs:
         diff = (np.linalg.norm(img.astype(float)-bg, axis=2)/3).astype(np.uint8)
         r, mask0 = cv2.threshold(diff,6,255,cv2.THRESH_BINARY)
         if mask is None:
             mask = mask0
         else:
             mask = cv2.bitwise_or(mask, mask0)
     mask_inv = cv2.bitwise_not(mask)
     style2 = (cv2.bitwise_and(self.art_style, self.art_style, mask=mask_inv)*0.5).astype(np.uint8)
     img3 = cv2.bitwise_and(img, img, mask=mask)
     img2 = cv2.add(img3, style2)
     """
     image = cv2.flip(img2, 0)
     self.video_texture.blit_buffer(image.tostring(), colorfmt='bgr', bufferfmt='ubyte')
     self.canvas.ask_update()
     img2 = img
     
     if self.state != 'wait':
         if self.state == 0:
             print("taking photo")
             root.photo_content = img2
             root.art_style = self.art_style
             root.transition = NoTransition()
             root.current = "process"
         if self.state%20 == 0:
             self.ids.w_info.text = "Countdown %d until the photo (press a key to cancel and retry)"%(self.state//30)
             self.ids.w_info.font_size = 32
         self.state -= 1
Example #12
def read_im_and_landmarks(fname):
    blur_amount = 31
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    im_core = cv2.resize(im, (im.shape[1] * faceswap.SCALE_FACTOR,
                              im.shape[0] * faceswap.SCALE_FACTOR))

    core_shape = im_core.shape
    left   = int(core_shape[1] - 0.25 * core_shape[1])
    right  = int(2 * core_shape[1] + 0.25 * core_shape[1])
    top    = int(core_shape[0] - 0.25 * core_shape[0])
    bottom = int(2 * core_shape[0] + 0.25 * core_shape[0])
    # print("L,R,T,B {},{},{},{}".format(left, right, top, bottom))

    im_blur = cv2.GaussianBlur(im_core, (blur_amount, blur_amount), 0)
    im_blur = cv2.GaussianBlur(im_blur, (blur_amount, blur_amount), 0)
    blur_flipx = cv2.flip(im_blur, 1)
    blur_flipy = cv2.flip(im_blur, 0)
    blur_flipxy = cv2.flip(im_blur, -1)
    im_row1 = np.concatenate((blur_flipxy, blur_flipy, blur_flipxy), axis=1)
    im_row2 = np.concatenate((blur_flipx, im_core, blur_flipx), axis=1)
    im_row3 = np.concatenate((blur_flipxy, blur_flipy, blur_flipxy), axis=1)
    im_buffered = np.concatenate((im_row1, im_row2, im_row3), axis=0)
    im_final = im_buffered[top:bottom, left:right, :].astype(np.uint8)
    s = faceswap.get_landmarks(im_final)

    return im_final, s
Example #13
def update_all(root, canvas, cam, fps_label):
    #update_image(canvas, cam)
    
    ret, img = cam.read()
    img = cv2.flip(img, 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    rects = detect(gray, cascade)
    vis = img.copy()
    vis = vis[...,::-1]#converts BGR to RGB
    a = Image.fromarray(vis)
    
    faces=[]
    for x1, y1, x2, y2 in rects:
        faces.append([[(x1+x2)/2,(y1+y2)/2]])
    draw_sprites.draw_sprites(a,faces)
        
    b = ImageTk.PhotoImage(image=a)
    #canvas.delete(tk.ALL)
    #canvas.delete('all')
    global frame 
    frame = canvas.create_image(0,0,image=b,anchor=tk.NW) 
    root.update()
    time.sleep(.001)
    update_fps(fps_label)
    root.after(0, func=lambda: update_all(root, canvas, cam, fps_label))
Example #14
    def main(self, mode, path):
        """Main loop.
        """
        if not self.capture or not self.capture.isOpened() or self.mode != mode:
            self.capture = cv2.VideoCapture(path)

        self.mode = mode

        if not self.capture.isOpened():
            print "Couldn't read media " + path
        while self.capture.isOpened():
            if self.stopped:
                break
            ret, frame = self.capture.read()
            if frame is None:
                #self.capture = cv2.VideoCapture(path)
                #ret, frame = self.capture.read()
                if frame is None:
                    break
            if mode == self.mw.SOURCE_CAMERA:
                cv2.flip(frame, 1, frame)

            self.mw.displayImage(frame)
            QApplication.processEvents()
            if self.nextFrame:
                self.setNextFrameMode(False)
                break
Example #15
    def loadMonitorImage(self,fname, allconts=False, testmc=False):
        if fname[-4:].lower()==".avi":
            #TODO get first frame - look this up
            pass
        elif fname[-4:].lower()==".png" or fname[-4:].lower()==".jpg":
            a=cv2.imread(fname, 0) #note 0 implies grayscale
            #getcontours
        else:
            #Error
            pass
        if self.mcflipx==True and self.mcflipy==True:
            a=cv2.flip(a,-1)
        elif self.mcflipx==True and self.mcflipy==False:
            a=cv2.flip(a,0)
        if self.mcflipy==True and self.mcflipx==False:
            a=cv2.flip(a,1)
        if self.crop1 is not None and self.crop2 is not None:
            #print str(self.crop1)
            #print str(self.crop2)
            y_lo = np.min([self.crop1[1], self.crop2[1]])
            y_hi = np.max([self.crop1[1], self.crop2[1]])
            x_lo = np.min([self.crop1[0], self.crop2[0]])
            x_hi = np.max([self.crop1[0], self.crop2[0]])
            a = a[y_lo:y_hi, x_lo:x_hi]

            #TODO add other digit sizes
        if testmc==True:
            self.trlist=[]
            for dsize in self.dsizes:
                (self.tmonimage,self.dlist,rlist)=self.getContours(a,dsize)
                self.trlist+=rlist
            self.drawMonitorTest()
        else:
            if allconts==False:
                (self.monimage,self.dlist,self.rlist)=self.getContours(a,self.d1size)
                self.drawMonitor()
            else:
                (self.monimage,self.rlist1,self.rlist2)=self.getAllContours(a)
                self.drawMonitor(allconts=True)
Example #16
	def rotate_imgs(self):
		'''
			Rotates left and right images to proper orientation. Note this is done with transpose and flip
			as opposed to cv2.warpAffine to improve speed.
		''' 
		self.rotated_image_left = cv2.flip(cv2.transpose(self.raw_image_left), 0)
		self.rotated_image_right = cv2.flip(cv2.transpose(self.raw_image_right), 1)
Example #17
def callback(data):

  bridge = CvBridge()
#  cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
  cv_image = bridge.imgmsg_to_cv2(data, "mono8")
  cv_image = cv2.flip(cv_image, 1)
  color_presentation(cv_image)
Example #18
def play(dev=0):
    cap = cv.VideoCapture(dev)
    # Background subtractor: history = 500, varThreshold = 16,
    # detectShadows = False
    bgsub = cv.createBackgroundSubtractorMOG2(500, 16, False)
    key = 0
    pause = False
    list_frames = []
    img_index = 0

    while True:
        key = cv.waitKey(1) & 0xFF
        if key == 27:
            break
        if key == 32:
            pause = not pause
        if pause:
            # Skip grabbing and processing frames while paused.
            continue

        ret, frame = cap.read()

        bgs = bgsub.apply(frame)
        bgs_count = sum(sum(bgs))

        if bgs_count > MAX_BGS:
            list_frames.append(frame)

        if len(list_frames) > MAX_FRAMES:
            cv.imwrite(path + 'capt' + str(img_index) + '.png', cv.flip(frame, 1))
            img_index += 1
            list_frames = []

        cv.imshow('frame', cv.flip(bgs, 1))

    cap.release()
    cv.destroyAllWindows()
Example #19
 def run(self):
     i = 0
     cv2.namedWindow("image")
     cv2.moveWindow("image", 50, 50)
     while True:
         p = self.get_next_image_file()
         if p is None:
             break
         img = np.memmap(p, dtype=np.uint8, mode="r", shape=self.shape)
         F = LabeledMovieFrame(internal_storage_method=self.format, compression_level=int(self.quality))
         if self.rotate is not None and self.rotate in ["90", "180", "270"]:
             img = np.rot90(img)
             if self.rotate in ["180", "270"]:
                 img = np.rot90(img)
             if self.rotate in ["270"]:
                 img = np.rot90(img)
         if self.flip_vert:
             img = cv2.flip(img, 0)
         if self.flip_hor:
             img = cv2.flip(img, 1)
         if self.dsize.count("x") == 1:
             osize = tuple(map(lambda x: int(x), self.dsize.split("x")))
         else:
             factor = float(self.dsize)
             osize = int(img.shape[1]*factor), int(img.shape[0]*factor)
         img = cv2.resize(img, dsize=osize, interpolation=cv2.INTER_CUBIC)
         F.set_image(img)
         cv2.imshow("image", img)
         cv2.waitKey(40)
         i += 1
         self.frame_collection.append(F)
     self.frame_collection.write_to_file(self.output_file)
Example #20
def random_flip_img(img, horizontal_chance=0, vertical_chance=0):
    import cv2
    flip_horizontal = False
    if random.random() < horizontal_chance:
        flip_horizontal = True

    flip_vertical = False
    if random.random() < vertical_chance:
        flip_vertical = True

    if not flip_horizontal and not flip_vertical:
        return img

    flip_val = 1
    if flip_vertical:
        flip_val = -1 if flip_horizontal else 0

    if not isinstance(img, list):
        res = cv2.flip(img, flip_val) # 0 = X axis, 1 = Y axis,  -1 = both
    else:
        res = []
        for img_item in img:
            img_flip = cv2.flip(img_item, flip_val)
            res.append(img_flip)
    return res
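A short usage sketch (the dummy image and forced chances are illustrative; it assumes the random module is imported alongside the function): with both chances at 1.0 the call always flips around both axes, with the defaults it returns the input unchanged, and lists are flipped element-wise.

import cv2
import numpy as np

dummy = np.arange(12, dtype=np.uint8).reshape(3, 4)

# both flips forced -> flip_val ends up as -1 (both axes)
out = random_flip_img(dummy, horizontal_chance=1.0, vertical_chance=1.0)
assert np.array_equal(out, cv2.flip(dummy, -1))

# zero chance (the default) -> the input is returned unchanged
assert random_flip_img(dummy) is dummy

# lists are flipped element-wise
outs = random_flip_img([dummy, dummy], horizontal_chance=1.0)
assert all(np.array_equal(o, cv2.flip(dummy, 1)) for o in outs)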
Example #21
def getUImg(sid):
	#size can be 
	m=hashlib.sha256()
	m.update(sid)
	hsh=m.hexdigest()

	npar=np.zeros((8,8,3),np.uint8)
	img=np.zeros((256,256,3),np.uint8)
	n=0
	for i in range(0,8):
		for j in range(0,8):
			if(n==3):
				n=0
			npar[i][j][n]=17*int(hsh[(i+1)*(j+1)-1],16)
			npar[i][j][(n+1)%3]=255-npar[i][j][n]
			n+=1


	npar=cv2.resize(npar,(128,128))
	imgLU=npar
	imgRU=cv2.flip(npar,1)

	img[:128,:128]=imgLU
	img[:128,128:256]=imgRU
	img[128:256,:256]=cv2.flip(img[:128,:256],0)
	return img
Example #22
    def PopulateImages(self):

        """
        Populate necessary images for opencv to use
        :return: 0 if the video is done
        """
        if self.csvFlag:
            try:
                points = self.PointsFromCSV()
            except:
                return 0
            # img0 is a list of lists of points; each sublist corresponds to a joint,
            # and the last sublist corresponds to rogue points.
            self.img0 = self.PointsForJointsCSV(points)
        else:
            ret, frame = self.cam.read()
            if frame is None:
                return 0# video done streaming
            if self.rotation != 0:
                if self.rotation == 90:
                    frame = cv2.flip(cv2.transpose(frame), 1)
                elif self.rotation  == -90:
                    frame = cv2.flip(cv2.transpose(frame), 0)
                elif abs(self.rotation) == 180:
                    frame = cv2.flip(cv2.flip(frame,0), 1)
                else:
                    raise Exception("Improper rotation value passed.")
            if prm.DEBUG:
                self.vis = frame.copy()
            self.frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            return 1#Good flag
Example #23
 def transformImages(self, images, classes=None, rotate=False, crop=False):
     transformedImages = []
     transformedClasses = []
     for i in range(0, len(images)):
         image = images[i]
         transformedImages.append(image) # load the image in cv2 format
         transformedImages.append(cv2.flip(image, 1)) # add the horizontal mirror
         transformedImages.append(cv2.flip(image, 0)) # add the vertical mirror
         if rotate:
             transformedImages.append(self._rotateImage(image, 90))
             transformedImages.append(self._rotateImage(image, 270))
         if crop:
             transformedImages += self._getCrops(image)
         # TODO: add this transformation: http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html#affine-transformation
         if(classes is not None):
             animalClass = classes[i]
             transformedClasses.append(animalClass) # appending image and class in the same order keeps them paired
             transformedClasses.append(animalClass) # class for the horizontal mirror
             transformedClasses.append(animalClass) # class for the vertical mirror
             if rotate:
                 transformedClasses.append(animalClass)
                 transformedClasses.append(animalClass)
             if crop:
                 for j in range(6):
                     transformedClasses.append(animalClass)
     return (transformedImages, transformedClasses)
Example #24
def getImage(i, imOriginal, mean):
    if i == 0: #return a central crop of 99x99 pixels
        resized = cv2.resize(imOriginal, (128, 128))
        resized = resized * 256.0 - mean
        return resized[14:113, 14:113]
    rot = np.random.uniform(0,360,1).astype(int)[0] #Random rotations
    rot = 90 * np.random.uniform(0,4,1).astype(int)[0] #Random rotations
#    rot = 0
    im_size = imOriginal.shape[0]
    if (np.random.rand() > 0.5):
       if (np.random.rand() > 0.5):
         imOriginal = cv2.flip(imOriginal,0)
       else:
         imOriginal = cv2.flip(imOriginal,1)
    scale = np.random.uniform(0.9,1.1)
    mat = cv2.getRotationMatrix2D((im_size / 2, im_size / 2), rot, scale=scale)
    resized = cv2.warpAffine(imOriginal, mat, (im_size, im_size), borderValue=(255,255,255))
    img_out = np.zeros((resized.shape[0], resized.shape[1], 3), dtype=np.uint8)
    img_orig = resized[:,:,0]
    img_btop = 255-black_tophat(img_orig, selem)
    img_wtop = 255-white_tophat(img_orig, selem)
    img_out[:, :, 1] = img_btop
    img_out[:, :, 2] = img_wtop
    
    resized = cv2.resize(img_out, (128, 128))
    
    resized = resized * 256.0 - mean # values drop to around 4 if the mean is not subtracted
#    offsetX = np.random.uniform(10,18,1).astype(int)[0] 
#    offsetY = np.random.uniform(10,18,1).astype(int)[0] 
    offsetX = np.random.uniform(0,28,1).astype(int)[0] #random crop offset
    offsetY = np.random.uniform(0,28,1).astype(int)[0] #random crop offset
    return resized[offsetY:(99+offsetY), offsetX:(99+offsetX)]
Example #25
def drawHistogram(height, histogram, k):

    nhist = len(histogram)

    s = 2
    while (2*height > s*nhist-1): s += 1

    himage = 220 * numpy.ones( (height, s*nhist-1, 3), dtype='uint8' )

    hmax = numpy.max(histogram)

    for c in range(nhist):
        label = c // k
        color = ccolors[ label % len(ccolors) ]
        hc = histogram[c] / hmax
        y = int(hc*height)
        if (y):
            startcol = s*c
            endcol = s*(c+1)-1
            color = numpy.array(color).reshape( (1,1,3) )
            himage[0:y, startcol:endcol, :] = color

    cv2.flip(himage, 0, himage)

    return himage
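The final cv2.flip(himage, 0, himage) is what makes the bars grow upward: the rows are filled starting at index 0 (the top of the image), so a vertical flip moves them to the bottom. A tiny illustrative check (the array sizes are arbitrary):

import cv2
import numpy as np

h = np.zeros((10, 4, 3), dtype=np.uint8)
h[0:3, 1:3, :] = 255           # a "bar" of height 3 drawn from the top rows
cv2.flip(h, 0, h)              # in-place vertical flip
assert h[7:10, 1:3, :].all()   # after flipping, the bar sits at the bottom
assert not h[0:7].any()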
Example #26
    def flippingOp(self):
        # read image
        im = cv2.imread(self.Image)

        # flip the image horizontally
        flipped = cv2.flip(im, 1)
        cv2.imshow("Flipped Horizontally", flipped)
        (b, g, r) = flipped[235, 259]
        print("red=%d, green=%d, blue=%d" % (r,g,b))
        cv2.waitKey(0)

        # flip the image vertically
        flipped = cv2.flip(im, 0)
        cv2.imshow("Flipped Vertically", flipped)
        cv2.waitKey(0)

        # flip along both axes
        flipped = cv2.flip(im, -1)
        cv2.imshow("Flipped Along Both Axes", flipped)
        cv2.waitKey(0)

        flipped = cv2.flip(im, 1)
        cv2.imshow("1", flipped)
        cv2.waitKey(0)
        rotated = imutils.rotate(flipped, 45)
        cv2.imshow("2", rotated)
        cv2.waitKey(0)
        flippedAgain = cv2.flip(rotated, 0)
        cv2.imshow("3", flippedAgain)
        cv2.waitKey(0)
        (b, g, r) = flippedAgain[189, 441]
        print("red=%d, green=%d, blue=%d" % (r,g,b))

        return
Example #27
def grabFrames():
	global imageLeft
	global imageRight

	# global filterStackLeft
	# global filterStackRight

	returnLeft, tempLeft = captureLeft.read()
	returnRight, tempRight = captureRight.read()


	imageLeft = cv2.flip(cv2.transpose(tempLeft), 0) #imageLeft = cv2.flip(tempLeft, -1)
	imageRight = cv2.flip(cv2.transpose(tempRight), 0) #imageRight = tempRight#

	# grayL = cv2.cvtColor(imageLeft, cv2.COLOR_RGBA2GRAY)
	# grayR = cv2.cvtColor(imageRight, cv2.COLOR_RGBA2GRAY)


	# filterStackLeft = np.roll(filterStackLeft, 1, axis=0)
	# filterStackLeft[0] = grayL

	# filterStackRight = np.roll(filterStackRight, 1, axis=0)
	# filterStackRight[0] = grayR

	imageLeft = bilateralFilter(imageLeft)
	imageRight = bilateralFilter(imageRight)
Example #28
def convert_to_pickle(infilename, outfilename, dsize, channel, rotate, format, quality, flip_vert, flip_hor):
    cam = cv2.VideoCapture(infilename)
    fc = lm.FrameCollection()
    cv2.namedWindow("Image")
    cv2.moveWindow("Image", 50, 50)
    while True:
        (ret, im) = cam.read()
        if not ret:
            break
        if dsize is not None:
            osize = tuple(map(lambda x: int(x), dsize.split("x")))
            im=cv2.resize(im, dsize=osize, interpolation=cv2.INTER_CUBIC)
        if rotate is not None and rotate in ["90", "180", "270"]:
            im = np.rot90(im)
            if rotate in ["180", "270"]:
                im = np.rot90(im)
            if rotate in ["270"]:
                im = np.rot90(im)
        if flip_vert:
            im = cv2.flip(im, 0)
        if flip_hor:
            im = cv2.flip(im, 1)
        cv2.imshow("Image", im)
        key = cv2.waitKey(1)
        if key == 27:
            quit()
        f = lm.LabeledMovieFrame(internal_storage_method=format, compression_level=int(quality))
        f.create_channel(channel=channel)
        f.set_image(im, channel=channel)
        f.set_default_channel(channel=channel)
        fc.append(f)
    fc.write_to_file(outfilename)
Example #29
def rotate(input_file,output_file,mode) :
	img = cv2.imread(input_file,cv2.IMREAD_GRAYSCALE)
	rows,cols= img.shape
	temp = np.zeros((cols,rows),np.uint8)
	cv2.transpose(img,temp)
	cv2.flip(temp,mode,temp)	
	cv2.imwrite(output_file,temp)
Example #30
def process_context_detail_args(args):
    # TODO: validate destination path up-front
    # TODO: validate mole names up-front

    context_image = mel.lib.image.load_image(args.context)
    detail_image = mel.lib.image.load_image(args.detail)

    if args.rot90:
        context_image = rotated90(context_image, args.rot90)
        detail_image = rotated90(detail_image, args.rot90)

    if args.rot90_context:
        context_image = rotated90(context_image, args.rot90_context)

    if args.rot90_detail:
        detail_image = rotated90(detail_image, args.rot90_detail)

    if args.h_mirror:
        context_image = cv2.flip(context_image, 1)
        detail_image = cv2.flip(detail_image, 1)

    if args.h_mirror_context:
        context_image = cv2.flip(context_image, 1)

    if args.h_mirror_detail:
        detail_image = cv2.flip(detail_image, 1)

    return context_image, detail_image
Example #31
 def update_frame(self):
     ret, self.image = self.capture.read()
     self.image = cv2.flip(self.image, 1)
     self.displayImage(self.image, 1)
Example #32
import cv2, pickle
import numpy as np
import pyrealsense2 as rs
from coordinate_convert import *

coor = None
pipeline = init_color_camera()
try:
    while True:

        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue
        color_image = np.asanyarray(color_frame.get_data())
        color_image = cv2.flip(color_image, 0)
        
        cap = detect_cap(color_image)
        coor = get_axis(cap)
        for one_cap in cap:
            color_image = cv2.circle(color_image, (one_cap[0], one_cap[1]), one_cap[2], (0, 255, 0))
        if coor is not None:
            print(coor, end='\r')
        else:
            print('{0} CAP(S) DETECTED                 '.format(len(cap)), end='\r')
    
        # Show images
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', color_image)
        key = cv2.waitKey(100)
        if key == 27:
            break
finally:
    # assumed cleanup for this truncated snippet: stop the RealSense pipeline
    pipeline.stop()
Example #33
def flip(img, dir):
    return cv2.flip(img, dir)
Example #34
def main(**kwargs):
    opt._parse(kwargs)
    # n_gpu = utils.set_gpu(opt.gpu)

    test_dataset = FashionAIKeypoints(opt, phase='test')
    encoder = test_dataset.encoder
    df = utils.data_frame_template()

    print('Testing: {}'.format(opt.category))
    print('Testing sample number: {}'.format(len(test_dataset)))
    cudnn.benchmark = True

    net1 = getattr(models, opt.model[0])(opt)
    checkpoint = torch.load(opt.load_checkpoint_path)  # Must be before cuda
    net1.load_state_dict(checkpoint['state_dict'])
    net1 = net1.cuda()
    # net1 = DataParallel(net)
    net1.eval()

    net2 = getattr(models, opt.model[1])(opt)
    checkpoint = torch.load(opt.load_checkpoint_path_2)  # Must be before cuda
    net2.load_state_dict(checkpoint['state_dict'])
    net2 = net2.cuda()
    # net2 = DataParallel(net2)
    net2.eval()

    for idx in tqdm(range(len(test_dataset))):
        img_path = test_dataset.get_image_path(idx)
        img0 = cv2.imread(img_path)  # BGR
        img0_flip = cv2.flip(img0, 1)
        img_h, img_w, _ = img0.shape

        scale = opt.img_max_size / max(img_w, img_h)

        hm_pred = utils.compute_keypoints(opt, img0, net1, encoder)
        hm_pred_flip = utils.compute_keypoints(opt,
                                               img0_flip,
                                               net1,
                                               encoder,
                                               doflip=True)
        hm_pred2 = utils.compute_keypoints(opt, img0, net2, encoder)
        hm_pred_flip2 = utils.compute_keypoints(opt,
                                                img0_flip,
                                                net2,
                                                encoder,
                                                doflip=True)

        x, y = encoder.decode_np(
            hm_pred + hm_pred_flip + hm_pred2 + hm_pred_flip2, scale,
            opt.hm_stride, (img_w / 2, img_h / 2))
        keypoints = np.stack([x, y, np.ones(x.shape)], axis=1).astype(np.int16)

        row = test_dataset.anno_df.iloc[idx]
        df.at[idx, 'image_id'] = row['image_id']
        df.at[idx, 'image_category'] = row['image_category']

        for k, kpt_name in enumerate(opt.keypoints[opt.category]):
            df.at[idx, kpt_name] = str(keypoints[k, 0]) + '_' + str(
                keypoints[k, 1]) + '_1'

        if args.visual:
            kp_img = utils.draw_keypoints(img0, keypoints)
            save_img_path = str(
                opt.db_path /
                'tmp/ensemble_{0}{1}.png'.format(opt.category, idx))
            cv2.imwrite(save_img_path, kp_img)

    df.fillna('-1_-1_-1', inplace=True)
    print(df.head(5))
    df.to_csv(opt.pred_path / 'ensemble_{}.csv'.format(opt.category),
              index=False)
Example #35
    def update(self):
        """
        Updates the current display based on current information of the agent
        """

        if self.step % settings.steps_slam == 0:
            self.agent.slam(self.bytearray)

        array = np.frombuffer(self.bytearray, dtype=np.uint8)
        self.grid = np.reshape(array,
                               [settings.image_size, settings.image_size])

        self.im = self.to_image()
        self.draw_agent(self.im)

        # self.im = cv2.cvtColor(cv2.Canny(self.im, 100,150), cv2.COLOR_GRAY2RGB)
        self.draw_closest(self.im)
        self.draw_trajectory(self.im)
        self.draw_frontier_centroids(self.im)
        self.draw_wps_delayed(self.im, self.waypoints)

        self.im = cv2.flip(self.im, 0)
        self.draw_elements(self.im)
        self.draw_speed(self.im)

        # im = cv2.filter2D(im,-1,np.ones((5,5),np.float32)/25)
        self.im = cv2.blur(self.im, (3, 3))
        cv2.imshow('Simultaneous Localization and Mapping (SLAM)', self.im)
        key = cv2.waitKey(1) & 0xFF
        self.step += 1

        array = np.frombuffer(self.bytearray, dtype=np.uint8)
        gray = np.reshape(array, [settings.image_size, settings.image_size])

        #self.agent.current_target = (450, 400)
        print("pos:", self.agent.pos)
        # planning part
        if self.step % settings.steps_lee == 0 and len(self.centroids) != 0:
            print("***start path planning")
            print("***agent pos:", self.agent.pos)
            print("***target:", self.centroids[0])
            array = np.frombuffer(self.bytearray, dtype=np.uint8)
            grid = np.reshape(array,
                              [settings.image_size, settings.image_size])
            obst = preprocess_grid(grid, True)
            obst = grow_obstacle(obst)
            waypoints = lee_planning_path(
                obst, (self.agent.pos[0], self.agent.pos[1]),
                self.centroids[0])
            print("***end path planning")
            if waypoints:
                self.agent.current_target = waypoints[0]

                self.waypoints = waypoints
        print("wp in action:", self.agent.current_target)
        print("all_waypoints:", self.waypoints)
        if self.agent.current_target:
            cv2.drawMarker(
                self.im,
                (self.agent.current_target[0], -self.agent.current_target[1]),
                (0, 255, 0),
                markerSize=25,
                markerType=cv2.MARKER_STAR)
Example #36
            (255, 255, 255), 2, cv2.LINE_AA)

cv2.namedWindow('Paint', cv2.WINDOW_AUTOSIZE)

# Load the video
camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)

letterToDraw = 97
correct = False
incorrect = False

# Keep looping
while True:
    # Grab the current paintWindow
    (grabbed, frame) = camera.read()
    frame = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Add the coloring options to the frame
    frame = cv2.rectangle(frame, (40, 1), (140, 65), (122, 122, 122), -1)
    frame = cv2.rectangle(frame, (160, 1), (255, 65), colors[0], -1)
    frame = cv2.rectangle(frame, (275, 1), (370, 65), colors[1], -1)
    frame = cv2.rectangle(frame, (390, 1), (485, 65), colors[2], -1)
    frame = cv2.rectangle(frame, (505, 1), (600, 65), colors[3], -1)
    cv2.putText(frame, "CLEAR ALL", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, "BLACK", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, "GREEN", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, "RED", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
Example #37
    r = 0
    t = 2020
    b = 0

    #################################################################
    pixelDraw = 5
    pre_predict = -1

    is_start_time_back = -1 # timer for 'back': if held longer than 1.5s, erase everything
    is_start_time_write = -1
    while True:
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        ret, image_np = cap.read()
        if not ret:
            continue
        image_np = cv2.flip(image_np, 1)
        
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")

        # actual detection
        boxes, scores, classes = detector_utils.detect_objects(
            image_np, detection_graph, sess)

        # DRAWWWW
        (left, right, top, bottom) = (boxes[0][1] * im_width, boxes[0][3] * im_width,
                                          boxes[0][0] * im_height, boxes[0][2] * im_height)
        
        cv2.putText(image_np, str(scores[0]), (30,30), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,222))
Example #38
    def generate_depth_env_maps(self, depth_maps):

        depth_env_maps = []

        h_depth_map, w_depth_map = depth_maps[0].shape
        center = np.array(
            [int(depth_maps.shape[2] / 2),
             int(depth_maps.shape[1] / 2)])

        # getting max and min co-ords of the fish eye image
        max_x, max_y = self.max_coord(center)
        min_x, min_y = self.min_coord(center)

        cyl = np.zeros((h_depth_map, int(max_x - min_x) + 1), np.float32)
        mask = np.zeros((cyl.shape[0], cyl.shape[1]))
        xyz_all = np.zeros((h_depth_map, w_depth_map, 3))

        # creating co-ord matrix
        x = np.arange(w_depth_map)
        y = np.arange(h_depth_map)
        xx, yy = np.meshgrid(x, y)
        xyz_all[:, :, 0] = yy - center[1]
        xyz_all[:, :, 1] = xx - center[0]

        # fish-eye co-ord mapping
        xy = np.round(self.convert2cyl_whole(xyz_all, center))
        xy[:, :, 1] = xy[:, :, 1] - min_x
        ind_vals, ind = np.unique(xy.astype(np.int32).reshape(-1, 2),
                                  axis=0,
                                  return_index=True)

        cyl[ind_vals[:, 0], ind_vals[:, 1]] = depth_maps[0].reshape((-1))[ind]
        mask[ind_vals[:, 0], ind_vals[:, 1]] = 255

        for depthMapNo in range(depth_maps.shape[0]):
            cyl[ind_vals[:, 0],
                ind_vals[:, 1]] = depth_maps[depthMapNo].reshape((-1))[ind]
            mask[ind_vals[:, 0], ind_vals[:, 1]] = 255

            # fill the top part
            mask_temp = mask[:mask.shape[0] // 2, :]
            cyl_temp = cyl[:cyl.shape[0] // 2, :]
            cyl_temp[np.where(mask_temp == 0)[0],
                     np.where(mask_temp == 0)[1]] = cyl[self.fillMatUp[:, 0],
                                                        self.fillMatUp[:, 1]]
            cyl[:cyl.shape[0] // 2, :] = cyl_temp

            # fill the bottom part
            mask_temp = cv2.flip(mask, 0)[:mask.shape[0] // 2, :]
            cyl_temp = cv2.flip(cyl, 0)[:cyl.shape[0] // 2, :]
            cyl_temp[np.where(mask_temp == 0)[0],
                     np.where(mask_temp == 0)[1]] = cv2.flip(
                         cyl, 0)[self.fillMatDown[:, 0], self.fillMatDown[:,
                                                                          1]]
            cyl[cyl.shape[0] // 2:, :] = cv2.flip(cyl_temp, 0)

            # expanding the map by pasting flipped cropped images on left and right
            result = cv2.copyMakeBorder(cyl,
                                        0,
                                        0,
                                        int(cyl.shape[1] / 2),
                                        int(cyl.shape[1] / 2),
                                        cv2.BORDER_CONSTANT,
                                        value=(0, 0, 0))

            # paste left
            side = cyl[:, 0:int(cyl.shape[1] / 2)]
            side = cv2.flip(side, 1)
            result[:, 0:side.shape[1]] = side

            # paste right
            side = cyl[:, int(cyl.shape[1] / 2):cyl.shape[1]]
            side = cv2.flip(side, 1)
            result[:, result.shape[1] - side.shape[1]:result.shape[1]] = side
            depth_env_maps.append(result)

        return np.array(depth_env_maps)
Example #39
        text_colour = (255, 0, 0)
    return lower, upper, text_colour


cap = cv2.VideoCapture(0)

# Use trackbar to switch between tracking a red, blue or green object
cv2.namedWindow('img')
cv2.createTrackbar('RGB', 'img', 0, 2, choose_colour)

# i = 0
colour_dict = {0: "Red", 1: "Green", 2: "Blue"}

while (True):
    ret, frame = cap.read()
    img = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Obtain position of trackbar to pick which colour to track
    a_pos = cv2.getTrackbarPos('RGB', 'img')
    text = colour_dict[a_pos]

    # Use position of trackbar to pick colour and then isolate objects of matching colour
    lower, upper, text_colour = choose_colour(a_pos)
    fnres = color_isolate(hsv, lower, upper)

    # Extract contours from image after processing
    _, contours, _ = cv2.findContours(fnres, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    try:
        # Obtain contour of greatest size
Example #40
                box_size[0], box_size[1])
bbox = bbox_initial
# Tracking status, -1 for not tracking, 0 for unsuccessful tracking, 1 for successful tracking
tracking = -1

# Text display positions
positions = {'fps': (15, 20)}

for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text

    image = frame.array
    image = cv2.flip(image, 1)
    timer = cv2.getTickCount()

    diff = cv2.absdiff(bg, image)
    mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    # Threshold the mask
    th, thresh = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
    # Opening, closing and dilation
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    img_dilation = cv2.dilate(closing, kernel, iterations=2)
    # Get mask indexes
    imask = img_dilation > 0
    # Get foreground from mask
    foreground = mask_array(image, imask)
    foreground_display = foreground.copy()
Example #41
    def load_data(self, is_train, repeat, mirror=None):
        if (mirror is not None):
            with open(mirror, 'r') as f:
                lines = f.readlines()
                assert len(lines) == 1
                mirror_idx = lines[0].strip().split(',')
                mirror_idx = list(map(int, mirror_idx))
        xy = np.min(self.landmark, axis=0).astype(np.int32)
        zz = np.max(self.landmark, axis=0).astype(np.int32)
        wh = zz - xy + 1

        center = (xy + wh/2).astype(np.int32)
        img = cv2.imread(self.path)
        boxsize = int(np.max(wh)*1.2)
        xy = center - boxsize//2
        x1, y1 = xy
        x2, y2 = xy + boxsize
        height, width, _ = img.shape
        dx = max(0, -x1)
        dy = max(0, -y1)
        x1 = max(0, x1)
        y1 = max(0, y1)

        edx = max(0, x2 - width)
        edy = max(0, y2 - height)
        x2 = min(width, x2)
        y2 = min(height, y2)

        # for x, y in (self.landmark + 0.5).astype(np.int32):
        #    cv2.circle(img, (x, y), 1, (0, 0, 255))
        imgT = img[y1:y2, x1:x2]
        if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
            imgT = cv2.copyMakeBorder(imgT, dy, edy, dx, edx, cv2.BORDER_CONSTANT, 0)
        # if imgT.shape[0] == 0 or imgT.shape[1] == 0:
        #     imgTT = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        #     for x, y in (self.landmark+0.5).astype(np.int32):
        #         cv2.circle(imgTT, (x, y), 1, (0, 0, 255))
        #     cv2.imshow('0', imgTT)
        #     if cv2.waitKey(0) == 27:
        #         exit()

        if is_train:
            imgT = cv2.resize(imgT, (self.image_size, self.image_size))
        landmark = (self.landmark - xy)/boxsize

        assert (landmark >= 0).all(), str(landmark) + str([dx, dy])
        assert (landmark <= 1).all(), str(landmark) + str([dx, dy])
        self.imgs.append(imgT)
        self.landmarks.append(landmark)

        if is_train:
            while len(self.imgs) < repeat:
                angle = np.random.randint(-20, 20)
                cx, cy = center
                cx = cx + int(np.random.randint(-boxsize*0.1, boxsize*0.1))
                cy = cy + int(np.random.randint(-boxsize * 0.1, boxsize * 0.1))
                M, landmark = rotate(angle, (cx, cy), self.landmark)

                imgT = cv2.warpAffine(img, M, (int(img.shape[1]*1.1), int(img.shape[0]*1.1)))
                # for x, y in (landmark + 0.5).astype(np.int32):
                #    cv2.circle(imgT, (x, y), 1, (0, 0, 255))
                wh = np.ptp(landmark, axis=0).astype(np.int32) + 1
                size = np.random.randint(int(np.min(wh)), np.ceil(np.max(wh) * 1.25))
                xy = np.asarray((cx - size // 2, cy - size//2), dtype=np.int32)
                landmark = (landmark - xy) / size
                if (landmark < 0).any() or (landmark > 1).any():
                    continue

                x1, y1 = xy
                x2, y2 = xy + size
                height, width, _ = imgT.shape
                dx = max(0, -x1)
                dy = max(0, -y1)
                x1 = max(0, x1)
                y1 = max(0, y1)

                edx = max(0, x2 - width)
                edy = max(0, y2 - height)
                x2 = min(width, x2)
                y2 = min(height, y2)

                imgT = imgT[y1:y2, x1:x2]
                if (dx > 0 or dy > 0 or edx >0 or edy > 0):
                    imgT = cv2.copyMakeBorder(imgT, dy, edy, dx, edx, cv2.BORDER_CONSTANT, 0)

                imgT = cv2.resize(imgT, (self.image_size, self.image_size))

                if mirror is not None and np.random.choice((True, False)):
                    landmark[:, 0] = 1 - landmark[:, 0]
                    landmark = landmark[mirror_idx]
                    imgT = cv2.flip(imgT, 1)
                self.imgs.append(imgT)
                self.landmarks.append(landmark)
Example #42
    def generate_map(self, background):
        # Easier if everything is in int due to cv2 calls
        background = (background * 255).astype(np.uint8)
        center = np.array(
            [int(background.shape[1] // 2),
             int(background.shape[0] // 2)])

        max_x, max_y = self.max_coord(center)
        min_x, min_y = self.min_coord(center)
        mask = np.zeros((background.shape[0], int(max_x - min_x) + 1),
                        np.uint8)

        # creating co-ord matrix
        xyz_all = np.zeros((background.shape[0], background.shape[1], 3))
        x = np.linspace(0, background.shape[1] - 1, background.shape[1])
        y = np.linspace(0, background.shape[0] - 1, background.shape[0])
        xx, yy = np.meshgrid(x, y)
        xyz_all[:, :, 0] = yy - center[1]
        xyz_all[:, :, 1] = xx - center[0]

        # fish-eye co-ord mapping
        xy = np.round(self.convert2cyl_whole(xyz_all, center))
        xy[:, :, 1] = xy[:, :, 1] - min_x
        ind_vals, ind = np.unique(xy.astype(np.int32).reshape(-1, 2),
                                  axis=0,
                                  return_index=True)

        # creating fish-eye image
        if background.ndim == 3:
            cyl = np.zeros((background.shape[0], int(max_x - min_x) + 1, 3),
                           np.uint8)
            cyl[ind_vals[:, 0], ind_vals[:, 1]] = background.reshape(
                (-1, 3))[ind]
        else:
            cyl = np.zeros((background.shape[0], int(max_x - min_x) + 1),
                           np.float64)
            cyl[ind_vals[:, 0], ind_vals[:, 1]] = background.reshape((-1))[ind]

        mask[ind_vals[:, 0], ind_vals[:, 1]] = 255

        self.fillMatUp, self.fillMatDown = self.fill_matrices(cyl, mask)

        # filling the bottom portion
        mask_temp = cv2.flip(mask, 0)[:mask.shape[0] // 2, :]
        cyl_temp = cv2.flip(cyl, 0)[:cyl.shape[0] // 2, :]
        cyl_temp[np.where(mask_temp == 0)[0],
                 np.where(mask_temp == 0)[1]] = cv2.flip(
                     cyl, 0)[self.fillMatDown[:, 0], self.fillMatDown[:, 1]]
        cyl[-cyl_temp.shape[0]:, :] = cv2.flip(cyl_temp, 0)

        # filling gaps
        # filling the top portion
        mask_temp = mask[:mask.shape[0] // 2, :]
        cyl_temp = cyl[:cyl.shape[0] // 2, :]
        cyl_temp[np.where(mask_temp == 0)[0],
                 np.where(mask_temp == 0)[1]] = cyl[self.fillMatUp[:, 0],
                                                    self.fillMatUp[:, 1]]
        cyl[:cyl_temp.shape[0], :] = cyl_temp

        result = cv2.copyMakeBorder(
            cyl,
            0,
            0,
            int(cyl.shape[1] / 2),
            int(cyl.shape[1] / 2),
            cv2.BORDER_CONSTANT,
            value=(0, 0, 0))  # TODO: check issue with float values ?
        mask_result = cv2.copyMakeBorder(
            mask,
            0,
            0,
            int(mask.shape[1] / 2),
            int(mask.shape[1] / 2),
            cv2.BORDER_CONSTANT,
            value=0)  # TODO: check issue with float values ?

        # attaching cropped flipped image to left image and mask
        side = cyl[:, 0:int(cyl.shape[1] / 2)]
        side = cv2.flip(side, 1)  # TODO: check issue with float values ?
        result[:, 0:side.shape[1]] = side

        mask_side = mask[:, :cyl.shape[1] // 2]
        mask_side = cv2.flip(mask_side,
                             1)  # TODO: check issue with float values ?
        mask_result[:, :mask_side.shape[1]] = mask_side

        # attaching cropped flipped image to right
        side = cyl[:, cyl.shape[1] // 2:]
        side = cv2.flip(side, 1)  # TODO: check issue with float values ?
        result[:, result.shape[1] - side.shape[1]:result.shape[1]] = side

        mask_side = mask[:, cyl.shape[1] // 2:]
        mask_side = cv2.flip(mask_side,
                             1)  # TODO: check issue with float values ?
        mask_result[:, mask_result.shape[1] - side.shape[1]:] = mask_side

        if result.ndim == 3:
            blur = cv2.GaussianBlur(result, (15, 15),
                                    0)  # TODO: check issue with float values ?
            mask_result = np.tile(np.expand_dims(mask_result, axis=-1),
                                  (1, 1, 3))
            result = result + ((blur - result) & ~mask_result)

        return result / 255.0
Example #43
def match_face(unlockFaceId):
    faceCascPath = "modules/face_lock_unlock/face_recognition/haarcascade_frontalface_default.xml"
    eyeCascadePath = "modules/face_lock_unlock/face_recognition/haarcascade_eye.xml"
    faceCascade = cv2.CascadeClassifier(faceCascPath)
    eyeCascade = cv2.CascadeClassifier(eyeCascadePath)

    cam = cv2.VideoCapture(1)
    if cam.read()[0] == False:
        cam = cv2.VideoCapture(0)
    recog = cv2.face.LBPHFaceRecognizer_create()
    recog.read(
        'modules/face_lock_unlock/face_recognition/recognized/training.yml')

    #os.system("dir")
    faceId = 0
    matchFrameCount = 0
    flagFaceMatched = False

    while True:
        img = cam.read()[1]
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray,
                                             1.1,
                                             5,
                                             flags=cv2.CASCADE_SCALE_IMAGE)
        for (x, y, w, h) in faces:
            face = gray[y:y + h, x:x + w]
            eyes = eyeCascade.detectMultiScale(face,
                                               1.1,
                                               5,
                                               flags=cv2.CASCADE_SCALE_IMAGE)
            if len(eyes) == 2:
                faceId, confidence = recog.predict(face)
                if confidence < 50:
                    profile = getProfileDataById(str(faceId))
                    name = profile[1]
                    if faceId in unlockFaceId:
                        matchFrameCount += 0.5
                    else:
                        matchFrameCount = 0
                else:
                    name = "Unknown"
                    matchFrameCount = 0
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.putText(img, "Name- " + name, (x, y + h),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2)

                matchFrameCountPercentage = int(matchFrameCount * 100 / 10)
                if matchFrameCountPercentage <= 10:
                    cv2.putText(img, "Matching... " +
                                str(matchFrameCountPercentage) + "%",
                                (x, y + h + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                                (0, 0, 255), 2)  #red
                elif matchFrameCountPercentage <= 20:
                    cv2.putText(img, "Matching... " +
                                str(matchFrameCountPercentage) + "%",
                                (x, y + h + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                                (0, 165, 255), 2)  #orange
                elif matchFrameCountPercentage <= 60:
                    cv2.putText(img, "Matching... " +
                                str(matchFrameCountPercentage) + "%",
                                (x, y + h + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                                (0, 255, 255), 2)  #yellow
                elif matchFrameCountPercentage <= 99:
                    cv2.putText(img, "Matching... " +
                                str(matchFrameCountPercentage) + "%",
                                (x, y + h + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                                (50, 205, 154), 2)  #lime green
                else:
                    cv2.putText(
                        img, "Matched " + str(matchFrameCountPercentage) + "%",
                        (x, y + h + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                        (0, 255, 0), 2)
                    flagFaceMatched = True
                    with open("match_face_result", "w") as f:
                        f.write("True")
                    break
                #print ("id = " + str(faceId) + " , confidence = " + str(confidence))

        if flagFaceMatched == True:
            break
        cv2.imshow("Face Recognition Running", img)
        cv2.waitKey(1)
    cam.release()
    cv2.destroyAllWindows()

    return flagFaceMatched
Example #44
sys.path.append('/usr/local/lib/python3.5/site-packages')
import cv2
import os
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
face_detector = cv2.CascadeClassifier(
    '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id and the corresponding name
face_id = input('\n enter user id end press <return> ==>  ')
print("\n [INFO] Initializing face capture. Look the camera and wait ...")
# Initialize individual sampling face count
count = 0
while (True):
    ret, img = cam.read()
    img = cv2.flip(img, -1)  # flip the video image around both axes (180 degree rotation)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                    gray[y:y + h, x:x + w])
        cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break
    elif count >= 30:  # Take 30 face sample and stop video
        break
# Do a bit of cleanup (assumed: release the camera and close windows)
cam.release()
cv2.destroyAllWindows()
Example #45
for line in lines:
    source_path = line[2]
    filename = source_path.split('/')[-1]
    current_path = 'data/IMG/' + filename
    image = cv2.imread(current_path)
    images.append(image)
    measurement = float(line[3]) - correction
    measurements.append(measurement)


#Flipping Images and Steering Measurements
augmented_images, augmented_measurements = [],[]
for image, measurement in zip(images, measurements):
    augmented_images.append(image)
    augmented_measurements.append(measurement)
    augmented_images.append(cv2.flip(image,1))
    augmented_measurements.append(measurement * -1.0)
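After this augmentation loop the dataset doubles, with each mirrored frame paired with the negated steering angle (a left turn in the original is a right turn in the mirror). A small illustrative check, assuming the images/measurements lists built above are non-empty:

assert len(augmented_images) == 2 * len(images)
assert augmented_measurements[1] == -augmented_measurements[0]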
##
#X_train = np.array(images)
#Y_train = np.array(measurements)

X_train = np.array(augmented_images)
Y_train = np.array(augmented_measurements)

from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers import Cropping2D
from keras.models import Model
import matplotlib.pyplot as plt
Example #46
def engine():
    frame_num = 0
    top, right, bottom, left = 10, 350, 225, 590
    aWeight = 0.5

    capture_video = cv2.VideoCapture(0)

    capture_video.set(3, 320)
    capture_video.set(4, 240)
    width = int(capture_video.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
    height = int(capture_video.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)

    four_cc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('output.mp4', four_cc, 20.0, (640, 480))
    while (capture_video.isOpened()):
        ret, frame = capture_video.read()

        if ret == True:
            frame = cv2.flip(frame, 1)
            frame = imutils.resize(frame, width=640)
            frame_cloned = frame.copy()
            roi = frame[top:bottom, right:left]
            gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (7, 7), 0)

            if frame_num < 30:
                img_procession.run_avg(gray, aWeight)
            else:
                hand = img_procession.segment(gray)
                horizontal, vertical = img_procession.get_move_direction(gray)

                if horizontal == -1:
                    tmanip.move_left()
                elif horizontal == 1:
                    tmanip.move_right()

                if vertical == -1:
                    tmanip.move_down()
                elif vertical == 1:
                    tmanip.move_up()

                if hand is not None:
                    (threshold, segmented) = hand
                    cv2.drawContours(frame_cloned, [segmented + (right, top)],
                                     -1, (0, 0, 255))
                    #cv2.imshow( "Thresholded", threshold )

            cv2.rectangle(frame_cloned, (left, top), (right, bottom),
                          (0, 255, 0), 2)
            frame_num += 1

            out.write(frame)

            #cv2.imshow( 'frame', frame )

            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                break

        else:
            print("Error happend. Is camera still working?")
            break

    out.release()
    capture_video.release()
    cv2.destroyAllWindows()
Example #47
def generator(samples, batch_size=32):
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]

            images = []
            angles = []
            for batch_sample in batch_samples:
                name_c = '../data_2/IMG/'+batch_sample[0].split('/')[-1]#center
                name_l = '../data_2/IMG/'+batch_sample[1].split('/')[-1]#left
                name_r = '../data_2/IMG/'+batch_sample[2].split('/')[-1]#right
                
                center_image = cv2.imread(name_c)
                left_image = cv2.imread(name_l)
                right_image = cv2.imread(name_r)
                 
                #Reorder BGR to RGB
                #cv2.imread returns BGR, but we infer the steering in RGB
                center_image = center_image[:, :, (2, 1, 0)]                                
                left_image = left_image[:, :, (2, 1, 0)]                                
                right_image = right_image[:, :, (2, 1, 0)]                                
                
                
                correction = 0.2 # this is a parameter to tune
                center_angle = float(batch_sample[3])
                left_angle = center_angle + correction
                right_angle = center_angle - correction

                aug_center_image  = []
                aug_center_angle = []
                aug_right_image  = []
                aug_right_angle = []
                aug_left_image  = []
                aug_left_angle = []

                
                aug_center_image, aug_center_angle = data_aug(center_image, center_angle)
                aug_right_image, aug_right_angle = data_aug(right_image, right_angle)
                aug_left_image, aug_left_angle = data_aug(left_image, left_angle)
                #aug_center_image = cv2.flip(center_image,1)
                #aug_center_angle = center_angle*-1.0
                
                #images.append(center_image)
                #angles.append(center_angle)
                
                #images.append(aug_center_image)
                #angles.append(aug_center_angle)

                
                # add images and angles to data set
                images.append(left_image)
                images.append(cv2.flip(left_image,1))
                
                images.append( center_image)
                images.append(cv2.flip(center_image,1))
                
                images.append(right_image)
                images.append(cv2.flip(right_image,1))
                
                
                angles.append(left_angle)
                angles.append(left_angle*-1.0)
                
                angles.append( center_angle)
                angles.append( center_angle*-1.0)
                
                angles.append(right_angle)
                angles.append(right_angle*-1.0)
                
                #images.extend(aug_left_image, aug_center_image, aug_right_image)
                #angles.extend(aug_left_angle, aug_center_angle, aug_right_angle)


            # convert the collected lists to numpy arrays for this batch
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
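# A sketch of how a generator like this is typically consumed. The CSV path,
# batch size, and train/validation split below are assumptions for
# illustration; data_aug and the Keras model are defined elsewhere in the
# original script.
import csv
from sklearn.model_selection import train_test_split

samples = []
with open('../data_2/driving_log.csv') as f:   # assumed log file matching the IMG paths above
    for row in csv.reader(f):                  # (assumes the log has no header row)
        samples.append(row)

train_samples, validation_samples = train_test_split(samples, test_size=0.2)
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
# Note: each yielded batch contains 6 * batch_size images
# (left/center/right plus their horizontal flips).
# model.fit_generator(train_generator,
#                     steps_per_epoch=len(train_samples) // 32,
#                     validation_data=validation_generator,
#                     validation_steps=len(validation_samples) // 32,
#                     epochs=3)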
Example #48
def recognize():
    # Voice Authentication
    FORMAT = pyaudio.paInt16
    CHANNELS = 2
    RATE = 44100
    CHUNK = 1024
    RECORD_SECONDS = 4
    FILENAME = "./test.wav"

    audio = pyaudio.PyAudio()
   
    # start Recording
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)

    time.sleep(2.0)
    print("recording...")
    frames = []

    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
    print("finished recording")


    # stop Recording
    stream.stop_stream()
    stream.close()
    audio.terminate()

    # saving wav file 
    waveFile = wave.open(FILENAME, 'wb')
    waveFile.setnchannels(CHANNELS)
    waveFile.setsampwidth(audio.get_sample_size(FORMAT))
    waveFile.setframerate(RATE)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()

    modelpath = "./gmm_models/"

    gmm_files = [os.path.join(modelpath,fname) for fname in 
                os.listdir(modelpath) if fname.endswith('.gmm')]

    models    = [pickle.load(open(fname,'rb')) for fname in gmm_files]

    speakers   = [fname.split("/")[-1].split(".gmm")[0] for fname 
                in gmm_files]
  
    if len(models) == 0:
        print("No Users in the Database!")
        return
        
    #read test file
    sr,audio = read(FILENAME)

    # extract mfcc features
    vector = extract_features(audio,sr)
    log_likelihood = np.zeros(len(models)) 

    #checking with each model one by one
    for i in range(len(models)):
        gmm = models[i]         
        scores = np.array(gmm.score(vector))
        log_likelihood[i] = scores.sum()

    pred = np.argmax(log_likelihood)
    identity = speakers[pred]
   
    # if the voice is not recognized, then terminate the process
    if identity == 'unknown':
        print("Not Recognized! Try again...")
        return
    
    print( "Recognized as - ", identity)

    # face recognition
    print("Keep Your face infront of the camera")
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)
    cap.set(4, 480)

    cascade = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_default.xml')
    
    #loading the database 
    database = pickle.load(open('face_database/embeddings.pickle', "rb"))
    
    time.sleep(1.0)
    
    start_time = time.time()
    
    while True:
        curr_time = time.time()
            
        _, frame = cap.read()
        frame = cv2.flip(frame, 1, 0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        face = cascade.detectMultiScale(gray, 1.3, 5)
         
        name = 'unknown'
        
        
        if len(face) == 1:

            for (x, y, w, h) in face:
                roi = frame[y-10:y+h+10, x-10:x+w+10]
            
                fh, fw = roi.shape[:2]
                min_dist = 100
                
                #make sure the face is of the required height and width
                if fh < 20 or fw < 20:
                    continue

                
                #resizing image as required by the model
                img = cv2.resize(roi, (96, 96))

                #128 d encodings from pre-trained model
                encoding = img_to_encoding(img)
                
                # loop over all the recorded encodings in database 
                for knownName in database:
                    # find the similarity between the input encodings and recorded encodings in database using L2 norm
                    dist = np.linalg.norm(np.subtract(database[knownName], encoding) )
                    # check if minimum distance or not
                    if dist < min_dist:
                        min_dist = dist
                        name = knownName

            # if the minimum distance is below the threshold and the face matches the voice identity, unlock the door
            if min_dist <= 0.4 and name == identity:
                print ("Door Unlocked! Welcome " + str(name))
                break

        #open the cam for 3 seconds
        if curr_time - start_time >= 3:
            break    

        cv2.waitKey(1)
        cv2.imshow('frame', frame)
        
    cap.release()
    cv2.destroyAllWindows()
   
    if len(face) == 0:
        print('There was no face found in the frame. Try again...')
        
    elif len(face) > 1:
        print("More than one faces found. Try again...")
        
    elif min_dist > 0.4 or name != identity:
        print("Not Recognized! Try again...")
Example #49
def random_horizontal_flip(img, gt, u=0.5):
    if np.random.random() < u:
        img = cv2.flip(img, 1)
        gt = cv2.flip(gt, 1)

    return img, gt
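# A minimal usage sketch for the helper above; the file names are hypothetical
# placeholders, and the only requirement is that img and gt are numpy arrays
# with matching spatial dimensions.
import cv2
import numpy as np

img = cv2.imread('sample.png')                 # H x W x 3 input image
gt = cv2.imread('sample_mask.png', 0)          # H x W ground-truth mask
img_aug, gt_aug = random_horizontal_flip(img, gt, u=0.5)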
Example #50
    _, frame = cap.read()

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of blue color in HSV
    lower_blue = np.array([75, 75, 50])
    upper_blue = np.array([130, 200, 255])

    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_blue, upper_blue)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)

    flip = cv2.flip(frame, 1)  # any positive flipCode mirrors the frame horizontally

    blur = cv2.GaussianBlur(res, (5, 5), 0)

    R1.draw()
    R1.move()

    R2.draw()
    R2.move()

    R3.draw()
    R3.move()

    cv2.imshow('frame', flip)
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',blur)
Example #51
        target_v = 20 if end_dist > 30 else 0
        next_a = 1 * (target_v - car.v)

        # Stanley Lateral Control
        state = {
            "x": car.x,
            "y": car.y,
            "yaw": car.yaw,
            "delta": car.delta,
            "v": car.v,
            "l": car.l,
            "dt": car.dt
        }
        next_delta, target = controller.feedback(state)
        car.control(next_a, next_delta)

        # Update State & Render
        car.update()
        img = img_path.copy()
        cv2.circle(img, (int(target[0]), int(target[1])), 3, (1, 0.3, 0.7),
                   2)  # target points
        img = car.render(img)
        img = cv2.flip(img, 0)
        cv2.imshow("LQR Control Test", img)
        k = cv2.waitKey(1)
        if k == ord('r'):
            car.init_state(start)
        if k == 27:
            print()
            break
Example #52
def random_vertical_flip(img, gt, u=0.5):
    if np.random.random() < u:
        img = cv2.flip(img, 0)
        gt = cv2.flip(gt, 0)

    return img, gt
Example #53
    def drumbackend(self):

        pygame.mixer.music.pause()

        lower_red = numpy.array([-10, 100, 100])
        upper_red = numpy.array([10, 255, 255])
        lower_blue = numpy.array([80, 100, 100])
        upper_blue = numpy.array([100, 255, 255])
        frequencybeep = 2500  # Set Frequency To 2500 Hertz
        durationbeep = 50  # Set Duration To 50 ms
        kernelOpen = numpy.ones((10, 10))
        kernelClose = numpy.ones((20, 20))

        cap = cv2.VideoCapture(0)
        cap.set(3, 1920)
        cap.set(4, 1080)

        retval, frame = cap.read()

        runningcam = True

        while runningcam:

            self.screen.fill(0)

            retval, frame = cap.read()

            frame = cv2.flip(frame, 1)  # mirror the newly read frame

            frame = numpy.rot90(frame)

            frame = numpy.ascontiguousarray(frame, dtype=numpy.uint8)

            cv2.rectangle(frame, (450, 200), (550, 300), (0, 255, 0), 2)
            cv2.rectangle(frame, (450, 600), (550, 700), (0, 255, 0), 2)
            cv2.rectangle(frame, (450, 1000), (550, 1100), (0, 255, 0), 2)
            cv2.rectangle(frame, (250, 400), (350, 500), (0, 255, 0), 2)
            cv2.rectangle(frame, (250, 800), (350, 900), (0, 255, 0), 2)

            frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            mask1 = cv2.inRange(frameHSV, lower_red, upper_red)
            mask2 = cv2.inRange(frameHSV, lower_blue, upper_blue)

            #res = cv2.bitwise_and(frame,frame, mask= mask1)

            maskOpen1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, kernelOpen)
            maskClose1 = cv2.morphologyEx(maskOpen1, cv2.MORPH_CLOSE,
                                          kernelClose)

            maskOpen2 = cv2.morphologyEx(mask2, cv2.MORPH_OPEN, kernelOpen)
            maskClose2 = cv2.morphologyEx(maskOpen2, cv2.MORPH_CLOSE,
                                          kernelClose)

            maskClose = maskClose1 + maskClose2

            maskFinal = maskClose

            img, contours, h = cv2.findContours(maskFinal.copy(),
                                                cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_NONE)

            for contour in contours:

                area = cv2.contourArea(contour)

                if area > 1000:

                    cv2.drawContours(frame, contour, -1, (0, 255, 255), 3)

            for i in range(len(contours)):
                from playsound import playsound

                x, y, w, h = cv2.boundingRect(contours[i])
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
                #print(x,y)

                if x > 450 and y > 200 and x + w < 550 and y + h < 300:

                    cv2.rectangle(frame, (450, 200), (550, 300), (0, 255, 255),
                                  3)

                    # from playsound import playsound
                    playsound(
                        "F:\\openCV-jay\\raunak files\\drum\\sound\\sound\\New folder\\New folder\\sound 1-[AudioTrimmer.com].wav"
                    )

                elif x > 450 and y > 600 and x + w < 550 and y + h < 700:

                    cv2.rectangle(frame, (450, 600), (550, 700), (0, 255, 255),
                                  3)

                    # from playsound import playsound
                    playsound(
                        "F:\\openCV-jay\\raunak files\\drum\\sound\\sound\\New folder\\New folder\\sound 2-[AudioTrimmer.com].wav"
                    )

                elif x > 450 and y > 1000 and x + w < 550 and y + h < 1100:

                    cv2.rectangle(frame, (450, 1000), (550, 1100),
                                  (0, 255, 255), 3)

                    playsound(
                        "F:\\openCV-jay\\raunak files\\drum\\sound\\sound\\New folder\\New folder\\sound 3-[AudioTrimmer.com].wav"
                    )

                elif x > 250 and y > 400 and x + w < 350 and y + h < 500:

                    cv2.rectangle(frame, (250, 400), (350, 500), (0, 255, 255),
                                  3)

                    #from playsound import playsound
                    playsound(
                        "F:\\openCV-jay\\raunak files\\drum\\sound\\sound\\New folder\\New folder\\sound 4-[AudioTrimmer.com].wav"
                    )

                elif x > 250 and y > 800 and x + w < 350 and y + h < 900:

                    cv2.rectangle(frame, (250, 800), (350, 900), (0, 255, 255),
                                  3)

                    #from playsound import playsound
                    playsound(
                        "F:\\openCV-jay\\raunak files\\drum\\sound\\sound\\New folder\\New folder\\sound 5-[AudioTrimmer.com].wav"
                    )

            framepygame = frame

            framepygame = cv2.cvtColor(framepygame, cv2.COLOR_BGR2RGB)

            framepygame = pygame.surfarray.make_surface(framepygame)

            framepygame = pygame.transform.scale(framepygame, (1920, 1080))

            self.screen.blit(framepygame, (0, 0))

            pygame.display.update()

            for event in pygame.event.get():

                if event.type == pygame.QUIT:
                    runningcam = False
                    cap.release()
                    pygame.quit()
                    sys.exit()

                elif event.type == pygame.KEYDOWN:

                    if event.key == pygame.K_ESCAPE:

                        pygame.mixer.music.unpause()
                        runningcam = False
                        self.draw()

                    if event.key == pygame.K_q:

                        runningcam = False
                        cap.release()
                        pygame.quit()
                        sys.exit()
Example #54
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
cap.set(3,640)  # set width
cap.set(4,480)  # set height

while(True):
    ret, frame = cap.read()
    frame = cv2.flip(frame, -1) # flip both horizontally and vertically
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    cv2.imshow('frame',frame)
    cv2.imshow('gray',gray)

    k = cv2.waitKey(30) &  0xff
    if k == 27: #press 'ESC' to quit
        break
cap.release()
cv2.destroyAllWindows()
    def do_flip(self, img):
        if np.random.rand() > 0.5:
            img = cv2.flip(img, 1)
        return img
def concatenate_crop_ROI(nb_pixel, delta, fov_oct, Folders, offset, setup):
    #concatenate FOVs, then crop ROI

    pixel_size = 0.365623 * 10**-6
    size_cropping = fov_oct + 2 * delta
    n_pixels_map = round(size_cropping / pixel_size)
    fov_nh_x, fov_nh_y = compute_fov_nh(pixel_size)
    fov_nh_x = nb_pixel['x'] * pixel_size  #nh = nighthawk, i.e. high mag
    fov_nh_y = nb_pixel['y'] * pixel_size

    for filename in tqdm(os.listdir(Folders['lowMag'])):
        if filename.startswith('._'):
            continue
        if 'fluorescent' in filename:

            print(filename)
            if setup == 1:
                low_mag_id_1 = int(filename[1:3])  #along y
                low_mag_id_2 = int(filename[4:6])  #along x
            elif setup == 2:
                low_mag_id_1 = int(filename[1:5])  #along y
                low_mag_id_2 = int(filename[6:10])  #along x

            full_image_name = str(low_mag_id_1) + '_' + str(low_mag_id_2)
            if os.path.exists(Folders['Virtual'] + full_image_name +
                              '_ch1.png'):
                print("skipping")
                continue

            #here, we apply the offset depending on which part of the scan we are using
            #it is a temporary fix until we can handle the overlap properly
            #it needs a bit more thinking
            if setup == 2:
                y_start = max(
                    max(low_mag_id_1 * fov_oct - delta, 0) + offset['y'], 0)
                x_start = max(low_mag_id_2 * fov_oct - delta, 0) + offset['x']
                partnb = 1
            elif low_mag_id_2 < 5:
                y_start = max(
                    max(low_mag_id_1 * fov_oct - delta, 0) + offset['y'], 0)
                x_start = max(low_mag_id_2 * fov_oct - delta, 0) + offset['x']
                partnb = 1
            elif low_mag_id_2 > 6:
                y_start = max(
                    max(low_mag_id_1 * fov_oct - delta, 0) + offset['y'],
                    0)  #-offset['y_part2']
                x_start = max(low_mag_id_2 * fov_oct - delta,
                              0) + offset['x']  #-offset['x_part2']
                partnb = 2
            else:
                continue  #currently ignoring some files

            y_end = y_start + fov_oct + 2 * delta
            x_end = x_start + fov_oct + 2 * delta

            #loop through the frames
            #identify which ones are the limit ones
            nb_im_x = 23
            print("x_s, x_e: ", x_start, x_end)
            print("y_s, y_e: ", y_start, y_end)
            for i in range(nb_im_x):
                if i * fov_nh_x <= x_start and (i + 1) * fov_nh_x >= x_start:
                    frame_x_start = i + 1
                if i * fov_nh_x <= x_end and (i + 1) * fov_nh_x >= x_end:
                    frame_x_end = i + 1
                if i * fov_nh_y <= y_start and (i + 1) * fov_nh_y >= y_start:
                    frame_y_start = i + 1
                if i * fov_nh_y <= y_end and (i + 1) * fov_nh_y >= y_end:
                    frame_y_end = i + 1

            #compatibility check
            if frame_x_start <= 10 and frame_x_end > 10 and setup == 1:  #it needs frames from both parts
                print(
                    "This frame is skipped: it needs FOVs from both parts!"
                )
                continue
            elif frame_x_start > 10 and partnb == 2 and setup == 1:  #we are just changing the name for proper loading, surely to be changed afterwards when dealing with overlap
                frame_x_start -= 10
                frame_x_end -= 10
                #assuming there is an overlap of one row between part 1 and 2
                frame_x_start += 1
                frame_x_end += 1

            #create blank picture
            print("frame y start", frame_y_start)
            print("frame y end: ", frame_y_end)
            print("frame x start: ", frame_x_start)
            print("frame x end: ", frame_x_end)

            full_image = np.zeros(
                (nb_pixel['y'] * (frame_y_end - frame_y_start + 1),
                 nb_pixel['x'] * (frame_x_end - frame_x_start + 1), 3))

            idx_x = np.arange(frame_x_end, frame_x_start - 1, -1)
            idx_y = np.arange(frame_y_end, frame_y_start - 1, -1)
            #iterate over the different frames making up the tiling
            for i in range(idx_x.shape[0]):
                for j in range(idx_y.shape[0]):
                    # filename=str(idx_y[j])+'_'+str(idx_x[i])+'.tif'
                    key = 'HighMag_part' + str(partnb)

                    for fnm in os.listdir(Folders[key]):
                        if fnm.startswith("._"):
                            continue
                        if fnm.endswith(".tif"):
                            nbrs = fnm[-11:-4]
                            nb1 = int(nbrs[0:3])
                            nb2 = int(nbrs[4:8])

                            if setup == 1:
                                pass  #the system is built for that kind of numbering
                            elif setup == 2:  #if the numbering is different
                                nb2 = 23 - nb2

                            if nb1 == idx_y[j] and nb2 == idx_x[i]:
                                filename = fnm
                                pass

                    # print("I should have the file corresponding to ", str(idx_y[j])+'_'+str(idx_x[i])+'.tif')
                    # print("I am loading ",Folders[key]+ filename)

                    if os.path.exists(Folders[key] + filename):
                        crt_img = tiff.imread(Folders[key] + filename)

                        #flip_rotate image here
                        #much easier, as it does not require you to load and save a large tiff image another time before

                        img_list = []
                        for channel in [0, 1, 2]:
                            img = crt_img[channel, :, :]
                            if setup == 1:
                                img = img.transpose()
                                img = cv2.flip(img, 0)
                                img = cv2.flip(img, 1)
                            elif setup == 2:
                                img = cv2.flip(img, 1)
                                img = img.transpose()
                            img_list.append(img)
                        crt_img = np.dstack(img_list)

                    else:
                        print("file does not exist")
                        continue
                    # print(crt_img.shape)
                    nb_pixel['y'] = crt_img.shape[0]
                    nb_pixel['x'] = crt_img.shape[1]
                    # print(nb_pixel)
                    full_image[j * nb_pixel['y']:(j + 1) * nb_pixel['y'],
                               i * nb_pixel['x']:(i + 1) *
                               nb_pixel['x'], :] = crt_img

                    #determine the shift for the crop region
                    if i == 0 and j == 0:
                        #this condition is to properly determine what area of the region you want to crop
                        #super important to have it correctly coded, as otherwise you risk extracting the wrong frames
                        #in this case we add 10-1 because we did the same thing with the frames above
                        #to be cleaned and improved
                        if partnb == 2:
                            x_start_crt = nb_pixel['x'] * (idx_x[i] - 1 + 10 -
                                                           1) * pixel_size
                        else:
                            x_start_crt = nb_pixel['x'] * (idx_x[i] -
                                                           1) * pixel_size
                        y_start_crt = nb_pixel['y'] * (idx_y[j] -
                                                       1) * pixel_size
                        x_ref = x_start_crt + nb_pixel['x'] * pixel_size
                        y_ref = y_start_crt + nb_pixel['y'] * pixel_size
                        x_idx = int(round((x_ref - x_end) / pixel_size))
                        y_idx = int(round((y_ref - y_end) / pixel_size))

            full_image = full_image[y_idx:y_idx + n_pixels_map,
                                    x_idx:x_idx + n_pixels_map, :]

            # tiff.imsave(Folders['Virtual']+full_image_name+'.tif',full_image)

            for ch in [0, 1, 2]:
                cv2.imwrite(
                    Folders['Virtual'] + full_image_name + '_ch' + str(ch) +
                    '.png', full_image[:, :, ch] / 65535 * 255)

        else:
            continue
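# A hedged invocation sketch for concatenate_crop_ROI; every value below is an
# assumption made only to illustrate the expected shape of the arguments
# (folder keys, pixel counts, offsets), not the original pipeline's settings.
Folders = {
    'lowMag': '/data/low_mag/',             # hypothetical directories
    'HighMag_part1': '/data/high_mag_1/',
    'HighMag_part2': '/data/high_mag_2/',
    'Virtual': '/data/virtual/',
}
nb_pixel = {'x': 2048, 'y': 2048}           # assumed high-mag frame size in pixels
offset = {'x': 0.0, 'y': 0.0}               # no additional shift
fov_oct = 1.0e-3                            # assumed low-mag FOV edge length in metres
delta = 50e-6                               # assumed margin added around each ROI
concatenate_crop_ROI(nb_pixel, delta, fov_oct, Folders, offset, setup=2)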
def main():
#    radius=1,neighbors=1,grid_x=8,grid_y=8,threshold=60
    recognizer = cv2.face.LBPHFaceRecognizer_create(radius=1,neighbors=1,grid_x=8,grid_y=8,threshold=60)
    recognizer.read('trainer/trainer.yml')
    cascadePath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascadePath)
    font = cv2.FONT_HERSHEY_SIMPLEX
    id = 0
    path="dataset\\"
    # Initialize and start realtime video capture
    cam = cv2.VideoCapture(1)
    cam.set(3, 640) # set video width
    cam.set(4, 480) # set video height
    
    # Define min window size to be recognized as a face
    minW = 0.1*cam.get(3)
    minH = 0.1*cam.get(4)
    names=[]
    name=faceNames(path)
    [names.append(x) for x in name if x not in names]
    while True:
    
        ret, img =cam.read()
        img = cv2.flip(img, 1)  # mirror horizontally (flipCode=1)
    
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    
        faces = faceCascade.detectMultiScale( 
            gray,
            scaleFactor = 1.2,
            minNeighbors = 5,
            minSize = (int(minW), int(minH)),
           )
    
        for(x,y,w,h) in faces:
    
            cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
    
            id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
            print(id,confidence)
            
            
            # Check if confidence is less than 100 ==> "0" is a perfect match
            #confidence=100-confidence
            if (confidence < 68):
                id = names[id]
                #confidence = "  {0}%".format(round(100 - confidence))
            else:
                id = "Unknown"
                confidence = "  {0}%".format(round(100 - confidence))
            
            cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
            cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)  
        
        cv2.imshow('camera',img) 
    
        k = cv2.waitKey(10) & 0xff # Press 'ESC' to exit the video
        if k == 27:
            break
    
    # Do a bit of cleanup
    print("\n [BILGI] Programdan Çıkılıyor")
    cam.release()
    cv2.destroyAllWindows()
Example #58
def game():

    global count_score

    #----------------open cv------------------#
    cap = cv2.VideoCapture(0)
    face_detector = cv2.CascadeClassifier(
        r'C:\software\haarcascade_frontalface_default.xml')
    cap.set(3, 1280)
    cap.set(4, 720)
    #-----------------------------------------#

    sg.theme('Black')

    # define the window layout
    layout = [[
        sg.Text('TIME : ',
                size=(10, 1),
                font=('Segoe UI', 20),
                text_color='White'),
        sg.Text('',
                size=(10, 1),
                font=('Helvetica', 22),
                text_color='White',
                key='timer'),
        sg.Button('Ball', size=(10, 1), font='Helvetica 14'),
        sg.Button('Exit', size=(10, 1), font='Helvetica 14')
    ],
              [
                  sg.Text('SCORE : ',
                          size=(10, 1),
                          font=('Segoe UI', 20),
                          text_color='White'),
                  sg.Text('',
                          size=(10, 1),
                          font=('Helvetica', 22),
                          text_color='White',
                          key='score1')
              ],
              [
                  sg.T('                       '),
                  sg.Graph(canvas_size=(1280, 720),
                           graph_bottom_left=(0, 0),
                           graph_top_right=(400, 400),
                           key="canvas")
              ]]

    # create the window and show it without the plot
    window = sg.Window('OneTouch',
                       layout,
                       auto_size_buttons=False,
                       resizable=False).Finalize()
    window.maximize()

    canvas = window['canvas']

    ball = ball_pysim(canvas)
    #-------score-------#
    count_score = 0

    #-----timer--------#
    timer_running = True
    seconds = 5
    start = time()
    current = time()

    while True:

        circle = canvas.DrawCircle((ball.x, ball.y),
                                   ball.r,
                                   fill_color=ball.color,
                                   line_color='black')

        button, values = window.read(timeout=60)

        if button == 'Exit':
            score()

        if button == sg.WIN_CLOSED:
            break

        if button == 'Ball':
            canvas.DeleteFigure(circle)
            ball = ball_pysim(canvas)
            count_score += 1
            window['score1'].update(count_score)

        if timer_running:
            current = time()
            timeleft = int(seconds - (current - start))
            window['timer'].update(timeleft)
            window.refresh()

            if timeleft == 0:
                sg.popup("TIME UP")
                score()
                window.Close()

        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        imgbytes = cv2.imencode('.png', frame)[1].tobytes()

        canvas.DrawImage(data=imgbytes, location=(0, 400))

    window.close()
Example #59
# Reference: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_video_display/py_video_display.html
# Most of the code is from the "Capture Video from Camera" demo; I changed the
# color-conversion call to a flip call.

import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operation on the frame: flip it vertically
    flipped = cv2.flip(frame, 0)

    # Display the resulting frame
    cv2.imshow('frame', flipped)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #60
import cv2
import numpy as np


def remove_background(frame):
    fgmask = bgModel.apply(frame, learningRate=learningRate)
    kernel = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    res = cv2.bitwise_and(frame, frame, mask=fgmask)
    return res
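# The names used in this excerpt (bgModel, learningRate, cap_region_x_begin,
# cap_region_y_end, isBgCaptured) are defined elsewhere in the original
# script; the values below are assumptions that let the excerpt run on its own.
bgModel = cv2.createBackgroundSubtractorMOG2(0, 50)   # history=0, varThreshold=50
learningRate = 0            # 0: do not update the background model while running
cap_region_x_begin = 0.5    # ROI starts at half the frame width
cap_region_y_end = 0.8      # ROI ends at 80% of the frame height
isBgCaptured = 1            # treat the background as already captured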


# Camera
camera = cv2.VideoCapture(0)
camera.set(10, 200)  # property 10 = CAP_PROP_BRIGHTNESS

while camera.isOpened():
    ret, frame = camera.read()
    frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
    frame = cv2.flip(frame, 1)  # flip the frame horizontally
    cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                  (frame.shape[1], int(cap_region_y_end * frame.shape[0])),
                  (255, 0, 0), 2)

    cv2.imshow('original', frame)

    # Run once background is captured
    if isBgCaptured == 1:
        img = remove_background(frame)
        img = img[0:int(cap_region_y_end * frame.shape[0]),
                  int(cap_region_x_begin *
                      frame.shape[1]):frame.shape[1]]  # clip the ROI
        cv2.imshow('mask', img)

        # convert the image into binary image