Example #1
def update_socket(ws):
    print('websocket connection request')
    state['draw_output'] = True
    while not ws.closed:
        new_data_condition.acquire()
        new_data_condition.wait()
        new_data_condition.release()
        result = {
            'targets': state['targets'],
            'fps': state['fps'],
            'connected': state['ack']
        }
        _, binframe = cv2.imencode('.jpg', state['output_images']['bin'])
        result['binaryImg'] = base64.b64encode(binframe).decode('ascii')
        _, binframe = cv2.imencode('.jpg', state['output_images']['result'])
        result['resultImg'] = base64.b64encode(binframe).decode('ascii')
        message = json.dumps(result)
        ws.send(message)
        received = json.loads(ws.receive())
        if 'thresholds' in received:
            config['target'] = received['thresholds']
            save_config(config)
        if 'camera' in received:
            config['camera'] = received['camera']
            save_config(config)

    print('websocket disconnected')
    state['draw_output'] = False
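
On the receiving side, each base64 field in this payload decodes back into an image with cv2.imdecode. A minimal sketch, assuming the JSON message and field names produced above:

import base64
import json

import cv2
import numpy as np

def decode_payload(message):
    """Decode one websocket message back into OpenCV images (sketch)."""
    result = json.loads(message)
    images = {}
    for key in ('binaryImg', 'resultImg'):
        raw = base64.b64decode(result[key])                 # base64 text -> JPEG bytes
        buf = np.frombuffer(raw, dtype=np.uint8)            # JPEG bytes -> 1-D uint8 array
        images[key] = cv2.imdecode(buf, cv2.IMREAD_COLOR)   # JPEG -> BGR image
    return images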
Example #2
 def get_frame(self, faced, saved=False, video=False, videoStop=False):
     while True:
         success, image = self.video.read()
         if not success:
             continue
         else:
             break
     if saved:
         cv2.imwrite("image.jpg",image)
     if video:
         self.videoWriter.write(image)
         cv2.circle(image,(20,20), 15, (0,0,255), -1)
     if faced:
         faceCascade = cv2.CascadeClassifier("faceDetect.xml")
         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
         faces = faceCascade.detectMultiScale(
             gray,
             scaleFactor=1.1,
             minNeighbors=5,
             flags=cv2.CASCADE_SCALE_IMAGE
         )
         for (x, y, w, h) in faces:
             cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
         ret, jpeg = cv2.imencode('.jpg', image)
         return jpeg.tobytes()
     ret, jpeg = cv2.imencode('.jpg', image)
     return jpeg.tobytes()
Example #3
def getOrient():
    global sock, capL, capR, frameL, frameR
    retL, img_encodeL = cv2.imencode(
        '.jpeg',
        frameL[50:100],
        encode_param
    )
    retR, img_encodeR = cv2.imencode(
        '.jpeg',
        frameR[50:100],
        encode_param
    )
    stringDataL = img_encodeL.tobytes()
    stringDataR = img_encodeR.tobytes()
    client_commandL = 'put'.ljust(16)
    if confirm(sock, client_commandL):
        print(client_commandL)
        # length headers are fixed-width ASCII, so encode them to bytes before sending
        sock.send(str(len(stringDataL)).ljust(16).encode())
        sock.send(stringDataL)
        sock.send(str(len(stringDataR)).ljust(16).encode())
        sock.send(stringDataR)
    else:
        print('server error!')
        return False
    orient = sock.recv(4096)
    return int(orient)
Example #4
    def set_image(self, image, channel="default", storage_method=None):
        """
        :param image: numpy array containing an RGB representation of an image
        :type image: numpy.ndarray
        :param channel: optional argument defining with which channel the image should be associated (default: "default")
        :type channel: str
        :param storage_method: optional storage method, one of "raw", "png" or "jpg" (default: the instance-wide method)
        :type storage_method: str
        :return: Integer indicating whether the processing of the image was successful
        :rtype: int

        Inserts an image into a channel.
        """
        ret = 0
        if storage_method is None:
            self._per_channel_storage_method[channel] = self._internal_storage_method
        elif storage_method in ["raw", "png", "jpg"]:
            self._per_channel_storage_method[channel] = storage_method
        else:
            raise Exception("Unknown storage method")
        if self._per_channel_storage_method[channel] == "raw":
            self._image_storage[self._channel[channel]] = image.copy()
            ret = 0
        if self._per_channel_storage_method[channel] == "png":
            quality = 9 - int(self._compression_level/10)
            (ret, buf) = cv2.imencode(".png", image, (cv2.IMWRITE_PNG_COMPRESSION, quality))
            self._image_storage[self._channel[channel]] = buf
        if self._per_channel_storage_method[channel] == "jpg":
            (ret, buf) = cv2.imencode(".jpg", image, (cv2.IMWRITE_JPEG_QUALITY, self._compression_level))
            self._image_storage[self._channel[channel]] = buf
        return ret
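
Both encode branches above pass encoder flags to cv2.imencode as a (flag, value) sequence in the third argument. A standalone sketch of the same pattern, using an assumed random test image:

import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # assumed test image

# PNG: compression level 0 (fast, large) .. 9 (slow, small)
ok_png, png_buf = cv2.imencode(".png", img, (cv2.IMWRITE_PNG_COMPRESSION, 9))
# JPEG: quality 0 (small, lossy) .. 100 (large, best)
ok_jpg, jpg_buf = cv2.imencode(".jpg", img, (cv2.IMWRITE_JPEG_QUALITY, 80))
assert ok_png and ok_jpg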
Example #5
def image(img, to_bgr=True, encoding='jpg', **kwargs):
    """ image(img, [win, title, labels, width])
    to_bgr: converts to bgr, if encoded as rgb (default True).
    encoding: 'jpg' (default) or 'png'
    """
    if isinstance(img, list):
        return images(img, kwargs)
    # TODO: if img is a 3d tensor, then unstack it into a list of images
    assert img.ndim == 2 or img.ndim == 3
    win = kwargs.get('win') or uid()

    img = to_rgb(normalize(img, kwargs))
    if to_bgr:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    #pngbytes = png.encode(img.tostring(), img.shape[1], img.shape[0])
    if encoding == 'jpg':
        jpgbytes = cv2.imencode('.jpg', img)[1]
        imgdata = 'data:image/jpg;base64,' + base64.b64encode(jpgbytes).decode('ascii')
    elif encoding == 'png':
        pngbytes = cv2.imencode('.png', img)[1]
        imgdata = 'data:image/png;base64,' + base64.b64encode(pngbytes).decode('ascii')
    else:
        raise ValueError('unknown encoding')

    send(command='image', id=win, src=imgdata,
        labels=kwargs.get('labels'),
        width=kwargs.get('width'),
        title=kwargs.get('title'))
    return win
Example #6
def deal_img(imgFile):
    try:
        #img = cv2.imread(imgFile.encode('gbk'),1)
        img = cv2.imdecode(np.fromfile(imgFile, dtype=np.uint8), -1)
        x, y, z = img.shape  # height, width, channels
        cant_deal_file_path = imgFile.replace("源文件", "未处理文件")
        # if x < 600 and y < 1000:  # skip files that are too small
        #     shutil.copyfile(imgFile, cant_deal_file_path)
        #     print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "," + imgFile + " file too small, skipping")
        # if y > 3500:  # skip files that are too large
        #     shutil.copyfile(imgFile, cant_deal_file_path)
        #     print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "," + imgFile + " file too large, skipping")
        if os.path.exists(imgFile):  # this is the key piece of code
            print(imgFile)
            for i in range(int(x/13)):
                for j in range(int(y/3.15)+10):
                    varP = img[i, j]
                    img[i, j] = img[int(i/12.5), j]
                    # if sum(varP) > 556 and sum(varP) < 640:  # greater than 250, smaller than 765 (sum smaller than pure white)
                    #     #img[i, j] = img[i - 5, j - 10]
                    #
                    #     print(img[i, j])
            dst = imgFile.replace("源文件", "目标文件")
            #shutil.copyfile(imgFile, dst)
            #cv2.imwrite(dst, img)
            #cv2.imread(imgFile.decode('u8').encode('gbk'), -1)
            cv2.imencode('.jpg', img)[1].tofile(dst)
            #shutil.copyfile(imgFile, dst)
            #shutil.copyfile(imgOldFile, imgOldFile)

    except Exception as e:
        shutil.copyfile(imgFile, cant_deal_file_path)
        print("Exception:", e)
Example #7
    def measurePictures(self):
        prvs = cv2.cvtColor(self.f1,cv2.COLOR_BGR2GRAY)
        ret, self.frame1 = cv2.imencode('.png', self.f1)
        ret = cv2.imwrite('frame1.png', self.f1)

        next = cv2.cvtColor(self.f2,cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        bgr = self.flow2hsv(flow)
        (res, mx, my, ms) = self.flow2measure(flow)
        self.showmeasure(bgr, res, mx, my, ms)
        ret, self.flow12 = cv2.imencode('.png', bgr)
        ret = cv2.imwrite('flow12.png', bgr)
        ret, self.frame2 = cv2.imencode('.png', self.f2)
        ret = cv2.imwrite('frame2.png', self.f2)

        prvs = next

        next = cv2.cvtColor(self.f3,cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        bgr = self.flow2hsv(flow)
        (res, mx, my, ms) = self.flow2measure(flow)
        self.showmeasure(bgr, res, mx, my, ms)
        ret, self.flow23 = cv2.imencode('.png', bgr)
        ret = cv2.imwrite('flow23.png', bgr)
        ret, self.frame3 = cv2.imencode('.png', self.f3)
        ret = cv2.imwrite('frame3.png', self.f3)
Example #8
def gen(value):
    """Video streaming generator function."""
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.settimeout(2)
    dataSend = True
    # connect to remote host
    try:
        soc.connect((host, port))
        print('found target')
        soc.send(b'#Found you\n')
        dataSend = True
    except:
        print('unable to connect')
        dataSend = False
    while 1:
        if dataSend == False:
            try:
                soc.connect((host, port))
                print('found target')
                soc.send(b'#Recovered Transmission\n')
                dataSend = True
            except:
                print('unable to connect')
                dataSend = False
        if value == 0:
            msg, CVframe, maskFrame = vision(cap)
            try:
                soc.send(msg)
                print(msg)
            except:
                print('data send failed')
                dataSend = False
            frame = cv2.imencode('.jpg', CVframe, [cv2.IMWRITE_JPEG_QUALITY, 10])[1].tobytes()
        elif value == 1:
            ret, CVframe = cap_two.read()
            # if(ret == False):
            #     frame = polarBearError
            # else:
            frame = cv2.imencode('.jpg', CVframe, [cv2.IMWRITE_JPEG_QUALITY, 10])[1].tobytes()
        elif value == 2:
            ret, CVframe = cap_three.read()
            # if(ret == False):
            #     frame = pandaBearError
            # else:
            frame = cv2.imencode('.jpg', CVframe, [cv2.IMWRITE_JPEG_QUALITY, 10])[1].tobytes()
        else:
            msg, CVframe, maskFrame = vision(cap)
            try:
                soc.send(msg)
            except:
                print('data send failed')
                dataSend = False
            frame = cv2.imencode('.jpg', maskFrame, [cv2.IMWRITE_JPEG_QUALITY, 10])[1].tobytes()

        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #9
def gen(value):
    """Video streaming generator function."""
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.settimeout(2)
    dataSend = True
    # connect to remote host
    try:
        soc.connect((host, port))
        print "found target"
        soc.send("#Found you\n")

    except:
        print("unable to connect")
        dataSend = False
    while 1:
        if dataSend == False:
            try:
                soc.connect((host, port))
                print ("found target")
                soc.send("#Recovered Transmission\n")
                dataSend = True
            except:
                print ("unable to connect")
        if value == 0:
            msg, CVframe, maskFrame = vision(cap)
            try:
                soc.send(msg)
            except:
                print "data send failed"
                dataSend = False
            frame = cv2.imencode(".jpg", CVframe, [IMWRITE_JPEG_QUALITY, 10])[1].tostring()
        elif value == 1:
            ret, CVframe = cap_two.read()
            # 			if(ret == False):
            # 				frame = polarBearError
            # 			else:
            frame = cv2.imencode(".jpg", CVframe, [IMWRITE_JPEG_QUALITY, 10])[1].tostring()
        elif value == 2:
            ret, CVframe = cap_three.read()
            # 			if(ret == False):
            # 				frame = pandaBearError
            # 			else:
            frame = cv2.imencode(".jpg", CVframe, [IMWRITE_JPEG_QUALITY, 10])[1].tostring()
        else:
            msg, CVframe, maskFrame = vision(cap)
            # 			try:
            # 				soc.send(msg)
            # 			except:
            # 	   			print 'Lost Connection with Roborio'
            # 				soc.connect((host, port))
            # 				print('found target')
            # 				soc.send("#Found you\n")
            frame = cv2.imencode(".jpg", maskFrame, [IMWRITE_JPEG_QUALITY, 10])[1].tostring()

        yield (b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + frame + b"\r\n")
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
Example #10
def svhn(pil_image, province):
    img = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    if province == 'nacao':
        img = nacao(img)
    imgstr = b''
    if pil_image.format == 'JPEG':
        imgstr = cv2.imencode('.jpg', img)[1].tobytes()
    elif pil_image.format == 'PNG':
        imgstr = cv2.imencode('.png', img)[1].tobytes()
    return base64.b64encode(imgstr)
Example #11
    def encodeImage(self, img):
        """ Encodes the np-array given by img into jpeg-data that can be sent over http """
        if img is None or img.size == 0:
            _, data = cv2.imencode('.jpg', np.zeros((1), np.uint8))
        else:
            with self.dataLock:
                _, data = cv2.imencode('.jpg', img)

        jpeg_data = data.tobytes()

        return jpeg_data
Example #12
    def callback_right_with_3D(self, ros_data):
        '''Callback function of subscribed topic.
        Here images get converted and features detected'''
        self.right_header = ros_data.header.stamp.secs
        
        #### direct conversion to CV2 ####
        np_arr = np.frombuffer(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, 1)

        # convert np image to grayscale
        gray_image = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)

        if self.right_header == self.left_header and self.group_4_recieved:
            s_t = time.perf_counter()
            print(self.right_header, self.left_header)
            self.group_4_recieved = False

            left_gray = cv2.cvtColor(self.left_image, cv2.COLOR_BGR2GRAY)

            group_4_out = self.group_4
            
#            PointCloudPose()
#            group_4_out.header = self.group_4.header
#std_msgs/Int16 pose_id
#std_msgs/Int16 pose_id_max
#sensor_msgs/CompressedImage image_left
#sensor_msgs/CompressedImage image_right
#geometry_msgs/Pose spin_center_pose
#sensor_msgs/PointCloud2 carmine_pointcloud 
#geometry_msgs/Pose carmine_pose
#sensor_msgs/PointCloud2 bumblebee_pointcloud
#geometry_msgs/Pose bumblebee_pose_left
#geometry_msgs/Pose bumblebee_pose_right            
            
            

            msg = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = cv2.imencode('.jpg', left_gray)[1].tobytes()
            # Publish new image
            group_4_out.image_left = msg
            
            msg = CompressedImage()
            msg.header.stamp = rospy.Time.now()
            msg.format = "jpeg"
            msg.data = cv2.imencode('.jpg', gray_image)[1].tobytes()
            # Publish new image
            group_4_out.image_right = msg
            
            #group_4_out.bumblebee_pointcloud = pcloud            
            self.pub_group.publish(group_4_out)

            print "save time", time.clock() - s_t       
Example #13
def convert(image, input_type='cv2', output_type='ros'):
    assert input_type in ['cv2', 'ros', 'mongo'] and output_type in ['cv2', 'ros', 'mongo']
    if input_type == 'mongo':
        image = cv2.imdecode(np.array(image, dtype=np.uint8), 1)
    if input_type == 'ros':
        image = cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)
    if output_type == 'ros':
        return cv2.imencode('.jpg', image)[1].tobytes()
    if output_type == 'mongo':
        return cv2.imencode('.jpg', image)[1].tolist()
    if output_type == 'cv2':
        return image
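
A quick round trip through the three representations; 'input.jpg' is a placeholder path:

import cv2

img = cv2.imread('input.jpg')              # assumed input file

as_ros = convert(img, 'cv2', 'ros')        # JPEG bytes, e.g. for a ROS CompressedImage
as_mongo = convert(img, 'cv2', 'mongo')    # list of ints, storable in a MongoDB document
back = convert(as_mongo, 'mongo', 'cv2')   # BGR array again (lossy, since JPEG is used)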
Example #14
    def set_image(self, data_bytes):
        data = np.frombuffer(data_bytes, dtype="uint8")
        cv2im = cv2.imdecode(data, 1)
        (cv2green, cv2red) = split(cv2im)
        _, green = cv2.imencode(".jpeg", cv2green)
        _, red = cv2.imencode(".jpeg", cv2red)
        _, st_img = cv2.imencode(".jpeg", stereo(cv2green, cv2red))

        self.original_holder.set_image(data)
        self.green_holder.set_image(green)
        self.red_holder.set_image(red)
        self.stereo_holder.set_image(st_img)
Example #15
File: views.py Project: gat3way/ocv
def get_image(request):
    url = request.GET.get('url', None)
    if url:
        # Try to get existing source first
        try:
            src = Source.objects.get(url=url)
        except Exception:
            src = None

        # No existing source or not active? Try to open and get one frame
        if not src or not src.active:
            try:
                if url.isdigit():
                    video_capture = cv2.VideoCapture(int(url))
                else:
                    video_capture = cv2.VideoCapture(url)
                ret, frame = video_capture.read()
                if frame.shape[1] != 640 or frame.shape[0] != 480:
                    frame = cv2.resize(frame, (640, 480))
                r, buf = cv2.imencode(".jpg", frame)
                JpegData = buf.tobytes()
                video_capture.release()
            except Exception:
                video_capture.release()
                return HttpResponse("")
            return HttpResponse(JpegData, content_type="image/jpeg")

        # Source exists and is active? Grab frame from mmapped file
        else:
            try:
                sink = src.raw_sink
                filename = os.path.join(settings.PROJECT_ROOT,"run","sinks",sink.short_id)
                if not os.path.isfile(filename):
                    return HttpResponse("")

                size = os.path.getsize(filename)
                f = open(filename, 'rb')
                m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                capture = m
                capture.seek(0, os.SEEK_SET)
                dt = np.dtype((np.uint8, 3))
                img = np.frombuffer(capture.read(size), dt)
                img = np.reshape(img, (480, 640, 3))
                r, buf = cv2.imencode(".jpg", img)
                JpegData = buf.tobytes()
                f.close()
            except Exception:
                return HttpResponse("")

            return HttpResponse(JpegData, content_type="image/jpeg")

    else:
        return HttpResponse("")
Example #16
def type4_cut(pil_image, province):
    # cut the image and encode to base64 strings
    img = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    mapper = {'chongqing':chq, 'gansu':gs, 'jiangxi':jx, 'ningxia':nx, 'tianjin':tj,
              'shan3xi':small, 'sichuan':small, 'xinjiang':small, 'beijing':beijing, 'hubei':hubei}
    patches = mapper[province](img)
    for i in range(len(patches)):
        imgstr = b''
        if pil_image.format == 'JPEG':
            imgstr = cv2.imencode('.jpg', patches[i])[1].tobytes()
        elif pil_image.format == 'PNG':
            imgstr = cv2.imencode('.png', patches[i])[1].tobytes()
        patches[i] = base64.b64encode(imgstr)
    return patches
Example #17
    def read(self, extension=None, quality=None):
        if quality is None:
            quality = self.context.config.QUALITY

        options = None
        extension = extension or self.extension
        try:
            if FORMATS[extension] == 'JPEG':
                options = [cv2.IMWRITE_JPEG_QUALITY, quality]
        except KeyError:
            # default is JPEG so
            options = [cv2.IMWRITE_JPEG_QUALITY, quality]

        try:
            if FORMATS[extension] == 'WEBP':
                options = [cv2.IMWRITE_WEBP_QUALITY, quality]
        except KeyError:
            options = [cv2.IMWRITE_JPEG_QUALITY, quality]

        success, buf = cv2.imencode(extension, self.image, options or [])
        data = buf.tobytes()

        if FORMATS[extension] == 'JPEG' and self.context.config.PRESERVE_EXIF_INFO:
            if hasattr(self, 'exif') and self.exif is not None:
                img = JpegFile.fromString(data)
                img._segments.insert(0, ExifSegment(self.exif_marker, None, self.exif, 'rw'))
                data = img.writeString()

        return data
Example #18
def pack_img(header, img, quality=95, img_fmt='.jpg'):
    """Pack an image into ``MXImageRecord``.

    Parameters
    ----------
    header : IRHeader
        Header of the image record.
        ``header.label`` can be a number or an array.
    img : numpy.ndarray
        image to pack
    quality : int
        Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
    img_fmt : str
        Encoding of the image (.jpg for JPEG, .png for PNG).

    Returns
    -------
    s : str
        The packed string.
    """
    assert cv2 is not None
    jpg_formats = ['.JPG', '.JPEG']
    png_formats = ['.PNG']
    encode_params = None
    if img_fmt.upper() in jpg_formats:
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
    elif img_fmt.upper() in png_formats:
        encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]

    ret, buf = cv2.imencode(img_fmt, img, encode_params)
    assert ret, 'failed to encode image'
    return pack(header, buf.tobytes())
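
A hedged usage sketch: in MXNet, IRHeader is the namedtuple (flag, label, id, id2) exported by mxnet.recordio, and 'sample.jpg' is a placeholder path:

import cv2
from mxnet.recordio import IRHeader

img = cv2.imread('sample.jpg')                     # assumed input file
header = IRHeader(flag=0, label=1.0, id=0, id2=0)  # single numeric label
record = pack_img(header, img, quality=95, img_fmt='.jpg')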
Example #19
	def do_GET(self):
		print(self.path)
		if self.path.endswith('.mjpg'):
			self.send_response(200)
			self.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')
			self.end_headers()
			while True:
				try:
					rc,img = capture.read()
					if not rc:
						continue
					imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
					r, buf = cv2.imencode(".jpg",imgRGB)
					self.wfile.write("--jpgboundary\r\n")
					self.send_header('Content-type','image/jpeg')
					self.send_header('Content-length',str(len(buf)))
					self.end_headers()
					self.wfile.write(bytearray(buf))
					self.wfile.write(b'\r\n')
					time.sleep(0.5)
				except KeyboardInterrupt:
					break
			return
		if self.path.endswith('.html') or self.path=="/":
			self.send_response(200)
			self.send_header('Content-type','text/html')
			self.end_headers()
			self.wfile.write(b'<html><head></head><body>')
			self.wfile.write(b'<img src="http://127.0.0.1:9090/cam.mjpg"/>')
			self.wfile.write(b'</body></html>')
			return
Example #20
 def get_frame(self):
     success, image = self.video.read()
     # We are using Motion JPEG, but OpenCV defaults to capture raw images,
     # so we must encode it into JPEG in order to correctly display the
     # video stream.
     ret, jpeg = cv2.imencode('.jpg', image)
     return jpeg.tobytes()
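
Frames returned this way are usually served as a multipart MJPEG stream. A minimal Flask sketch of that wiring, assuming a VideoCamera class that exposes the get_frame method above:

from flask import Flask, Response

app = Flask(__name__)

def mjpeg_gen(camera):
    """Yield each JPEG frame wrapped in a multipart boundary."""
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(mjpeg_gen(VideoCamera()),  # VideoCamera is assumed here
                    mimetype='multipart/x-mixed-replace; boundary=frame')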
Example #21
 def test_imencode(self):
     a = np.zeros((480, 640), dtype=np.uint8)
     flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
     self.assertEqual(flag, True)
     self.assertEqual(ajpg.dtype, np.uint8)
     self.assertGreater(ajpg.shape[0], 1)
     self.assertEqual(ajpg.shape[1], 1)
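
The assertions reflect that cv2.imencode returns an (N, 1) uint8 column vector. A short round-trip check under the same setup:

import cv2
import numpy as np

a = np.zeros((480, 640), dtype=np.uint8)
ok, buf = cv2.imencode('.jpg', a, [cv2.IMWRITE_JPEG_QUALITY, 90])
decoded = cv2.imdecode(buf, cv2.IMREAD_GRAYSCALE)
assert ok and decoded.shape == a.shape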
Example #22
  def process(self, stream):
    data = cv2.imdecode(np.frombuffer(stream.getvalue(), dtype=np.uint8), 1)

    (orig_h, orig_w) = data.shape[:2]
    new_h = orig_h // 2
    new_w = orig_w // 2

    new_dims = (new_w, new_h)

    # draw a test rectangle
    cv2.rectangle(data, (10, 400), (300, 300), (0, 255, 0), 1)
    # draw a test square
    data[10:20, 10:20] = (255, 0, 0)
    (B, G, R) = cv2.split(data)

    zeros = np.zeros(data.shape[:2], dtype = "uint8")
    Brz = self.prepare_channel(B, zeros, zeros, new_dims)
    Grz = self.prepare_channel(zeros, G, zeros, new_dims)
    Rrz = self.prepare_channel(zeros, zeros, R, new_dims)
    Orz = self.prepare_channel(B, G, R, new_dims)

    row1 = np.concatenate((Orz, Rrz), axis = 1)
    row2 = np.concatenate((Brz, Grz), axis = 1)

    data = np.concatenate((row1, row2), axis = 0)

    return cv2.imencode('.jpg', data)[1].tobytes()
Example #23
 def do_GET(self):
     global finalImg
     print(self.path)
     if self.path.endswith('.mjpg'):
         self.send_response(200)
         self.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')
         self.end_headers()
         while True:
             try:
                 img = classify_image(finalImg)
                 r, buf = cv2.imencode(".jpg",img)
                 self.wfile.write("--jpgboundary\r\n")
                 self.send_header('Content-type','image/jpeg')
                 self.send_header('Content-length',str(len(buf)))
                 self.end_headers()
                 self.wfile.write(bytearray(buf))
                 self.wfile.write(b'\r\n')
                 time.sleep(0.05)
             except KeyboardInterrupt:
                 break
             except:
                 pass
         return
     if self.path.endswith('.html') or self.path=="/":
         self.send_response(200)
         self.send_header('Content-type','text/html')
         self.end_headers()
         self.wfile.write(b'<html><head></head><body>')
         self.wfile.write(b'<img src="./cam.mjpg" />')
         self.wfile.write(b'</body></html>')
         return
Example #24
    def post_media(self, img):
        result, img_png = cv2.imencode('.png', img)

        if not result:
            IkaUtils.dprint('%s: Failed to encode the image (%s)' %
                            (self, img.shape))
            return None

        files = { "media": img_png.tostring() }

        CK = self._preset_ck if self.consumer_key_type == 'ikalog' else self.consumer_key
        CS = self._preset_cs if self.consumer_key_type == 'ikalog' else self.consumer_secret

        from requests_oauthlib import OAuth1Session
        twitter = OAuth1Session(
            CK, CS, self.access_token, self.access_token_secret
        )
        req = twitter.post(
            self.url_media,
            files=files,
            verify=self._get_cert_path()
        )

        if req.status_code == 200:
            return json.loads(req.text)['media_id']

        IkaUtils.dprint('%s: Failed to post media.' % self)
        return None
Example #25
def openCV_to_kivy(cvImage):
    # encode the OpenCV image in PNG format
    _, imPNG = cv.imencode(".png", cvImage)
    # create a binary data stream for reading that data
    data = io.BytesIO(imPNG.tobytes())
    # create a Kivy Core Image data structure to hold that data
    return CoreImage(data, ext="png")
Example #26
 def thumbnail(self, filename):
     cap = cv2.VideoCapture(self.media_path + "/" + filename)
     ret, frame = cap.read()
     ret, jpg = cv2.imencode(".jpg", frame)
     cap.release()
     cherrypy.response.headers['Content-Type'] = 'image/jpeg'
     return jpg.tobytes()
Example #27
def hash_encode_image(image, extension):
    """ Encode the image, get the hash and return the hash with
        encoded image """
    img = cv2.imencode(extension, image)[1]  # pylint: disable=no-member
    f_hash = sha1(
        cv2.imdecode(img, cv2.IMREAD_UNCHANGED)).hexdigest()  # pylint: disable=no-member
    return f_hash, img
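
A hedged usage sketch ('face.png' and 'out.png' are placeholder paths). Note the hash is taken over the re-decoded pixels, so for lossy formats it matches what a later reader of the encoded file will see, not the in-memory original:

import cv2

image = cv2.imread('face.png')                     # assumed input file
f_hash, encoded = hash_encode_image(image, '.png')
print(f_hash)                                      # sha1 of the re-decoded pixels
with open('out.png', 'wb') as f:
    f.write(encoded.tobytes())                     # the encoded buffer is written as-is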
Example #28
    def captureFrame():
        print("Capturing 1")
        cap = cv2.VideoCapture(-1)
        if not cap.isOpened():
            print("Camera Failed to open")
            return b'0'

        cap.set(3, 640)
        cap.set(4, 480)
        trials = 3
        frame = np.array([0])
        stat  = False

        while not stat and trials > 0 and (frame == 0).all():
            time.sleep(.3)  # sleep 300 ms for the camera to turn on
            stat, frame = cap.read()
            trials -= 1

        cap.release()

        ret = b'0'
        if stat:
            ret = cv2.imencode('.jpg', frame)[1].tobytes()

        return ret
Example #29
def process_image(image_buffer):
    '''
    receive image from client. remove lines. send processed image back
    '''

    # print to debug
    print('type:', type(image_buffer), 'len:', len(image_buffer))
    # construct image from binary buffer/file stream or whatever abstraction you see
    np_array = np.frombuffer(image_buffer, dtype='uint8')
    print('type:', type(np_array), 'len:', len(np_array))

    # read image from raw data
    grayscale_image = cv2.imdecode(np_array, cv2.IMREAD_GRAYSCALE)

    # process the received image
    threshed = process_an_image(grayscale_image)
    remove_lines(threshed)
    # cv2.imshow('', threshed)  # debug
    # cv2.waitKey()


    ret, buffer_of_image = cv2.imencode('.jpg', threshed)
    buffer_string = buffer_of_image.tobytes()
    list_of_byte = list(buffer_string)

    print('type:', type(buffer_of_image), 'len:', len(buffer_of_image))  # debug
    print('type:', type(list_of_byte), 'len:', len(list_of_byte))
    print('type:', type(buffer_string), 'len:', len(buffer_string))

    return buffer_string
Example #30
def stream_image(fn):
    """Video streaming generator function."""

    while True:
        frame = ""
        try:
            img = cv2.imread(fn, cv2.IMREAD_COLOR)
            b, frame = cv2.imencode(".jpg", img)
            frame = frame.tobytes()

            # with open(fn, 'rb') as f:
            #     frame = f.read()

        except Exception as e:
            print(str(e))
            img = Image.new("RGB", (512, 256))
            draw = ImageDraw.Draw(img)
            pos = float(time.time() * 1000) / 1000
            pos = (1 + math.sin(pos)) / 2
            pos = pos * (img.size[0] - 50)
            draw.multiline_text((pos, img.size[1] / 2),
                                "NO IMAGE",
                                fill="white")
            del draw
            b = io.BytesIO()
            img.save(b, "JPEG")
            b.seek(0)
            frame = b.read()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
Example #31
    # Set up the necessary objects for detection
    detector = setup_detector()
    table_factory = TableSetup(cfg)

    # Run the detection algorithm
    detections = detector.detect()
    i = 0
    for image, detection in detections:

        # Convert the detection to a SnookerTable object if possible
        table = table_factory.create_table(detection)
        if table is None:
            continue
        print "Frame {0}".format(i)

        # Get the json and log details to send to the RabbitMQ server
        json = table.to_json()
        messenger.send(json, "json")
        messenger.send(json, "log")
        print "Snooker ball count: {0}".format(len(table.balls))

        # Convert the frame to send to the server
        img = cv2.imencode('.jpg', image)[1].tobytes()
        messenger.send(img, "stream")
        print("Image byte size: {0}".format(len(img)))
        i += 1

    print("Stream complete")
    messenger.disconnect()
Example #32
def encode_toString(img):
    retval, buffer = cv2.imencode('.jpg', img)
    data = base64.b64encode(buffer)
    return data
Example #33
    def run(self):
        while True:
            camera, frame_time, tracked_objects = self.tracked_objects_queue.get(
            )

            config = self.config[camera]
            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            self.camera_data[camera]['tracked_objects'] = tracked_objects
            self.camera_data[camera]['current_frame_time'] = frame_time

            ###
            # Draw tracked objects on the frame
            ###
            current_frame = self.plasma_client.get(f"{camera}{frame_time}")

            if current_frame is not plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                if config['snapshots']['show_timestamp']:
                    time_to_show = datetime.datetime.fromtimestamp(
                        frame_time).strftime("%m/%d/%Y %H:%M:%S")
                    cv2.putText(current_frame,
                                time_to_show, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=.8,
                                color=(255, 255, 255),
                                thickness=2)

                ###
                # Set the current frame
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # delete the previous frame from the plasma store and update the object id
                if self.camera_data[camera]['object_id'] is not None:
                    self.plasma_client.delete(
                        self.camera_data[camera]['object_id'])
                self.camera_data[camera]['object_id'] = f"{camera}{frame_time}"

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                else:
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###
            # count objects with at least 2 entries in history by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                if len(obj['history']) > 1:
                    obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/region",
                            "{},{},{},{}".format(region[0], region[1],
                                                 region[2], region[3]),
                            retain=False)
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/box",
                            "{},{},{},{}".format(box[0], box[1], box[2],
                                                 box[3]),
                            retain=False)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and obj_name not in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/region",
                        "{},{},{},{}".format(region[0], region[1], region[2],
                                             region[3]),
                        retain=False)
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/box",
                        "{},{},{},{}".format(box[0], box[1], box[2], box[3]),
                        retain=False)
Example #34
def classify_image (event, context):
    try:
        content_type_header = event['headers']['content-type']
        body = base64.b64decode(event["body"])

        picture = decoder.MultipartDecoder(body, content_type_header).parts[0]
        im_arr = np.frombuffer(picture.content, dtype=np.uint8)
        im = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)
        points = fbc.getLandmarks(faceDetector, landmarkDetector, im)
        points = np.array(points)
        im = np.float32(im)/255.0
        h = 600
        w = 600
        imNorm, points = fbc.normalizeImagesAndLandmarks((h, w), im, points)
        imNorm = np.uint8(imNorm*255)

        filename = picture.headers[b'Content-Disposition'].decode().split(';')[1].split('=')[1]
        if len(filename) < 4:
            filename = picture.headers[b'Content-Disposition'].decode().split(';')[2].split('=')[1]
        print('all done')
        return {
            "statusCode": 200,
            "headers": {
                'Content-Type': 'application/json',
                'Access-Control-Allow-Origin': '*',
                "Access-Control-Allow-Credentials": True
            },
            "body": json.dumps({'file': filename.replace('"', ''), 'aligned_image': str (base64.b64encode (cv2.imencode ('.jpg', imNorm)[1])) })
        }
    except Exception as e:
        print(repr(e))
        return {
            "statusCode": 500,
            "headers": {
                'Content-Type': 'application/json',
                'Access-Control-Allow-Origin': '*',
                "Access-Control-Allow-Credentials": True
            },
            "body": json.dumps({"error": repr(e)})
        }
Example #35
 def save_inp_image(self, img, group):
     img_str = cv2.imencode('.png', img)[1]
     self.save('input', img_str, group)
Example #36
    def frame_tobytes(self):
        ''' Encode the output frame into bytes.
        '''

        return cv2.imencode('.jpg', self.visualize())[1].tobytes()
Example #37
## setup logging
import logging
logging.basicConfig(level = logging.INFO)

## import the package
import numpy as np
import cv2
import sys

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    success,image = cap.read() 
    print(cv2.imencode('.jpg', image)[1].tobytes())

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #38
        if writer is None:
            # initialize our video writer
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30,
                                     (frame.shape[1], frame.shape[0]), True)

            # some information on processing single frame
            if total > 0:
                elap = (end - start)
                print("[INFO] single frame took {:.4f} seconds".format(elap))
                print("[INFO] estimated total time to finish: {:.4f}".format(
                    elap * total))

        #write the output frame to disk
        writer.write(frame)
    imgbytes = cv2.imencode('.png', frame)[1].tobytes()  # ditto

    if not win_started:
        win_started = True
        layout = [[
            sg.Text('Yolo Playback in PySimpleGUI Window', size=(30, 1))
        ], [sg.Image(data=imgbytes, key='_IMAGE_')],
                  [
                      sg.Text('Confidence'),
                      sg.Slider(range=(0, 1),
                                orientation='h',
                                resolution=.1,
                                default_value=.5,
                                size=(15, 15),
                                key='confidence'),
                      sg.Text('Threshold'),
Example #39
 def mark_diff(self, diff_image):
     """
     Save 'difference' pixels to a picture
     """
     is_success, buffer = cv2.imencode(".png", diff_image)
     return io.BytesIO(buffer).getvalue()
Example #40
if len(sys.argv) > 1:
    broker_uri = sys.argv[1]
    topic_id = sys.argv[2]

channel = Channel(broker_uri)
exporter = ZipkinExporter(
    service_name='CameraGateway.{}'.format(topic_id),
    host_name='localhost',
    port=9411,
    transport=BackgroundThreadTransport(max_batch_size=100),
)

image = cv2.imread('../image.png')

for k in range(10):
    tracer = Tracer(exporter)
    with tracer.span(name='image') as span:
        cimage = cv2.imencode(ext='.jpeg',
                              img=image,
                              params=[cv2.IMWRITE_JPEG_QUALITY, 80])
        data = cimage[1].tobytes()
        im = Image(data=data)
        msg = Message()
        msg.topic = 'CameraGateway.{}.Frame'.format(topic_id)
        msg.inject_tracing(span)
        msg.metadata.update({'image_id': k})
        msg.pack(im)
        channel.publish(msg)
        log.info('Message {} published', k)
    time.sleep(0.250)
Example #41
def root():

    # removing all previous files in folder before start processing
    output_folder = 'static/img/temp/'
    for file in glob.glob(output_folder + '*'):
        os.remove(file)

    # on POST handle upload
    if request.method == 'POST':

        # get file details
        file_data = request.files.get('file')
        if file_data is None:
            err_msg = 'No input image was provided.'
            logging.error(err_msg)
            return render_template('index.html', error_msg=err_msg)

        # read image from string data
        file_request = file_data.read()
        # convert string data to numpy array
        np_inp_image = np.frombuffer(file_request, np.uint8)
        img = cv2.imdecode(np_inp_image, cv2.IMREAD_UNCHANGED)
        _, image_encoded = cv2.imencode('.jpeg', img)

        # TODO R1: review inference request payload
        # Required inference request parameter: image (JPG/PNG encoded)
        files = {
            'file': image_encoded.tobytes(),
            'Content-Type': 'multipart/form-data',
        }

        # TODO T1: replace model URL placeholder
        # Add model endpoint
        model_url = args.ml_endpoint.rstrip('/') + '**TODO**'

        # Send image file form to model endpoint for prediction
        try:
            results = requests.post(url=model_url, files=files)
        except Exception as e:
            err_msg_temp = 'Prediction request to {} failed: {}'
            err_msg = err_msg_temp.format(model_url, 'Check log for details.')
            logging.error(err_msg_temp.format(model_url, str(e)))
            return render_template("index.html", error_msg=err_msg)

        # surface any prediction errors to user
        if results.status_code != 200:
            err_msg = ('Prediction request returned status code {} ' +
                       'and message {}').format(results.status_code,
                                                results.text)
            logging.error(err_msg)
            return render_template('index.html', error_msg=err_msg)

        # extract prediction from json return
        output_data = results.json()

        # log output in debug
        logging.debug('\n' + pformat(output_data))

        # TODO T2: replace placeholder with appropriate JSON key
        # Extraction prediction result
        result = output_data['**TODO**']

        if len(result) == 0:
            msg = 'No objects detected, try uploading a new image'
            return render_template('index.html', error_msg=msg)
        else:

            # save the output image to return
            file_name = (str(randint(0, 999999)) + '.jpg')
            output_name = output_folder + '/' + file_name
            im = Image.fromarray(img)
            im = im.convert("L")
            newsize = (300, 300)
            im = im.resize(newsize)
            im.save(output_name)

        return render_template('index.html',
                               image_name=output_name,
                               result_1=result[0]['prediction'])

    else:
        # on GET return index.html
        return render_template('index.html')
Example #42
    res, score = '', 0.0
    i = 0
    mem = ''
    consecutive = 0
    textInput = ''

    while True:
        ret, img = cap.read()
        img = cv2.flip(img, 1)

        if ret:
            x1, y1, x2, y2 = 200, 200, 400, 400
            img_cropped = img[y1:y2, x1:x2]

            c += 1
            image_data = cv2.imencode('.jpg', img_cropped)[1].tobytes()

            a = cv2.waitKey(1)  # waits to see if `esc` is pressed

            if i == 4:
                res_tmp, score = predict(image_data)
                res = res_tmp
                i = 0
                if mem == res:
                    consecutive += 1
                else:
                    consecutive = 0
                if consecutive == 2 and res not in ['nothing']:
                    if res == 'space':
                        textInput += ' '
                    elif res == 'del':
Example #43
def img2str(img):
    return cv2.imencode('.jpg', img)[1].tobytes()
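
The natural counterpart is a one-liner as well; str2img below is a hypothetical name, not from the original project:

import cv2
import numpy as np

def str2img(data):
    """Decode the JPEG bytes produced by img2str back into a BGR array."""
    return cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)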
Example #44
        if img.shape[2] == 3:
            img = img[:, :, [2, 1, 0]]
        elif img.shape[2] == 4:
            img = img[:, :, [2, 1, 0, 3]]
        img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
        img_LR = img.unsqueeze(0)
        img_LR = img_LR.to(device)
        output = model(img_LR).data.squeeze(0).float().cpu().clamp_(0, 1).numpy()
        if output.shape[0] == 3:
            output = output[[2, 1, 0], :, :]
        elif output.shape[0] == 4:
            output = output[[2, 1, 0, 3], :, :]
        output = np.transpose(output, (1, 2, 0))
        output = (output * 255.0).round()
        if passAsString:
            buffer = cv2.imencode(".png", output.astype(np.uint8))[1]
            data = base64.b64encode(buffer)
            print(data)
            continue

        newpath = base
        printpath = ''
        if mode == '1' or mode == '2':
            baseinput = os.path.splitext(os.path.basename(name))[0]
            baseinput = re.search('(.*)(_tile-[0-9]+)', baseinput, re.IGNORECASE).group(1)
            modelname = os.path.splitext(os.path.basename(model_path))[0]
        if mode == '1':
            os.makedirs('{1:s}/Images/{0:s}/'.format(baseinput, output_folder), exist_ok=True)
            newpath = '{3:s}/Images/{0:s}/[{2:s}]_{1:s}.png'.format(baseinput, base, modelname, output_folder)           
        if mode == '2':
            os.makedirs('{1:s}/Models/{0:s}/'.format(modelname, output_folder), exist_ok=True)
Example #45
def resize_512():
    cv2.namedWindow("test", 0)
    cv2.resizeWindow("test", 512, 512)
    num_dic = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5}
    org_path = r'F:\1_sheng\答题卡拍照'
    ner_path = r'F:\1_sheng\card_512'
    all_index = 0
    txt_handle = open('./train.txt', 'w')

    for folder in range(1, 10, 1):
        second_path = os.path.join(org_path, str(folder))
        all_json_list = glob.glob(os.path.join(second_path, "*.json"))
        for i, one_json_path in enumerate(all_json_list):
            with codecs.open(one_json_path,
                             'r',
                             encoding='utf-8',
                             errors='ignore') as f:
                jsondict = json.load(f)
                imagePath = one_json_path.replace(".json", ".jpg")
                if not os.path.exists(imagePath):
                    imagePath = one_json_path.replace(".json", ".png")
                    print(imagePath, "does not exist, trying png")
                elif not os.path.exists(imagePath):
                    imagePath = one_json_path.replace(".json", ".jpeg")
                    print(imagePath, "does not exist, trying jpeg")
                elif not os.path.exists(imagePath):
                    print(imagePath, "does not exist, check the file extension---------------")
                    break
                pic = jsondict['imageData']
                pic = base64.b64decode(pic)
                # print(pic)
                pic = np.frombuffer(pic, np.uint8)
                img = cv2.imdecode(pic, cv2.IMREAD_COLOR)
                # img = cv_imread(imagePath)
                height = img.shape[0]
                width = img.shape[1]
                resize_img = cv2.resize(img, (512, 512))
                gt_content = ""
                for one_point in jsondict['shapes']:
                    label = one_point['label']
                    # print(label, "one_point['points']: ", one_point['points'])
                    x = int(one_point['points'][0][0])
                    y = int(one_point['points'][0][1])
                    label_index = num_dic[label]
                    # print(x/width, y/height)
                    x = int((x / width) * 512)
                    y = int((y / height) * 512)
                    # resize_img = cv2.circle(resize_img, (x, y), 3, [0, 255, 0], 5)
                    gt_content = gt_content + ' {},{},{}'.format(
                        str(label_index), str(x), str(y))

                img_write_path = os.path.join(ner_path, str(folder),
                                              os.path.split(imagePath)[1])
                cv2.imencode('.jpg', resize_img)[1].tofile(img_write_path)
                write_content = img_write_path + gt_content
                txt_handle.write(write_content + '\n')
                print(all_index, write_content)
                # cv2.imshow("test", resize_img)
                # cv2.waitKey(0)
            all_index += 1
    txt_handle.close()
Example #46
 def get_frame(self):
     (self.grabbed, self.frame) = self.video.read()
     image = self.frame
     ret, jpeg = cv2.imencode('.jpg', image)
     return jpeg.tobytes()
Example #47
                         linger_ms=5,
                         max_request_size=2097152,
                         compression_type='gzip'
                         )

while True:
    ### read a frame from camera
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)

    ### prepare frame attributes
    timestamp = datetime.datetime.now()
    
    rows, cols, channels = frame.shape
    frame = cv2.resize(frame, (480, int(480.0*rows/cols)), interpolation = cv2.INTER_LINEAR)
    img_encode = cv2.imencode('.png', frame)[1]
    data_encode = np.array(img_encode)
    
    # cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    ### construct JSON message
    msg_dict = {
        "cameraId":"vid-01",
        "timestamp":timestamp.strftime('%Y-%m-%d %H:%M:%S'),
        "data":base64.b64encode(data_encode).decode('utf-8')
    }
    msg_json = json.dumps(msg_dict)
    # print(msg_json)
Example #48
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

# Initialize camera by CV2
cam = cv2.VideoCapture(0)
cv2.namedWindow("face")
img_counter = 0

# While loop to continuously process the video frames 
while True:
    ret, frame = cam.read()
    if not ret:
        print("failed to grab frame")
        break
    cv2.imshow("face", frame)
    # Post video frames to Azure Face Service to obtain face landmarks
    image = cv2.imencode('.jpg', frame)[1].tobytes()
    subscription_key = KEY
    face_api_url = "https://southeastasia.api.cognitive.microsoft.com/face/v1.0/detect"
    headers = {'Content-Type': 'application/octet-stream', 'Ocp-Apim-Subscription-Key': subscription_key}
    params = {'returnFaceId': 'true', 'returnFaceLandmarks': 'true'}
    response = requests.post(face_api_url, params=params, headers=headers, data=image)
    response.raise_for_status()
    faces = response.json()
    print(faces)

    # Parse collected face landmarks into variables
    for face in faces:
        flm = face['faceLandmarks']
        pupilLeft = flm['pupilLeft']
        pupilRight = flm['pupilRight']
        noseTip = flm['noseTip']
Example #49
	def getImage(self):
		total = T.total	#total number of bounding boxes
		boundingBoxes = T.boundingBoxes #stored bounding box coordinates from last frame
		prevImage = T.prevImage #stored image from previous frame
		file = self.stream.read()
		encoded_string = base64.b64encode(file)
		arr = np.asarray(bytearray(file), dtype=np.uint8)
		img = cv2.imdecode(arr, -1)

		#filter out background
		img2 = img.copy()
		img2 = self.filter2(79, 104, 43, 240, 141, 108, 351, 195, img2) #hacked solution to black out the non-essential parts of the image

		sensitivity = 0.4 #threshold to filter out detections

		boxes, scores, classes, num = C.getClassification(img2) #runs the image through ssd-mobilenet
		limit = 0
		for i in range(scores[0].shape[0]):
			limit = i
			if scores[0][i] < sensitivity:
				break
		nBoxes = boxes[0][0:limit]
		nScores = scores[0][0:limit]
		nClasses = classes[0][0:limit]


		currentBoxes = []
		for box1 in nBoxes:
			x1 = min(351,int(round(box1[1]*352)))
			y1 = min(239,int(round(box1[0]*240)))
			x2 = min(351,int(round(box1[3]*352)))
			y2 = min(239,int(round(box1[2]*240)))
			print((x1, x2, y1, y2))
			currentBoxes.append((x1, x2, y1, y2))
		#currentBoxes now holds the coordinates of the bounding boxes for this frame
		
		#TODO: Run the current bounding boxes through VGG
		#EXAMPLE
		for coord in currentBoxes:
			(x1, x2, y1, y2) = coord
			boundingBox = img[y1:y2, x1:x2, :]
			#Determine whether this bounding box is a car or not by passing through VGG

			#remove bounding box if below score threshold

		#instantiates a correlation object with the boxes from the previous frame and this frame
		self.corr = correlationClass(boundingBoxes, currentBoxes)
		
		#correlates the bounding boxes. method to be implemented in correlation.py.
		#tracked and new each contain a list of indices for the bounding boxes in this frame,
		#whether the car in the box is matched with a bounding box in the previous frame, or not.
		tracked, new = self.corr.correlateBoxes(prevImage, img)
		
		for i in range(len(currentBoxes)):
			(x1, x2, y1, y2) = currentBoxes[i]
			if i in new:
				img = self.drawBox(img, x1, x2, y1, y2, [0, 255, 0])
			else:
				img = self.drawBox(img, x1, x2, y1, y2, [0, 0, 255])
		#img = self.filter(img)
		print("Image 1 bounding boxes: " + str(len(boundingBoxes)))
		print("Image 2 bounding boxes: " + str(len(currentBoxes)))
		print("Number of correlations: " + str(self.corr.numCorrelations))
		T.boundingBoxes = currentBoxes
		T.total += len(new)
		T.prevImage = img
		font = cv2.FONT_HERSHEY_SIMPLEX
		cv2.putText(img,'Car Count: ' + str(T.total),(10,230), font, 0.5,(255,255,255),2,cv2.LINE_AA)
		
		retval, b = cv2.imencode('.jpg', img)
		retval2, b2 = cv2.imencode('.jpg', img2)
		encoded_string = base64.b64encode(b).decode('ascii')
		encoded_string2 = base64.b64encode(b2).decode('ascii')
		D = {
			"im1":encoded_string,
			"im2":encoded_string2
		}
		json_data = json.dumps(D)
		#encoded_string = base64.b64encode(arr)
		return json_data
Example #50
    def predict(self, image, image_type):
        keras.backend.set_session(self.session)
        import scipy.misc as sm

        # image = cv2.resize(image, (self.WIDTH, self.HEIGHT))
        ret, mat = cv2.imencode(".jpg", image)
        image = Image(blob=mat)
        image.resize(self.WIDTH, self.HEIGHT)
        image.format = 'gray'
        image.alpha_channel = False
        image = np.asarray(bytearray(image.make_blob()),
                           dtype='uint8').reshape(image.size)
        # if (self.both_model_flag):
        #     # image = cv2.resize(image, (self.WIDTH, self.HEIGHT), interpolation = cv2.INTER_NEAREST)
        #     # image = cv2.resize(image, (self.WIDTH, self.HEIGHT),  interpolation = cv2.INTER_LINEAR)
        #     # image = sm.imresize(image,size=(self.WIDTH, self.HEIGHT),interp="bilinear")
        #     image = cv2.resize(image, (self.WIDTH, self.HEIGHT))
        #
        #
        # else:
        #     # image = sm.imresize(image,size=(self.WIDTH, self.HEIGHT), interp="nearest")
        #     # image = cv2.resize(image, (self.WIDTH, self.HEIGHT),  interpolation = cv2.INTER_LINEAR)
        #     # image = cv2.resize(image, (self.WIDTH, self.HEIGHT), interpolation = cv2.INTER_NEAREST)
        #     image = cv2.resize(image, (self.WIDTH, self.HEIGHT))

        image = cv2.resize(image, (self.WIDTH, self.HEIGHT))
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) / 255

        features = np.zeros(shape=(1, self.feature_model_output_shape[1],
                                   self.feature_model_output_shape[2],
                                   self.feature_model_output_shape[3]))

        with self.graph.as_default():
            with self.session.as_default():
                features[0:1] = self.feature_model.predict(np.array([image]))

        features = np.reshape(features,
                              (1, self.feature_model_output_shape[1] *
                               self.feature_model_output_shape[2] *
                               self.feature_model_output_shape[3]))
        print(features[0][20])
        if (self.both_model_flag):
            with self.graph.as_default():
                with self.session.as_default():
                    prediction = self.both_model.predict_classes(features)[0]
            prob = self.both_model.predict(features)
            pred_label = self.both_classes[prediction]

        else:
            if image_type == 1:
                with self.graph.as_default():
                    with self.session.as_default():
                        prediction = self.letters_model.predict_classes(
                            features)[0]
                prob = self.letters_model.predict(features)
                pred_label = self.letters_classes[prediction]

            elif image_type == 2:
                with self.graph.as_default():
                    with self.session.as_default():
                        prediction = self.numbers_model.predict_classes(
                            features)[0]
                prob = self.numbers_model.predict(features)
                pred_label = self.numbers_classes[prediction]
            else:
                raise TypeError
        probability = prob[0][prediction]

        return pred_label, probability
Example #51
    obj = {}
    obj['timestamp'] = datetime.now().isoformat()
    obj['bytes'] = stringify_jpg(jpg_bytes)
    return json.dumps(obj)


def deserialize_payload(payload):
    return json.loads(payload)


#def deserialize_jpg(jpg_json):
#    """Deserializes a JSON object created by jsonify_image.
#
#    :param string :
#    :return:
#    :rtype:
#    """
#    return json.loads(jpg_json)


if __name__ == '__main__':
    im = cv2.imread('/home/debug/codes/darknet/data/dog.jpg')
    retval, jpg_bytes = cv2.imencode('.jpg', im)

    # size of stringified dog.jpg is 1.33x larger than original
    s_jpg = serialize_jpg(jpg_bytes)
    d_jpg = deserialize_payload(s_jpg)
    # TODO: Can we write JPEG bytes into file directly to prevent
    #       bytes -> numpy array -> decode RGB -> write encoded JPEG
    cv2.imwrite('/tmp/dog.jpg', jpg2bgr(destringify_jpg(d_jpg['bytes'])))
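
The helpers used above (stringify_jpg, destringify_jpg, jpg2bgr) are not part of this excerpt. Given the comment that the stringified JPEG is about 1.33x larger (the 4/3 base64 ratio), their likely shape is sketched below; treat this as a reconstruction, not the project's actual code:

import base64

import cv2
import numpy as np

def stringify_jpg(jpg_bytes):
    # buffer of JPEG bytes -> base64 text (~4/3 the size)
    return base64.b64encode(jpg_bytes).decode('ascii')

def destringify_jpg(jpg_string):
    # base64 text -> raw JPEG bytes
    return base64.b64decode(jpg_string)

def jpg2bgr(jpg_bytes):
    # raw JPEG bytes -> decoded BGR array
    return cv2.imdecode(np.frombuffer(jpg_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)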
Example #52
 def get_frame(self):
     frame = self.flip_if_needed(self.vs.read())
     ret, jpeg = cv2.imencode('.jpg', frame)
     return jpeg.tobytes()
Example #53
 def save_full_sem_seg(self, cls_id, seg, group):
     seg_str = cv2.imencode('.png', seg)[1]
     key = 'label_semantic_segmentation_full_size/{:02d}'.format(cls_id)
     self.save(key, seg_str, group)
Example #54
            self.send_response(200)
            self.send_header("Content-type", "image/jpeg")
            self.end_headers()

            # ret, frame = cap.read()
            # _, jpg = cv2.imencode(".jpg", frame)

            self.wfile.write(jpg)
        else:
            self.send_response(404)


# continuously get frames from webcam
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
_, jpg = cv2.imencode(".jpg", frame)


class FrameThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.isRunning = True

    def run(self):
        global jpg, cap

        while self.isRunning:
            ret, frame = cap.read()
            _, jpg = cv2.imencode(".jpg", frame)
            sleep(0.03)
Example #55
 def save_ori(self, ori, group):
     ori_str = cv2.imencode('.png', ori)[1]
     self.save('orientation', ori_str, group)
Example #56
 def save_seg(self, seg_id, seg, group):
     seg_str = cv2.imencode('.png', seg)[1]
     key = 'label_segmentation/{:02d}'.format(seg_id)
     self.save(key, seg_str, group)
Example #57
 SECRET_KEY = 'Q6WtWCSFqqMAbKffCrqHQ6DDWRNc0oD7'
 client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
 parser = argparse.ArgumentParser()
 parser.add_argument('-d', '--debug', type=bool, default=False)
 args = parser.parse_args()
 is_debug = args.debug
 camera = PyK4A()
 camera.config.color_resolution = ColorResolution.RES_1080P
 camera.start()
 c_type = 1
 gesture = 0
 print('starting...')
 target_gesture = 3
 while True:
     capture = camera.get_capture()
     img_bytes = cv2.imencode('.jpg', capture.color)[1].tobytes()
     attr_thread = threading.Thread(target=deal_with_bodyAttr_result,
                                    args=(img_bytes, ))
     analysis_thread = threading.Thread(
         target=deal_with_bodyAnalysis_result, args=(img_bytes, ))
     attr_thread.start()
     analysis_thread.start()
     attr_thread.join()
     analysis_thread.join()
     final_results = integrate_results(attr_result, analysis_result)
     if len(final_results) == 0:
         continue
     print(colored(final_results))
     drawn_image = drawDebugImage(capture.color, final_results)
     if is_debug:
         cv2.imshow("Detected Image", drawn_image)
Example #58
 def save_full_image(self, img, group):
     img_str = cv2.imencode('.png', img)[1]
     self.save('input_full_size', img_str, group)
Example #59
def jpeg_compression(image, factor):
  encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), factor]
  _, image = cv2.imencode('.jpg', image, encode_param)
  return cv2.imdecode(image, 1)
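
A quick check of the round-trip degradation, using an assumed random test image:

import cv2
import numpy as np

img = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)  # assumed test image
degraded = jpeg_compression(img, 10)   # strong compression, visible artifacts
mild = jpeg_compression(img, 95)       # near-lossless
print(np.abs(img.astype(int) - degraded.astype(int)).mean())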
Example #60
 def frame_to_data(self, frame):
     return cv2.imencode('.png', frame)[1].tobytes()