Beispiel #1
0
def toImage(arr):
    """Convert a 2-D numeric array to an 8-bit grayscale PIL image.

    Arrays that are not already single-byte are shifted and rescaled into
    the 0-255 range before conversion.
    """
    if arr.type().bytes != 1:
        # Rescale: shift minimum to zero, then stretch to the full byte range.
        shifted = arr - arr.min()
        shifted *= (255. / shifted.max())
        arr = shifted.astype(UInt8)
    # PIL wants (width, height); the array shape is (rows, cols), so reverse it.
    return Image.frombytes('L', arr.shape[::-1], arr.tostring())
Beispiel #2
0
def toImage(arr):
    """Convert a numpy array to a PIL image.

    Non-uint8 input is shifted/scaled into the 0-255 range first.  A 2-D
    array becomes a grayscale ('L') image; anything else is treated as RGB.
    """
    if arr.dtype != 'uint8' :
        # Normalise into 0..255; guard against a constant array (max == 0).
        arr_c = arr - arr.min()
        maxi = arr_c.max()
        if maxi > 0 :
            arr_c *= (255./maxi)
        arr = arr_c.astype('uint8')
        # BUG FIX: 'arrx.size' was a typo that raised NameError whenever this
        # branch ran; the statistics refer to the converted array 'arr'.
        print("Array shape %s, size %d, type %s, range %d-%d, mean %.1f, std %.1f"%(repr(arr.shape), arr.size, arr.dtype, arr.min(), arr.max(), arr.mean(), arr.std()))
    # need to swap coordinates btw array and image (with [::-1])
    if len(arr.shape) == 2 :
        im = Image.frombytes('L', arr.shape[::-1], arr.tostring())
    else :
        im = Image.frombytes('RGB', (arr.shape[1],arr.shape[0]), arr.tostring())
    return im
Beispiel #3
0
    def get_last_environment_map(self):
        """Stitch the six cached cube-map faces into one horizontal-cross
        environment map (built lazily, memoised in last_environment_map).

        The canvas is (3*faceh) x (4*facew):
          - middle row, left to right: posy, posx, negy, negx
          - top row, second column: negz
          - bottom row, second column: posz
        Background is transparent for 4-channel faces, white otherwise.
        """
        if self.last_environment_map is None:
            faceh = self.cubemap_face_yres
            facew = self.cubemap_face_xres

            # Channel count taken from one cached face; all faces share it.
            nchan = self.last_optical_images['posy'].shape[2]
            imnx = numpy.empty( (faceh*3,facew*4,nchan), dtype = numpy.uint8)
            if nchan == 4:
                # make background transparent (alpha channel = 0)
                initvalue = 0
            else:
                # make background white
                initvalue = 255
            imnx.fill(initvalue)

            # Middle row of the cross.
            yi0 = faceh
            yi1 = 2*faceh

            xi0 = 0
            xi1 = facew
            imnx[yi0:yi1,xi0:xi1,:] = self.last_optical_images['posy']

            xi0 = 2*facew
            xi1 = 3*facew
            imnx[yi0:yi1,xi0:xi1,:] = self.last_optical_images['negy']

            xi0 = 3*facew
            xi1 = 4*facew
            imnx[yi0:yi1,xi0:xi1,:] = self.last_optical_images['negx']

            xi0 = facew
            xi1 = 2*facew
            imnx[yi0:yi1,xi0:xi1,:] = self.last_optical_images['posx']

            # Top and bottom cells reuse the posx column (xi0/xi1 unchanged).
            yi0 = 0
            yi1 = faceh
            imnx[yi0:yi1,xi0:xi1,:] = self.last_optical_images['negz']

            yi0 = 2*faceh
            yi1 = 3*faceh
            imnx[yi0:yi1,xi0:xi1,:] = self.last_optical_images['posz']

            if nchan == 3:
                im = Image.frombytes('RGB',(imnx.shape[1],imnx.shape[0]),imnx.tostring())
            elif nchan == 4:
                im = Image.frombytes('RGBA',(imnx.shape[1],imnx.shape[0]),imnx.tostring())
            # NOTE(review): faces appear to be stored bottom-up (hence the
            # flip to image row order) -- confirm against the capture code.
            im = im.transpose( Image.FLIP_TOP_BOTTOM )
            self.last_environment_map = im
        return self.last_environment_map
Beispiel #4
0
    def save(filename, width, height, fmt, pixels, flipped=False):
        """Write raw pixel data to *filename*, optionally flipping vertically.

        Returns True on completion.
        """
        img = PILImage.frombytes(fmt.upper(), (width, height), pixels)
        if flipped:
            img = img.transpose(PILImage.FLIP_TOP_BOTTOM)
        img.save(filename)
        return True
Beispiel #5
0
    def printViewer(self):
        """Save the current GL buffer to a user-chosen file (JPEG or PNG)."""
        from global_vars import CAIDViewerWildcard
        # Create a save file dialog
        dialog = wx.FileDialog ( None, style = wx.SAVE | wx.OVERWRITE_PROMPT
                               , wildcard=CAIDViewerWildcard)
        # Show the dialog and get user input
        if dialog.ShowModal() == wx.ID_OK:
            filename="test.jpeg"
            try:
                filename = dialog.GetPath()
            except:
                pass
            ext = filename.split('.')[-1]
            # BUG FIX: fmt stayed undefined for any extension other than
            # "jpeg"/"png", raising NameError at image.save().  Default to
            # JPEG and only switch for an explicit .png extension.
            fmt = "JPEG"
            if ext == "png":
                fmt = "PNG"

            import Image # get PIL's functionality...
            import os
            x,y,width,height = glGetDoublev(GL_VIEWPORT)
            width = int(width)
            height = int(height)
            glPixelStorei(GL_PACK_ALIGNMENT, 1)
            data = glReadPixels(x, y, width, height, GL_RGB, GL_UNSIGNED_BYTE)
            # GL returns rows bottom-up; flip so the saved image is upright.
            image = Image.frombytes( "RGB", (width, height), data )
            image = image.transpose( Image.FLIP_TOP_BOTTOM)
            image.save( filename, fmt )
            self.statusbar.SetStatusText("Image has been saved in " + filename)

        # Destroy the dialog
        dialog.Destroy()
Beispiel #6
0
    def save(filename, width, height, fmt, pixels, flipped=False):
        """Build a PIL image from *pixels* and write it to *filename*.

        When *flipped* is true the image is mirrored top-to-bottom first.
        Always returns True.
        """
        image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
        if not flipped:
            image.save(filename)
        else:
            image.transpose(PILImage.FLIP_TOP_BOTTOM).save(filename)
        return True
Beispiel #7
0
def savepng():
    """Grab the current GL framebuffer and save it as screen/<timestamp>.png."""
    pixels = (GLubyte * (3 * width * height))(0)
    glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels)
    shot = Image.frombytes(mode="RGB", size=(width, height), data=pixels)
    # GL rows run bottom-up; PIL expects top-down.
    shot = shot.transpose(Image.FLIP_TOP_BOTTOM)
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M")
    shot.save('screen/' + stamp + '.png', format='png')
    def capture_image(self, event):
        """Grab a full-screen screenshot at mouse-click time and paint a small
        red square (10x10 px) around the click position.

        Python 2 code.  Uses python-xlib on posix and PIL ImageGrab on nt;
        returns a PIL image (or None when the X capture fails).
        """
        screensize = self.get_screen_size()

        # The cropbox will take care of making sure our image is within
        # screen boundaries.
        #AF UPDATED get full screen on mouse click
        cropbox = CropBox(topleft=Point(0,0),
                          bottomright=screensize,
                          min=Point(0,0),
                          max=screensize)
        #cropbox.reposition(Point(event.Position[0], event.Position[1]))

        self.logger.debug(cropbox)
	
        if os.name == 'posix':
		

            AllPlanes = 0xFFFFFFFF
		
            try:
		
                # cropbox.topleft.x, cropbox.topleft.y,
                # cropbox.size.x, cropbox.size.y, self.savefilename
                raw = self.rootwin.get_image(cropbox.topleft.x,
                        cropbox.topleft.y, cropbox.size.x, cropbox.size.y,
                        X.ZPixmap, AllPlanes)

		
                image_data = Image.frombytes("RGBX", (cropbox.size.x, cropbox.size.y), raw.data, "raw", "BGRX").convert("RGB")
                #ADDED SECTION - AF
                #GET ACTUAL SCREEN COORDS OF MOUSE CLICK
                m_x = event.Position[0]
                m_y = event.Position[1]
                for i in range(m_x-5,m_x+5):
					for j in range(m_y-5,m_y+5):
							image_data.putpixel((i,j),(255,0,0))
                #END ADDED SECTION - AF

                return image_data
            except error.BadDrawable:
                print "bad drawable when attempting to get an image!  Closed the window?"
            except error.BadMatch:
                print "bad match when attempting to get an image! probably specified an area outside the window (too big?)"
            except error.BadValue:
                print "getimage: bad value error - tell me about this one, I've not managed to make it happen yet"
            except:
                print self.logger.debug('Error in getimage.',
                        exc_info = True)

        if os.name == 'nt':
            image_data = ImageGrab.grab(
                (cropbox.topleft.x, cropbox.topleft.y, cropbox.bottomright.x, cropbox.bottomright.y))
            print "putting pixel in"
            m_x = event.Position[0]
            m_y = event.Position[1]
            for i in range(m_x - 5, m_x + 5):
                for j in range(m_y - 5, m_y + 5):
                    image_data.putpixel((i, j), (255, 0, 0))
            return image_data
def getColour(IP, PORT):
    """Fetch one RGB frame from the NAO camera, run shape detection against
    the RED_MIN/RED_MAX colour bounds, and display the annotated frame in an
    OpenCV window (blocks until a key is pressed).

    :param IP: parent broker IP
    :param PORT: parent broker port
    """


    myBroker = ALBroker("myBroker",
        "0.0.0.0", # listen to anyone
        0, # find a free port and use it
        IP, # parent broker IP
        PORT) # parent broker port

    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2 # VGA
    colorSpace = 11 # RGB


    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)

    t0 = time.time()

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)

    t1 = time.time()

    # Time the image transfer.
    #print "Runde: ", b

    camProxy.unsubscribe(videoClient)


    # Now we work with the image returned and save it as a PNG using ImageDraw
    # package.

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    #Create a PIL Image Instance from our pixel array.
    img0= Image.frombytes("RGB", (imageWidth, imageHeight), array)


    #frame=np.asarray(convert2pil(img0)[:,:])

    #object_rect2=detectColor(img0, RED_MIN,RED_MAX)
    frame=detectShape(img0, RED_MIN,RED_MAX)

    #frame=selectDetected(object_rect1,frame)

    #frame=selectDetected(object_rect2,frame)
    # currentImage = path+ "/camImage1cm.jpg"
    # cv2.imwrite(currentImage, frame)
    cv2.imshow('contour',frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Beispiel #10
0
    def getImage(self, *_args):
        '''

        :param _args: _args[0] - Number of images that required
        :return: - Array of images.

        '''
        camProxy   = ALProxy("ALVideoDevice", NAO_IP, NAO_PORT)
        resolution = vision_definitions.kQQVGA
        colorSpace = vision_definitions.kYUVColorSpace
        fps        = 20
        nameId     = camProxy.subscribe("python_GVM", resolution, colorSpace, fps)
        retImages  = []

        for i in range(0, _args[0]):
            print "getting image " + str(i)
            naoImage = camProxy.getImageRemote(nameId)
            # Get the image size and pixel array.
            imageWidth = naoImage[0]
            imageHeight = naoImage[1]
            array = naoImage[6]
            retImages.append(Image.frombytes("RGB", (imageWidth, imageHeight)))
            time.sleep(0.05)
        camProxy.unsubscribe(nameId)
        return retImages
Beispiel #11
0
def arrayToImage(a):
    """
    Converts a gdalnumeric array to a
    Python Imaging Library Image.
    """
    # Array shape is (rows, cols); PIL's size argument is (width, height).
    height, width = a.shape[0], a.shape[1]
    raw = a.astype('b').tostring()
    return Image.frombytes('L', (width, height), raw)
Beispiel #12
0
def getColour(IP, PORT):
    """Fetch one RGB frame from the NAO camera, run shape detection against
    the RED_MIN/RED_MAX colour bounds, and display the annotated frame in an
    OpenCV window (blocks until a key is pressed).

    :param IP: parent broker IP
    :param PORT: parent broker port
    """

    myBroker = ALBroker(
        "myBroker",
        "0.0.0.0",  # listen to anyone
        0,  # find a free port and use it
        IP,  # parent broker IP
        PORT)  # parent broker port

    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2  # VGA
    colorSpace = 11  # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace,
                                     5)

    t0 = time.time()

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)

    t1 = time.time()

    # Time the image transfer.
    #print "Runde: ", b

    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a PNG using ImageDraw
    # package.

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    #Create a PIL Image Instance from our pixel array.
    img0 = Image.frombytes("RGB", (imageWidth, imageHeight), array)

    #frame=np.asarray(convert2pil(img0)[:,:])

    #object_rect2=detectColor(img0, RED_MIN,RED_MAX)
    frame = detectShape(img0, RED_MIN, RED_MAX)

    #frame=selectDetected(object_rect1,frame)

    #frame=selectDetected(object_rect2,frame)
    # currentImage = path+ "/camImage1cm.jpg"
    # cv2.imwrite(currentImage, frame)
    cv2.imshow('contour', frame)
    cv2.waitKey()
    cv2.destroyAllWindows()
Beispiel #13
0
 def write(self):
     """Consumer loop: pull captured frames from self.SC.q, overlay the mouse
     cursor and images for recently pressed keys, and feed each composited
     frame to the video writer (self.out).

     Runs until self.expression goes false, then prints timing stats and
     finalises the recording via self.save().
     """
     self.ev.wait(1)
     queue = self.SC.q.get()
     start_time = perf_counter() * 100
     buffer = []  # recently pressed keys, each entry {"key": ..., "time": ...}
     while self.expression:  # expression
         startTime = perf_counter() * 100
         self.ev.wait(1)
         self.ev.clear()
         queue = self.SC.q.get()
         # The captured frame exposes .size and a raw BGRA buffer (.bgra);
         # convert it to an RGB PIL image for compositing.
         queue[0] = Image.frombytes("RGB", queue[0].size, queue[0].bgra,
                                    "raw", "BGRX")
         x_mouse, y_mouse = queue[1]
         ImgSize = 0
         keys = queue[2]
         while keys != []:
             # keyboard
             if keys[0][0] != 'B':
                 queue[0].paste(self.mouse, (x_mouse, y_mouse), self.mouse)
                 buffer.append({"key": keys[0], "time": perf_counter()})
             # mouse
             else:
                 img = Image.open("Images\\Mouse\\" + str(keys[0]) + ".png")
                 queue[0].paste(img, (x_mouse, y_mouse), img)
             keys.pop(0)
         # while/else: the loop has no break, so the cursor is always pasted
         # once the key list is drained.
         else:
             queue[0].paste(self.mouse, (x_mouse, y_mouse), self.mouse)
         # Draw key images pressed within the last second.
         # NOTE(review): buffer.remove(i) mutates the list while iterating,
         # which can skip entries -- confirm this is intended.
         for i in buffer:
             if perf_counter() - i["time"] > 1:
                 buffer.remove(i)
             else:
                 x, y = (25 + ImgSize), int(0.75 * self.SC.height)
                 try:
                     img = Image.open("Images\\Keyboard\\" +
                                      str(self.keyList[i["key"]]) + ".png")
                 except FileNotFoundError:
                     # First time this key is seen: render a labelled key
                     # image from the template and cache it on disk.
                     img = Image.open("Images\\Keyboard\\Template.png")
                     draw = ImageDraw.Draw(img)
                     font = ImageFont.truetype(
                         "Images\\Keyboard\\Times_New_Roman.ttf", 20)
                     draw.text((50 - len(str(i["key"])) * 4, 45),
                               str(i["key"]), (0, 0, 0),
                               font=font)
                     img.save('Images\\Keyboard\\' + (str(i["key"])) +
                              ".png")
                     self.keyList[i["key"]] = "Images\\Keyboard\\" + str(
                         i["key"]) + ".png"
                     img = Image.open(self.keyList[i["key"]])
                 ImgSize += 160
                 queue[0].paste(img, (x, y), img)
         # Hand the composited frame to the OpenCV writer (expects BGR).
         queue[0] = cv2.cvtColor(array(queue[0]), cv2.COLOR_RGB2BGR)
         self.out.write(queue[0])
     endTime = perf_counter() * 100
     print("Work Time : ", endTime - start_time, "sec * 10^-2")
     print(self.SC.bitrate)
     self.SC.stopFactory()
     self.save()
def extractImage(refImage, saveFile=None):
    """Build a grayscale PIL image from a NAO image container.

    refImage[0]/[1] hold width/height, refImage[6] the raw pixel bytes.
    When *saveFile* is given, the image is also written there as PNG.
    Returns the PIL image.
    """
    width, height = refImage[0], refImage[1]
    pixels = refImage[6]  # sixth field: the image data as raw bytes
    img = Image.frombytes("L", (width, height), pixels)
    if saveFile is not None:
        img.save(saveFile, "PNG")
    return img
Beispiel #15
0
 def savepng(self):
     """Read the 800x600 GL framebuffer, flip it upright and save it as
     screen/YYYYmmdd-HHMMSS.png (printing the target path first).

     Python 2 code (print statement).
     """
     buffer = ( GLubyte * (3*800*600) )(0)
     glReadPixels(0, 0, 800, 600, GL_RGB, GL_UNSIGNED_BYTE, buffer)
     image = Image.frombytes(mode="RGB", size=(800, 600), data=buffer)
     # GL rows are bottom-up; flip to top-down for the PNG.
     image = image.transpose(Image.FLIP_TOP_BOTTOM)
     now_time = datetime.datetime.now()
     savename = 'screen/'+now_time.strftime("%Y%m%d-%H%M%S")+'.png'
     print 'save image to '+savename
     image.save(savename, format = 'png')
    def convert_file(input_file, output_dir, output_extension):
        """Convert one HEIF file and write it into *output_dir* with the
        given extension (the encoder is inferred from the extension)."""
        base, _ = os.path.splitext(os.path.basename(input_file))
        target = os.path.join(output_dir, base + '.' + output_extension)

        # Decode the HEIF payload, wrap it in a PIL image, and re-encode.
        heif = pyheif.read_heif(input_file)
        pil_image = Image.frombytes(mode=heif.mode,
                                    size=heif.size,
                                    data=heif.data)
        pil_image.save(target)
Beispiel #17
0
def fig2img(fig):
    """
    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
    @param fig a matplotlib figure
    @return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data(fig)
    # NOTE(review): if fig2data returns a conventional (rows, cols, 4) array,
    # this unpacking swaps width and height for non-square figures -- verify
    # fig2data's shape convention before relying on this for w != h.
    w, h, d = buf.shape
    return Image.frombytes("RGBA", (w, h), buf.tostring())
Beispiel #18
0
    def on_facedetected(self, value):
        """
        Callback for event FaceDetected.

        Tracks face appearance/disappearance (eye LED colour + got_face flag)
        and, while face recording is enabled, saves one PNG snapshot per
        previously unseen faceID.  Python 2 code (print statements).
        """
        faceID = -1

        if value == []:  # empty value when the face disappears
            self.got_face = False
            self.white_eyes()
        elif not self.got_face:  # only speak the first time a face appears
            self.got_face = True
            self.green_eyes()
            #print "I saw a face!"
            #self.tts.say("Hello, you!")
            # First Field = TimeStamp.
            timeStamp = value[0]
            #print "TimeStamp is: " + str(timeStamp)

            # Second Field = array of face_Info's.
            # NOTE(review): the last entry is deliberately skipped
            # (range(len - 1)) -- presumably it is not a face entry; verify
            # against the ALFaceDetection event layout.
            faceInfoArray = value[1]
            for j in range(len(faceInfoArray) - 1):
                faceInfo = faceInfoArray[j]

                # First Field = Shape info.
                faceShapeInfo = faceInfo[0]

                # Second Field = Extra info (empty for now).
                faceExtraInfo = faceInfo[1]

                faceID = faceExtraInfo[0]

                #print "Face Infos :  alpha %.3f - beta %.3f" % (faceShapeInfo[1], faceShapeInfo[2])
                #print "Face Infos :  width %.3f - height %.3f" % (faceShapeInfo[3], faceShapeInfo[4])
                #print "Face Extra Infos :" + str(faceExtraInfo)

                print "Face ID: %d" % faceID

        # Snapshot only new faces, and only while recording is enabled.
        if self.camProxy != None and faceID >= 0 and faceID not in self.savedfaces and self.face_recording:
            # Get the image
            img = self.camProxy.getImageRemote(self.videoClient)

            # Get the image size and pixel array.
            imageWidth = img[0]
            imageHeight = img[1]
            array = img[6]

            # Create a PIL Image from our pixel array.
            im = Image.frombytes("RGB", (imageWidth, imageHeight), array)

            # Save the image.
            fname = "face_%03d.png" % faceID
            im.save(fname, "PNG")
            print "Image face %d saved." % faceID

            self.savedfaces.append(faceID)
def mainModule(IP, PORT):
    """Fetch one camera frame from the NAO robot, run the visualizer on it,
    then display and save the annotated result.

    :param IP: parent broker IP
    :param PORT: parent broker port
    """
    myBroker = ALBroker("myBroker",
                        "0.0.0.0",  # listen to anyone
                        0,          # find a free port and use it
                        IP,         # parent broker IP
                        PORT)       # parent broker port

    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    cameraIndex = 1
    resolution = 2   # VGA
    colorSpace = 11  # RGB
    #camProxy.setActiveCamera("python_client", cameraIndex)
    videoClient = camProxy.subscribeCamera("python_client", cameraIndex,
                                           resolution, colorSpace, 30)

    t0 = time.time()
    # Grab one frame; field [6] holds the raw pixel data as ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()

    camProxy.unsubscribe(videoClient)

    # Unpack width, height and the raw buffer, then wrap them in a PIL image.
    imageWidth, imageHeight = naoImage[0], naoImage[1]
    array = naoImage[6]
    img0 = Image.frombytes("RGB", (imageWidth, imageHeight), array)

    frame = visualize(img0, dict)

    showImage('contour', frame)
    cv2.imwrite(path + "/recognizedImage.jpg", frame)
Beispiel #20
0
def getImage(camProxy, videoClient):
    """Grab one remote frame from the NAO camera and return it as a PIL image."""
    naoImage = camProxy.getImageRemote(videoClient)
    # Container fields: [0] = width, [1] = height, [6] = raw RGB bytes.
    width, height = naoImage[0], naoImage[1]
    raw = naoImage[6]
    return Image.frombytes("RGB", (width, height), raw)
    def nextFrameSlot(self):
        """Timer slot: read one camera frame, show it in the Qt video widget
        and, while capturing, buffer widget grabs for later saving on a
        worker thread."""
        # select.select((self.video,), (), ())
        # self.port.flush()
        # ret, frame = self.cap.read()
        try:
            image_data = self.video.read_and_queue()
            # print image_data
            image = Image.frombytes("RGB", (self.size_x, self.size_y),
                                    image_data)
            # NOTE(review): this grayscale frame is immediately overwritten
            # by the mirrored colour frame below -- dead work?
            frame = cv2.cvtColor(np.array(image),
                                 cv2.COLOR_RGB2GRAY)  #cv2.COLOR_RGB2GRAY

            frame = cv2.flip(np.array(image), 1)
            self.tEnd = time.time()

            # My webcam yields frames in BGR format
            # if ret == True:
            # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            img = QtGui.QImage(frame, frame.shape[1], frame.shape[0],
                               QtGui.QImage.Format_RGB888)  #RGB888  Indexed8
            pix = QtGui.QPixmap.fromImage(img)
            self.video_frame.setPixmap(pix)
            # self.video_frame.setText("sss")
            # Save images if isCapturing
            if self.isCapturing:

                # Throttle grabs to roughly one every 30 ms.
                if self.tEnd - self.tStart > 0.03:
                    p = QtGui.QPixmap.grabWidget(self)
                    self.picList.append(p)

                    # p.save('./dataProcess/Data/'+str(self.ith_frame), 'jpg')
                    # self.ith_frame = self.ith_frame + 1
                    self.tStart = time.time()

            elif self.isCapturing == False and len(self.picList) != 0:
                # Capture just stopped: flush the buffered grabs to disk on a
                # worker thread, then advance the label for the next batch.
                A = self.picList
                t = threading.Thread(
                    target=saveBufferedImg,
                    args=(A, self.Alphabet
                          ))  #[data_chunk] make  data_chunk as a arguement
                t.start()
                self.picList = []
                self.Alphabet = self.Alphabet + 'X'

        except:
            # NOTE(review): bare except silently drops any frame/read error.
            pass

        self.costTime.setText(
            str(self.timeStamp[0]) + "    " + str(self.timeStamp[-1]) +
            "    " + str(self.timeStamp[-1] - self.timeStamp[0]))
def showNaoImage(IP, PORT):
    """Continuously grab frames from the NAO camera, run Canny edge detection
    plus Hough line detection, and display both the edge map and the
    annotated frame in OpenCV windows.  Runs forever (while 1)."""
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2  # VGA
    colorSpace = 11  # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace,
                                     5)
    while 1:
        t0 = time.time()

        # Get a camera image.
        # image[6] contains the image data passed as an array of ASCII chars.
        naoImage = camProxy.getImageRemote(videoClient)

        t1 = time.time()

        # Time the image transfer.
        # print "acquisition delay ", t1 - t0
        imageWidth = naoImage[0]
        imageHeight = naoImage[1]
        array = naoImage[6]

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), array)
        # Creates an opencv ready image
        open_cv_image = np.array(im)
        # Swap channel order (PIL delivers RGB; OpenCV works in BGR --
        # BGR2RGB and RGB2BGR are the same swap).
        open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
        # create a copy in grayscale and apply a blur just for testing
        # img = cv2.cvtColor(open_cv_image, cv2.COLOR_RGB2GRAY)
        img = cv2.Canny(open_cv_image, 100, 200)
        lines = cv2.HoughLines(img, 1, np.pi / 90, 200)
        if lines is not None:
            # Each line is (rho, theta); draw a long segment through it.
            for line in lines:
                rho, theta = line[0]
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * rho
                y0 = b * rho
                x1 = int(x0 + 1000 * (-b))
                y1 = int(y0 + 1000 * (a))
                x2 = int(x0 - 1000 * (-b))
                y2 = int(y0 - 1000 * (a))
                cv2.line(open_cv_image, (x1, y1), (x2, y2), (0, 0, 255), 2)

        # create two windows to display the images
        cv2.imshow('gray_frame', img)
        cv2.imshow('original_frame', open_cv_image)
        cv2.waitKey(1)
def Main(image, AESmode, modeTitle, ENCmode):
    """Encrypt or decrypt an image's pixel data with AES and save the result
    under a name derived from the original name and *ENCmode*.

    Python 2 code (raw_input / print statements).  Prompts for a password;
    on encode an empty password triggers random generation.
    """
    img = Image.open(image)
    imgSize, imgMode = img.size, img.mode
    # NOTE(review): assumes exactly one '.' in the file name; a name such as
    # "a.b.png" makes this unpacking raise ValueError.
    imgName, imgExt = image.split('.')

    if ENCmode == "Encode":
        psswd = raw_input(
            "Please enter the password. A random password will be provided if left blank - "
        )
    else:
        psswd = ''
        while psswd == '':
            psswd = raw_input("Please enter the password - ")

    if psswd == '':
        # Encode with no password given: generate a random 16-24 char one.
        psswd = ''.join(random.SystemRandom().choice(string.ascii_uppercase +
                                                     string.digits +
                                                     string.ascii_lowercase +
                                                     string.punctuation)
                        for _ in range(randint(16, 24)))
        print "Your randomly generated password is " + psswd
        key = hashlib.sha256(psswd).digest()
    else:
        key = hashlib.sha256(psswd).digest()

    iv = Random.new().read(AES.block_size)

    cipherScheme = schemeBuild(key, AESmode, modeTitle, iv)

    # Both branches write a new image file; they differ only in direction.
    if ENCmode == "Encode":
        data = encrypt(img, cipherScheme)
        new = newImg(imgName, ENCmode, imgExt)
        Image.frombytes(imgMode, imgSize, data).save(new)
    else:
        data = decrypt(img, cipherScheme)
        new = newImg(imgName, ENCmode, imgExt)
        Image.frombytes(imgMode, imgSize, data).save(new)
Beispiel #24
0
def convert_flow(input, output):
    """Convert EXR optical-flow files under *input* into flow-visualisation
    PNGs under *output*/flow.

    The R and G channels are read as float32 x/y displacement fields and
    rendered as an HSV image (hue = direction, value = magnitude).  Each
    result is also shown in a window (blocks on a key press per file).
    """
    count = 0
    for root, folder, files in os.walk(input):
        for file in sorted(files):
            suffix = os.path.basename(file).split('.')[-1]
            filename = os.path.basename(file).split('.')[0]
            if suffix != 'exr':
                continue
            image = OpenEXR.InputFile(os.path.join(input, file))

            pt = Imath.PixelType(Imath.PixelType.FLOAT)
            dw = image.header()['dataWindow']
            size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
            fx_str = image.channel('R', pt)
            fx = Image.frombytes("F", size, fx_str)
            fx = np.array(fx)  #/ size[0] * 2
            fx = np.reshape(fx, (fx.shape[0], fx.shape[1], 1))
            fy_str = image.channel('G', pt)
            fy = Image.frombytes("F", size, fy_str)
            fy = np.array(fy)  #/ size[1] * 2
            fy = np.reshape(fy, (fy.shape[0], fy.shape[1], 1))

            flow = np.concatenate((fx, fy), axis=2)

            # BUG FIX: the HSV canvas used fy.shape[0] for both dimensions,
            # so any non-square frame was mis-sized (or crashed on the
            # channel assignments below); use (height, width, 3).
            hsv = np.zeros((fy.shape[0], fy.shape[1], 3), dtype=np.uint8)
            hsv[..., 1] = 255

            mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            # Paint zero-flow (black) pixels white for readability.
            rgb[np.where((rgb == (0, 0, 0)).all(axis=2))] = 255
            # fx = cv2.convertScaleAbs(fx*255)
            cv2.imshow("s", rgb)
            cv2.waitKey()
            cv2.imwrite(os.path.join(output, 'flow', filename + ".png"), rgb)
Beispiel #25
0
def addImage(snbFile, canvas, image, rels, element):
    """Render one embedded image from an S Note archive onto the PDF canvas."""
    imgFileName = "snote/" + rels[image.getAttribute("r:id")]
    imgStr = zipRead(snbFile, imgFileName)
    if not imgFileName.endswith(".zdib"):
        # Regular raster image: place it according to the shape's style.
        style = imagePoss(
            element.getElementsByTagName("v:shape")[0].getAttribute("style"))
        img = Image.open(BytesIO(imgStr))
        canvas.drawInlineImage(img, style.left, style.bottom, style.width,
                               style.height)
    else:
        # Compressed DIB: inflate, read the little-endian 16-bit width and
        # height header fields, then draw full-page (A4 in points).
        imgStr = decompress(imgStr)
        width = ord(imgStr[5]) * 256 + ord(imgStr[4])
        height = ord(imgStr[9]) * 256 + ord(imgStr[8])
        img = Image.frombytes("RGBA", (width, height), imgStr[52:])
        canvas.drawInlineImage(alpha_to_color(img), 0, 0, 595.27, 841.89)
Beispiel #26
0
def addImage(snbFile, canvas, image, rels, element):
    """Render one embedded image from an S Note archive onto the PDF canvas.

    .zdib entries are decompressed raw RGBA bitmaps drawn full-page (A4 in
    points); everything else is opened with PIL and placed per its style.
    """
    imgFileName = "snote/" + rels[image.getAttribute("r:id")]
    imgStr = zipRead(snbFile, imgFileName)
    if imgFileName.endswith(".zdib"):
        imgStr = decompress(imgStr)
        # Width/height are little-endian 16-bit fields in the DIB header.
        width = ord(imgStr[5]) * 256 + ord(imgStr[4])
        height = ord(imgStr[9]) * 256 + ord(imgStr[8])
        try:
            img = Image.frombytes("RGBA", (width, height), imgStr[52:])
        except AttributeError:
            # BUG FIX: this was a bare 'except:' that masked real decoding
            # errors (wrong buffer size, bad mode).  Only fall back to the
            # legacy pre-Pillow API when frombytes() itself is missing.
            img = Image.fromstring("RGBA", (width, height), imgStr[52:])
        canvas.drawInlineImage(alpha_to_color(img), 0, 0, 595.27, 841.89)
    else:
        style = imagePoss(element.getElementsByTagName("v:shape")[0].getAttribute("style"))
        img = Image.open(BytesIO(imgStr))
        canvas.drawInlineImage(img, style.left, style.bottom, style.width, style.height)
    def getImage(self, cameraID, pause=0.0001):
        """Select *cameraID*, grab one frame, save it as camImage.png, then
        sleep for *pause* seconds."""
        self.camProxy.setParam(vision_definitions.kCameraSelectID, cameraID)
        videoClient = self.camProxy.subscribe("python_client", self.resolution,
                                              self.colorSpace, 5)
        naoImage = self.camProxy.getImageRemote(videoClient)
        self.camProxy.unsubscribe(videoClient)
        # Container fields: [0] = width, [1] = height, [6] = raw RGB bytes.
        frame = Image.frombytes("RGB", (naoImage[0], naoImage[1]), naoImage[6])
        frame.save("camImage.png", "PNG")
        time.sleep(pause)
    def _get_image_from_pdf(self,
                            page_nr=1,
                            max_width=None,
                            max_height=None,
                            angle=0,
                            output_format='JPEG',
                            restricted=False):
        """Render a pdf page as image.

        Returns a ('image/jpeg', <bytes>) tuple.  Raises PermissionDenied
        when *restricted* rules forbid the request (page > 1, or rendered
        size beyond the configured maxima) and InvalidArgument for an
        out-of-range page number.
        """
        if restricted and (page_nr > 1):
            raise ApplicationError.PermissionDenied(
                "Your are not allowed to see this document.")

        if self._doc.getNumPages() < page_nr:
            raise ApplicationError.InvalidArgument(
                "Bad page number: it should be < %s." %
                self._doc.getNumPages())
        import time
        self.logger.debug("Render image from pdf with opts width=%s, "\
            "height=%s, angle=%s, page_nr=%s." % (max_width, max_height, angle,
            page_nr))
        start = time.clock()
        # RGB splash renderer with a white paper colour.
        splash = poppler.SplashOutputDev(poppler.splashModeRGB8, 3, False,
                                         (255, 255, 255), True, True)
        splash.startDoc(self._doc.getXRef())

        # Scale so the rendered page fits within max_width/max_height.
        scale = self._get_optimal_scale(max_width, max_height, page_nr)
        self._doc.displayPage(splash, page_nr, 72 * scale, 72 * scale, -angle,
                              True, True, False)

        bitmap = splash.getBitmap()
        new_width = bitmap.getWidth()
        new_height = bitmap.getHeight()
        if restricted and ((MVOConfig.Security.pdf_max_width < new_width)\
                or (MVOConfig.Security.pdf_max_height < new_height)):
            raise ApplicationError.PermissionDenied(
                "Your are not allowed to see this document.")

        # Wrap the splash bitmap in a PIL image and JPEG-encode it in memory.
        pil = Image.frombytes('RGB', (new_width, new_height),
                              bitmap.getDataPtr())
        temp_file = cStringIO.StringIO()
        pil.save(temp_file, "JPEG", quality=90)
        temp_file.seek(0)
        content = temp_file.read()
        self.logger.debug("Total Process Time: %s", (time.clock() - start))
        #header = [('content-type', 'image/jpeg'), ('content-length',
        #str(len(content)))]
        return ('image/jpeg', content)
def mainModule(IP, PORT):
    """Fetch one camera frame from a Nao robot and display it with PIL.

    :param IP: parent broker IP address
    :param PORT: parent broker port
    """
    # The broker must exist for the proxies below to connect; it listens on
    # any interface and picks a free local port.
    broker = ALBroker("myBroker", "0.0.0.0", 0, IP, PORT)

    cam = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2   # VGA
    colorSpace = 11  # RGB

    client = cam.subscribe("python_client", resolution, colorSpace, 5)

    grab_start = time.time()
    # getImageRemote returns a container; index 6 holds the raw pixel bytes.
    naoImage = cam.getImageRemote(client)
    grab_end = time.time()

    cam.unsubscribe(client)

    width, height, pixels = naoImage[0], naoImage[1], naoImage[6]

    # Build a PIL image from the raw RGB buffer, run it through the
    # visualization pipeline, and show the result.
    # NOTE(review): `dict` is the builtin unless a module-level variable
    # shadows it -- confirm against the rest of the module.
    img0 = Image.frombytes("RGB", (width, height), pixels)
    frame = visualize(img0, dict)
    showImage('contour', frame)
Beispiel #30
0
def image_to_pil(image):
    #
    #import Image
    """ Method will convert wx.Image to PIL Image """
    #pil = Image.new('RGB', (image.GetWidth(), image.GetHeight()))
    #pil.fromstring(image.GetData())

    data = image.GetData()

    import sys
    if isinstance(image.GetData(), bytearray):
        if sys.version_info > (3, 0):
            data = bytes(data)
        else:
            data = str(data)
    pil = Image.frombytes('RGB', (image.GetWidth(), image.GetHeight()), data)
    print pil
    return pil
Beispiel #31
0
def dotest(outputname, nostamp):
    """Render two GIFs (one with a transparent palette index) into a PDF."""
    plane = genbar()
    # Palette: index 0 black, index 1 white, remaining 254 entries grey.
    palette = (0,0,0, 255,255,255) + (128,128,128)*254
    try:
        img = Image.frombytes("P", plane.size, plane.tobytes())
    except AttributeError:
        # Older PIL spelling, see
        # https://github.com/python-pillow/Pillow/issues/63
        img = Image.fromstring("P", plane.size, plane.tostring())
    img.putpalette(palette)

    # Two scratch GIF files: a plain one and one whose palette index 1 is
    # flagged transparent.
    scratch = []
    for _ in range(2):
        with tempfile.NamedTemporaryFile(delete = False, suffix = ".gif") as tmp:
            scratch.append(tmp.name)
    gif1, gif2 = scratch

    img.save(gif1, "GIF", optimize = 0)
    img.save(gif2, "GIF", transparency = 1, optimize = 0)

    pdf = FPDF()
    if nostamp:
        # Deterministic document metadata so output is reproducible in tests.
        pdf._putinfo = lambda: common.test_putinfo(pdf)
    pdf.add_page()
    pdf.set_font('Arial', '', 16)

    # Three progressively indented text lines, then the plain GIF ...
    for line in ("Transparency", "    Transparency", "        Transparency"):
        pdf.write(8, line)
        pdf.ln()
    pdf.image(gif1, x = 15, y = 15)

    # ... and the same text again over the transparent GIF.
    for line in ("Transparency", "    Transparency", "        Transparency"):
        pdf.write(8, line)
        pdf.ln()
    pdf.image(gif2, x = 15, y = 39)

    pdf.output(outputname, 'F')

    os.unlink(gif1)
    os.unlink(gif2)
Beispiel #32
0
def dotest(outputname, nostamp):
    """Render two GIFs (one with a transparent palette index) into a PDF.

    :param outputname: path of the PDF file to write
    :param nostamp: if true, use deterministic PDF metadata (for test diffs)
    """
    plane = genbar()
    # Palette: index 0 black, index 1 white, remaining 254 entries grey.
    palette = (0, 0, 0, 255, 255, 255) + (128, 128, 128) * 254
    try:
        img = Image.frombytes("P", plane.size, plane.tobytes())
    except AttributeError:
        # note: https://github.com/python-pillow/Pillow/issues/63
        img = Image.fromstring("P", plane.size, plane.tostring())
    img.putpalette(palette)

    # Two scratch GIFs: gif1 plain, gif2 with palette index 1 transparent.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".gif") as f:
        gif1 = f.name
    with tempfile.NamedTemporaryFile(delete=False, suffix=".gif") as f:
        gif2 = f.name

    img.save(gif1, "GIF", optimize=0)
    img.save(gif2, "GIF", transparency=1, optimize=0)

    pdf = FPDF()
    if nostamp:
        # Deterministic document metadata so output is reproducible.
        pdf._putinfo = lambda: common.test_putinfo(pdf)
    pdf.add_page()
    pdf.set_font('Arial', '', 16)
    pdf.write(8, "Transparency")
    pdf.ln()
    pdf.write(8, "    Transparency")
    pdf.ln()
    pdf.write(8, "        Transparency")
    pdf.ln()
    pdf.image(gif1, x=15, y=15)

    pdf.write(8, "Transparency")
    pdf.ln()
    pdf.write(8, "    Transparency")
    pdf.ln()
    pdf.write(8, "        Transparency")
    pdf.ln()
    pdf.image(gif2, x=15, y=39)

    pdf.output(outputname, 'F')

    # Clean up the scratch files.
    os.unlink(gif1)
    os.unlink(gif2)
Beispiel #33
0
    def _get_image_from_pdf(self, page_nr=1, max_width=None, max_height=None,
        angle=0, output_format='JPEG', restricted=False):
        """Render a pdf page as a JPEG image.

        :param page_nr: 1-based page number to render
        :param max_width: maximal width used to compute the render scale
        :param max_height: maximal height used to compute the render scale
        :param angle: rotation angle in degrees
        :param output_format: kept for interface compatibility (output is JPEG)
        :param restricted: if True, only page 1 within the configured size
            limits may be rendered
        :returns: tuple ``(mime_type, image_bytes)``
        :raises ApplicationError.PermissionDenied: restricted access violated
        :raises ApplicationError.InvalidArgument: page number out of range
        """
        if restricted and (page_nr > 1):
            # Typo fix in user-facing message: "Your are" -> "You are".
            raise ApplicationError.PermissionDenied(
                "You are not allowed to see this document.")

        if self._doc.getNumPages() < page_nr:
            # Message fix: the condition accepts page_nr == getNumPages(),
            # so the bound is "<=", not "<".
            raise ApplicationError.InvalidArgument(
                "Bad page number: it should be <= %s." %
                self._doc.getNumPages())
        import time
        self.logger.debug("Render image from pdf with opts width=%s, "\
            "height=%s, angle=%s, page_nr=%s." % (max_width, max_height, angle,
            page_nr))
        start = time.clock()
        # RGB splash device with a white paper background.
        splash = poppler.SplashOutputDev(poppler.splashModeRGB8, 3, False,
            (255, 255, 255), True, True)
        splash.startDoc(self._doc.getXRef())

        scale = self._get_optimal_scale(max_width, max_height, page_nr)
        # 72 dpi is the pdf reference resolution; note the sign flip on
        # angle for poppler's rotation convention.
        self._doc.displayPage(splash, page_nr, 72*scale, 72*scale, -angle, True,
            True, False)

        bitmap = splash.getBitmap()
        new_width = bitmap.getWidth()
        new_height = bitmap.getHeight()
        if restricted and ((MVOConfig.Security.pdf_max_width < new_width)\
                or (MVOConfig.Security.pdf_max_height < new_height)):
            raise ApplicationError.PermissionDenied(
                "You are not allowed to see this document.")

        # Wrap the raw RGB bitmap in a PIL image and encode it as JPEG
        # into an in-memory buffer.
        pil = Image.frombytes('RGB', (new_width, new_height),
            bitmap.getDataPtr())
        temp_file = cStringIO.StringIO()
        pil.save(temp_file, "JPEG", quality=90)
        temp_file.seek(0)
        content = temp_file.read()
        self.logger.debug("Total Process Time: %s", (time.clock() - start))
        return('image/jpeg', content)
Beispiel #34
0
def image_to_pil(image):
    #
    #import Image
    """ Method will convert wx.Image to PIL Image """
    #pil = Image.new('RGB', (image.GetWidth(), image.GetHeight()))
    #pil.fromstring(image.GetData())


    data = image.GetData()

    import sys
    if isinstance(image.GetData(), bytearray):
        if sys.version_info > (3, 0):
            data = bytes(data)
        else:
            data = str(data)
    pil = Image.frombytes('RGB', (image.GetWidth(), image.GetHeight()),
                          data)
    print pil
    return pil
Beispiel #35
0
    def saveImage(self, filename):
        """Grab one frame from the robot camera and save it as PNG.

        :param filename: destination path for the PNG file
        """
        t0 = time.time()
        # Get a camera image.
        # image[6] contains the image data passed as an array of ASCII chars.
        img = self.camProxy.getImageRemote(self.videoClient)

        t1 = time.time()
        # Time the image transfer.
        print "acquisition delay ", t1 - t0

        # Get the image size and pixel array.
        imageWidth = img[0]
        imageHeight = img[1]
        imageArray = img[6]

        # Create a PIL Image from our pixel array.
        imx = Image.frombytes("RGB", (imageWidth, imageHeight), imageArray)

        # Save the image.
        imx.save(filename, "PNG")
Beispiel #36
0
def get_NAO_image(session):
    """Grab one VGA RGB frame from the NAO camera and save it as camImage.png.

    :param session: a connected qi session used to reach ALVideoDevice
    """
    # Get the service ALVideoDevice.

    video_service = session.service("ALVideoDevice")
    resolution = 2  # VGA
    colorSpace = 11  # RGB

    videoClient = video_service.subscribe("python_client", resolution,
                                          colorSpace, 5)

    t0 = time.time()

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = video_service.getImageRemote(videoClient)

    t1 = time.time()

    # Time the image transfer.
    print "acquisition delay ", t1 - t0

    video_service.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a PNG  using ImageDraw
    # package.

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]
    # Normalize the pixel payload to a str (PIL's expected type on Py2).
    image_string = str(bytearray(array))

    # Create a PIL Image from our pixel array.
    im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)

    # Save the image.
    im.save("camImage.png", "PNG")
Beispiel #37
0
    def export_define_bits(self, tag):
        """Decode the bitmap carried by a DefineBits* SWF tag and export it.

        JPEG3 tags carry a separate alpha plane that is merged back into an
        RGBA image; JPEG2 tags are self-contained JPEG streams; plain
        DefineBits tags may need the movie-wide JPEG tables prepended
        before they can be decoded.
        """
        image = None
        if isinstance(tag, TagDefineBitsJPEG3):
            tag.bitmapData.seek(0)
            # Measure the alpha stream by seeking to its end (one byte per
            # pixel when present).
            tag.bitmapAlphaData.seek(0, 2)
            num_alpha = tag.bitmapAlphaData.tell()
            tag.bitmapAlphaData.seek(0)
            image = Image.open(tag.bitmapData)
            if num_alpha > 0:
                image_width = image.size[0]
                image_height = image.size[1]
                image_data = image.getdata()
                image_data_len = len(image_data)
                # Only merge when the alpha plane covers every pixel.
                if num_alpha == image_data_len:
                    # Bug fix: collect packed pixels and join once --
                    # "" += struct.pack(...) breaks on Python 3 (str+bytes)
                    # and repeated concatenation is quadratic.
                    chunks = []
                    for i in range(0, num_alpha):
                        alpha = ord(tag.bitmapAlphaData.read(1))
                        rgb = list(image_data[i])
                        chunks.append(
                            struct.pack("BBBB", rgb[0], rgb[1], rgb[2],
                                        alpha))
                    image = Image.frombytes("RGBA",
                                            (image_width, image_height),
                                            b"".join(chunks))
        elif isinstance(tag, TagDefineBitsJPEG2):
            tag.bitmapData.seek(0)
            image = Image.open(tag.bitmapData)
        else:
            tag.bitmapData.seek(0)
            if self.jpegTables is not None:
                # DefineBits data lacks the JPEG tables; prepend the shared
                # JPEGTables content to obtain a decodable stream.
                buff = BytesIO()
                self.jpegTables.seek(0)
                buff.write(self.jpegTables.read())
                buff.write(tag.bitmapData.read())
                buff.seek(0)
                image = Image.open(buff)
            else:
                image = Image.open(tag.bitmapData)

        self.export_image(tag, image)
def showNaoImage(IP, PORT):
    """Grab one frame from Nao, save it as camImage.png, and display it.

    :param IP: robot / parent broker IP address
    :param PORT: robot / parent broker port
    """
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2  # VGA
    colorSpace = 11  # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace,
                                     5)

    t0 = time.time()

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)

    t1 = time.time()

    # Time the image transfer.
    print "acquisition delay ", t1 - t0

    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a PNG  using ImageDraw
    # package.

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.frombytes("RGB", (imageWidth, imageHeight), array)

    # Save the image.
    im.save("camImage.png", "PNG")

    # Display it with the platform image viewer.
    im.show()
Beispiel #39
0
    def export_define_bits(self, tag):
        """Decode the bitmap carried by a DefineBits* SWF tag and export it.

        JPEG3 tags carry a separate alpha plane that is merged back into an
        RGBA image; JPEG2 tags are self-contained JPEG streams; plain
        DefineBits tags may need the movie-wide JPEG tables prepended
        before they can be decoded.
        """
        image = None
        if isinstance(tag, TagDefineBitsJPEG3):
            tag.bitmapData.seek(0)
            # Measure the alpha stream by seeking to its end (one byte per
            # pixel when present).
            tag.bitmapAlphaData.seek(0, 2)
            num_alpha = tag.bitmapAlphaData.tell()
            tag.bitmapAlphaData.seek(0)
            image = Image.open(tag.bitmapData)
            if num_alpha > 0:
                image_width = image.size[0]
                image_height = image.size[1]
                image_data = image.getdata()
                image_data_len = len(image_data)
                # Only merge when the alpha plane covers every pixel.
                if num_alpha == image_data_len:
                    # Bug fix: collect packed pixels and join once --
                    # "" += struct.pack(...) breaks on Python 3 (str+bytes)
                    # and repeated concatenation is quadratic.
                    chunks = []
                    for i in range(0, num_alpha):
                        alpha = ord(tag.bitmapAlphaData.read(1))
                        rgb = list(image_data[i])
                        chunks.append(
                            struct.pack("BBBB", rgb[0], rgb[1], rgb[2], alpha))
                    image = Image.frombytes("RGBA", (image_width, image_height),
                                            b"".join(chunks))
        elif isinstance(tag, TagDefineBitsJPEG2):
            tag.bitmapData.seek(0)
            image = Image.open(tag.bitmapData)
        else:
            tag.bitmapData.seek(0)
            if self.jpegTables is not None:
                # DefineBits data lacks the JPEG tables; prepend the shared
                # JPEGTables content to obtain a decodable stream.
                buff = BytesIO()
                self.jpegTables.seek(0)
                buff.write(self.jpegTables.read())
                buff.write(tag.bitmapData.read())
                buff.seek(0)
                image = Image.open(buff)
            else:
                image = Image.open(tag.bitmapData)

        self.export_image(tag, image)
    def takeImg(self, req):
        """Grab one frame from the robot camera and write it to
        ``req.picture_full_path`` as a PNG.

        Failures while building or saving the image are printed rather
        than raised, so the caller is never interrupted.
        """
        frame = self._video_service.getImageRemote(self._videoClient)
        width, height, raw = frame[0], frame[1], frame[6]
        # Normalize the pixel payload to a str, the type PIL expects here.
        pixels = str(bytearray(raw))

        try:
            # Build the PIL image from the raw RGB buffer and save it.
            picture = Image.frombytes("RGB", (width, height), pixels)
            picture.save(req.picture_full_path, "PNG")
            #picture.show() can be re-enabled for debugging
        except Exception as err:
            # Best-effort: report the problem and carry on.
            print(err)

        return
Beispiel #41
0
def cameraMonitorThread(pip, pport, rate):
    """Record top-camera frames to PNG files at roughly `rate` Hz.

    Runs until the module-global `camera_enabled` flag is cleared.

    :param pip: robot IP address
    :param pport: robot port
    :param rate: target capture rate in Hz
    """
    global camera_enabled
    global current_log_dir
    # Frames go into a "kTopCamera" subfolder of the current log directory.
    camera_log_dir = os.path.join(current_log_dir, "kTopCamera")
    if not os.path.exists(camera_log_dir):
        os.makedirs(camera_log_dir)

    print 'Starting recording camera @%.2fHz' % rate
    camProxy = ALProxy("ALVideoDevice", pip, pport)
    camera = 0
    resolution = 2  # VGA
    colorSpace = 11  # RGB
    fps = rate
    # Timestamped subscriber name avoids clashes with previous runs.
    cameraname = "NAOqibag" + str(time.time())
    videoClient = camProxy.subscribeCamera(cameraname, camera, resolution,
                                           colorSpace, int(fps))
    #camProxy.setFrameRate(videoClient, int(fps))
    print "Current camera rate: ", camProxy.getFrameRate(videoClient)
    while (camera_enabled):
        pepperImage = camProxy.getImageRemote(videoClient)
        if (pepperImage != None):
            imageWidth = pepperImage[0]
            imageHeight = pepperImage[1]
            array = pepperImage[6]

            # Create a PIL Image from our pixel array.
            im = Image.frombytes("RGB", (imageWidth, imageHeight), array)

            # Save the image.
            image_name = os.path.join(
                camera_log_dir, 'spqrel_kTopCamera_%f_rgb.png' % time.time())
            im.save(image_name, "PNG")

        # Pace the loop to the requested rate (save time not accounted for).
        time.sleep(1.0 / rate)

    camProxy.unsubscribe(videoClient)
    print "Exiting Thread Log Camera "
def cv2_pil(cv_im):
    """Convert an OpenCV (cv) image to a greyscale ('L') PIL image."""
    size = cv.GetSize(cv_im)
    raw = cv_im.tostring()
    return Image.frombytes("L", size, raw)
Beispiel #43
0
def save_data(params):
	"""Capture one Leap Motion frame, store hand/finger data in Mongo,
	and save a thresholded camera image named after the record's oid.

	:param params: carries `name`, `gesture`, `display` (with controller,
		db/collection names) and a `_stop` event used to abort waiting.
	:returns: 'no_device', 'exit', the Mongo status string on failure,
		or 'success <oid> <db> <collection>' on success.
	"""
	name = params.name
	gesture = params.gesture
	display = params.display
	controller = display.controller

	devices = controller.devices
	if len(devices) == 0:
		return 'no_device'

	frame = controller.frame()

	# Busy-wait for a valid frame; bail out if the caller asked us to stop.
	while not frame.is_valid:
		if(params._stop.is_set()):
			return 'exit'

		frame = controller.frame()

	hands = frame.hands
	# Enable image capture so frame.images is populated below.
	controller.set_policy(Leap.Controller.POLICY_IMAGES)

	# Busy-wait until at least one hand is visible.
	while len(hands) == 0:
		if(params._stop.is_set()):
			return 'exit'

		frame = controller.frame()
		hands = frame.hands

	# time.sleep(1)

	# while True:
	# 	if(params._stop.is_set()):
	# 		return 'exit'

	# 	frame = controller.frame()
	# 	hands = frame.hands

	# 	if len(hands) > 0:
	# 		if(params._stop.is_set()):
	# 			return 'exit'

	# 		confidence_now = hands[0].confidence

	# 		display.update_confidence_label(confidence_now)

	# 		if confidence_now >= display.confidence:
	# 			break

	image = frame.images[0]

	# Record to be stored in Mongo; positions below are made palm-relative
	# by subtracting hand_palm_position before serializing.
	d = {}
	d['utc'] = str(datetime.datetime.utcnow())
	d['name'] = name
	d['gesture'] = gesture

	# print 'Confidence: ' + str(confidence_now)

	for hand in hands:
		if hand.is_valid:
			print 'Valid hand'
			if hand.is_right:
				which_hand = 'right_hand'
			else:
				which_hand = 'left_hand'

			hand_palm_position = hand.palm_position

			d[which_hand] = {}


			d[which_hand]['confidence'] = hand.confidence
			d[which_hand]['direction'] = (hand.direction-hand_palm_position).to_tuple()
			d[which_hand]['grab_strength'] = hand.grab_strength
			d[which_hand]['palm_normal'] = (hand.palm_normal-hand_palm_position).to_tuple()
			d[which_hand]['palm_position'] = (hand.palm_position-hand_palm_position).to_tuple()
			d[which_hand]['palm_velocity'] = hand.palm_velocity.to_tuple()
			d[which_hand]['palm_width'] = hand.palm_width
			d[which_hand]['sphere_center'] = (hand.sphere_center-hand_palm_position).to_tuple()
			d[which_hand]['sphere_radius'] = hand.sphere_radius
			d[which_hand]['stabilized_palm_position'] = (hand.stabilized_palm_position-hand_palm_position).to_tuple()

			arm = hand.arm
			d[which_hand]['arm'] = {}

			d[which_hand]['arm']['direction'] = arm.direction.to_tuple()
			d[which_hand]['arm']['elbow_position'] = (arm.elbow_position-hand_palm_position).to_tuple()
			d[which_hand]['arm']['wrist_position'] = (arm.wrist_position-hand_palm_position).to_tuple()

			fingers = hand.fingers

			for finger in fingers:
				if finger.type == Finger.TYPE_THUMB:
					which_finger = 'thumb'
				elif finger.type == Finger.TYPE_INDEX:
					which_finger = 'index'
				elif finger.type == Finger.TYPE_MIDDLE:
					which_finger = 'middle'
				elif finger.type == Finger.TYPE_RING:
					which_finger = 'ring'
				elif finger.type == Finger.TYPE_PINKY:
					which_finger = 'pinky'
				else:
					# NOTE(review): `break` skips all remaining fingers
					# on an unknown type; `continue` may be intended.
					break

				d[which_hand][which_finger] = {}

				d[which_hand][which_finger]['direction'] = finger.direction.to_tuple()
				d[which_hand][which_finger]['length'] = finger.length
				d[which_hand][which_finger]['stabilized_tip_position'] = (finger.stabilized_tip_position-hand_palm_position).to_tuple()
				d[which_hand][which_finger]['tip_position'] = (finger.tip_position-hand_palm_position).to_tuple()
				d[which_hand][which_finger]['tip_velocity'] = finger.tip_velocity.to_tuple()
				d[which_hand][which_finger]['width'] = finger.width

				# Four bones per finger (metacarpal..distal), palm-relative.
				for i in range(4):
					bone = 'bone_' + str(i)

					d[which_hand][which_finger][bone] = {}

					d[which_hand][which_finger][bone]['center'] = (finger.bone(i).center-hand_palm_position).to_tuple()
					d[which_hand][which_finger][bone]['direction'] = finger.bone(i).direction.to_tuple()
					d[which_hand][which_finger][bone]['length'] = finger.bone(i).length
					d[which_hand][which_finger][bone]['width'] = finger.bone(i).width
					d[which_hand][which_finger][bone]['next_joint'] = (finger.bone(i).next_joint-hand_palm_position).to_tuple()
					d[which_hand][which_finger][bone]['prev_joint'] = (finger.bone(i).prev_joint-hand_palm_position).to_tuple()

		else:
			print 'Not a valid hand'

	ret = mongo.save(d, display.db_name, display.collection_name)

	if(ret.startswith('success')):
		# mongo.save returns "success <oid>" on success.
		[ret, oid] = ret.split(' ')

		if image.is_valid:
			print 'valid image'

			directory = os.path.join(os.getcwd(), 'images/')
			extension = '.png'
			tmp_file = 'tmp' + extension

			data = image.data

			# Copy the raw greyscale pixels into a mutable buffer.
			# NOTE(review): the loop variable `d` shadows the record dict
			# above, and range(0, w*h - 1) leaves the last pixel unset --
			# both look unintended; confirm before relying on `d` later.
			barray = bytearray(image.width * image.height)
			for d in range(0, image.width * image.height - 1):
				barray[d] = data[d]

			img = Image.frombytes('L', (image.width, image.height), buffer(barray))
			img.save(directory + tmp_file)

			img = io.imread(directory + tmp_file)

			# Binarize via isodata threshold and save under the Mongo oid.
			thresh = filters.threshold_isodata(img)
			bw_img = img > thresh

			io.imsave(directory + oid + extension, util.img_as_ubyte(bw_img))

			os.remove(directory + tmp_file)
		else:
			print 'invalid image'

		return ret + ' ' + oid + ' ' + display.db_name + ' ' + display.collection_name

	else:
		return ret
def actionThread_exec (params):
    """Take a camera frame, send it to the face API, and store attributes
    (gender, age, hair, beard, makeup, glasses) of the most centered face
    in ALMemory under Actions/persondescription/<params>/...

    Uses thread-local attributes (`session`, `mem_serv`) set by the caller.
    """
    t = threading.currentThread()
    session = getattr(t, "session", None)
    memory_service = getattr(t, "mem_serv", None)
    tts_service = getattr(t, "session", None).service("ALTextToSpeech")
    video_service = session.service("ALVideoDevice")

    print "Action "+actionName+" started with params "+params

    resolution = 3
    colorSpace = 11
    fps = 5
    
    # One-shot capture: subscribe, grab a single frame, unsubscribe.
    videoClient = video_service.subscribe("python_client", resolution, colorSpace, fps)
    image = video_service.getImageRemote(videoClient)
    video_service.unsubscribe(videoClient)

    imageWidth = image[0]
    imageHeight = image[1]
    array = image[6]
    image_string = str(bytearray(array))

    img = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)
    img.save("PersonImage.png") 

    # comment when it is not needed anymore
    #img.show()
    ###########

    # Re-read the saved PNG so the API receives encoded image bytes.
    with open('PersonImage.png', 'rb') as f:
        img_data = f.read()

    response = requests.post(face_api_url, data = img_data, params=api_params, headers=headers)
    faces = response.json()

    # Look for the face closest to the center
    min_distance = 100000000

    if (len(faces)) > 0:

        # Distance = horizontal offset of the nose tip from image center.
        for f in range(len(faces)):
            print 'Half Image:', imageWidth/2
            print 'Center Face:', faces[f]["faceLandmarks"]["noseTip"]["x"]
            distance = math.fabs(imageWidth/2 - faces[f]["faceLandmarks"]["noseTip"]["x"])

            if math.fabs(imageWidth/2 - faces[f]["faceLandmarks"]["noseTip"]["x"]) < min_distance:
                min_distance = distance
                f_center = f

        # Save the face closest to the center
        #Gender
        print "Gender: " , faces[f_center]["faceAttributes"]["gender"]
        memory_service.insertData("Actions/persondescription/"+params+"/gender",faces[f_center]["faceAttributes"]["gender"])
        #Age
        print "Age: " , faces[f_center]["faceAttributes"]["age"]
        memory_service.insertData("Actions/persondescription/"+params+"/age",faces[f_center]["faceAttributes"]["age"])
        #Hair
        if len(faces[f_center]["faceAttributes"]["hair"]["hairColor"]) > 0:
            print "Hair: " , faces[f_center]["faceAttributes"]["hair"]["hairColor"][0]["color"]
            memory_service.insertData("Actions/persondescription/"+params+"/hair",faces[f_center]["faceAttributes"]["hair"]["hairColor"][0]["color"])
        else:
            # No color reported: fall back to a default of "black".
            memory_service.insertData("Actions/persondescription/"+params+"/hair", "black")
        #Beard (score >= 0.2 counts as having a beard)
        if float(faces[f_center]["faceAttributes"]["facialHair"]["beard"]) >= 0.2:
            print "Beard: yes"
            memory_service.insertData("Actions/persondescription/"+params+"/beard","yes")
        else:
            print "Beard: no"
            memory_service.insertData("Actions/persondescription/"+params+"/beard","no")  
        # Makeup
        if faces[f_center]["faceAttributes"]["makeup"]["eyeMakeup"] == "true":
            print "Make up: yes"
            memory_service.insertData("Actions/persondescription/"+params+"/makeup","yes")
        else:
            print "Make up: no"
            memory_service.insertData("Actions/persondescription/"+params+"/makeup","no")
        #Glasses
        # NOTE(review): the stored values look inverted here -- "NoGlasses"
        # prints "no" but stores "yes", and vice versa; confirm intent.
        if faces[f_center]["faceAttributes"]["glasses"] == "NoGlasses":
            print "Glasses: no"
            memory_service.insertData("Actions/persondescription/"+params+"/glasses","yes")
        else:
            print "Glasses: yes"
            memory_service.insertData("Actions/persondescription/"+params+"/glasses","no")
        

        #tts_service.say("I see a ")
        #tts_service.say(faces[f_center]["faceAttributes"]["gender"])
        #tts_service.say(str(int(faces[f_center]["faceAttributes"]["age"])))
        #tts_service.say("years old")
        #tts_service.say(faces[f_center]["faceAttributes"]["hair"]["hairColor"][0]["color"])
        #tts_service.say("hair")
        tts_service.say("Face memorized")
    else:
        tts_service.say("I'm sorry, I see no faces in the image")




    # action end
    print "Action "+actionName+" "+params+" terminated"
    #memory_service.raiseEvent("PNP_action_result_"+actionName,"success");
    action_base.action_success(actionName, params)
Beispiel #45
0
		str_pos.append(n)
    	    if str0[n] != " " and str0[n-1] ==" ":
            	str_pos.append(n)
#    	print str_pos
#    	print len(str_pos)
#        print "line = ", (j) * (i+1) + i
#    	print " x =", str0[0:str_pos[0]]
#    	print " y = ", str0[str_pos[1]:str_pos[2]]
#    	print "value =", str0[str_pos[3]:str_pos[4]]
#    	print "conver str to int"
        if str_pos[3] == str_pos[4]:
    	    value = str0[str_pos[3]+1] 
        else:
    	    value = str0[str_pos[3]:(str_pos[4]+1)]
        if j * (i+1) + i >= 120:  
            byteArray = byteArray + value
#        print byteArray
        yx[j].append(value) 
#    	print "value =", value
fo.close() 

image = Image.frombytes("RGB", (300, 300), byteArray)
#image = Image.fromarray(yx, "RGB")
image.show()
image.save("test.png")
#print yx
#print byteArray
#image = Image.open(StringIO.StringIO(1024 * 1124)
#str_pos = []

# Demo: encrypt an image's raw pixels with AES and save the result as a
# new image -- presumably to visualize how ECB mode leaks image structure.
import Image, hashlib
from Crypto import Random
from Crypto.Cipher import AES
im = Image.open('heckert_gnu.png')
raw = im.tobytes()
# NOTE: the IV is generated but has no effect -- ECB mode ignores IV.
IV = Random.new().read(AES.block_size)
#print raw
#print im
# SECURITY: hardcoded password and single-round SHA-256 key derivation;
# never do this for real secrets (use a proper KDF and a random key).
key = hashlib.sha256("password".encode()).digest()

# SECURITY: ECB encrypts identical blocks to identical ciphertext, which
# is exactly what this demo makes visible; do not use ECB in practice.
mode = AES.MODE_ECB
encryptor = AES.new(key, mode, IV=IV)

# Assumes len(raw) is a multiple of AES.block_size (16); encrypt() raises
# otherwise -- TODO confirm for the chosen input image.
encrypted_bytes = encryptor.encrypt(raw)

# Reinterpret the ciphertext as pixels with the source image's geometry.
im_out = Image.frombytes(im.mode, im.size, encrypted_bytes)
im_out.save('ECB.png')
Beispiel #47
0
    def _step(self, action):
        """Advance the Flappy-Bird-style game by one frame.

        :param action: 1 to flap (only honored when stepcount == 0),
            anything else to do nothing.
        :returns: tuple (observation, reward, done, info) where observation
            is the rendered screen downscaled by 0.25 as a float32 array.
        """
        reward = 1
        # Flapping is only allowed on the first step of each 7-step cycle,
        # and only while the player is on screen; it costs 10 reward.
        if self.stepcount == 0 and action == 1:
            if self.playery > -2 * self.IMAGES['player'][0].get_height():
                self.playerVelY = self.playerFlapAcc
                self.playerFlapped = True
                reward += -10

        self.stepcount += 1
        self.stepcount %= 7

        crashTest = self.checkCrash(
            {
                'x': self.playerx,
                'y': self.playery,
                'index': self.playerIndex
            }, self.upperPipes, self.lowerPipes)

        # On crash: return the current screen, penalize, and end the episode.
        if crashTest[0]:
            #pygame.image.save(self.SCREEN, 'temp.bmp')
            imgstr = pygame.image.tostring(self.SCREEN, 'RGB')
            bmpfile = Image.frombytes('RGB', self.SCREEN.get_size(), imgstr)
            reward += -10
            return imresize(np.array(bmpfile, dtype=np.float32),
                            0.25), reward, True, {}

        # Award +25 each time the player's midpoint passes a pipe's midpoint.
        playerMidPos = self.playerx + self.IMAGES['player'][0].get_width() / 2
        for pipe in self.upperPipes:
            pipeMidPos = pipe['x'] + self.IMAGES['pipe'][0].get_width() / 2
            if pipeMidPos <= playerMidPos < pipeMidPos + 4:
                reward += 25
                self.score += 1

        # playerIndex basex change
        if (self.loopIter + 1) % 3 == 0:
            self.playerIndex = self.playerIndexGen.next()
        self.loopIter = (self.loopIter + 1) % 30
        self.basex = -((-self.basex + 100) % self.baseShift)

        # player's movement: gravity unless a flap just occurred.
        if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
            self.playerVelY += self.playerAccY
        if self.playerFlapped:
            self.playerFlapped = False
        self.playerHeight = self.IMAGES['player'][
            self.playerIndex].get_height()
        self.playery += min(self.playerVelY,
                            self.BASEY - self.playery - self.playerHeight)

        # move pipes to left
        for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
            uPipe['x'] += self.pipeVelX
            lPipe['x'] += self.pipeVelX

        # add new pipe when first pipe is about to touch left of screen
        if 0 < self.upperPipes[0]['x'] < 5:
            newPipe = self.getRandomPipe()
            self.upperPipes.append(newPipe[0])
            self.lowerPipes.append(newPipe[1])

        # remove first pipe if its out of the screen
        if self.upperPipes[0]['x'] < -self.IMAGES['pipe'][0].get_width():
            self.upperPipes.pop(0)
            self.lowerPipes.pop(0)

        # Redraw the whole scene back-to-front.
        self.SCREEN.blit(self.IMAGES['background'], (0, 0))

        for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
            self.SCREEN.blit(self.IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
            self.SCREEN.blit(self.IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))

        self.SCREEN.blit(self.IMAGES['base'], (self.basex, self.BASEY))
        # print score so player overlaps the score
        self.showScore(self.score)
        self.SCREEN.blit(self.IMAGES['player'][self.playerIndex],
                         (self.playerx, self.playery))

        #pygame.image.save(self.SCREEN, 'temp.bmp')
        #bmpfile = Image.open('temp.bmp');
        # Capture the rendered frame as the observation.
        imgstr = pygame.image.tostring(self.SCREEN, 'RGB')
        bmpfile = Image.frombytes('RGB', self.SCREEN.get_size(), imgstr)
        return imresize(np.array(bmpfile, dtype=np.float32),
                        0.25), reward, False, {}
def cv2_pil(cv_im):
    """Convert the given cv image into an 8-bit greyscale PIL image."""
    # cv.GetSize reports (width, height), matching PIL's size convention.
    width_height = cv.GetSize(cv_im)
    return Image.frombytes("L", width_height, cv_im.tostring())
Beispiel #49
0
 def get_image(self):
     """Return the latest camera frame as an RGB PIL image.

     Several frames are queried back-to-back so that the value returned
     is the most recent one rather than a stale buffered frame.
     """
     # HACK: drain the capture buffer -- presumably the driver queues a few
     # old frames; TODO confirm why exactly 5 reads are needed.
     for _ in range(5):
         im = cv.QueryFrame(self._cam)
     # cv delivers BGR pixel order; the "raw"/"BGR" decoder args reorder
     # the channels into RGB while building the PIL image.
     return Image.frombytes("RGB", cv.GetSize(im), im.tobytes(), "raw",
                             "BGR", 0, 1)