Example #1
def execute(chain, opts):
    """Execute chain.

    Create new Image with given options and apply chain of
    operations to it. The opts input must be a dictionary.
    """
    from image import Image
    import mylogger

    quiet = opts.get('quiet', False)
    debug = opts.get('debug', False)
    log_filename = opts["filename"] + '.pybdsm.log'
    mylogger.init_logger(log_filename, quiet=quiet, debug=debug)
    mylog = mylogger.logging.getLogger("PyBDSM.Init")
    mylog.info("Processing "+opts["filename"])

    try:
        img = Image(opts)
        img.log = log_filename
        _run_op_list(img, chain)
        return img
    except RuntimeError as err:
        # Catch and log, then re-raise if needed (e.g., for AstroWise)
        mylog.error(str(err))
        raise
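The `execute` helper above reads `quiet` and `debug` from `opts` (defaulting both to False) and requires a `filename` key, which also names the log file. A minimal calling sketch, assuming the module shown above is importable and that the operation classes making up `chain` exist under these hypothetical names:

# Hypothetical usage sketch; module and operation names are assumptions.
from bdsm_interface import execute, Op_readimage, Op_collapse

opts = {
    'filename': 'field1.fits',  # required; also drives the .pybdsm.log name
    'quiet': True,              # optional, defaults to False
    'debug': False,             # optional, defaults to False
}
chain = [Op_readimage(), Op_collapse()]  # operations applied in order
img = execute(chain, opts)               # returns the processed Image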
Example #2
 def post(self):
     user = self.get_current_user()
     if not user:
         self.redirect('/')
         return
     name = self.request.get('name')
     overwrite = self.request.get('overwrite')
     chapter = db.get( self.request.get('chapter') )
     args = { 'chapter': chapter.key() }
     if len(name) == 0:
         args['error'] = 'Image must have a name.'
     else:
         old_image = Image.get_image_by_name(chapter, name)
         if overwrite.lower() == 'true':
             if old_image:
                 old_image.delete()
                 refresh_chapter(chapter)
                 #chapter.put()
             old_image = None
         if old_image:
             args['image'] = old_image.key()
             args['error'] = 'Image with this name already exists.'
         else:
             data = self.request.get('img')
             if len(data) == 0:
                 args['error'] = 'No data received.' 
             else:
                 image = Image.create(chapter, name, data)
                 args['image'] = image.key()
                 refresh_chapter(chapter)
     self.redirect('/uploadimagepage?%s' % urllib.urlencode(args))
Example #3
def Initialize(credentials=None, opt_url=None):
  """Initialize the EE library.

  If this hasn't been called by the time any object constructor is used,
  it will be called then.  If this is called a second time with a different
  URL, this doesn't un-initialize, e.g., the previously loaded
  Algorithms, but will overwrite them and let them point at alternate servers.

  Args:
    credentials: OAuth2 credentials.
    opt_url: The base url for the EarthEngine REST API to connect to.
  """
  data.initialize(credentials, (opt_url + '/api' if opt_url else None), opt_url)
  # Initialize the dynamically loaded functions on the objects that want them.
  ApiFunction.initialize()
  Element.initialize()
  Image.initialize()
  Feature.initialize()
  Collection.initialize()
  ImageCollection.initialize()
  FeatureCollection.initialize()
  Filter.initialize()
  Geometry.initialize()
  List.initialize()
  Number.initialize()
  String.initialize()
  Date.initialize()
  Dictionary.initialize()
  _InitializeGeneratedClasses()
  _InitializeUnboundMethods()
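`Initialize` must run before any of the listed classes are constructed (or it will be invoked implicitly on first use, as the docstring notes). A short sketch, assuming the usual `ee` package entry point; the asset id is only illustrative:

import ee  # assumes the Earth Engine client library containing Initialize()

ee.Initialize(credentials=None)     # wires up ApiFunction, Image, Feature, ...
dem = ee.Image('CGIAR/SRTM90_V4')   # example asset id, for illustration only
print(dem.getInfo()['type'])        # 'Image' once the library is initialized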
Example #4
 def __init__(self, parent = None):
     Image.__init__(self, parent)
     CaptureObject.__init__(self)
     self.blind = False
     self.useGlobalCapture = True
     self.bind('Mouse Enter', self.onChangeCursor, True)
     self.bind('Mouse Leave', self.onChangeCursor, False)
Example #5
 def test_detect_face(self):
     orig_image = Image("jpg", "1", "./data/1.jpg")
     orig_image.getRGB(cache=True)
     
     orig_image.detect_face()
     
     self.assertTrue(os.path.exists("./data/1_face.jpg"))
Example #6
def detect_face_from_url():
    if request.method == 'POST':
        url = request.form['url']
        filename = url.split('/')[-1]
        image_file = requests.get(url)
        
        if image_file and allowed_file(filename):
            filepath = os.path.join(facerec.config['UPLOAD_FOLDER'], filename)
            with open(filepath, 'wb') as f:
                f.write(image_file.content)

            image = Image('jpg', filename.split('.')[0], filepath, facerec.config)
            image.getRGB(cache=True)

            largestBox = detector.getLargestFaceBoundingBox(image.rgb)
            alignedFace = detector.alignImg("homography", 256, image.rgb, largestBox,
                                            outputPrefix=image.name, outputDebug=True,
                                            expandBox=False)
            # image.detect_face()

            orig_filename = filename.split('.')[0] + "-orig.jpg"
            face_imagename = filename.split('.')[0] + "-annotated.jpg"
            cropped_filename = filename.split('.')[0] + "-cropped.jpg"
            entries = [orig_filename, face_imagename, cropped_filename]
            return render_template('index.html', entries=entries)
Example #7
 def testMissingSymbol(self):
     image = Image('name', 'node', test=True)
     image._entries = {}
     with self.assertRaises(ValueError) as e:
         image.LookupSymbol('_binman_type_prop_pname', False, 'msg')
     self.assertIn("msg: Entry 'type' not found in list ()",
                   str(e.exception))
Example #8
def Initialize(credentials="persistent", opt_url=None):
    """Initialize the EE library.

  If this hasn't been called by the time any object constructor is used,
  it will be called then.  If this is called a second time with a different
  URL, this doesn't un-initialize, e.g., the previously loaded
  Algorithms, but will overwrite them and let them point at alternate servers.

  Args:
    credentials: OAuth2 credentials.  'persistent' (default) means use
        credentials already stored in the filesystem, or raise an explanatory
        exception guiding the user to create those credentials.
    opt_url: The base url for the EarthEngine REST API to connect to.
  """
    if credentials == "persistent":
        credentials = _GetPersistentCredentials()
    data.initialize(credentials, (opt_url + "/api" if opt_url else None), opt_url)
    # Initialize the dynamically loaded functions on the objects that want them.
    ApiFunction.initialize()
    Element.initialize()
    Image.initialize()
    Feature.initialize()
    Collection.initialize()
    ImageCollection.initialize()
    FeatureCollection.initialize()
    Filter.initialize()
    Geometry.initialize()
    List.initialize()
    Number.initialize()
    String.initialize()
    Date.initialize()
    Dictionary.initialize()
    Terrain.initialize()
    _InitializeGeneratedClasses()
    _InitializeUnboundMethods()
Example #9
    def loadImage(self, image):
        if isinstance(image, (str, unicode)):
            image = Image(image)

        pixels = image.flip_vertical().data

        self.initTexture(image.width, image.height, image.components, pixels)
Example #10
 def addImage(self, imageId, annFile=None, segFile=None, score=0.0, purpose='train'):
     image = Image( imageId )
     image.purpose = purpose
     image.annotationFile = annFile
     image.segmentationFile = segFile
     image.traningScore = score
     self.images.append( image )
Example #11
 def test_prepare(self):
     dataset_path = "c:/_Hive/_diploma/LISA Traffic Sign Dataset/signDatabasePublicFramesOnly/vid0/frameAnnotations-vid_cmp2.avi_annotations/"
     annotation_path = dataset_path + 'frameAnnotations.csv'
     image_data = np.genfromtxt(annotation_path, delimiter=';', names=True, dtype=None)
     files = dict()
     for image in image_data:
         filepath = image['Filename']
         if filepath not in files:
             img = Image(filepath)
             img.add_sign(label=image['Annotation_tag'],
                          coordinates=(image['Upper_left_corner_X'], image['Upper_left_corner_Y'],
                                       image['Lower_right_corner_X'], image['Lower_right_corner_Y']))
             files[filepath] = img
         else:
             files[filepath].add_sign(label=image['Annotation_tag'],
                                      coordinates=(image['Upper_left_corner_X'], image['Upper_left_corner_Y'],
                                                   image['Lower_right_corner_X'], image['Lower_right_corner_Y']))
     images = np.array(list(files.keys()))
     images.sort()
     lbl = np.array([files.get(key).get_coordinates() for key in images])
     print(images[0].decode('utf8'))
     imgs, lbls, coords = prepare_images.prepare(dataset_path + images[0].decode('utf8'), lbl[0])
     test_img = cv2.imread(dataset_path + images[0].decode('utf8'), cv2.IMREAD_UNCHANGED)
     # noinspection PyAugmentAssignment
     test_img = test_img / 255
     for j in range(lbls.shape[0]):
         # Rectangle = namedtuple('Rectangle', ['xmin', 'ymin', 'xmax', 'ymax'])
         (x1, y1, x2, y2) = (coords[j].xmin, coords[j].ymin, coords[j].xmax, coords[j].ymax)
         test_img_roi = np.array(
             [test_img[y1:y2, x1:x2, 0], test_img[y1:y2, x1:x2, 1], test_img[y1:y2, x1:x2, 2]])
         # if j > 325:
         #     prepare_images.show_roi([imgs[j], test_img_roi])
         self.assertTrue(np.allclose(imgs[j], test_img_roi), msg="In imgs[{}]".format(j))
Example #12
def make_flat_avg(images, out):    
    """
    Create a flat average of images and also a normalised version.
    """
    image = Image(avg_images(images, out))
    image.normalise()
    return out
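`make_flat_avg` averages the input frames into `out`, wraps the result in an `Image` to normalise it, and hands back the output path. A hedged usage sketch; the file names are placeholders and `avg_images`/`Image` are assumed to come from the same module as the function above:

# Hypothetical usage; flat-field file names are placeholders.
flats = ['flat_001.fits', 'flat_002.fits', 'flat_003.fits']
master = make_flat_avg(flats, 'master_flat.fits')
print('flat written to {0}'.format(master))  # make_flat_avg returns the out path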
Example #13
def imshow(image, **kwargs):
	img = Image(image, **kwargs)
	gca().width = 2.54 / 72. * img.width()
	gca().height = 2.54 / 72. * img.height()
	gca().xtick_align = 'outside'
	gca().ytick_align = 'outside'
	axis('tight')
	return img
Example #14
    def __init__(self,x,y,image,folder=''):
        Image.__init__(self,x,y,image,folder)
        self.spawn_x = x
        self.spawn_y = y
        self.xvel = 0
        self.yvel = 0

        self.gravity = Gravity(5)
Example #15
 def testMissingSymbolOptional(self):
     image = Image('name', 'node', test=True)
     image._entries = {}
     with capture_sys_output() as (stdout, stderr):
         val = image.LookupSymbol('_binman_type_prop_pname', True, 'msg')
     self.assertEqual(val, None)
     self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
                      stderr.getvalue())
     self.assertEqual('', stdout.getvalue())
Example #16
def imshow(image, **kwargs):
	img = Image(image, **kwargs)
	dpi = kwargs.get('dpi', 150.)
	gca().width = 2.54 / dpi * img.width()
	gca().height = 2.54 / dpi * img.height()
	gca().xtick_align = 'outside'
	gca().ytick_align = 'outside'
	axis('tight')
	return img
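The two `imshow` helpers (Examples #13 and #16) size the current axes from the image's pixel dimensions, converting pixels to centimetres as 2.54 / dpi; the variant above lets the caller pass the assumed `dpi` through `kwargs`. A hedged usage sketch (this `imshow` is the helper defined above, not matplotlib's; the file name is a placeholder):

img = imshow('photo.png', dpi=300.)  # axes sized at 2.54/300 cm per pixel
print('{0} x {1} pixels'.format(img.width(), img.height()))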
Example #17
def expandMask(img, shrink = False, step = 1):
    """Grow or shrink a mask by a pixel."""
    if shrink:
        img = invert(img)
    img = jitterSum(img.data, step) > 0
    img = Image(data = img.astype(numpy.uint8)*255)
    if shrink:
        img = invert(img)
    return img
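`expandMask` grows a binary mask by summing each pixel's jittered neighbourhood and thresholding above zero; shrinking is done by inverting, growing, and inverting back. A usage sketch, assuming `Image`, `invert` and `jitterSum` live in the same module as the function above and that the file name is a placeholder for a 0/255 mask:

# Hypothetical usage of expandMask() above.
mask = Image('mask.png')
grown = expandMask(mask)                 # dilate by one step
shrunk = expandMask(mask, shrink=True)   # erode via double inversion
wider = expandMask(mask, step=2)         # dilate with a larger step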
Example #18
def process_image(page, regex, item):
    image = Image()

    try:
        image.find(page, regex)
    except Exception:
        ERROR("Can't find image relative url on the page")
        return False

    try:
        image.url("http://{domain}{relUrl}".format(domain =item['domain'],
                                                   relUrl =image._relUrl))
    except Exception:
        ERROR("Can't generate full url for the image")
        return False        

    try:
        image.get()
    except Exception:
        ERROR("Can't get image by the url")
        return False

    try:
        path = "{domain}/{comics_name}/{file_name}".format(
            domain     =item['domain'],
            comics_name=item['name'],
            file_name  ="{name}_{counter:04}.{ext}".format(name=item['name'],
                                                           counter=item['page_current'],
                                                           ext=image._ext))
        image.save(path)
    except Exception:
        ERROR("Can't save the image")
        return False

    return True
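`process_image` is one scraping step: find the image's relative URL on the page, build the absolute URL from the item's domain, download it, and save it under a per-comic, zero-padded path, returning False on any failure. A hedged driver sketch; the page HTML, regex and item fields are placeholders inferred from the format strings above:

# Hypothetical driver for the function above.
page = '<html><img src="/strips/0001.png"></html>'
regex = r'<img src="(?P<relUrl>[^"]+)">'
item = {'domain': 'example.com', 'name': 'my_comic', 'page_current': 1}

if not process_image(page, regex, item):
    print("Giving up on page {0}".format(item['page_current']))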
Example #19
 def contactUpdated(self, contact):
     print unicode("Contact Updated: " + QString.fromUtf8(contact.name.toString()))
     l = self._model.findItems("*", Qt.MatchWildcard | Qt.MatchRecursive)
     
     for itm in l:
         if itm.uid() == contact.uid:
             itm.setContactName(QString.fromUtf8(contact.name.toString()))
             img = Image()
             img.loadFromImageView(contact.icon)
             itm.setContactIcon(img)
             break
Example #20
 def __init__(self,image,color,y):
     Image.__init__(self,40,y,'status_bar.png',folder = 'status')
     self.value = pygame.Surface((2,21))
     self.value.fill(color)
     self.value = self.value.convert()
     
     self.symbol = load_image(image + '.png',folder = 'status')[0]
     #self.status_bar_rect = self.box.get_rect()
   
     self.current = 100
     self.maximum = 100
Example #21
 def test_publish_image(self):
     Image.set_abs_reference_dir_from_html_file(self.filename)
     account = AccountManager.get_default_account()
     tt = Transmitter()
     tt.set_account(account)
     image = Image("some <img src='Suzanne.jpg' /> string")
     before = image.get_remote_src()
     self.assertEqual(before, None)
     tt.publish_image(image)
     after = image.get_remote_src()
     self.assertNotEqual(after, None)
Example #22
def build_basement_image():
  source_file = './templates/Dockerfile-basement'
  destination_file = './tmp/Dockerfile'

  run("mkdir -p ./tmp")
  files.upload_template(source_file, destination_file, mode=0777)

  image = Image("basement", destination_file)
  image.build()
  run("rm -rf ./tmp")
  return image
Example #23
def LoadTexture(pathname):
	img = Image(pathname, (255,0,0))
	img = img.img
	data = pygame.image.tostring(img, "RGBA", True)
	texid = glGenTextures(1)
	glBindTexture(GL_TEXTURE_2D, texid)
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA
				,img.get_width(), img.get_height()
				,0, GL_RGBA, GL_UNSIGNED_BYTE, data)
	glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
	glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
Example #24
def storeUploadImage(url, inUser, msg):
    """Will store a image into memory"""
    img = Image()
    #img.author = inUser
    img.authorID = inUser._id
    #img.created_at = datetime.now()
    #img.locLabel = user.locLabel
    img.url = url
    setupImageLocation(img, msg)        
    LoadedImage.loadedImages.append(img)
    img.save()
    return img
Example #25
 def search_by_image(self, image_filename, num=20):
     """
     Search images in database by color similarity to image.
     
     See search_by_color_hist().
     """
     query_img = Image(image_filename)
     color_hist = util.histogram_colors_smoothed(
         query_img.lab_array, self.ic.palette,
         sigma=self.sigma, direct=False)
     results, time_elapsed = self.search_by_color_hist(color_hist)
     return query_img.as_dict(), results, time_elapsed
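As its docstring says, `search_by_image` converts the query image into a smoothed colour histogram over the searcher's palette and delegates to `search_by_color_hist()`. A hedged call sketch; `ColorSearcher` is a stand-in name for whatever class defines the method above, and the query file is a placeholder:

# Hypothetical usage of search_by_image() above.
searcher = ColorSearcher()
query, results, elapsed = searcher.search_by_image('query.jpg', num=20)
print('{0} results in {1:.2f}s'.format(len(results), elapsed))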
Example #26
def build_web_app_image():
  context = { "parent_image_id": Image.image_id_of("web-basement")[0], "port_maps": [{container: 80}, {container: 443}] }
  source_file = './templates/Dockerfile-web-app'
  destination_file = './tmp/Dockerfile'

  run("mkdir -p ./tmp")
  files.upload_template(source_file, destination_file, context=context, mode=0777)

  image = Image("web-app", destination_file)
  image.build()
  run("rm -rf ./tmp")
  return image
Example #27
def build_mysql_image():
  context = { "parent_image_id": Image.image_id_of("basement")[0], "root_password": "" }
  source_file = './templates/Dockerfile-mysql'
  destination_file = './tmp/Dockerfile'

  run("mkdir -p ./tmp")
  files.upload_template(source_file, destination_file, context=context, mode=0777)

  image = Image("mysql", destination_file)
  image.build()
  run("rm -rf ./tmp")
  return image
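The three Fabric tasks above share one pattern: render a Dockerfile template into ./tmp, build an `Image` from it, and remove the temporary directory. A sketch of running them so parent images exist before children reference them via `Image.image_id_of`; note the web-app task additionally expects a "web-basement" image to have been built elsewhere:

# Hypothetical orchestration of the tasks defined above.
basement = build_basement_image()   # base image
mysql = build_mysql_image()         # Dockerfile-mysql builds FROM basement
webapp = build_web_app_image()      # builds FROM web-basement, maps 80/443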
Example #28
 def test_save_jpg(self):
     #Read jpg file to image
     orig_image = Image("jpg", "1", "./data/1.jpg")
     orig_image.getRGB(cache=True)
     
     # Save it to name + test - jpg image
     filename = orig_image.name + "_test"
     orig_image.save('jpg', filename, "./data/")
     #Check there is file called name + test image in directory
     self.assertTrue(os.path.exists("./data/1_test.jpg"))
     
     os.remove('./data/1_test.jpg')
Example #29
    def compile(self):
        """
        Compute the result of flattening all layers
        and return it as an Image.
        """

        if not self.layers: return Image()

        img = Image(None, *self.size, components=self.components)
        for layer in self.layers:
            img.blit(layer, *layer.borders[0:1])

        return img
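`compile()` flattens the layer stack by blitting every layer into a blank `Image` of the document's size, returning an empty `Image` when there are no layers. A usage sketch, assuming the owning class (called `Document` here only for illustration) exposes `layers`, `size` and `components` as the code implies:

# Hypothetical usage; Document stands in for the class defining compile().
doc = Document(size=(640, 480), components=4)
doc.layers.append(Image('background.png'))
doc.layers.append(Image('overlay.png'))
flat = doc.compile()   # a single Image with all layers composited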
Example #30
	def __init__(self, screen, sidebar):
		self.gameObjects = []
		self.flags = set([])	
		self.screen = screen
		self.sidebar = sidebar
		self.frame = 0
		self.wave = 0
		self.d =  None
		self.boss = None
		self.levelFade = False
		self.levelBackground = self.levelOneBackground = Image.get(r'backgrounds\water_background\water1a')
		self.levelTwoBackground = Image.get(r'backgrounds\cloud_background\clouds1')
		self.levelThreeBackground = Image.get(r'backgrounds\rainforest')
Example #31
def renderAlphaMask(width, height, productionRender=True):
    """
    Render an alpha mask matching a render to renderbuffer, which can be used
    for compositing the produced render onto a background.
    Verify whether OpenGL drivers support renderbuffers using 
    hasRenderToRenderbuffer().
    """
    # Create and bind framebuffer
    framebuffer = safeRun(glGenFramebuffers,
                          1,
                          fallbacks=(glGenFramebuffersEXT))
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            framebuffer,
            fallbacks=(glBindFramebufferEXT))

    # Now that framebuffer is bound, verify whether dimensions are within max supported dimensions
    maxWidth, maxHeight = glGetInteger(GL_MAX_VIEWPORT_DIMS)
    width = min(width, maxWidth)
    height = min(height, maxHeight)

    # Create and bind renderbuffers
    renderbuffer = safeRun(
        glGenRenderbuffers, 1,
        fallbacks=(glGenRenderbuffersEXT
                   ))  # We need a renderbuffer for both color and depth
    depthRenderbuffer = safeRun(glGenRenderbuffers,
                                1,
                                fallbacks=(glGenRenderbuffersEXT))
    safeRun(glBindRenderbuffer,
            GL_RENDERBUFFER,
            renderbuffer,
            fallbacks=(glBindRenderbufferEXT))
    safeRun(glRenderbufferStorage,
            GL_RENDERBUFFER,
            GL_RGBA,
            width,
            height,
            fallbacks=(glRenderbufferStorageEXT))
    safeRun(glFramebufferRenderbuffer,
            GL_FRAMEBUFFER,
            GL_COLOR_ATTACHMENT0,
            GL_RENDERBUFFER,
            renderbuffer,
            fallbacks=(glFramebufferRenderbufferEXT))

    safeRun(glBindRenderbuffer,
            GL_RENDERBUFFER,
            depthRenderbuffer,
            fallbacks=(glBindRenderbufferEXT))
    safeRun(glRenderbufferStorage,
            GL_RENDERBUFFER,
            GL_DEPTH_COMPONENT16,
            width,
            height,
            fallbacks=(glRenderbufferStorageEXT))
    safeRun(glFramebufferRenderbuffer,
            GL_FRAMEBUFFER,
            GL_DEPTH_ATTACHMENT,
            GL_RENDERBUFFER,
            depthRenderbuffer,
            fallbacks=(glFramebufferRenderbufferEXT))

    # TODO check with glCheckFramebufferStatus ?
    if not glCheckFramebufferStatus(GL_FRAMEBUFFER):
        pass  # TODO

    # Adapt camera projection matrix to framebuffer size
    oldWidth = G.windowWidth
    oldHeight = G.windowHeight
    G.windowWidth = width
    G.windowHeight = height
    glPushAttrib(GL_VIEWPORT_BIT)
    glViewport(0, 0, width, height)

    # Transparent background color
    oldClearColor = G.clearColor
    G.clearColor = (0.5, 0.5, 0.5, 0)
    # Change blend func to accumulate alpha
    glBlendFunc(GL_ONE, GL_ONE)
    # Disable multisampling
    global have_multisample
    old_have_multisample = have_multisample
    have_multisample = False

    # Draw scene as usual
    draw(productionRender)

    # Restore rendering defaults
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    have_multisample = old_have_multisample
    G.clearColor = oldClearColor

    # Read pixels
    surface = np.empty((height, width, 4), dtype=np.uint8)
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            framebuffer,
            fallbacks=(glBindFramebufferEXT))
    glReadBuffer(GL_COLOR_ATTACHMENT0)
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, surface)

    # Grayscale image of only alpha channel
    surface = Image(data=np.ascontiguousarray(surface[::-1, :, [3, 3, 3]]))

    # Unbind frame buffer
    safeRun(glDeleteFramebuffers,
            np.array([framebuffer]),
            fallbacks=(glDeleteFramebuffersEXT))
    safeRun(glDeleteRenderbuffers,
            1,
            np.array([renderbuffer]),
            fallbacks=(glDeleteRenderbuffersEXT))
    safeRun(glDeleteRenderbuffers,
            1,
            np.array([depthRenderbuffer]),
            fallbacks=(glDeleteRenderbuffersEXT))
    safeRun(glBindRenderbuffer,
            GL_RENDERBUFFER,
            0,
            fallbacks=(glBindRenderbufferEXT))
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            0,
            fallbacks=(glBindFramebufferEXT))

    # Restore viewport dimensions to those of the window
    G.windowWidth = oldWidth
    G.windowHeight = oldHeight
    #glPushAttrib(GL_VIEWPORT_BIT)
    glPopAttrib()
    glViewport(0, 0, oldWidth, oldHeight)

    return surface
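The docstring asks callers to confirm renderbuffer support before using this path. A hedged calling sketch; `hasRenderToRenderbuffer()` is the check named in the docstring, and the save call assumes the returned `Image` supports it:

# Hypothetical usage of renderAlphaMask() above.
if hasRenderToRenderbuffer():
    alpha = renderAlphaMask(1920, 1080, productionRender=True)
    alpha.save('render_alpha.png')   # grayscale matte built from the alpha channel
else:
    print('Renderbuffers not supported by the OpenGL drivers')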
Example #32
def renderToBuffer(width, height, productionRender=True):
    """
    Perform an offscreen render and return the pixel buffer.
    Verify whether OpenGL drivers support renderbuffers using 
    hasRenderToRenderbuffer().
    """
    # Create and bind framebuffer
    framebuffer = safeRun(glGenFramebuffers,
                          1,
                          fallbacks=(glGenFramebuffersEXT))
    #glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer)
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            framebuffer,
            fallbacks=(glBindFramebufferEXT))

    # Now that framebuffer is bound, verify whether dimensions are within max supported dimensions
    maxWidth, maxHeight = glGetInteger(GL_MAX_VIEWPORT_DIMS)
    aspect = float(height) / width
    width = min(width, maxWidth)
    height = min(height, maxHeight)
    # Maintain original aspect ratio
    if aspect * width < height:
        height = int(aspect * width)
    else:
        width = int(height / aspect)

    # Create and bind renderbuffers
    renderbuffer = safeRun(
        glGenRenderbuffers, 1,
        fallbacks=(glGenRenderbuffersEXT
                   ))  # We need a renderbuffer for both color and depth
    depthRenderbuffer = safeRun(glGenRenderbuffers,
                                1,
                                fallbacks=(glGenRenderbuffersEXT))
    safeRun(glBindRenderbuffer,
            GL_RENDERBUFFER,
            renderbuffer,
            fallbacks=(glBindRenderbufferEXT))
    global have_multisample
    if have_multisample:
        safeRun(glRenderbufferStorageMultisample,
                GL_RENDERBUFFER,
                4,
                GL_RGBA,
                width,
                height,
                fallbacks=(glRenderbufferStorageMultisampleEXT))
    else:
        safeRun(glRenderbufferStorage,
                GL_RENDERBUFFER,
                GL_RGBA,
                width,
                height,
                fallbacks=(glRenderbufferStorageEXT))
    safeRun(glFramebufferRenderbuffer,
            GL_FRAMEBUFFER,
            GL_COLOR_ATTACHMENT0,
            GL_RENDERBUFFER,
            renderbuffer,
            fallbacks=(glFramebufferRenderbufferEXT))

    safeRun(glBindRenderbuffer,
            GL_RENDERBUFFER,
            depthRenderbuffer,
            fallbacks=(glBindRenderbufferEXT))
    if have_multisample:
        safeRun(glRenderbufferStorageMultisample,
                GL_RENDERBUFFER,
                4,
                GL_DEPTH_COMPONENT16,
                width,
                height,
                fallbacks=(glRenderbufferStorageMultisampleEXT))
    else:
        safeRun(glRenderbufferStorage,
                GL_RENDERBUFFER,
                GL_DEPTH_COMPONENT16,
                width,
                height,
                fallbacks=(glRenderbufferStorageEXT))
    safeRun(glFramebufferRenderbuffer,
            GL_FRAMEBUFFER,
            GL_DEPTH_ATTACHMENT,
            GL_RENDERBUFFER,
            depthRenderbuffer,
            fallbacks=(glFramebufferRenderbufferEXT))

    # TODO check with glCheckFramebufferStatus ?
    if not glCheckFramebufferStatus(GL_FRAMEBUFFER):
        pass  # TODO

    # Adapt camera projection matrix to framebuffer size
    oldWidth = G.windowWidth
    oldHeight = G.windowHeight
    G.windowWidth = width
    G.windowHeight = height
    glPushAttrib(GL_VIEWPORT_BIT)
    glViewport(0, 0, width, height)

    # Neutral background color
    oldClearColor = G.clearColor
    G.clearColor = (0.5, 0.5, 0.5, 1)

    # Draw scene as usual
    draw(productionRender)

    G.clearColor = oldClearColor

    if have_multisample:
        # If we have drawn to a multisample renderbuffer, we need to transfer it to a simple buffer to read it
        downsampledFramebuffer = safeRun(glGenFramebuffers,
                                         1,
                                         fallbacks=(glGenFramebuffersEXT))
        safeRun(glBindFramebuffer,
                GL_READ_FRAMEBUFFER,
                framebuffer,
                fallbacks=(glBindFramebufferEXT))  # Multisampled FBO
        safeRun(glBindFramebuffer,
                GL_DRAW_FRAMEBUFFER,
                downsampledFramebuffer,
                fallbacks=(glBindFramebufferEXT))  # Regular FBO
        regularRenderbuffer = safeRun(glGenRenderbuffers,
                                      1,
                                      fallbacks=(glGenRenderbuffersEXT))
        safeRun(glBindRenderbuffer,
                GL_RENDERBUFFER,
                regularRenderbuffer,
                fallbacks=(glBindRenderbufferEXT))
        safeRun(glRenderbufferStorage,
                GL_RENDERBUFFER,
                GL_RGBA,
                width,
                height,
                fallbacks=(glRenderbufferStorageEXT))
        safeRun(glFramebufferRenderbuffer,
                GL_FRAMEBUFFER,
                GL_COLOR_ATTACHMENT0,
                GL_RENDERBUFFER,
                regularRenderbuffer,
                fallbacks=(glFramebufferRenderbufferEXT))
        glBlitFramebuffer(0, 0, width, height, 0, 0, width, height,
                          GL_COLOR_BUFFER_BIT, GL_NEAREST)

        # Dealloc what we no longer need
        safeRun(glDeleteFramebuffers,
                np.array([framebuffer]),
                fallbacks=(glDeleteFramebuffersEXT))
        framebuffer = downsampledFramebuffer
        del downsampledFramebuffer
        safeRun(glDeleteRenderbuffers,
                1,
                np.array([renderbuffer]),
                fallbacks=(glDeleteRenderbuffersEXT))
        renderbuffer = regularRenderbuffer
        del regularRenderbuffer

    # Read pixels
    surface = np.empty((height, width, 4), dtype=np.uint8)
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            framebuffer,
            fallbacks=(glBindFramebufferEXT))
    glReadBuffer(GL_COLOR_ATTACHMENT0)
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, surface)

    surface = Image(data=np.ascontiguousarray(surface[::-1, :, [2, 1, 0]]))

    # Unbind frame buffer
    safeRun(glDeleteFramebuffers,
            np.array([framebuffer]),
            fallbacks=(glDeleteFramebuffersEXT))
    safeRun(glDeleteRenderbuffers,
            1,
            np.array([renderbuffer]),
            fallbacks=(glDeleteRenderbuffersEXT))
    safeRun(glDeleteRenderbuffers,
            1,
            np.array([depthRenderbuffer]),
            fallbacks=(glDeleteRenderbuffersEXT))
    safeRun(glBindRenderbuffer,
            GL_RENDERBUFFER,
            0,
            fallbacks=(glBindRenderbufferEXT))
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            0,
            fallbacks=(glBindFramebufferEXT))

    # Restore viewport dimensions to those of the window
    G.windowWidth = oldWidth
    G.windowHeight = oldHeight
    # glPushAttrib(GL_VIEWPORT_BIT)
    glPopAttrib()
    glViewport(0, 0, oldWidth, oldHeight)

    return surface
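`renderToBuffer` differs from `renderAlphaMask` above in that it preserves the requested aspect ratio, renders through a multisampled renderbuffer when available (then blits it down to a regular buffer for reading), and returns the colour channels rather than a grayscale matte. A sketch pairing the two to obtain a render plus its matching alpha mask:

# Hypothetical usage; both helpers are defined above.
if hasRenderToRenderbuffer():
    color = renderToBuffer(1920, 1080)    # offscreen render of the scene
    alpha = renderAlphaMask(1920, 1080)   # matching alpha matte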
Example #33
def jeDecouvreLaBibliothequeImage(fichier):
    """
    This function illustrates the various functions of the library that we
    will be using, namely:
        - Image
        - width
        - height
        - getPixel
        - setPixel
        - save
    """
    # Uncomment the relevant lines to discover how the library works.
    #
    # Feel free to modify these lines and re-run the function to fully
    # understand how it behaves.

    # To load an image, call the 'constructor' ``Image(fichier)``,
    # where ``fichier`` is the name of an image in PPM format.
    im = Image(fichier)

    # The ``.width`` and ``.height`` attributes give, respectively, the
    # number of columns and the number of rows.
    l = im.width
    h = im.height
    print("Cette image est formée de {} colonnes et {} lignes".format(l, h))

    # The ``.getPixel(x, y)`` function returns the color of the pixel located
    # at column ``x`` and row ``y``.
    #
    # The image is thus viewed as a Cartesian plane whose y axis points
    # downwards:
    #
    #      | 1 2 3 4 5 6 7 8 9 ...
    #     -+--------------------------> x
    #    1 |
    #    2 |               p
    #    3 |
    #    4 |
    #      V
    #      y
    #
    #    Coordinates (x,y) of point p: (8,2)
    #
    # Note: colors are represented by (r,g,b) triples (i.e. 3-element arrays)
    #       where r, g and b are integers in the range 0 to 255 (inclusive)
    #       giving, respectively, the intensity of red (r), green (g) and
    #       blue (b). Thus (255,0,0) is red, (0,255,0) is green and
    #       (0,0,255) is blue. Note that (255,255,255) is white and (0,0,0)
    #       is black.
    #
    c = im.getPixel(0, 0)
    print("Le pixel en (0,0) est de couleur: {}".format(c))

    # Retrieve the color components one by one:
    r = c[0]
    g = c[1]
    b = c[2]
    print("L'intensité de rouge du pixel (0,0) est: {}".format(r))
    print("L'intensité de vert  du pixel (0,0) est: {}".format(g))
    print("L'intensité de bleu  du pixel (0,0) est: {}".format(b))

    # The ``.setPixel(x, y, c)`` function assigns the color ``c`` to
    # pixel ``(x,y)``.
    # Set pixel (1,0) to red.
    im.setPixel(1, 0, (255, 0, 0))

    # Draw a horizontal black line across the full width of the image.
    y = im.height // 2
    for x in range(im.width):
        im.setPixel(x, y, (0, 0, 0))

    # Draw a vertical white line across the full height of the image.
    x = im.width // 2
    for y in range(im.height):
        im.setPixel(x, y, (255, 255, 255))

    # The ``.save(fichier)`` function writes the image to a file.
    # The file extension must be ".ppm".
    im.save("question-1.ppm")
Example #34
def testTP1(n):
    """
    n is the number of the question to test
    """

    # The sample images...
    image1 = "moulin.ppm"
    # image1 = "neige.ppm"
    image2 = "fruits.ppm"
    # image2 = "sushis.ppm"
    image3 = "vague.ppm"

    # Define a few colors
    blanc = (255, 255, 255)
    noir = (0, 0, 0)
    rouge = (255, 0, 0)
    vert = (0, 255, 0)
    bleu = (0, 0, 255)
    jaune = (255, 255, 0)
    gris = (127, 127, 127)
    vertpomme = (91, 194, 54)

    # And a few color lists
    bgn = [blanc, gris, noir]
    couleurs = [blanc, noir, rouge, vert, bleu, jaune, gris, vertpomme]

    # Question 1
    if n == 1:
        print('# Question 1: voir fichier "question-1.ppm"')
        jeDecouvreLaBibliothequeImage(image1)

    # Question 2
    if n == 2:
        ImageBinaire(image1, "question-2.ppm")
        print('# question 2: voir fichier "question-2.ppm"')

    # Question 3
    # A few colors are defined above.
    if n == 3:
        if couleurDiff(blanc, noir) != 195075:
            print("[ERREUR] question 2, mauvaise diff pour blanc et noir")
        elif couleurDiff(blanc, rouge) != 130050:
            print("[ERREUR] question 2, mauvaise diff pour blanc et rouge")
        elif couleurDiff(blanc, vert) != 130050:
            print("[ERREUR] question 2, mauvaise diff pour blanc et vert")
        elif couleurDiff(blanc, jaune) != 65025:
            print("[ERREUR] question 2, mauvaise diff pour blanc et jaune")
        elif couleurDiff(vertpomme, jaune) != 33533:
            print("[ERREUR] question 2, mauvaise diff pour vertpomme et jaune")
        elif couleurDiff(vertpomme, blanc) != 71018:
            print("[ERREUR] question 2, mauvaise diff pour vertpomme et blanc")
        elif couleurDiff(vertpomme, noir) != 48833:
            print("[ERREUR] question 2, mauvaise diff pour vertpomme et noir")
        elif couleurDiff(vertpomme, vertpomme) != 0:
            print(
                "[ERREUR] question 2, mauvaise diff pour vertpomme et vertpomme"
            )
        else:
            print("# Question 3: couleurDiff --> OK")

    # Question 4
    if n == 4:
        troisNuancesDeGris(image1, "question-4.ppm")
        print('# Question 4: voir fichier "question-4.ppm"')

    # Question 5
    if n == 5:
        if plusSemblable((199, 84, 16), couleurs) != rouge:
            print("[ERREUR] question 5, (199, 84, 16) devrait trouver rouge")
        elif plusSemblable((225, 230, 43), couleurs) != jaune:
            print("[ERREUR] question 4, (225, 230, 43) devrait trouver jaune")
        elif plusSemblable((246, 162, 84), couleurs) != jaune:
            print("[ERREUR] question 4, (246, 162, 84) devrait trouver jaune")
        elif plusSemblable((240, 185, 136), couleurs) != gris:
            print("[ERREUR] question 4, (240, 185, 136) devrait trouver gris")
        elif plusSemblable((189, 132, 10), couleurs) != vertpomme:
            print(
                "[ERREUR] question 4, (189, 132, 10) devrait trouver vertpomme"
            )
        elif plusSemblable((112, 126, 55), couleurs) != vertpomme:
            print(
                "[ERREUR] question 4, (112, 126, 55) devrait trouver vertpomme"
            )
        elif plusSemblable((133, 73, 213), couleurs) != gris:
            print("[ERREUR] question 4, (133, 73, 213) devrait trouver gris")
        elif plusSemblable((246, 68, 114), couleurs) != rouge:
            print("[ERREUR] question 4, (246, 68, 114) devrait trouver rouge")
        elif plusSemblable((64, 27, 0), couleurs) != noir:
            print("[ERREUR] question 4, (64, 27, 0) devrait trouver noir")
        elif plusSemblable((134, 238, 190), couleurs) != gris:
            print("[ERREUR] question 4, (134, 238, 190) devrait trouver gris")
        else:
            print("# Question 5: plusSemblable --> OK")

    # Question 6
    if n == 6:
        im = Image(image1)
        repeindre(im, bgn)
        im.save("question-6a.ppm")
        print(
            "# Question 6: l'image \"question-6a.ppm\" doit être identique à \"question-4.ppm\""
        )

        im = Image(image2)
        repeindre(im, couleurs)
        im.save("question-6b.ppm")
        print('# Question 6: voir fichier "question-6b.ppm"')

    # Question 7
    if n == 7:
        for k in range(2, 20):
            l = genKgris(k)
            testKgris(l, k)

        im = Image(image1)
        l = genKgris(16)
        repeindre(im, l)
        im.save("question-7.ppm")
        print('# Question 7: voir fichier "question-7.ppm"')

    # Question 8
    if n == 8:
        # Define predicates from the ``dansCercle`` and ``dansRectangle``
        # functions
        def predicatCercle1(x, y):
            return dansCercle(x, y, 200, 125, 95)

        def predicatCercle2(x, y):
            return dansCercle(x, y, 30, 150, 150)

        def predicatRectangle1(x, y):
            return dansRectangle(x, y, 280, 30, 80, 200)

        im = Image(image2)
        repeindreSi(im, couleurs, predicatCercle1)
        im.save("question-8a.ppm")
        print('# Question 8: voir fichier "question-8a.ppm"')

        im = Image(image1)
        repeindreSi(im, couleurs, predicatCercle2)
        l = genKgris(16)
        repeindreSi(im, l, predicatRectangle1)
        im.save("question-8b.ppm")
        print('# Question 8: voir fichier "question-8b.ppm"')

    # Question 9
    if n == 9:
        im = Image(image3)
        faireNBandes(im, 3)
        im.save("question-9a.ppm")
        print('# Question 9: voir fichier "question-9a.ppm"')

        im = Image(image3)
        faireNBandes(im, 10)
        im.save("question-9b.ppm")
        print('# Question 9: voir fichier "question-9b.ppm"')
Example #35
def recongize():
    '''
    Fetches image from incoming text message. Sends image to app.py (Amazon Rekognition) for analysis.
    Sends reply text message.
    '''
    logging.info("MESSAGE RECIEVED")
    # Checks if application is being hit too many times per time interval.
    # If so, send an email alert.
    MY_ALERT.abuse_check()

    # Fetches image url from Twilio request object
    media_url = request.values.get('MediaUrl0')
    logging.info(media_url)

    # Fetches incoming phone number
    external_num = request.values.get('From')
    logging.info("From: " + external_num)

    # Fetches twilio phone number
    twilio_num = request.values.get('To')
    logging.info("Twilio number: " + twilio_num)

    # Create image object from image url
    target_image = RecognitionImage(image_url=media_url)

    try:
        # Makes POST request with image to app.py on port 8888
        logging.info("Sending image to rekognition app...")
        recognizer.recognize(target_image)

        # Sends reply text based on content in Amazon Rekognition's response
        resp = MessagingResponse()

        logging.info("Found %s faces" % target_image.recognized_faces)
        if len(target_image.recognized_faces) > 0:

            logging.info("Processing faces ...")
            face_messages = process_faces(target_image)

            if len(face_messages) == 0:
                resp.message(failure_message)
                del target_image
                return str(resp)

            logging.info("Sending image up to S3")
            key_str = 'applications/faces/' + str(uuid.uuid4()) + '.png'
            target_image.get_image_file().seek(0)

            # IT IS FAILING RIGHT HERE APPARENTLY
            # s3.Bucket('int.nyt.com').put_object(Key=key_str, Body=target_image.get_image_file(), ContentType='image/png')
            url = persist_file(key_str, target_image.get_image_file())

            url = "https://int.nyt.com/" + key_str
            logging.info("Image uploaded to: " + url)
            logging.info("\n".join(face_messages))
            resp.message("\n".join(face_messages))

            message = twilio_client.messages.create(to=external_num,
                                                    from_=twilio_num,
                                                    media_url=url)

        else:
            logging.info("Failure message sent")
            resp.message(failure_message)

    finally:
        del target_image

    return str(resp)
Example #36
    def read_image(path):
        image_aux = cv2.imread(path, 1)
        height = image_aux.shape[0]
        width = image_aux.shape[1]
        image = Image(image_aux, path, height, width)
        return image
Example #37
from image import Image

# Modify to match problem
rows = 5
columns = 47
width = 3
height = 4.5

# Increase to get better results
swaps = 5000

# Setup
image = Image(rows, columns, patch_width=width, patch_height=height)
percentage = swaps / 100

# Run
for i in xrange(swaps):
    last_total_distances = image.get_total_distances()
    image.swap_random()

    if image.get_total_distances() <= last_total_distances:
        image.undo_swap()

    # Print percentage
    if i % percentage == 0:
        print i / percentage, "%:", last_total_distances

# Print results
for patch in image.all_patches():
    row = patch.position['row'] + 1
    column = patch.position['column'] + 1
Example #38
def run():
    '''
    This script tests a robot based on the code in robot.py on a maze given
    as an argument when running the script.
    '''

    gui = False
    if 3 <= len(sys.argv) and sys.argv[2] == '--gui':
        gui = True
    # Create a maze based on input argument on command line.
    testmaze = Maze( str(sys.argv[1]) )
    if gui:
        pygame.init()
        size = 50 * testmaze.dim + 100
        screen = pygame.display.set_mode((size, size))
        image = Image(testmaze.dim, screen)
        thread = threading.Thread(target=pygame_loop)
        thread.start()
    else:
        image = DummyImage()
    # Initialize a robot; robot receives info about maze dimensions.

    game_objects.append(image)
    testrobot = Robot(testmaze.dim, image)
    # Record robot performance over two runs.
    runtimes = []
    total_time = 0
    for run in range(2):
        print "Starting run {}.".format(run)

        # Set the robot in the start position. Note that robot position
        # parameters are independent of the robot itself.
        robot_pos = {'location': [0, 0], 'heading': 'up'}

        run_active = True
        hit_goal = False
        while run_active:
            total_time += 1
            # check for end of time
            if total_time > max_time:
                run_active = False
                print "Allotted time exceeded."
                break

            # provide robot with sensor information, get actions
            sensing = [testmaze.dist_to_wall(robot_pos['location'], heading)
                       for heading in dir_sensors[robot_pos['heading']]]
            rotation, movement = testrobot.next_move(sensing)

            # check for a reset
            if (rotation, movement) == ('Reset', 'Reset'):
                if run == 0 and hit_goal:
                    run_active = False
                    runtimes.append(total_time)
                    print "Ending first run. Starting next run."
                    break
                elif run == 0 and not hit_goal:
                    print "Cannot reset - robot has not hit goal yet."
                    continue
                else:
                    print "Cannot reset on runs after the first."
                    continue

            # perform rotation
            if rotation == -90:
                robot_pos['heading'] = dir_sensors[robot_pos['heading']][0]
            elif rotation == 90:
                robot_pos['heading'] = dir_sensors[robot_pos['heading']][2]
            elif rotation == 0:
                pass
            else:
                print "Invalid rotation value, no rotation performed."

            # perform movement
            if abs(movement) > 3:
                print "Movement limited to three squares in a turn."
            movement = max(min(int(movement), 3), -3) # fix to range [-3, 3]
            while movement:
                if movement > 0:
                    if testmaze.is_permissible(robot_pos['location'], robot_pos['heading']):
                        robot_pos['location'][0] += dir_move[robot_pos['heading']][0]
                        robot_pos['location'][1] += dir_move[robot_pos['heading']][1]
                        movement -= 1
                    else:
                        print "Movement stopped by wall."
                        movement = 0
                else:
                    rev_heading = dir_reverse[robot_pos['heading']]
                    if testmaze.is_permissible(robot_pos['location'], rev_heading):
                        robot_pos['location'][0] += dir_move[rev_heading][0]
                        robot_pos['location'][1] += dir_move[rev_heading][1]
                        movement += 1
                    else:
                        print "Movement stopped by wall."
                        movement = 0

            # check for goal entered
            goal_bounds = [testmaze.dim/2 - 1, testmaze.dim/2]
            if robot_pos['location'][0] in goal_bounds and robot_pos['location'][1] in goal_bounds:
                hit_goal = True
                if run != 0:
                    runtimes.append(total_time - sum(runtimes))
                    run_active = False
                    print "Goal found; run {} completed!".format(run)

    # Report score if robot is successful.
    if len(runtimes) == 2:
        print "Task complete! Score: {:4.3f}".format(runtimes[1] + train_score_mult*runtimes[0])
        done = True
    if gui:
        thread.join()
Example #39
def main():
    parser = argparse.ArgumentParser(
        description='Convert your pictures into a Lego mosaic.')
    # parser.add_argument('--server', action='run_server', help='If toggled, displays output to screen')
    parser.add_argument('--server',
                        nargs='?',
                        const=True,
                        type=bool,
                        default=False,
                        help='Starts a ZMQ server')
    parser.add_argument('input_filename',
                        metavar='input_filename',
                        type=str,
                        nargs='?',
                        help='The input image to convert')
    parser.add_argument('output_filename',
                        metavar='output_filename',
                        type=str,
                        const='out.jpg',
                        default='out.jpg',
                        nargs='?',
                        help='The output image to save to')
    parser.add_argument('--show',
                        nargs='?',
                        const=True,
                        type=bool,
                        default=False,
                        help='If toggled, displays output to screen')
    parser.add_argument('--size',
                        nargs='?',
                        const=5,
                        type=int,
                        default=5,
                        help='The rendering length of each tile.')
    parser.add_argument('--length',
                        nargs='?',
                        const=48,
                        type=int,
                        default=48,
                        help='The length of each mosaic.')
    parser.add_argument('--num_clusters',
                        nargs='?',
                        const=7,
                        type=int,
                        default=7,
                        help='Number of color clusters to quantize')
    parser.add_argument('--palette_scheme',
                        nargs='?',
                        const='colors2010',
                        type=str,
                        default='colors2010',
                        help='Color palette scheme to use')
    # parser.add_argument('--show', dest='accumulate', action='store_const',
    #    const=sum, default=max,
    #    help='sum the integers (default: find the max)')

    args = parser.parse_args()

    color_generator = Color_Generator()

    def load_color_palettes():
        pattern = os.path.dirname(__file__) + '/resources/*.csv'
        for filename in glob.iglob(pattern):
            palette_name = os.path.splitext(os.path.basename(filename))[0]
            color_generator.load_palette(palette_name, filename)

    load_color_palettes()

    start_server = args.server
    if (start_server):
        run_server(IP, PORT)

    input_filename = args.input_filename
    output_filename = args.output_filename
    tile_size = args.size
    n = args.num_clusters
    img_length = args.length
    palette_scheme = args.palette_scheme
    print '************************'
    print palette_scheme
    print '************************'

    # resource_path = 'legocolors2010.csv'

    # csv_filepath = pkg_resources.resource_filename(__name__, resource_path)
    # color_generator.load_palette('greyscale', )

    img = Image(img_length)
    img.load_file(input_filename) \
        .apply_filter(QuantizeFilter(n)) \
        .apply_filter(ConstrainPaletteFilter(color_generator, palette_scheme)) \
        .apply_filter(BuildMapFilter(tile_size)) \
        .save_file(output_filename)

    img.generate_instructions(tile_size)

    if args.show:
        img.show()
Example #40
    def get_similar_images(self, dir_name, file_name, limit):
        img = cv.imread(dir_name + '/' + file_name)
        img = self.image_processor.resize_image(img)
        query_hist_vector = self.image_processor.generate_hist_vector(img)
        if self.use_wavelets is True:
            query_tex_vector = self.image_processor.generate_wavelet_texture_vector(
                img)
        else:
            query_tex_vector = self.image_processor.generate_glcm_texture_vector(
                img)

        query_vector = np.concatenate((query_hist_vector, query_tex_vector))

        query_vector_sum = np.sum(query_vector[0:6])
        query_vector[0:6] = query_vector[0:6] / query_vector_sum
        query_vector_discrete = mh.make_vector_discrete(query_vector)

        image = Image(file_name, dir_name, img, query_vector,
                      query_vector_discrete, query_hist_vector,
                      query_tex_vector)

        similar = self.image_repository.get_similar_images(
            image, self.texture_props)
        distances = {}
        distances_sum = {}
        distances_hist = {}
        distances_tex = {}
        for img_name in similar:
            img_hist_vector = self.image_repository.get_image_hist_vector(
                img_name)
            img_tex_vector = self.image_repository.get_image_tex_vector(
                img_name)
            distances_hist[img_name] = mh.get_cosine_distance(
                img_hist_vector, query_hist_vector)
            distances_tex[img_name] = mh.get_cosine_distance(
                img_tex_vector, query_tex_vector)
            distances_sum[
                img_name] = distances_hist[img_name] + distances_tex[img_name]

        sorted_similar_names = []
        sorted_similar_images = []
        if len(similar) > 0:
            max_hist = max(distances_hist.values())
            max_tex = max(distances_tex.values())
            max_sum = max(distances_sum.values())
            if max_sum > 0.0:
                # distances_hist = {key: value / max_hist for key, value in distances_hist.items()}
                # distances_tex = {key: value / max_tex for key, value in distances_tex.items()}
                distances_sum = {
                    key: value / max_sum
                    for key, value in distances_sum.items()
                }
                for img_name in similar:
                    if distances_sum[img_name] <= self.distance_limit_sum:
                        distances[img_name] = distances_sum[img_name]
                sorted_similar_names = self.sorting_strategy.sort(
                    distances, False)
                sorted_similar_names = sorted_similar_names[0:limit]
            else:
                sorted_similar_names = similar

        for img_name in sorted_similar_names:
            img = self.image_repository.get_image(img_name)
            image = Image(img_name, '', img, None, None, None, None)
            sorted_similar_images.append(image)

        return sorted_similar_images
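`get_similar_images` combines a colour-histogram distance and a texture distance (wavelet- or GLCM-based), normalises the summed distance by its maximum, keeps candidates under `distance_limit_sum`, and returns the closest matches as `Image` objects. A hedged call sketch; the class name, constructor arguments and paths are placeholders:

# Hypothetical usage; ImageSearchService stands in for the class defining
# get_similar_images() above.
service = ImageSearchService(use_wavelets=True)
matches = service.get_similar_images('./queries', 'query.jpg', limit=10)
print('{0} similar images found'.format(len(matches)))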
Example #41
def renderSkin(dst,
               vertsPerPrimitive,
               verts,
               index=None,
               objectMatrix=None,
               texture=None,
               UVs=None,
               textureMatrix=None,
               color=None,
               clearColor=None):

    if isinstance(dst, Texture):
        glBindTexture(GL_TEXTURE_2D, dst.textureId)
    elif isinstance(dst, Image):
        dst = Texture(image=dst)
    elif isinstance(dst, tuple):
        dimensions = dst
        dst = Texture(size=dimensions)
        if dst.width < dimensions[0] or dst.height < dimensions[1]:
            raise RuntimeError(
                'Could not allocate render texture with dimensions: %s' %
                str(dst))
    else:
        raise RuntimeError('Unsupported destination: %r' % dst)

    width, height = dst.width, dst.height

    framebuffer = safeRun(glGenFramebuffers,
                          1,
                          fallbacks=(glGenFramebuffersEXT))
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            framebuffer,
            fallbacks=(glBindFramebufferEXT))
    safeRun(glFramebufferTexture2D,
            GL_DRAW_FRAMEBUFFER,
            GL_COLOR_ATTACHMENT0,
            GL_TEXTURE_2D,
            dst.textureId,
            0,
            fallbacks=(glFramebufferTexture2DEXT))
    safeRun(glFramebufferTexture2D,
            GL_READ_FRAMEBUFFER,
            GL_COLOR_ATTACHMENT0,
            GL_TEXTURE_2D,
            dst.textureId,
            0,
            fallbacks=(glFramebufferTexture2DEXT))

    if clearColor is not None:
        glClearColor(clearColor[0], clearColor[1], clearColor[2],
                     clearColor[3])
        glClear(GL_COLOR_BUFFER_BIT)

    glVertexPointer(verts.shape[-1], GL_FLOAT, 0, verts)

    if texture is not None and UVs is not None:
        if isinstance(texture, Image):
            tex = Texture()
            tex.loadImage(texture)
            texture = tex
        if isinstance(texture, Texture):
            texture = texture.textureId
        glEnable(GL_TEXTURE_2D)
        glEnableClientState(GL_TEXTURE_COORD_ARRAY)
        glBindTexture(GL_TEXTURE_2D, texture)
        glTexCoordPointer(UVs.shape[-1], GL_FLOAT, 0, UVs)

    if color is not None:
        glColorPointer(color.shape[-1], GL_UNSIGNED_BYTE, 0, color)
        glEnableClientState(GL_COLOR_ARRAY)
    else:
        glDisableClientState(GL_COLOR_ARRAY)
        glColor4f(1, 1, 1, 1)

    glDisableClientState(GL_NORMAL_ARRAY)
    glDisable(GL_LIGHTING)

    glDepthMask(GL_FALSE)
    glDisable(GL_DEPTH_TEST)
    # glDisable(GL_CULL_FACE)

    glPushAttrib(GL_VIEWPORT_BIT)
    glViewport(0, 0, width, height)

    glMatrixMode(GL_MODELVIEW)
    glPushMatrix()
    if objectMatrix is not None:
        glLoadTransposeMatrixd(objectMatrix)
    else:
        glLoadIdentity()

    glMatrixMode(GL_PROJECTION)
    glPushMatrix()
    glLoadIdentity()
    glOrtho(0, 1, 0, 1, -100, 100)

    if textureMatrix is not None:
        glMatrixMode(GL_TEXTURE)
        glPushMatrix()
        glLoadTransposeMatrixd(textureMatrix)

    if index is not None:
        glDrawElements(g_primitiveMap[vertsPerPrimitive - 1], index.size,
                       GL_UNSIGNED_INT, index)
    else:
        glDrawArrays(g_primitiveMap[vertsPerPrimitive - 1], 0, verts[:, :,
                                                                     0].size)

    if textureMatrix is not None:
        glMatrixMode(GL_TEXTURE)
        glPopMatrix()

    glMatrixMode(GL_PROJECTION)
    glPopMatrix()

    glMatrixMode(GL_MODELVIEW)
    glPopMatrix()

    glPopAttrib()

    glEnable(GL_DEPTH_TEST)
    glDepthMask(GL_TRUE)

    glEnable(GL_LIGHTING)
    glEnableClientState(GL_NORMAL_ARRAY)

    glEnableClientState(GL_COLOR_ARRAY)

    # TODO: consider whether this is going to cause a crash on AMD cards. It's possible it should be commented out.
    glDisable(GL_TEXTURE_2D)

    glDisableClientState(GL_TEXTURE_COORD_ARRAY)

    surface = np.empty((height, width, 4), dtype=np.uint8)
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, surface)
    surface = Image(data=np.ascontiguousarray(surface[::-1, :, :3]))

    safeRun(glFramebufferTexture2D,
            GL_DRAW_FRAMEBUFFER,
            GL_COLOR_ATTACHMENT0,
            GL_TEXTURE_2D,
            0,
            0,
            fallbacks=(glFramebufferTexture2DEXT))
    safeRun(glFramebufferTexture2D,
            GL_READ_FRAMEBUFFER,
            GL_COLOR_ATTACHMENT0,
            GL_TEXTURE_2D,
            0,
            0,
            fallbacks=(glFramebufferTexture2DEXT))
    safeRun(glBindFramebuffer,
            GL_FRAMEBUFFER,
            0,
            fallbacks=(glBindFramebufferEXT))
    safeRun(glDeleteFramebuffers,
            np.array([framebuffer]),
            fallbacks=(glDeleteFramebuffersEXT))
    glBindTexture(GL_TEXTURE_2D, 0)

    return surface