Example #1
def query_blob(id):
    # Crop the requested region out of the stored image using the coordinates
    # posted with the form.
    blob = Image("static\\img\\%d.jpg" % id).crop(
        int(request.form["x"]), int(request.form["y"]), int(request.form["width"]), int(request.form["height"])
    )
    if blob and "SK_MODEL" in app.config:
        # Scale the longer side to PATCH_SIZE (preserving aspect ratio),
        # then pad the patch onto a PATCH_SIZE x PATCH_SIZE canvas.
        if blob.height > blob.width:
            blob = blob.resize(h=app.config["PATCH_SIZE"])
        else:
            blob = blob.resize(w=app.config["PATCH_SIZE"])
        blob = blob.embiggen((app.config["PATCH_SIZE"], app.config["PATCH_SIZE"]))
        # Flatten to a 1-D grayscale vector, classify, and map the numeric
        # prediction back to its original label.
        np_img = blob.getGrayNumpy().transpose().reshape(-1)
        pred = labels.inverse_transform(sk_model.predict(np_img))[0]
        return jsonify(prediction=pred)
    else:
        return jsonify(prediction="")
Example #2
def parse_frame(img):
    """
    Parses a SimpleCV image object of a frame from Super Hexagon.
    Returns a ParsedFrame object containing selected features.
    """

    # helper image size variables
    w,h = img.size()
    midx,midy = w/2,h/2

    # Create normalized images for targeting objects in the foreground or background.
    # (This normalization is handy since Super Hexagon's colors are inverted for some parts of the game)
    # fg_img = foreground image (bright walls, black when binarized)
    # bg_img = background image (bright space, black when binarized)
    fg_img = img
    if sum(img.binarize().getPixel(midx,midy)) == 0:
        fg_img = img.invert()
    bg_img = fg_img.invert()

    # We need to close any gaps around the center wall so we can detect its containing blob.
    # The gaps are artifacts resulting from video encoding.
    # The 'erode' function does this by expanding the dark parts of the image.
    bimg = bg_img.binarize()
    bimg = black_out_GUI(bimg)

    blobs = bimg.findBlobs()
    cursor_blob = get_cursor_blob(blobs, h, midx, midy)

    if cursor_blob:
        cursor_point = map(int, cursor_blob.centroid())
        cursor_r, cursor_angle = cart_to_polar(cursor_point[0] - midx, midy - cursor_point[1])

        # Convert the angle from radians to degrees, then flip and wrap it into the 0-360 range.
        cursor_angle = int(cursor_angle * 360 / (2 * pi))
        cursor_angle = 180 - cursor_angle
        if cursor_angle < 0:
            cursor_angle += 360

        bimg = black_out_center(bimg, cursor_r).applyLayers()
        arr = bimg.resize(100).getGrayNumpy() > 100
        rot_arr = arr_to_polar(arr)
        rot_img = Image(PIL.Image.fromarray(np.uint8(np.transpose(rot_arr)*255))).dilate(iterations=3)
        rot_arr = rot_img.getGrayNumpy() > 100
        rot_img = rot_img.resize(400).flipVertical()
        return ParsedFrame(img, bimg, arr, rot_arr, rot_img, cursor_r, cursor_angle)
    else:
        return None
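parse_frame calls a `cart_to_polar` helper that is not part of this excerpt. From the way its result is used (a radius plus an angle that is later converted from radians to degrees), it presumably looks roughly like the sketch below; the original implementation may differ:

from math import atan2, hypot, pi

def cart_to_polar(x, y):
    # Sketch of the helper assumed above: returns the radius and the angle in
    # radians, measured counter-clockwise from the positive x axis and wrapped
    # into [0, 2*pi).
    r = hypot(x, y)
    theta = atan2(y, x)
    if theta < 0:
        theta += 2 * pi
    return r, theta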
Example #4
    def getRotateBoxTest(self):
        if not self._rotBoxTest:

            grayImg = self.image.toGray()
            # High-pass: subtract a box-filtered (smoothed) copy so only fine detail remains.
            diff = grayImg - self._boxFilter(grayImg, 35)

            # One bin per 10-degree step over [-90, 90).
            result = [0] * 18

            if not self.ellipse.error and self.ellipse.a > 0 and self.ellipse.b > 0:
                res = countInWindowEst(diff.threshold(6).getGrayNumpy(), 5)
                res = self._getRotatedAndScaledGalImg(Image(res))

                if res is not None:
                    res = res.getGrayNumpy()

                    img = Image(res)
                    ellipse = self.ellipse
                    center = (img.width/2, img.height/2)
                    width = 10
                    diameter = int(max(ellipse.a, ellipse.b)*2)
                    mask = np.zeros_like(img.getGrayNumpy(), dtype=np.bool)
                    mask[center[0]-width/2:center[0]+width/2, center[1]-diameter/2:center[1]+diameter/2] = True
                    mask = Image(mask).threshold(0)

                    maxI = 0
                    maxThetaVal = 0

                    values = []

                    # Rotate the bar-shaped mask through 10-degree steps and record
                    # the average masked intensity at each orientation.
                    for i, theta in enumerate(xrange(-90, 90, 10)):
                        masked = (img & mask.rotate(theta, point=(img.width/2, img.height/2))).getGrayNumpy()
                        val = np.sum(masked)/float(img.width*img.height)
                        values.append(val)

                        if val > maxThetaVal:
                            maxThetaVal = val
                            maxI = i

                    # Circular shift so the strongest orientation lands in bin 0.
                    result[0:len(values)-maxI] = values[maxI:]
                    result[len(values)-maxI:] = values[0:maxI]

            self._rotBoxTest = result

        return self._rotBoxTest
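The two slice assignments near the end of getRotateBoxTest are a circular shift that moves the strongest orientation response into bin 0. With NumPy the same reordering can be expressed with `roll`; this is an equivalent sketch, not the original code:

import numpy as np

values = np.random.rand(18)        # stand-in for the 18 per-orientation responses
maxI = int(np.argmax(values))      # index of the strongest response

# Equivalent to:
#   result[0:len(values)-maxI] = values[maxI:]
#   result[len(values)-maxI:]  = values[0:maxI]
result = np.roll(values, -maxI).tolist()
assert result[0] == values.max()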