def __init__(self, graphLP, input, laplacian, output):
    """Build a Laplacian-pyramid construction node.

    Validates types/resolutions, then pre-allocates one intermediate U8
    image per pyramid level (plus one for the final output) in self.outF.

    :param graphLP: graph/context used to create intermediate images
    :param input: U8 source image (width multiple of 16, height >= 8)
    :param laplacian: S16 pyramid with half-scale levels; level 0 must
        match the input resolution
    :param output: S16 image receiving the residual of the last level
    :raises TypeError: image type constraints not met
    :raises ValueError: scale or resolution constraints not met
    """
    logger = logging.getLogger(__name__)
    ret0 = (input.type == brcm_openvx.VX_DF_IMAGE_U8)
    ret1 = (laplacian.type == brcm_openvx.VX_DF_IMAGE_S16)
    ret2 = (output.type == brcm_openvx.VX_DF_IMAGE_S16)
    ret = (ret0 & ret1 & ret2)
    if ret is not True:
        logger.error(
            'VXLaplacianPyramidNode: input/output type constraints are not met'
        )
        raise TypeError
    # Only the half-scale pyramid layout is supported by this node.
    if (laplacian.scale != brcm_openvx.VX_SCALE_PYRAMID_HALF):
        logger.error(
            'VXLaplacianPyramidNode: scale constraints are not met %s' %
            (laplacian.scale))
        raise ValueError
    if ((input.width % 16 != 0) or (input.height < 8)):
        logger.error(
            'VXLaplacianPyramidNode: input/output resolution constraints are not met inW = %s inH = %s'
            % (input.width, input.height))
        raise ValueError
    # Level 0 of the pyramid must be the same resolution as the input.
    outImg = laplacian.getImage(0)
    if ((input.width != outImg.width) or (input.height != outImg.height)):
        logger.error(
            'VXLaplacianPyramidNode: input W=%s H=%s and first level pyramid W=%s H=%s'
            % (input.width, input.height, outImg.width, outImg.height))
        raise ValueError
    graph.VXNode.__init__(self)
    self.input = input
    self.pyramid = laplacian
    self.output = output
    # NOTE(review): this getDetails() result is overwritten inside the loop
    # below before being used — appears to be dead; confirm before removing.
    (inputStore, inputW, inputH, inputS) = input.getDetails()
    self.storage = []
    self.outF = []
    # One job per pyramid level plus one for the final residual stage.
    self.setNumJobs(self.pyramid.getNumLevels() + 1)
    # NOTE(review): iterates `self.pyramid.levels` while the job count above
    # uses getNumLevels() — presumably these agree; confirm they are the
    # same quantity.
    for i in range(1, self.pyramid.levels):
        input = self.pyramid.getImage(i)
        (inputStore, inputW, inputH, inputS) = input.getDetails()
        # Scratch U8 image matching level i, used as a filter intermediate.
        tmpImg = image.VXImage(graphLP, inputW, inputH,
                               brcm_openvx.VX_DF_IMAGE_U8, None)
        self.outF.append(tmpImg)
    # Final scratch image sized like the output image.
    (inputStore, inputW, inputH, inputS) = output.getDetails()
    tmpImg = image.VXImage(graphLP, inputW, inputH,
                           brcm_openvx.VX_DF_IMAGE_U8, None)
    self.outF.append(tmpImg)
    # Depth-convert the last U8 scratch image into the S16 output.
    self.sclImg = bitdepth.VXConvertDepthNode(tmpImg, output)
def compute(self, graph_PedDetect):
    """Build the scale pyramid and HOG buffers for pedestrian detection.

    Allocates two ping-pong storage buffers, creates successively
    down-scaled images (stopping once a level drops below the 48x96
    detection window), then allocates the histogram / integral-HOG /
    result images.

    :param graph_PedDetect: graph/context providing allocateStorage()
    :returns: True on success
    """
    (imStore, width, height, step) = self.image.getDetails()
    # Two full-size buffers, reused (ping-pong) across pyramid levels.
    self.storage.append(graph_PedDetect.allocateStorage(imStore.getSize()))
    self.storage.append(graph_PedDetect.allocateStorage(imStore.getSize()))
    # Replicate the pair so self.storage[i] is valid for each level.
    # NOTE(review): with an odd self.levels this yields levels-1 entries;
    # assumes the break below (or an even level count) prevents an
    # out-of-range index — TODO confirm.
    self.storage = self.storage * (self.levels // 2)
    w = width
    h = height
    for i in range(0, self.levels):
        w = int(w / self.scale)
        h = int(h / self.scale)
        if (w < 48) or (h < 96):
            # Level too small for the 48x96 detection window; truncate
            # the pyramid here. Two jobs (scale + detect) per level.
            self.levels = i
            self.setNumJobs(2 * self.levels)
            break
        tempImage = image.VXImage(graph_PedDetect, w, h,
                                  brcm_openvx.VX_DF_IMAGE_U8,
                                  self.storage[i])
        self.images.append(tempImage)
        # Each level is scaled from the previous one (level 0 from the
        # original input image).
        if i == 0:
            tempScaleNode = scale.VXScaleNode(
                self.image, self.images[i],
                brcm_openvx.VX_INTERPOLATION_BILINEAR)
        else:
            tempScaleNode = scale.VXScaleNode(
                self.images[i - 1], self.images[i],
                brcm_openvx.VX_INTERPOLATION_BILINEAR)
        tempScaleNode.compute(graph_PedDetect)
        self.scaleNodes.append(tempScaleNode)
    # Histogram buffers are laid out per 8x8 cell / 16x16 macroblock.
    histobufWidth = (width >> 3) - 1
    histobufHeight = (height >> 3) - 1
    mbx = width >> 4
    mby = height >> 4
    # Round macroblock counts up when the cell grid is even-sized.
    if ((histobufWidth & 1) == 0):
        mbx += 1
    if ((histobufHeight & 1) == 0):
        mby += 1
    self.histogram = image.VXImage(graph_PedDetect, 36 * 2 * mbx, 2 * mby,
                                   brcm_openvx.VX_DF_IMAGE_S16, None)
    self.intHog = image.VXImage(graph_PedDetect, 36 * 2 * mbx, mby + 1,
                                brcm_openvx.VX_DF_IMAGE_S32, None)
    self.results = image.VXImage(graph_PedDetect, 48 * 2, 8,
                                 brcm_openvx.VX_DF_IMAGE_S32, None)
    # BUGFIX: use floor division so the byte count stays an int under
    # Python 3 ('/' would produce a float and a fractional byte size).
    self.syncMatStorage = graph_PedDetect.allocateStorage(
        ((1088 // 16) + 1 + 1 + 16) * 2 * 4)
    return True
def __init__(self, graph, old_images, new_images, old_points, new_points_estimates, new_points, max_count, epsilon, winsize):
    """Build a pyramidal Lucas-Kanade optical-flow node.

    :param graph: graph/context used to create scratch images
    :param old_images: U8 pyramid of the previous frame
    :param new_images: U8 pyramid of the current frame (same resolution
        as old_images; width a multiple of 8)
    :param old_points: keypoint array tracked from the previous frame
    :param new_points_estimates: keypoint array of initial estimates
    :param new_points: keypoint array receiving the tracked results
    :param max_count: iteration-termination count
    :param epsilon: iteration-termination threshold
    :param winsize: tracking window size
    :raises TypeError: image/array type constraints not met
    :raises ValueError: resolution constraints not met
    """
    logger = logging.getLogger(__name__)
    ret0 = (old_images.type == brcm_openvx.VX_DF_IMAGE_U8)
    ret1 = (new_images.type == brcm_openvx.VX_DF_IMAGE_U8)
    ret2 = (old_points.type == brcm_openvx.VX_TYPE_KEYPOINT)
    ret3 = (new_points.type == brcm_openvx.VX_TYPE_KEYPOINT)
    ret4 = (new_points_estimates.type == brcm_openvx.VX_TYPE_KEYPOINT)
    ret = (ret0 & ret1 & ret2 & ret3 & ret4)
    if ret is not True:
        logger.error(
            'OpticalFlowPyrLKNode: input/output type constraints are not met'
        )
        raise TypeError
    # Both pyramids must match exactly and be 8-pixel aligned in width.
    if ((old_images.width != new_images.width) or
            (old_images.height != new_images.height) or
            (old_images.width % 8 != 0)):
        logger.error(
            'OpticalFlowPyrLKNode: image resolution constraints are not met oldimgW = %s oldimgH = %s newImgW = %s newImgH = %s'
            % (old_images.width, old_images.height, new_images.width,
               new_images.height))
        raise ValueError
    # NOTE(review): unlike the other node constructors in this file, this
    # one never calls graph.VXNode.__init__(self) before setNumJobs() —
    # confirm the base-class init is performed elsewhere.
    self.oldPyramid = old_images
    self.newPyramid = new_images
    self.old_points = old_points
    self.new_points_estimates = new_points_estimates
    self.new_points = new_points
    self.max_count = max_count
    self.epsilon = epsilon
    self.winsize = winsize
    # Fixed upper bound on the number of tracked points.
    self.pointCount = 3000
    # One job per pyramid level.
    self.setNumJobs(new_images.getNumLevels())
    self.max_level = new_images.getNumLevels() - 1
    # Scratch S32C2 buffers sized from the highest-resolution level,
    # one 16x16 macroblock granularity entry per block.
    high_res_img = new_images.getImage(0)
    (imgStore, imgW, imgH, imgS) = high_res_img.getDetails()
    self.tmpOld = image.VXImage(graph, (imgW >> 4) - 1, (imgH >> 4) - 1,
                                brcm_openvx.VX_DF_IMAGE_S32C2, None)
    self.tmpNew = image.VXImage(graph, (imgW >> 4) - 1, (imgH >> 4) - 1,
                                brcm_openvx.VX_DF_IMAGE_S32C2, None)
def __init__(self, graphPyr, input, pyramid):
    """Build a Gaussian-pyramid construction node.

    For ORB-scale pyramids each level is Gaussian-filtered into a shared
    scratch image and then bilinearly scaled into the next level; for
    half-scale pyramids only the per-level job count is set up here.

    :param graphPyr: graph/context used for scratch storage/images
    :param input: U8 source (width multiple of 16, height multiple of 8)
    :param pyramid: U8 destination pyramid (ORB or half scale); level 0
        must match the input resolution
    :raises TypeError: image type constraints not met
    :raises ValueError: scale or resolution constraints not met
    """
    logger = logging.getLogger(__name__)
    ret0 = (input.type == brcm_openvx.VX_DF_IMAGE_U8)
    ret1 = (pyramid.type == brcm_openvx.VX_DF_IMAGE_U8)
    ret = (ret0 & ret1)
    if ret is not True:
        logger.error(
            'VXGaussianPyramidNode: input/pyramid type constraints are not met'
        )
        raise TypeError
    if ((input.width % 16 != 0) or (input.height % 8 != 0)):
        logger.error(
            'VXGaussianPyramidNode: input/pyramid resolution constraints are not met inW = %s inH = %s'
            % (input.width, input.height))
        raise ValueError
    if ((pyramid.scale != brcm_openvx.VX_SCALE_PYRAMID_ORB) and
            (pyramid.scale != brcm_openvx.VX_SCALE_PYRAMID_HALF)):
        logger.error(
            'VXGaussianPyramidNode: pyramid scale constraints are not met scale = %s'
            % (pyramid.scale))
        raise ValueError
    # Level 0 of the pyramid must match the input resolution.
    outImg = pyramid.getImage(0)
    if ((input.width != outImg.width) or (input.height != outImg.height)):
        logger.error(
            'VXGaussianPyramidNode: input W=%s H=%s and first level pyramid W=%s H=%s'
            % (input.width, input.height, outImg.width, outImg.height))
        raise ValueError
    graph.VXNode.__init__(self)
    self.pyramid = pyramid
    self.scaleNode = []
    self.gausspyrORBNode = []
    # Level 0 is a straight copy of the input.
    self.copyNode = arith.VXArithS(input, None, pyramid.getImage(0),
                                   arith.OPCODE_ARITH_COPY, 0, 0)
    # BUGFIX: compare the scale by value (==), not identity (is). The
    # scale is a numeric constant; `is` only worked when the caller
    # passed the exact module attribute object, matching the value
    # comparisons (!=) used in the validation above.
    if (self.pyramid.scale == brcm_openvx.VX_SCALE_PYRAMID_ORB):
        # One shared scratch buffer, big enough for the largest level.
        (inputStore, inputW, inputH, inputS) = input.getDetails()
        size = inputStore.getSize()
        self.storage = graphPyr.allocateStorage(size)
        for i in range(pyramid.getNumLevels() - 1):
            input = self.pyramid.getImage(i)
            output = self.pyramid.getImage(i + 1)
            (inputStore, inputW, inputH, inputS) = input.getDetails()
            # Scratch image aliasing the shared storage at this level's size.
            tmpImg = image.VXImage(graphPyr, inputW, inputH,
                                   brcm_openvx.VX_DF_IMAGE_U8, self.storage)
            node = filter.VXGaussian5x5(input, tmpImg)
            self.gausspyrORBNode.append(node)
            node = scale.VXScaleNode(tmpImg, output,
                                     brcm_openvx.VX_INTERPOLATION_BILINEAR)
            self.scaleNode.append(node)
        # Copy job + (filter + scale) per transition between levels.
        self.setNumJobs(pyramid.getNumLevels() * 2 - 1)
    else:
        # Half-scale path: one job per level.
        self.setNumJobs(pyramid.getNumLevels())
def __init__(self, graph, input, strength_thresh, sensitivity, points, min_distance):
    """Build a Harris corner-detection node.

    :param graph: graph/context used to create the corner scratch image
    :param input: U8 source image (width multiple of 8, height >= 4)
    :param strength_thresh: corner quality threshold, in [1000, 10000]
    :param sensitivity: Harris sensitivity, in (1, 5)
    :param points: keypoint array receiving the detected corners
    :param min_distance: minimum corner spacing, must be < 2
    :raises TypeError: wrong image/array types or non-numeric parameters
    :raises ValueError: resolution or parameter-range constraints violated
    """
    logger = logging.getLogger(__name__)

    # Guard: 8-bit input image and keypoint output array.
    types_ok = ((input.type == brcm_openvx.VX_DF_IMAGE_U8) and
                (points.type == brcm_openvx.VX_TYPE_KEYPOINT))
    if not types_ok:
        logger.error(
            'vxHarrisCornersNode: input/output type constraints are not met'
        )
        raise TypeError

    # Guard: width 8-aligned, at least 4 rows.
    if (input.width % 8 != 0) or (input.height < 4):
        logger.error(
            'vxHarrisCornersNode: input resolution constraints are not met inW=%s inHt=%s'
            % (input.width, input.height))
        raise ValueError

    # Guard: all tuning parameters must be numeric.
    numeric_ok = all(
        isinstance(arg, numbers.Number)
        for arg in (sensitivity, min_distance, strength_thresh))
    if not numeric_ok:
        logger.error(
            'vxHarrisCornersNode:sensitivity/minDist/strength fails')
        raise TypeError

    # Guard: parameter ranges supported by the implementation.
    ranges_ok = ((min_distance < 2) and
                 (1 < sensitivity < 5) and
                 (1000 <= strength_thresh <= 10000))
    if not ranges_ok:
        logger.error(
            'vxHarrisCornersNode:sensitivity/minDist/strength fails')
        raise ValueError

    self.input = input
    self.quality = strength_thresh
    self.min_distance = min_distance
    self.sensitivity = sensitivity
    self.points = points
    self.corners = None
    self.setNumJobs(1)

    # Corner scratch buffer: one S32C2 entry per interior 16x16 macroblock.
    macroblocks_x = input.width >> 4
    macroblocks_y = input.height >> 4
    self.corners = image.VXImage(graph, macroblocks_x - 1,
                                 macroblocks_y - 1,
                                 brcm_openvx.VX_DF_IMAGE_S32C2, None)
def __init__(self, aContext, levels, scale, width, height, type):
    """Build a pyramid: one contiguous allocation holding every level.

    Per-level byte offsets into the shared storage are computed first,
    then a VXImage view is created at each offset with its width rounded
    up to a multiple of 8.

    :param aContext: context providing allocateStorage()
    :param levels: number of pyramid levels (>= 1)
    :param scale: VX_SCALE_PYRAMID_ORB or VX_SCALE_PYRAMID_HALF
    :param width: level-0 width (>= 1)
    :param height: level-0 height (>= 1)
    :param type: image format, within the supported VX_DF_IMAGE range
    :raises ValueError: invalid parameter, scale, or degenerate level size
    """
    logger = logging.getLogger(__name__)

    # Guard: basic parameter sanity.
    params_ok = ((levels >= 1) and (width >= 1) and (height >= 1) and
                 (type >= brcm_openvx.VX_DF_IMAGE_RGB) and
                 (type <= brcm_openvx.VX_DF_IMAGE_HSV_EXT))
    if not params_ok:
        logger.error('VXPyramid":validate: param fails')
        raise ValueError

    # Guard: only the ORB and half scale factors are supported.
    if scale not in (brcm_openvx.VX_SCALE_PYRAMID_ORB,
                     brcm_openvx.VX_SCALE_PYRAMID_HALF):
        logger.error('VXPyramid":validate: scale factor fails')
        raise ValueError

    self.levels = levels
    self.scale = scale
    self.width = width
    self.height = height
    self.type = type

    # First pass: byte offset of every level within the shared storage.
    lvl_w = width
    lvl_h = height
    offsets = [0]
    for lvl in range(levels - 1):
        offsets.append(offsets[lvl] + image.getImageSize(lvl_w, lvl_h, type))
        lvl_w = int(round(lvl_w * scale))
        lvl_h = int(round(lvl_h * scale))
        # Every level except the last must remain larger than 1x1.
        if ((lvl_w <= 1) or (lvl_h <= 1)) and (lvl != levels - 2):
            logger.error('VXPyramid":width and height is less than one')
            raise ValueError
    totalSize = offsets[levels - 1] + image.getImageSize(lvl_w, lvl_h, type)
    self.storage = aContext.allocateStorage(totalSize)

    # Second pass: create a VXImage view at each level's offset.
    lvl_w = width
    lvl_h = height
    self.images = []
    for lvl_offset in offsets:
        # Width rounded up to a multiple of 8; height limitations TBD.
        aligned_w = int((lvl_w + 7) / 8) * 8
        self.images.append(
            image.VXImage(
                aContext, aligned_w, lvl_h, type,
                context.VXStorage(0, 0, lvl_offset, self.storage)))
        lvl_w = int(round(lvl_w * scale))
        lvl_h = int(round(lvl_h * scale))
def compute(self):
    """Build the face-detection scale pipeline.

    Derives the set of resize factors from self.scale / self.minSize,
    allocates the per-scale images and result buffer, and builds the
    half-scale-Gaussian + bilinear-resize node chain.

    :returns: True on success, False if too many scale levels are needed
    :raises TypeError: minSize/scale not numeric
    :raises ValueError: scale/minSize out of range, or zero chunk count
    """
    logger = logging.getLogger(__name__)
    (imStore, width, height, step) = self.image.getDetails()
    self.width = width
    self.height = height
    self.dstOffset = (FACEDETECT_MAX_FACES+8) #8 * FACEDETECT_MAX_FACES/total_cores + 8
    # One dstOffset-sized result slot per core.
    self.results = image.VXImage(self.graph, (self.dstOffset*MAX_TOTAL_CORES),1, brcm_openvx.VX_DF_IMAGE_U8, None)
    ret0 = isinstance(self.scale, numbers.Number)
    ret1 = isinstance(self.minSize, numbers.Number)
    ret = ret0 & ret1;
    if ret is not True:
        logger.error('VXFaceDetect: minSize/Scale is not a valid number')
        raise TypeError
    # Scale step must be in (1.0, 2.0) and the minimum face size must
    # fit within the image height.
    ret0 = (self.scale > 1.0) and (self.scale < 2.0)
    ret2 = (self.minSize <= self.height)
    ret = ret0 & ret2
    if ret is not True:
        logger.error('VXFaceDetect:compute:scale/minSize fails')
        raise ValueError
    # The classifier window is 20x20; derive how many scale steps are
    # needed to bring minSize down to the window size, and from that the
    # first resize factor and the total number of resize levels.
    wndwSize = 20
    pwrNfctr = int(math.ceil((math.log10(self.minSize) - math.log10(wndwSize)) / math.log10(self.scale)))
    frstFctr = math.pow(self.scale, pwrNfctr)
    tmpSize = round(min(self.width,self.height)/ frstFctr)
    self.resizeCntr = int(math.ceil((math.log10(tmpSize) - math.log10(wndwSize)) / math.log10(self.scale)))
    if (self.resizeCntr > FACEDETECT_MAX_SCALE_LVLS):
        return False
    # Number of cheap 2x downscales applied before the fine resizes.
    self.numSclDwn = int(math.floor(math.log10(frstFctr)/math.log10(2.0)))
    self.chunkCnt = []
    for i in range (self.resizeCntr):
        fctr = frstFctr * math.pow(self.scale,i)
        self.factor.append(fctr)
        szW = round(self.width / fctr)
        szH = round(self.height / fctr)
        # Count how many column-step chunks fit across this level's width.
        chunkLoopCnt = szW - (FACEDETECT_WINSIZE_X - FACEDETECT_COL_STEPX_BYTES);
        chnkCnt = 0
        while (1):
            chunkLoopCnt = chunkLoopCnt - FACEDETECT_COL_STEPX_BYTES;
            if (chunkLoopCnt < 0):
                break
            chnkCnt = chnkCnt + 1
        #end of while
        self.chunkCnt.append(int(chnkCnt))
        # A level too narrow to hold a single chunk truncates the pyramid.
        if (self.chunkCnt[i] == 0):
            self.resizeCntr = i
            break;
        img = image.VXImage(self.graph, szW, szH, brcm_openvx.VX_DF_IMAGE_U8, None)
        self.resizeImg.append(img)
    #end for
    if (self.numSclDwn > 0):
        # Two ping-pong quarter-size buffers for the 2x downscale chain.
        # NOTE(review): getSize()/4 is a float under Python 3 true
        # division — confirm allocateStorage accepts it or use //.
        self.storage.append(self.graph.allocateStorage(imStore.getSize()/4)) #tmp for downscale
        self.storage.append(self.graph.allocateStorage(imStore.getSize()/4))
    # Chain of half-scale Gaussian nodes, alternating the two buffers.
    inp = self.image
    sclW = self.width
    sclH = self.height
    for i in range (self.numSclDwn):
        sclW = sclW/2
        sclH = sclH/2
        out = image.VXImage(self.graph, sclW, sclH, brcm_openvx.VX_DF_IMAGE_U8, self.storage[i&1])
        node = pyramid.VXHalfScaleGaussianNode(inp,out,None,5)
        self.hlfsclDwnNodes.append(node)
        inp = out
    # Fine bilinear resizes, each feeding the next level.
    for i in range (self.resizeCntr):
        node = scale.VXScaleNode(inp,self.resizeImg[i],brcm_openvx.VX_INTERPOLATION_BILINEAR)
        self.scaleNode.append(node)
        inp = self.resizeImg[i]
    if (self.chunkCnt[0]==0):
        logger.error('VXFaceDetect:compute:chunkCnt cannot be zero fails')
        raise ValueError
    # Downscale jobs plus (resize + detect) per level.
    self.setNumJobs(self.numSclDwn + (2*self.resizeCntr))
    #end of while
    return True
def __init__(self, graphLPR, laplacian, input, output):
    """Build a Laplacian-pyramid reconstruction node.

    Walks the pyramid from the smallest level to the largest, each pass
    doing: depth-convert -> 2x upsample -> depth-convert -> add the
    pyramid residual; the running S16 result feeds the next pass.

    :param graphLPR: graph/context used for scratch storage/images
    :param laplacian: S16 half-scale residual pyramid
    :param input: S16 smallest-level image (half the size of the last
        pyramid level)
    :param output: U8 image matching pyramid level 0
    :raises TypeError: image type constraints not met
    :raises ValueError: scale or resolution constraints not met
    """
    logger = logging.getLogger(__name__)
    ret0 = (input.type == brcm_openvx.VX_DF_IMAGE_S16)
    ret1 = (laplacian.type == brcm_openvx.VX_DF_IMAGE_S16)
    ret2 = (output.type == brcm_openvx.VX_DF_IMAGE_U8)
    ret = (ret0 & ret1 & ret2)
    if ret is not True:
        logger.error(
            'VXLaplacianReconstructNode: input/output type constraints are not met'
        )
        raise TypeError
    if (laplacian.scale != brcm_openvx.VX_SCALE_PYRAMID_HALF):
        logger.error(
            'VXLaplacianReconstructNode: scale =%s is not supported ' %
            (laplacian.scale))
        raise ValueError
    graph.VXNode.__init__(self)
    self.laplacian = laplacian
    self.levels = laplacian.getNumLevels()
    # Level 0 (largest) must match the output image.
    chkImglast = laplacian.getImage(0)
    if ((chkImglast.width != output.width) or
            (chkImglast.height != output.height)):
        logger.error(
            'VXLaplacianReconstructNode: final pyramid image and output image doesnt match'
        )
        raise ValueError
    # The smallest level must be exactly twice the input resolution.
    chkImgFirst = laplacian.getImage(self.levels - 1)
    if ((chkImgFirst.width != (input.width * 2)) or
            (chkImgFirst.height != (input.height * 2))):
        logger.error(
            'VXLaplacianReconstructNode: first level pyramid image and input image doesnt match appropriately'
        )
        raise ValueError
    self.storage = []
    self.input = input
    self.output = output
    self.upscaleNode = []
    self.outConvNode = []
    self.sclImg = []
    self.addNode = []
    # storage[0]: S16 accumulator (2 bytes/pixel); storage[1]: U8 scratch.
    (outStore, width, height, step) = output.getDetails()
    size = outStore.getSize()
    self.storage.append(graphLPR.allocateStorage(size * 2))
    self.storage.append(graphLPR.allocateStorage(size))
    # Walk levels from smallest (levels-1) to largest (0); `input` is
    # rebound each pass to the S16 result of the previous pass.
    for i in range(self.levels):
        nX = self.levels - i - 1
        gImg = laplacian.getImage(nX)
        (imStore, width, height, step) = gImg.getDetails()
        # U8 scratch at this level's size, aliasing storage[1].
        upsclImgtmp = image.VXImage(graphLPR, width, height,
                                    brcm_openvx.VX_DF_IMAGE_U8,
                                    self.storage[1])
        # U8 view of the current input, aliasing the output storage.
        (imStore, inW, inH, step) = input.getDetails()
        outImg = image.VXImage(graphLPR, inW, inH,
                               brcm_openvx.VX_DF_IMAGE_U8, outStore)
        node = bitdepth.VXConvertDepthNode(input, outImg)
        self.outConvNode.append(node)
        # Upsample 2x into the level-sized scratch image.
        node = VXUpSampleBy2x2Node(outImg, upsclImgtmp)
        self.upscaleNode.append(node)
        # Back to S16, then add this level's residual in place.
        out16Img = image.VXImage(graphLPR, width, height,
                                 brcm_openvx.VX_DF_IMAGE_S16,
                                 self.storage[0])
        node = bitdepth.VXConvertDepthNode(upsclImgtmp, out16Img)
        self.sclImg.append(node)
        node = arith.VXArithV(gImg, out16Img, out16Img,
                              arith.OPCODE_ARITH_ADD, 0)
        self.addNode.append(node)
        input = out16Img
    # Final conversion of the loop-carried S16 result into the output.
    (outStore, width, height, step) = output.getDetails()
    output = image.VXImage(graphLPR, width, height,
                           brcm_openvx.VX_DF_IMAGE_U8, outStore)
    self.lastNode = bitdepth.VXConvertDepthNode(out16Img, output)
    # Four jobs per level (convert/upsample/convert/add) plus the final
    # conversion.
    self.setNumJobs((self.levels) * 4 + 1)