def main():
    stage1 = mpipe.Stage(Incrementor, 3)
    stage2 = mpipe.Stage(Doubler, 3)
    stage1.link(stage2)
    pipe = mpipe.Pipeline(stage1)
    for number in range(10):
        pipe.put(number)
    pipe.put(None)
    for result in pipe.results():
        print(result)
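# This main() assumes the Incrementor and Doubler worker classes that the
# full "hello world" example further below defines; a minimal sketch:
class Incrementor(mpipe.OrderedWorker):
    def doTask(self, value):
        self.putResult(value + 1)

class Doubler(mpipe.OrderedWorker):
    def doTask(self, value):
        self.putResult(value * 2)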
    def doTask(self, tstamp):
        try:
            image = images[tstamp]
            cv2.imshow('object detection 2', image)
            cv2.waitKey(1)
        except:
            print('Error in viewer !!!')
        return tstamp

# Create the detector stages.
detector_stages = list()
for classi in util.cascade.classifiers:
    detector_stages.append(
        mpipe.Stage(Detector, 1, classifier=classi,
                    color=util.cascade.colors[classi]),
    )

# Assemble the image processing pipeline:
#
#    detector(s)                      viewer
#       ||                              ||
#    filter_detector --> postproc --> filter_viewer
#
filter_detector = mpipe.FilterStage(
    detector_stages,
    max_tasks=1,
    cache_results=True,
)
postproc = mpipe.Stage(Postprocessor)
filter_viewer = mpipe.FilterStage(
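    # (The excerpt is cut off mid-statement; the completion below is a
    #  sketch that mirrors the filter_detector pattern and the diagram.
    #  The Viewer stage and the parameter values are assumptions, not
    #  the original source.)
    (mpipe.Stage(Viewer),),
    max_tasks=2,        # Drop frames if the viewer falls behind.
    drop_results=True,
)
filter_detector.link(postproc)
postproc.link(filter_viewer)
pipe = mpipe.Pipeline(filter_detector)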
# Monitor framerates for the given seconds past.
framerate2 = coils.RateTicker((1, 5, 10))

# Create the output window.
cv2.namedWindow('diff average 4', cv2.cv.CV_WINDOW_NORMAL)

def step2(tstamp):
    """Display the image, stamped with framerate."""
    fps_text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*framerate2.tick())
    util.writeOSD(common[tstamp]['image_diff'], (fps_text,))
    cv2.imshow('diff average 4', common[tstamp]['image_diff'])
    cv2.waitKey(1)  # Allow HighGUI to process event.
    return tstamp

# Assemble the pipeline.
stage1 = mpipe.Stage(Step1)
stage2 = mpipe.FilterStage(
    (mpipe.OrderedStage(step2),),
    max_tasks=2,  # Allow maximum 2 tasks in the viewer stage.
    drop_results=True,
)
stage1.link(stage2)
pipe = mpipe.Pipeline(
    mpipe.FilterStage(
        (stage1,),
        max_tasks=3,  # Allow maximum 3 tasks in pipeline.
        drop_results=True,
    )
)

# Create an auxiliary process (modeled as a one-task pipeline)
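# (The comment above is truncated in the excerpt; what follows is a
#  sketch of one way to model an auxiliary task as its own one-stage
#  pipeline. prune() is a hypothetical cleanup function, not from the
#  original.)
def prune(task):
    # Release stale entries from the shared frame store.
    return task

pipe2 = mpipe.Pipeline(mpipe.UnorderedStage(prune))
pipe2.put(True)   # Kick off the single auxiliary task.
pipe2.put(None)   # Let the auxiliary pipeline stop when done.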
import mpipe

class Incrementor(mpipe.OrderedWorker):
    def doTask(self, value):
        result = value + 1
        self.putResult(result)

class Doubler(mpipe.OrderedWorker):
    def doTask(self, value):
        result = value * 2
        self.putResult(result)

stage1 = mpipe.Stage(Incrementor, 13)
stage2 = mpipe.Stage(Doubler, 13)
stage1.link(stage2)
pipe = mpipe.Pipeline(stage1)

for number in range(10):
    pipe.put(number)
pipe.put(None)

for result in pipe.results():
    print(result)
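# Usage note (not from the original): because both stages use ordered
# workers, results come back in input order even with 13 worker
# processes per stage, so this prints (n + 1) * 2 for n in 0..9:
#   2, 4, 6, 8, 10, 12, 14, 16, 18, 20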
# Module-level imports implied by the excerpt; SparseImageSource and
# forceImageIntoShape are project-local helpers assumed imported elsewhere.
import sys
import mpipe
import pandas
import tables
from tqdm import tqdm

def buildTrainingSet(DF, inArray, outArray, storeSourceImg=True):
    outTsv = outArray.replace('.h5', '.tsv')
    modelFile = sys.argv[1]

    #nChannels = len(gradientFunctions)
    nChannels = 1
    if storeSourceImg:
        nChannels += 1
    CAM_SHAPE = (140, 140, 140, nChannels)

    class cubeSource(mpipe.OrderedWorker):
        def doInit(self):
            self.sparseImages = SparseImageSource(inArray)

        def doTask(self, row):
            cubes, positions = self.sparseImages.getCubesAndPositions(row, posType='pos')
            self.putResult((row, cubes, positions))
            print('returning image')

    class camImgMaker(mpipe.OrderedWorker):
        def doInit(self):
            # Declare the global before assigning it; note this runs in a
            # worker process, so the value does not propagate to the parent.
            global nChannels
            from keras.models import load_model, Sequential
            from gradCam import register_gradient, modify_backprop, compile_saliency_function, normalize
            from gradCam import target_category_loss, target_category_loss_output_shape, buildGradientFunction
            from gradCam import buildSoftmaxGradientFunction
            from gradCam import batchImages, gradCamsFromList, saliencyMapsFromList, makeCamImageFromCubesFaster
            from gradCam import makeCamImgFromImage, makeResizedSourceImage

            self.model = load_model(modelFile)

            # Single output neuron cams.
            noduleGrad, cubeCamSize = buildGradientFunction(self.model)
            # diamGrad, cubeCamSize = buildGradientFunction(model, output='diam')
            self.gradientFunctions = [noduleGrad]
            nChannels = len(self.gradientFunctions)
            self.cubeCamSize = cubeCamSize

            # Softmax output models:
            # noduleGrad, cubeCamSize = buildSoftmaxGradientFunction(model, 0)

        def doTask(self, task):
            row, cubes, positions = task
            from gradCam import makeCamImageFromCubesFaster
            camImage = makeCamImageFromCubesFaster(
                cubes, positions, self.gradientFunctions,
                self.cubeCamSize, storeSourceImg=storeSourceImg)
            print('returning cubes and positions')
            return row, camImage

    class storeCams(mpipe.OrderedWorker):
        def doInit(self):
            DBo = tables.open_file(outArray, mode='w')
            # 7.7 sec / 1.2 GB (14 sec / 1015 MB if precision is reduced); 140 s / 3.7 GB.
            filters = tables.Filters(complevel=1, complib='blosc:snappy')
            # filters = None
            self.cams = DBo.create_earray(
                DBo.root, 'cams',
                atom=tables.Float32Atom(shape=CAM_SHAPE),
                shape=(0,),
                expectedrows=len(DF),
                filters=filters)
            self.camImageDF = pandas.DataFrame()

        def doTask(self, task):
            row, camImage = task
            if camImage.mean() == 0:
                print('THIS IMAGE IS BAD ========================')
            print(camImage.shape)
            print('nodule image mean %s min %s max %s : ' % (
                camImage[:, :, :, 0].mean(), camImage[:, :, :, 0].min(), camImage[:, :, :, 0].max()))
            print('diam image mean %s min %s max %s : ' % (
                camImage[:, :, :, 1].mean(), camImage[:, :, :, 1].min(), camImage[:, :, :, 1].max()))
            #print('source image mean %s min %s max %s : ' % (
            #    camImage[:, :, :, 2].mean(), camImage[:, :, :, 2].min(), camImage[:, :, :, 2].max()))

            cam = forceImageIntoShape(camImage, CAM_SHAPE)
            #cam = resize(camImage, CAM_SHAPE)
            #crop = boundingBox(camImage, channel=0)
            print(cam.shape)

            self.cams.append([cam])
            self.camImageDF = self.camImageDF.append(row)
            self.camImageDF.to_csv(outTsv, sep='\t')

    print('starting workers')
    stage1 = mpipe.Stage(cubeSource, 1)
    stage2 = mpipe.Stage(camImgMaker, 1)
    stage3 = mpipe.Stage(storeCams, 1, disable_result=True)
    stage1.link(stage2.link(stage3))
    pipe = mpipe.Pipeline(stage1)

    for index, row in tqdm(DF.iterrows(), total=len(DF)):
        print('putting row %s' % index)
        pipe.put(row)
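    # The excerpt ends while feeding the pipeline. Elsewhere in this
    # collection the pipelines are shut down with a stop task once all
    # inputs are queued; the same applies here (this line is not in the
    # original excerpt):
    pipe.put(None)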
        self.time_total += time_end
        self.time_records[self.time_rec_idx] = time_end
        self.time_rec_idx = (self.time_rec_idx + 1) % NUM_TIME_RECORDS

        # Write FPS to frame.
        #cv2.putText(frame, '%2.2f FPS' % (1 / time_end), (10, 30),
        #            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
        cv2.putText(frame, '%2.2f FPS' % (NUM_TIME_RECORDS / self.time_total),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))

        cv2.imshow("Video", frame)
        self.last_time = cv2.getTickCount()
        # Returning False (on 'q') signals a stop request downstream.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return False
        else:
            return True

stage = mpipe.Stage(DisplayWorker)
#stage = mpipe.FilterStage((mpipe.Stage(DisplayWorker),), max_tasks=1000, drop_results=True)
pipe = mpipe.Pipeline(stage)

cap = cv2.VideoCapture(VIDEO_SOURCE)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, VIDEO_WIDTH)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, VIDEO_HEIGHT)

while True:
    ret, frame = cap.read()
    if not ret:  # Stop feeding once the capture is exhausted.
        break
    pipe.put(frame)
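# Cleanup once the feed loop exits (a sketch; these lines are not in the
# original excerpt). Note the worker's False return on 'q' is never
# consumed above; honoring it would require draining results, e.g. via
# pipe.get().
pipe.put(None)   # Stop task: shuts down the display stage.
cap.release()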