Example #1
0
def main():
    """Feed a few values through a single-stage echo pipeline."""
    pipeline = mpipe.Pipeline(mpipe.OrderedStage(echo))

    for value in (0, 1, 2, 3):
        pipeline.put(value)

    # A None task signals the pipeline to shut down.
    pipeline.put(None)
Example #2
0
def main():
    """Push five identical tasks through a two-worker unordered stage."""
    pipeline = mpipe.Pipeline(mpipe.UnorderedStage(for_loop, 2))

    # Task value comes from argv[1] when supplied, else defaults to 10.
    # Hoisted out of the loop: argv does not change between iterations.
    task = int(sys.argv[1]) if len(sys.argv) >= 2 else 10
    for _ in range(5):
        pipeline.put(task)

    pipeline.put(None)  # Sentinel: stop the pipeline.
Example #3
0
def main():
    """Increment then double the numbers 0..9 and print each result."""
    first = mpipe.Stage(Incrementor, 3)
    second = mpipe.Stage(Doubler, 3)
    first.link(second)
    pipeline = mpipe.Pipeline(first)

    for value in range(10):
        pipeline.put(value)
    pipeline.put(None)  # Sentinel: no more input.

    for output in pipeline.results():
        print(output)
Example #4
0
def main(watched_path, mountpoint, logfile="test_log.csv"):
    """Watch `watched_path`, scan changed files, and log results to `logfile`.

    The SIGINT handler closes the log and then exits the process; the
    original handler only closed the log, which left the default exit
    behavior unhooked and required a second Ctrl+C (the old TODO).
    """
    oh = OutputHandler.OutputHandler(logfile)

    def signal_handler(sig, frame):
        oh.close()
        sys.exit(0)  # Terminate now that the log is safely closed.

    signal.signal(signal.SIGINT, signal_handler)

    # Scanning is parallel (3 workers, bounded backlog); log writing is
    # serialized through a single ordered worker.
    stage1 = mpipe.UnorderedStage(scanner.scan_file, size=3, max_backlog=3)
    stage2 = mpipe.OrderedStage(oh.write, size=1)
    pipeline = mpipe.Pipeline(stage1.link(stage2))

    watcher = Watcher(watched_path, pipeline)
    # Watcher.main expects an argv-like list; run in the foreground.
    watcher.main([sys.argv[0], mountpoint], foreground=True)
Example #5
0
# Parse files in a pipeline.
def parseFile(fname):
    """Parse the XML file looking for fully demangled class
    names, and communicate the result.

    Returns the list of 'demangled' attributes of all <Class> elements
    whose name lies in the Wm5:: namespace, in document order.
    """
    NSPACE = 'Wm5::'  # Hoisted: constant was rebound on every iteration.
    doc = xml.dom.minidom.parse(fname)
    # startswith() replaces the manual slice-and-compare of the original.
    return [
        entry.getAttribute('demangled')
        for entry in doc.getElementsByTagName('Class')
        if entry.getAttribute('demangled').startswith(NSPACE)
    ]
# Fan the files out across one worker per CPU; result order is irrelevant.
pipe = mpipe.Pipeline(mpipe.UnorderedStage(parseFile, num_cpus))
for fname in fnames:
    pipe.put(fname)
pipe.put(None)  # Sentinel: no more input.

# Report on progress in realtime.
total_names = dict()  # Keys act as a set of unique class names seen so far.
done_count = 0
for result in pipe.results():
    for name in result:
        total_names[name] = None
    done_count += 1
    percent = float(done_count) / len(fnames) * 100
    # \r rewinds to column 0 so each status line overwrites the previous one.
    sys.stdout.write('\r' + '%d of %d (%.1f%%)'%(done_count, len(fnames), percent))
    sys.stdout.flush()
Example #6
0
#
# Detector front stage: admit at most one task at a time; results are
# cached rather than dropped (presumably so downstream stages can reuse
# them — confirm against mpipe.FilterStage docs).
filter_detector = mpipe.FilterStage(
    detector_stages,
    max_tasks=1,
    cache_results=True,
)
postproc = mpipe.Stage(Postprocessor)
# Viewer back stage: allow up to 2 in-flight tasks and discard results.
filter_viewer = mpipe.FilterStage(
    (mpipe.Stage(Viewer), ),
    max_tasks=2,
    drop_results=True,
)

# Chain detector -> postprocessor -> viewer, rooted at the detector.
filter_detector.link(postproc)
postproc.link(filter_viewer)
pipe_iproc = mpipe.Pipeline(filter_detector)


# Create an auxiliary process (modeled as a one-task pipeline)
# that simply pulls results from the image processing pipeline,
# and deallocates associated shared memory after allowing
# the designated amount of time to pass.
def deallocate(tdelta):
    """Free each shared-memory image once it is at least `tdelta` old.

    Pulls timestamps out of the processing pipeline and, when a result
    arrives early, sleeps only for the *remaining* time.  The original
    slept the full `tdelta` regardless of how much time had already
    elapsed, systematically over-waiting.
    """
    for tstamp in pipe_iproc.results():
        remaining = tdelta - (datetime.datetime.now() - tstamp)
        if remaining > datetime.timedelta():
            time.sleep(remaining.total_seconds())
        del images[tstamp]


pipe_dealloc = mpipe.Pipeline(mpipe.UnorderedStage(deallocate))
Example #7
0
import mpipe


def echo(value):
    """Print the received task value to stdout."""
    print(value)


# One-stage pipeline that echoes every task it receives.
stage = mpipe.OrderedStage(echo)
pipe = mpipe.Pipeline(stage)

for val in range(4):
    pipe.put(val)

pipe.put(None)  # Stop the pipeline.
Example #8
0
    'multiwork',
    'filter',
    )

# Export Dia diagrams.
saved = os.getcwd()  # Remember the CWD so it can be restored afterwards.
os.chdir('source')
def runDia(diagram):
    """Render one Dia diagram to PNG; return True iff dia succeeded.

    The original returned True unconditionally, masking any failure of
    the external `dia` command.
    """
    ifname = '{}.dia'.format(diagram)
    ofname = '{}.png'.format(diagram)
    cmd = 'dia -t png-libart -e {} {}'.format(ofname, ifname)
    print('  {}'.format(cmd))
    # Propagate the exit status (0 == success) instead of always True.
    return subprocess.call(cmd, shell=True) == 0
# One worker per diagram; completion order does not matter.
pipe = mpipe.Pipeline(mpipe.UnorderedStage(runDia, len(diagrams)))
for diagram in diagrams: 
    pipe.put(diagram)
pipe.put(None)  # Sentinel: no more diagrams.
for result in pipe.results():
    pass  # Drain results so all exports finish before we chdir back.
os.chdir(saved)

# Copy the .py examples from test/ to source/ directory
# so that they can be picked up by the Sphinx build.
codes = (
    'tiny.py',
    'helloworld.py',
    'chain.py',
    'pipeout.py',
    'fork.py',
Example #9
0
# One pipeline worker per CPU core.
num_cpus = multiprocessing.cpu_count()
print('Running %d commands on %d CPUs' % (len(commands), num_cpus))


# Run commands in a pipeline.
def runCommand(command):
    """Run the given command in a subprocess shell; return its exit status."""
    # Never combine call() with stdout=PIPE: nothing reads the pipe, so a
    # chatty child fills the OS buffer and deadlocks (documented pitfall in
    # the subprocess module docs).  Discard the output instead.
    result = subprocess.call(command,
                             shell=True,
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.STDOUT)
    return result


# Fan the commands out across the CPUs; completion order is irrelevant.
pipe = mpipe.Pipeline(mpipe.UnorderedStage(runCommand, num_cpus))
for command in commands:
    if OPTS['verbose']:
        print(command)
    pipe.put(command)
pipe.put(None)  # Sentinel: no more commands.

# Report on progress in realtime.
num_succeeded = 0
for result in pipe.results():
    # result is the command's exit status; 0 (falsy) means success.
    # NOTE(review): `percent` tracks successes, not completed commands,
    # so failures stall the meter — confirm this is intended.
    num_succeeded += int(not result)
    percent = float(num_succeeded) / len(commands) * 100
    sys.stdout.write('\r' + '%d of %d (%.1f%%)' %
                     (num_succeeded, len(commands), percent))
    sys.stdout.flush()
Example #10
0

if __name__ == "__main__":
    # NOTE(review): camera, ls, np, cap, processing, pix_threshold and
    # Image are defined outside this excerpt; the excerpt also appears
    # truncated (the final `im` is never used) — verify against the
    # full source.

    # Prime the first two slots of the frame buffer `ls` with raw captures.
    for f in range(2):
        frame = camera.capture(encoding="raw")
        ls[f] = np.ctypeslib.as_array(frame.buffer_ptr[0].data,shape=(800,1280))
        del frame
    i = 2
    cont = True
    while cont:
        # A fresh two-stage (capture -> processing) pipeline is rebuilt on
        # every loop iteration — presumably wasteful; confirm if intended.
        stage1 = mpipe.OrderedStage(cap, 1)
        stage2 = mpipe.OrderedStage(processing, 1)
        pipe = mpipe.Pipeline(stage1.link(stage2))
        
        # NOTE(review): mpipe's Pipeline.put() normally takes a single
        # task argument — confirm put(i,camera,ls) is the intended call.
        pipe.put(i,camera,ls)
        
        # Wrap the buffer index around after 100 frames.
        if(i==100):
            i=0
        else:
            i+=1
        for result in pipe.results():
            counter = result
            # Stop capturing once the result exceeds the pixel threshold.
            if counter >pix_threshold:
                cont = False
        
    # Convert the captured frames to greyscale PIL images.
    for i in range(100):
        im = Image.fromarray(ls[i])
        im = im.convert("L")
Example #11
0
    cv2.imshow('diff average 4', common[tstamp]['image_diff'])
    cv2.waitKey(1)  # Allow HighGUI to process event.
    return tstamp

# Assemble the pipeline.
stage1 = mpipe.Stage(Step1)
# Wrap the display step in a filter so slow rendering cannot back up
# the pipeline: at most 2 viewer tasks in flight, results discarded.
stage2 = mpipe.FilterStage(
    (mpipe.OrderedStage(step2),),
    max_tasks=2,  # Allow maximum 2 tasks in the viewer stage.
    drop_results=True,
    )
stage1.link(stage2)
# Outer filter throttles the whole pipeline to 3 in-flight tasks.
pipe = mpipe.Pipeline(
    mpipe.FilterStage(
        (stage1,),
        max_tasks=3,  # Allow maximum 3 tasks in pipeline.
        drop_results=True,
        )
    )

# Create an auxiliary process (modeled as a one-task pipeline)
# that simply pulls results from the image processing pipeline, 
# and deallocates associated shared memory after allowing
# the designated amount of time to pass.
def deallocate(age):
    """Hold each pipeline result until it is at least `age` seconds
    old, then release its shared-memory entry in `common`."""
    min_age = datetime.timedelta(seconds=age)
    for tstamp in pipe.results():
        remaining = min_age - (datetime.datetime.now() - tstamp)
        if remaining > datetime.timedelta():
            time.sleep(remaining.total_seconds())
        del common[tstamp]
Example #12
0
def buildTrainingSet(DF, inArray, outArray, storeSourceImg=True):

	outTsv = outArray.replace('.h5', '.tsv')

	modelFile = sys.argv[1]


	#nChannels = len(gradientFunctions)
	nChannels = 1
	if storeSourceImg: nChannels += 1

	CAM_SHAPE = (140, 140, 140, nChannels)



	class cubeSource(mpipe.OrderedWorker):
		def doInit(self):
			self.sparseImages = SparseImageSource(inArray)

		def doTask(self, row):
			cubes, positions = self.sparseImages.getCubesAndPositions(row, posType='pos')
			self.putResult((row, cubes, positions))
			print 'returning image'

	class camImgMaker(mpipe.OrderedWorker):
		def doInit(self):
			from keras.models import load_model, Sequential

			from gradCam import register_gradient, modify_backprop, compile_saliency_function, normalize
			from gradCam import target_category_loss, target_category_loss_output_shape, buildGradientFunction
			from gradCam import buildSoftmaxGradientFunction

			from gradCam import batchImages, gradCamsFromList, saliencyMapsFromList, makeCamImageFromCubesFaster
			from gradCam import makeCamImgFromImage, makeResizedSourceImage

			self.model = load_model(modelFile)

			# single output neuron cams
			noduleGrad, cubeCamSize = buildGradientFunction(self.model)
			# diamGrad, cubeCamSize  = buildGradientFunction(model, output='diam')

			self.gradientFunctions = [noduleGrad]
			nChannels = len(self.gradientFunctions)
			global nChannels
			self.cubeCamSize = cubeCamSize

			# softmax output models
			# noduleGrad, cubeCamSize  = buildSoftmaxGradientFunction(model, 0)


		def doTask(self, task):
			row, cubes, positions = task
			from gradCam import makeCamImageFromCubesFaster
			camImage = makeCamImageFromCubesFaster(cubes, positions, self.gradientFunctions, self.cubeCamSize, storeSourceImg=storeSourceImg)


			print 'returning cubes and positions'
			return row, camImage

	class storeCams(mpipe.OrderedWorker):
		def doInit(self):
			DBo = tables.open_file(outArray, mode='w')
			filters = tables.Filters(complevel=1, complib='blosc:snappy')  # 7.7sec / 1.2 GB   (14 sec 1015MB if precision is reduced)           140s 3.7GB
			# filters = None
			self.cams = DBo.create_earray(DBo.root, 'cams', atom=tables.Float32Atom(shape=CAM_SHAPE), shape=(0,),
									 expectedrows=len(DF), filters=filters)

			self.camImageDF = pandas.DataFrame()

		def doTask(self, task):
			row, camImage = task

			if camImage.mean() == 0: print 'THIS IMAGE IS BAD ========================'

			print camImage.shape

			print 'nodule image    mean %s    min %s    max %s : ' % (
				camImage[:,:,:,0].mean(), camImage[:,:,:,0].min(), camImage[:,:,:,0].max())

			print 'diam image      mean %s    min %s    max %s : ' % (
				camImage[:,:,:,1].mean(), camImage[:,:,:,1].min(), camImage[:,:,:,1].max())

			#print 'source image      mean %s    min %s    max %s : ' % (
			#	camImage[:,:,:,2].mean(), camImage[:,:,:,2].min(), camImage[:,:,:,2].max())


			cam = forceImageIntoShape(camImage, CAM_SHAPE)
			#cam = resize(camImage, CAM_SHAPE)

			#crop = boundingBox(camImage, channel=0)
			print cam.shape

			self.cams.append([cam])
			self.camImageDF = self.camImageDF.append(row)
			self.camImageDF.to_csv(outTsv, sep='\t')



	print 'starting workers'
	stage1 = mpipe.Stage(cubeSource, 1, )
	stage2 = mpipe.Stage(camImgMaker, 1)
	stage3 = mpipe.Stage(storeCams, 1, disable_result=True)
	stage1.link(stage2.link(stage3))

	pipe = mpipe.Pipeline(stage1)



	for index, row in tqdm(DF.iterrows(), total=len(DF)):
		print 'putting row', index
		pipe.put(row)
Example #13
0

def step2(tstamp):
    """Display the image, stamped with framerate."""
    rates = framerate.tick()
    label = '{:.2f}, {:.2f}, {:.2f} fps'.format(*rates)
    image = common[tstamp]['image_diff']
    util.writeOSD(image, (label,))
    cv2.imshow('diff average 3', image)
    cv2.waitKey(1)  # Let HighGUI pump its event loop.
    return tstamp


# Assemble the pipeline.
stage1 = mpipe.Stage(Step1)
stage2 = mpipe.OrderedStage(step2)
stage1.link(stage2)  # Step1's results feed step2 for display.
pipe = mpipe.Pipeline(stage1)


# Create an auxiliary process (modeled as a one-task pipeline)
# that simply pulls results from the image processing pipeline,
# and deallocates associated shared memory after allowing
# the designated amount of time to pass.
def deallocate(age):
    """Wait until each timestamped result is `age` seconds old, then
    delete its shared-memory slot from `common`."""
    for tstamp in pipe.results():
        elapsed = datetime.datetime.now() - tstamp
        wait = datetime.timedelta(seconds=age) - elapsed
        if wait > datetime.timedelta():
            time.sleep(wait.total_seconds())
        del common[tstamp]