Example #1
def main(block_size, codegen_args, image_filename, result_filename, codegen_implementation, analyser_classes):

	# image
	image = imageio.read(image_filename)
	if not image:
		print 'could not read image %s'%image_filename
		image = [[0 for i in xrange(128)] for j in xrange(128)]

	im_size = len(image[0]), len(image)
	pe_dim = [s//b for s,b in zip(im_size, block_size)]
	codegen_args['pe_dim'] = pe_dim

	# code
	code = Code()
	code.set_generator(codegen_implementation, block_size, codegen_args)
	
	# setup interpreter
	interpreter = AnalysisInterpreter(code, image, block_size)

	# setup analysers, create instances of analysis classes
	analysers = [x(interpreter) for x in analyser_classes]
	for a in analysers: interpreter.set_analysis(a)
	
	# run interpreter
	interpreter.run()
	
	# generate report for all analysers
	res = ''
	for a in analysers:
		res += '- '*40 + '\n'
		res += str(a) + '\n'
		try:
			res += a.report() + '\n'
		except Exception:
			res += 'could not print report\n'
Example #2
def main(block_size, code_gen, args, image_filename, output_filename):
    # load image
    image = imageio.read(image_filename)

    #print image[0][0]

    if not image:
        print 'could not read image, continuing with empty 128x128 image'
        image = [[0 for i in xrange(128)] for j in xrange(128)]

    code = Code()
    code.set_generator(code_gen, block_size, args)

    interpreter = Interpreter(code, image, block_size)
    interpreter.run()

    doRGB = True
    out_image = interpreter.gen_output_image(1, True, True, False, doRGB)
    if doRGB:
        imageio.write(output_filename, out_image, 3)
    else:
        imageio.write(output_filename, out_image, 1)
    
    '''f = open('curr_codegen.txt', 'w')
    f.write(str(code))'''

    '''print len(code)'''
    
    return interpreter
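
A minimal invocation sketch for this main(); both my_codegen (a do-nothing placeholder with the (code, block_size, args) signature expected by Code.set_generator) and the file names are hypothetical, and a real code generator such as the ones in Examples #3 and #4 would normally be passed instead:

def my_codegen(code, block_size, args):
    # hypothetical placeholder: emits no instructions
    return iter([])

interp = main((32, 32), my_codegen, {}, 'input.png', 'result.png')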
Example #3
def run_implementation(block_size, implementation, image_filename, filterbank_filename, res_filename_prefix):
	''' Execution wrapper '''
	from blip.simulator import interpreter
	from blip.support import imageio

	# first load the cascade
	filterbank = Filterbank.load(filterbank_filename)

	image = imageio.read(image_filename)
	if not image: raise Exception('image %s not found or not supported'%image_filename)

	im_size = len(image[0]), len(image)
	pe_dim = [s//b for s,b in zip(im_size, block_size)]

	args = {'filterbank':filterbank, 'pe_dim':pe_dim}

	# now execute the codegen
	code = Code()
	code.set_generator(implementation, block_size, args)

	sim = interpreter.Interpreter(code, image, block_size, 4)
	sim.run()

	result = sim.gen_output_image(1) # result is saved in first buffer

	imageio.write(res_filename_prefix + '.png', result, 1)
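
For reference, the pe_dim computation above simply divides the image dimensions by the block dimensions to obtain the processing-element grid. A worked example using the 128x64 test image and the 64x64 block size that appear elsewhere in these examples:

# Pure arithmetic: a 128x64 image split into 64x64 blocks
# gives a 2x1 grid of processing elements.
im_size = (128, 64)
block_size = (64, 64)
pe_dim = [s//b for s, b in zip(im_size, block_size)]
assert pe_dim == [2, 1]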
Example #4
def demo(im_filename, out_filename, no_optimalisations):
    from blip.code.trace_optimiser import Optimiser, ImmediatePass, PeepholePass, MemoryPass
    from blip.simulator import interpreter
    from blip.support import imageio
    from blip.simulator.opcodes import Imm, Mul, Add

    # settings
    block_size = (32, 32)
    out_ptr = block_size[0] * block_size[1]
    coeff = [[1, -2, 1]] * 3

    # convolution implementation with map_neighborhood_to_pixel skeleton
    def convolution_op(code, coeff_v, val, acc, args, block_size):
        """ Simple convolution implementation. """
        with scoped_alloc(code, 2) as (v, coeff_r):
            yield Imm(coeff_r, coeff_v)
            yield Mul(v, coeff_r, val)
            yield Add(acc, acc, v)

    def codegen(code, block_size, args):
        """ Map convolution to image. """
        return map_neighborhood_to_pixel(code, 0, out_ptr, coeff, convolution_op, args, block_size)

    # Wrap optimisers
    optimiser = Optimiser(50)
    optimiser.register_pass(ImmediatePass(optimiser))
    # optimiser.register_pass(PeepholePass(optimiser))
    optimiser.register_pass(MemoryPass(optimiser))

    def optim_wrapper(code, block_size, args):
        if no_optimalisations:
            print "optimalisations disabled"
            return codegen(code, block_size, args)
        else:
            return optimiser.run(code, codegen, block_size, args)

    # Render instruction trace
    f = open(out_filename + "_trace.txt", "w")

    def tag_str(instr):
        return ", ".join(instr.tag) if hasattr(instr, "tag") else ""

    f.write("\n".join(str(x).ljust(40) + " tags: " + tag_str(x) for x in optim_wrapper(Code(), block_size, {})))
    f.close()

    # Run simulation
    code = Code()

    code.set_generator(optim_wrapper, block_size, {})
    image = imageio.read(im_filename)
    sim = interpreter.Interpreter(code, image, block_size)
    sim.run()
    out = sim.gen_output_image(1)
    imageio.write(out_filename, out, 1)
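
As a rough cross-check for the demo output above, here is a plain-Python version of the same 3x3 convolution with kernel [[1, -2, 1]] * 3. The border handling of map_neighborhood_to_pixel is not visible in this excerpt, so the zero padding used here, and the helper name reference_convolution, are assumptions:

def reference_convolution(image, coeff):
    """ Plain-Python 3x3 convolution with zero padding; reference only. """
    h, w = len(image), len(image[0])
    out = [[0] * w for _ in range(h)]
    for y in range(h):
        for x in range(w):
            acc = 0
            for dy in range(-1, 2):
                for dx in range(-1, 2):
                    yy, xx = y + dy, x + dx
                    if 0 <= yy < h and 0 <= xx < w:
                        acc += coeff[dy + 1][dx + 1] * image[yy][xx]
            out[y][x] = acc
    return out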
Example #5
def main(filtersfile, inputimage, outputimage):
	''' Main entry function. '''
	filterbank = Filterbank.load(filtersfile)
	filters = filterbank.filters

	inputimage = imageio.read(inputimage)

	output = calc_planarity(inputimage, filters)

	output = _scale_to_integer(output)
	imageio.write(outputimage, output, 1)
Example #6
def test_compare_implementations():
	''' Check that the different implementations of detect_faces yield the same result. '''
	# settings
	cascade_filename = '../data/haarcascade_frontalface_alt.xml'
	image_filename = '../data/vakgroep128_64.png'

	def run_test(codegen_function, image, cascade, block_size):
		print 'running %s'%codegen_function.__name__
		print 'XXX histogram equalisation is not implemented yet, use violajones impl'
		print '    before executing simulator'
		image = reference.equalizeHist(image)

		width, height = block_size
		pe_dim = (len(image[0])//width, len(image)//height)

		# now execute the codegen
		code = Code()
		
		args = {'haar_classifier':cascade, 'pe_dim':pe_dim}
		code.set_generator(codegen_function, block_size, args)

		sim = Interpreter(code, image, block_size, 4)
		sim.run()

		detections_pixmap = sim.gen_output_image(1) # result is saved in first buffer

		# convert the number of rejections in the stages to detections
		detections = gen_code.convert_pixelmap_to_detections(detections_pixmap, cascade.size)
		return detections


	# load image and cascade
	cascade = parse_haar.parse_haar_xml(cascade_filename)
	print cascade

	image = imageio.read(image_filename)
	if not image: raise Exception('image %s not found or not supported'%image_filename)


	block_size = (64, 64)
	implementations = [
		gen_code.gen_detect_faces,
		gen_code.gen_detect_faces_stage_outer,
		gen_code.gen_detect_faces_fullintegral]
	detections = [run_test(impl, image, cascade, block_size) for impl in implementations]
	for i in xrange(len(implementations)-1):
		d1 = detections[i]
		d2 = detections[i+1]
		n1 = implementations[i].__name__
		n2 = implementations[i+1].__name__
		assert d1 == d2, 'detections of %s and %s differ'%(n1, n2)
Example #7
def test_detect_faces_fullintegral():
	''' Check that the whole detection pipeline matches the reference implementation. '''
	# settings
	cascade_filename = '../data/haarcascade_frontalface_alt.xml'
	image_filename = '../data/vakgroep128_64.png'

	def run_test(image, cascade):
		block_size = (64, 64)
		im_size = len(image[0]), len(image)
		pe_dim = tuple(s//b for s, b in zip(im_size, block_size))

		print 'XXX histogram equalisation is not implemented yet, use violajones impl'
		print '    before executing simulator'
		image = reference.equalizeHist(image)

		args = {'haar_classifier': cascade, 'pe_dim':pe_dim}
		# now execute the codegen
		code = Code()
		code.set_generator(gen_code.gen_detect_faces_fullintegral_opt, block_size, args)
		#print '# instructions: %i'%(code.instr_size())

		sim = Interpreter(code, image, block_size, 4)
		sim.run()

		detections_pixmap = sim.gen_output_image(1) # result is saved in first buffer

		# convert the number of rejections in the stages to detections
		detections = gen_code.convert_pixelmap_to_detections(detections_pixmap, cascade.size)
		return detections

	def run_ref(image, cascade):
		return reference.detect_faces(image, cascade)

	# first load the cascade
	cascade = parse_haar.parse_haar_xml(cascade_filename)
	print cascade

	image = imageio.read(image_filename)
	if not image: raise Exception('image %s not found or not supported'%image_filename)

	detections_test = run_test(image, cascade)
	detections_ref = run_ref(image, cascade)
	assert detections_test == detections_ref
Example #8
def main(image_filename, cascade_filename, res_filename, use_multiscale = False):
	image = imageio.read(image_filename)

	haar_classifier = parse_haar.parse_haar_xml(cascade_filename)
	print str(haar_classifier)


	# parameters
	scale_factor = 1.2
	min_size = (40, 40)

	# process image
	detected_faces = []
	if use_multiscale:
		detected_faces = detect_faces_multiscale(image, haar_classifier, scale_factor, min_size)
	else:
		detected_faces = detect_faces(image, haar_classifier)

	res = visualisation.draw_faces(image, detected_faces)
	imageio.write(res_filename, res, 3)
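
For intuition about the scale_factor = 1.2 and min_size = (40, 40) parameters above: assuming the usual Viola-Jones style pyramid in which the detection window grows by scale_factor per level (detect_faces_multiscale itself is not shown in this excerpt, so this is an illustration only), the candidate window sizes for a hypothetical 128-pixel-wide image would be:

# Illustration only: window sizes grow geometrically from min_size.
scale_factor, size, image_width = 1.2, 40.0, 128
while size <= image_width:
	print int(round(size)),   # prints: 40 48 58 69 83 100 119
	size *= scale_factor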
Example #9
def run_detector(block_size, implementation, image_filename, cascade_filename, res_filename_prefix):
	from blip.simulator import interpreter
	from blip.support import imageio
	import violajones.reference

	# first load the cascade
	cascade = violajones.parse_haar.parse_haar_xml(cascade_filename)
	print cascade

	image = imageio.read(image_filename)
	if not image: raise Exception('image %s not found or not supported'%image_filename)

	print 'XXX histogram equalisation is not implemented yet, use violajones impl'
	print '    before executing simulator'
	image = violajones.reference.equalizeHist(image)
	im_size = len(image[0]), len(image)

	pe_dim = [s//b for s,b in zip(im_size, block_size)]

	args = {'haar_classifier':cascade, 'pe_dim':pe_dim}
	# now execute the codegen
	code = Code()
	code.set_generator(implementation, block_size, args)
	#print '# instructions: %i'%(code.instr_size())

	sim = interpreter.Interpreter(code, image, block_size, 4)
	sim.run()

	detections_pixmap = sim.gen_output_image(1) # result is saved in first buffer

	# convert the number of rejections in the stages to detections
	detections = convert_pixelmap_to_detections(detections_pixmap, cascade.size)
	print 'detections:', detections
	detections_im = visualisation.draw_faces(image, detections)

	imageio.write(res_filename_prefix + '_pixmap.png', detections_pixmap, 1)
	imageio.write(res_filename_prefix + '.png', detections_im, 3)
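
A call sketch for run_detector, reusing the block size, cascade and test image paths from the tests above; the output prefix is hypothetical, and it is an assumption that gen_detect_faces (used as an implementation in Example #6) is reachable as gen_code.gen_detect_faces from the caller's side:

run_detector((64, 64), gen_code.gen_detect_faces,
	'../data/vakgroep128_64.png',
	'../data/haarcascade_frontalface_alt.xml',
	'face_detect_result')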