Example #1
0
	def project_diffusion(self):
		print 'Projecting diffusion...'
		bench = Benchmark()
		start_polygons = len(self.diffusion.polygons)
		# Save in case we want to reference it
		self.diffusion_original = self.diffusion
		if False:
			self.diffusion.show()
			self.polysilicon.show()
		
		#self.diffusioni = self.diffusion_original.subtract(self.polysilicon)
		#self.diffusion = self.diffusioni.to_layer()		
		
		self.diffusion = self.diffusion_original.subtract(self.polysilicon)
		
		# Find intersection with poly to form transistors
		# Require 2, some chips use diffusion for conduction to bypass metal
		# 4003 overlaps diffusion and poly because they split a contact across them
		
		# Subtract out the poly from diffusion
		# We can then use the same proximity algorithm as before
		# However, we will save intersections to get a better idea of correct transistor mapping
		end_polygons = len(self.diffusion.polygons)
		print 'Projected diffusion %d => %d polygons in %s' % (start_polygons, end_polygons, repr(bench))
		
		if False:
			self.diffusion.show()
			#self.polysilicon.show()
			#self.diffusion_original.show()
			sys.exit(1)
Example #2
0
    def rebuild_qt(self):
        bench = Benchmark()
        rect_l = list()
        for polygon in self.polygons:
            rect_l.append(QTUVPolygon(polygon))
        self.qt = PolygonQuadTree(rect_l)
        print 'Finished building %s quadtree (%d elements), took: %s' % (
            self.name, len(rect_l), repr(bench))
Example #3
0
	def find_and_merge_nets(self):
		# Combine polygons at a net level on the same layer
		# Only needed if you have bad input polygons
		# Really would be better to pre-process input to combine them
		#bench = Benchmark()
		#self.condense_polygons()
		#print 'Polygons condensed in %s' % repr(bench)
		#self.show_nets()
		#sys.exit(1)
		self.verify_net_index()
		
		# Note that you cannot have diffusion and poly; a via is for one or the other
		bench = Benchmark()
		self.merge_metal_vias()
		print 'Metal and vias merged in %s' % repr(bench)
		#self.show_nets()
		#self.verify_net_index()
		#sys.exit(1)
		
		self.via_check(2)
		
		# Connect poly to metal
		
		bench = Benchmark()
		self.merge_poly_vias_layers()
		print 'Poly and vias merged in %s' % repr(bench)
		#self.show_nets()
		self.verify_net_index()
		
		# Connect diffusion to metal
		bench = Benchmark()
		self.merge_diffusion_vias_layers()
		print 'Diffusion and vias merged in %s' % repr(bench)
		#self.show_nets()
		self.verify_net_index()
		
		self.via_check(3)
		
		#self.show_nets()
		
		print 'Finished merge'
Example #4
0
    def run(self):
        bench = Benchmark()
        
        # The following will assume all of the images have the same size
        self.verify_images()
        
        fns = []
        # Copy project so we can trash it
        project = self.project.copy()
        for il in project.get_image_lines():
            fns.append(il.get_name())
        self.icm = ImageCoordinateMap.from_tagged_file_names(fns)

        pre_opt(project, self.icm)
        prepare_pto(project, reoptimize=False)
        
        # "PToptimizer out.pto"
        args = ["PToptimizer"]
        args.append(project.get_a_file_name())
        print 'Optimizing %s' % project.get_a_file_name()
        #raise Exception()
        #self.project.save()
        rc = execute.without_output(args)
        if rc != 0:
            raise Exception('failed position optimization')
        # API assumes that projects don't change under us
        project.reopen()
        
        # final rms error 24.0394 units
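        # (Assuming get_comment_lines() keeps the leading '#', split()[4] below picks out
        #  the numeric token, e.g. '24.0394' in the sample line above)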
        rms_error = None
        for l in project.get_comment_lines():
            if l.find('final rms error') >= 0:
                rms_error = float(l.split()[4])
                break
        print 'Optimize: RMS error of %f' % rms_error
        # Filter out gross optimization problems
        if self.rms_error_threshold and rms_error > self.rms_error_threshold:
            raise Exception("Max RMS error threshold %f but got %f" % (self.rms_error_threshold, rms_error))
        
        print 'Merging project...'
        merge_pto(project, self.project)
        if self.debug:
            print self.project
        
        bench.stop()
        print 'Optimized project in %s' % bench
Example #5
0
    def run(self):
        bench = Benchmark()
        
        # The following will assume all of the images have the same size
        self.verify_images()
        
        fns = []
        # Copy project so we can trash it
        project = self.project.copy()
        for il in project.get_image_lines():
            fns.append(il.get_name())
        self.icm = ImageCoordinateMap.from_tagged_file_names(fns)

        print 'Verbose: %d' % self.debug
        print 'working directly on %s' % self.project.get_a_file_name()
        pre_opt(self.project, self.icm, verbose=self.debug, stdev=self.stdev)
        
        bench.stop()
        print 'Optimized project in %s' % bench
Example #6
0
    def process_image(self, pim, st_bounds):
        '''
        A tile is valid if it's in a safe location
        There are two ways for the location to be safe:
        - No neighboring tiles, as found on canvas edges
        - Sufficiently inside the blend area that artifacts should be minimal
        '''
        bench = Benchmark()
        [x0, x1, y0, y1] = st_bounds
        gen_tiles = 0
        print
        # TODO: get the old info back if I miss it after yield refactor
        print 'Phase 4: chopping up supertile'
        self.msg('step(x: %d, y: %d)' % (self.tw, self.th), 3)
        #self.msg('x in xrange(%d, %d, %d)' % (xt0, xt1, self.tw), 3)
        #self.msg('y in xrange(%d, %d, %d)' % (yt0, yt1, self.th), 3)

        for (y, x) in self.gen_supertile_tiles(st_bounds):
            # If we made it this far the tile can be constructed with acceptable enblend artifacts
            row = self.y2row(y)
            col = self.x2col(x)

            # Did we already do this tile?
            if self.is_done(row, col):
                # No use repeating it although it would be good to diff some of these
                if self.verbose:
                    print 'Rejecting tile x%d, y%d / r%d, c%d: already done' % (
                        x, y, row, col)
                continue

            # note that x and y are in whole pano coords
            # we need to adjust to our frame
            # row and col on the other hand are used for global naming
            self.make_tile(pim, x - x0, y - y0, row, col)
            gen_tiles += 1
        bench.stop()
        print 'Generated %d new tiles for a total of %d / %d in %s' % (
            gen_tiles, len(
                self.closed_list), self.net_expected_tiles, str(bench))
        if gen_tiles == 0:
            raise Exception("Didn't generate any tiles")
Example #7
0
    def run(self):
        '''
        The base Hugin project seems to work if you take out a few things:
        Eb1 Eev0 Er1 Ra0 Rb0 Rc0 Rd0 Re0 Va1 Vb0 Vc0 Vd0 Vx-0 Vy-0
        So say generate a project file with all of those replaced
        
        In particular we will generate new i lines
        To keep our original object intact we will instead do a diff and replace the optimized things on the old project
        
        
        Output is merged into the original file and starts after a line with a single *
        Even Hugin won't respect this optimization if loaded in as is
        Gives lines out like this
        
        o f0 r0 p0 y0 v51 a0.000000 b0.000000 c0.000000 g-0.000000 t-0.000000 d-0.000000 e-0.000000 u10 -buf 
        These are the lines we care about
        
        C i0 c0  x3996.61 y607.045 X3996.62 Y607.039  D1.4009 Dx-1.15133 Dy0.798094
        Where D is the magnitude of the distance and Dx and Dy are the x and y differences to the fitted solution
        
        There are several other lines that are just the repeats of previous lines
        '''
        bench = Benchmark()
        
        # The following will assume all of the images have the same size
        self.verify_images()
        
        # Copy project so we can trash it
        project = self.project.copy()
        prepare_pto(project, self.reoptimize)
        
        pre_run_text = project.get_text()
        if 0:
            print
            print
            print 'PT optimizer project:'
            print pre_run_text
            print
            print
                
        
        # "PToptimizer out.pto"
        args = ["PToptimizer"]
        args.append(project.get_a_file_name())
        #project.save()
        rc = execute.without_output(args)
        if rc != 0:
            fn = '/tmp/pr0nstitch.optimizer_failed.pto'
            print
            print
            print 'Failed rc: %d' % rc
            print 'Failed project save to %s' % (fn,)
            try:
                open(fn, 'w').write(pre_run_text)
            except:
                print 'WARNING: failed to write failure'
            print
            print
            raise Exception('failed position optimization')
        # API assumes that projects don't change under us
        project.reopen()
        
        '''
        Line looks like this
        # final rms error 24.0394 units
        '''
        rms_error = None
        for l in project.get_comment_lines():
            if l.find('final rms error') >= 0:
                rms_error = float(l.split()[4])
                break
        print 'Optimize: RMS error of %f' % rms_error
        # Filter out gross optimization problems
        if self.rms_error_threshold and rms_error > self.rms_error_threshold:
            raise Exception("Max RMS error threshold %f but got %f" % (self.rms_error_threshold, rms_error))
        
        if self.debug:
            print 'Parsed: %s' % str(project.parsed)

        if self.debug:
            print
            print
            print
            print 'Optimized project:'
            print project
            #sys.exit(1)
        print 'Optimized project parsed: %d' % project.parsed

        print 'Merging project...'
        merge_pto(project, self.project)
        if self.debug:
            print self.project
        
        bench.stop()
        print 'Optimized project in %s' % bench
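The docstring above describes the PToptimizer "C" control point lines: D is the distance magnitude, Dx and Dy the per-axis differences to the fitted solution. A minimal sketch of splitting such a line into named numeric fields; parse_c_line is hypothetical and not part of the original code, only the line format comes from the docstring:

    def parse_c_line(line):
        '''Split a PToptimizer C line into {prefix: float}, e.g. i, c, x, y, D, Dx, Dy'''
        fields = {}
        for tok in line.split()[1:]:
            # Key is the leading alphabetic prefix, value is the numeric remainder
            key = tok.rstrip('-0123456789.')
            fields[key] = float(tok[len(key):])
        return fields

    # Example line from the docstring:
    print parse_c_line('C i0 c0  x3996.61 y607.045 X3996.62 Y607.039  D1.4009 Dx-1.15133 Dy0.798094')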
Example #8
0
    if args.stampout:
        _outdate = IOTimestamp(sys, 'stdout')
        _errdate = IOTimestamp(sys, 'stderr')

    if exist:
        _outlog.out_fd.write('\n')
        _outlog.out_fd.write('\n')
        _outlog.out_fd.write('\n')
        _outlog.out_fd.write('*' * 80 + '\n')
        _outlog.out_fd.write('*' * 80 + '\n')
        _outlog.out_fd.write('*' * 80 + '\n')
    print 'pr0npto starting'
    print 'In: %s' % pto_in
    print 'Out: %s' % pto_out
    bench = Benchmark()

    pto = PTOProject.from_file_name(pto_in)
    # Make sure we don't accidentally override the original
    pto.remove_file_name()

    if args.center is True:
        center(pto)

    if args.anchor:
        print 'Re-finding anchor'
        center_anchor(pto)

    if args.basename:
        print 'Converting to basename'
        make_basename(pto)
Example #9
0
	def from_cif_init(self, file_name = "in.cif"):
		self.vias = Layer()
		self.metal = Layer()
		self.polysilicon = Layer()
		self.diffusion = Layer()
		self.labels = Layer()
		self.metal_gnd = None
		self.metal_vcc = None
		self.default_layer_names()
		
		parsed = CIFParser.parse(file_name)
		
		print 'CIF width: %d' % parsed.width
		print 'CIF height: %d' % parsed.height
		
		self.rebuild_layer_lists(False)
		# Make sizes the furthest point found
		for layer in self.layers + [self.labels]:
			#print 'Setting %s to %s' % (layer.name, parsed.width)
			layer.width = parsed.width
			layer.height = parsed.height
		
		def add_cif_polygons(uv_layer, cif_layer):
			print '%s: adding %d boxes' % (uv_layer.name, len(cif_layer.boxes)) 
			for box in cif_layer.boxes:
				'''
				CIF uses lower left coordinate system
				Convert to internal representation, upper left
				
				UL
					Vertical
						B 22 94 787 2735
					Horizontal
						B 116 22 740 2793
				'''
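				# The flip_height argument to from_rect_ex() below presumably does that
				# lower-left to upper-left conversion in one step (see the FIXME and the
				# commented-out from_rect() + flip_horizontal() two-step version)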
				#print '.',
				if False:
					print 'start'
					print box.xpos
					print box.ypos
					print box.width
					print box.height
				# FIXME: change this into one operation since this now takes non-negligible amount of time
				#uvp = UVPolygon.from_rect(box.xpos, box.ypos, box.width, box.height)
				uvp = UVPolygon.from_rect_ex(box.xpos, box.ypos, box.width, box.height, flip_height = uv_layer.height)
				if False:
					print uvp
					uvp.show()
				#uvp.flip_horizontal(uv_layer.height)
				#print uvp
				#uvp.show()
				uv_layer.add_uvpolygon(uvp)
				#sys.exit(1)
			# uv_layer.show()
			#sys.exit(1)
			
		print 'Width: %d, height: %d' % (parsed.width, parsed.height)
		print 'Parsed labels: %d' % len(parsed.labels)
		for label in parsed.labels:
			# Make it a smallish object
			# Really 1 pix should be fine...but I'm more afraid of corner cases breaking things
			# Get it working first and then debug corner cases if needed
			# Maybe length should be related to text length
			uvpoly = UVPolygon.from_rect_ex(label.x, label.y, 20, 20, flip_height = parsed.height)
			uvpoly.text = label.text
			print uvpoly 
			#uvpoly.show()
			self.labels.add_uvpolygon(uvpoly)
		#self.labels.show()
		#sys.exit(1)
				
		for layer_id in parsed.layers:
			layer = parsed.layers[layer_id]
			bench = Benchmark()
			# NMOS metal
			if layer_id == CIFLayer.NM:
				add_cif_polygons(self.metal, layer)
			# NMOS poly
			elif layer_id == CIFLayer.NP:
				add_cif_polygons(self.polysilicon, layer)
			# NMOS diffusion
			elif layer_id == CIFLayer.ND:
				add_cif_polygons(self.diffusion, layer)
			# NMOS contact
			elif layer_id == CIFLayer.NC:
				add_cif_polygons(self.vias, layer)
			else:
				raise Exception('Unsupported layer type %s' % repr(layer_id))
			print bench
			
		#self.compute_wh()
		self.init()
Example #10
0
	def init(self):
		set_debug_width(self.metal.width)
		set_debug_height(self.metal.height)
		#print g_width, g_height
		#sys.exit(1)
	
		self.default_layer_names()
	
		# Clip as early as possible to avoid extra operations
		self.clip()
		
		self.color_layers()
	
		self.metal.index = Layer.METAL
		
		if self.metal_gnd:
			self.metal_gnd.potential = Net.GND
			self.metal_gnd.index = Layer.METAL
		if self.metal_vcc:
			self.metal_vcc.potential = Net.VCC
			self.metal_vcc.index = Layer.METAL
		
		self.polysilicon.index = Layer.POLYSILICON
		
		# visual6502 net numbers seem to start at 1, not 0
		self.min_net_number = 1
				
		self.transdefs = Transdefs()
		
		self.reset_net_number()
		# Skip some checks before nets are setup, but make the reference available
		self.nets = None
		#self.polygon_nets = dict()
		self.remove_polygon = self.remove_polygon_no_nets
		
		self.vdd = None
		self.vss = None
		
		# Deals with small non-intersecting delta issues, but does distort the result
		print 'Enlarging layers...'
		bench = Benchmark()
		for layer in self.layers:
			layer.enlarge(None, 1.0)
		print 'Layers enlarged in %s' % repr(bench)
		
		# Must be done before projection or it can result in complex geometries
		# Well you can still get them, but it's much easier if you don't do this first
		bench = Benchmark()
		self.condense_polygons()
		print 'Polygons condensed in %s' % repr(bench)

		# net to set of polygons
		# Used for merging nets
		# number to net object
		self.nets = Nets()
		self.remove_polygon = self.remove_polygon_regular
		
		if Options.transistors_by_intersect:
			self.project_diffusion()
		#print 'Polygons: %d' % len(self.diffusion.polygons)
		#self.diffusion.show_polygons()
		#sys.exit(1)
		#self.diffusion.index = Layer.UNKNOWN_DIFFUSION


		#self.buried_contacts = Layer(Options.DEFAULT_IMAGE_FILE_BURIED_CONTACTS)
		#self.transistors = Layer(Options.DEFAULT_IMAGE_FILE_TRANSISTORS)
		self.transistors = Transistors()
		
		self.rebuild_layer_lists()
		
		self.verify_layer_sizes_after_load()
		
		for layer in self.layers:
			layer.show()
Example #11
0
    def run(self):
        '''
        The base Hugin project seems to work if you take out a few things:
        Eb1 Eev0 Er1 Ra0 Rb0 Rc0 Rd0 Re0 Va1 Vb0 Vc0 Vd0 Vx-0 Vy-0
        So say generate a project file with all of those replaced
        
        In particular we will generate new i lines
        To keep our original object intact we will instead do a diff and replace the optimized things on the old project
        
        
        Output is merged into the original file and starts after a line with a single *
        Even Hugin won't respect this optimization if loaded in as is
        Gives lines out like this
        
        o f0 r0 p0 y0 v51 a0.000000 b0.000000 c0.000000 g-0.000000 t-0.000000 d-0.000000 e-0.000000 u10 -buf 
        These are the lines we care about
        
        C i0 c0  x3996.61 y607.045 X3996.62 Y607.039  D1.4009 Dx-1.15133 Dy0.798094
        Where D is the magnitude of the distance and Dx and Dy are the x and y differences to the fitted solution
        
        There are several other lines that are just the repeats of previous lines
        '''
        bench = Benchmark()

        # The following will assume all of the images have the same size
        self.verify_images()

        # Copy project so we can trash it
        project = self.project.copy()
        prepare_pto(project, self.reoptimize)

        pre_run_text = project.get_text()
        if 0:
            print
            print
            print 'PT optimizer project:'
            print pre_run_text
            print
            print

        # "PToptimizer out.pto"
        args = ["PToptimizer"]
        args.append(project.get_a_file_name())
        #project.save()
        rc = execute.without_output(args)
        if rc != 0:
            fn = '/tmp/pr0nstitch.optimizer_failed.pto'
            print
            print
            print 'Failed rc: %d' % rc
            print 'Failed project save to %s' % (fn, )
            try:
                open(fn, 'w').write(pre_run_text)
            except:
                print 'WARNING: failed to write failure'
            print
            print
            raise Exception('failed position optimization')
        # API assumes that projects don't change under us
        project.reopen()
        '''
        Line looks like this
        # final rms error 24.0394 units
        '''
        rms_error = None
        for l in project.get_comment_lines():
            if l.find('final rms error') >= 0:
                rms_error = float(l.split()[4])
                break
        print 'Optimize: RMS error of %f' % rms_error
        # Filter out gross optimization problems
        if self.rms_error_threshold and rms_error > self.rms_error_threshold:
            raise Exception("Max RMS error threshold %f but got %f" %
                            (self.rms_error_threshold, rms_error))

        if self.debug:
            print 'Parsed: %s' % str(project.parsed)

        if self.debug:
            print
            print
            print
            print 'Optimized project:'
            print project
            #sys.exit(1)
        print 'Optimized project parsed: %d' % project.parsed

        print 'Merging project...'
        merge_pto(project, self.project)
        if self.debug:
            print self.project

        bench.stop()
        print 'Optimized project in %s' % bench

        # These are beyond this scope
        # Move them somewhere else if we want them
        if 0:
            # The following will assume all of the images have the same size
            self.verify_images()

            # Final dimensions are determined by field of view and width
            # Calculate optimal dimensions
            self.calc_dimensions()

            print 'Centering project...'
            self.center_project()
            '''
            WARNING WARNING WARNING
            The panotools model is too advanced for what I'm doing right now
            The image correction has its merits but is mostly getting in the way to distort images
        
            Therefore, I'd like to complete this to understand the intended use but I suspect it's not a good idea
            and I could do my own nona style program much better
            The only downside is that if / when I start doing lens model corrections I'll have to rethink this a little
        
            Actually, a lot of these problems go away if I trim to a single tile
            I can use the same FOV as the source image or something similar
            '''
            print 'Calculating optimal field of view to match desired size...'
            self.calc_fov()
Example #12
0
    def run(self):
        print 'Input images width %d, height %d' % (self.img_width,
                                                    self.img_height)
        print 'Output to %s' % self.out_dir
        print 'Super tile width %d, height %d from scalar %d' % (
            self.stw, self.sth, self.st_scalar_heuristic)
        print 'Super tile x step %d, y step %d' % (self.super_t_xstep,
                                                   self.super_t_ystep)
        print 'Supertile clip width %d, height %d' % (self.clip_width,
                                                      self.clip_height)

        if self.merge and self.force:
            raise Exception('Can not merge and force')

        if not self.dry:
            self.dry = True
            print
            print
            print
            print '***BEGIN DRY RUN***'
            self.run()
            print '***END DRY RUN***'
            print
            print
            print
            self.dry = False

        if not self.ignore_crop and self.pto.get_panorama_line().getv(
                'S') is None:
            raise Exception('Not cropped.  Set ignore crop to force continue')
        '''
        If we have a width of 256 and 1 pixel we need total size of 256
        If we have a width of 256 and 256 pixels we need total size of 256
        If we have a width of 256 and 257 pixels we need total size of 512
        '''
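        # i.e. padded output size = ceil(pixels / tile_size) * tile_size; the
        # x_tiles / y_tiles math.ceil() calculations further down follow from this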
        print 'Tile width: %d, height: %d' % (self.tw, self.th)
        print 'Net size: %d width (%d:%d) X %d height (%d:%d) = %d MP' % (
            self.width(), self.left(), self.right(), self.height(), self.top(),
            self.bottom(), self.width() * self.height() / 1000000)
        print 'Output image extension: %s' % self.out_extension

        self.this_tiles_done = 0

        bench = Benchmark()

        # Scrub old dir if we don't want it
        if os.path.exists(self.out_dir) and not self.merge:
            if not self.force:
                raise Exception("Must set force to override output")
            if not self.dry:
                shutil.rmtree(self.out_dir)
        if not self.dry and not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)
        if self.st_dir and not self.dry and not os.path.exists(self.st_dir):
            os.mkdir(self.st_dir)
        # in form (row, col)
        self.closed_list = set()

        self.n_expected_sts = len(list(self.gen_supertiles()))
        print 'M: Generating %d supertiles' % self.n_expected_sts

        x_tiles_ideal = 1.0 * self.width() / self.tw
        x_tiles = math.ceil(x_tiles_ideal)
        y_tiles_ideal = 1.0 * self.height() / self.th
        y_tiles = math.ceil(y_tiles_ideal)
        self.net_expected_tiles = x_tiles * y_tiles
        ideal_tiles = x_tiles_ideal * y_tiles_ideal
        print 'M: Ideal tiles: %0.3f x, %0.3f y tiles => %0.3f net' % (
            x_tiles_ideal, y_tiles_ideal, ideal_tiles)
        print 'M: Expecting to generate x%d, y%d => %d basic tiles' % (
            x_tiles, y_tiles, self.net_expected_tiles)
        if self.merge:
            self.seed_merge()

        if self.is_full:
            print 'M: full => forcing 1 thread '
            self.threads = 1
        print 'M: Initializing %d workers' % self.threads
        self.workers = []
        for ti in xrange(self.threads):
            print 'Bringing up W%02d' % ti
            w = Worker(ti, self, os.path.join(self.log_dir, 'w%02d.log' % ti))
            self.workers.append(w)
            w.start()

        print
        print
        print
        print 'S' * 80
        print 'M: Serial end'
        print 'P' * 80

        try:
            #temp_file = 'partial.tif'
            self.n_supertiles = 0
            st_gen = self.gen_supertiles()

            all_allocated = False
            last_progress = time.time()
            pair_submit = 0
            pair_complete = 0
            idle = False
            while not (all_allocated and pair_complete == pair_submit):
                progress = False
                # Check for completed jobs
                for wi, worker in enumerate(self.workers):
                    try:
                        out = worker.qo.get(False)
                    except Queue.Empty:
                        continue
                    pair_complete += 1
                    what = out[0]
                    progress = True

                    if what == 'done':
                        (st_bounds, img_fn) = out[1]
                        print 'MW%d: done w/ submit %d, complete %d' % (
                            wi, pair_submit, pair_complete)
                        # Dry run
                        if img_fn is None:
                            pim = None
                        else:
                            pim = PImage.from_file(img_fn)
                        # hack
                        # ugh remove may be an already existing supertile (not a temp file)
                        #os.remove(img_fn)
                        self.process_image(pim, st_bounds)
                    elif what == 'exception':
                        if not self.ignore_errors:
                            for worker in self.workers:
                                worker.running.clear()
                            # let stdout clear up
                            # (only moderately effective)
                            time.sleep(1)

                        #(_task, e) = out[1]
                        print '!' * 80
                        print 'M: ERROR: MW%d failed w/ exception' % wi
                        (_task, _e, estr) = out[1]
                        print 'M: Stack trace:'
                        for l in estr.split('\n'):
                            print l
                        print '!' * 80
                        if not self.ignore_errors:
                            raise Exception('M: shutdown on worker failure')
                        print 'M WARNING: continuing despite worker failure'
                    else:
                        print 'M: %s' % (out, )
                        raise Exception('M: internal error: bad task type %s' %
                                        what)

                    self.st_limit -= 1
                    if self.st_limit == 0:
                        print 'Breaking on ST limit reached'
                        break

                # Any workers need more work?
                for wi, worker in enumerate(self.workers):
                    if all_allocated:
                        break
                    if worker.qi.empty():
                        while True:
                            try:
                                st_bounds = st_gen.next()
                            except StopIteration:
                                print 'M: all tasks allocated'
                                all_allocated = True
                                break

                            progress = True

                            [x0, x1, y0, y1] = st_bounds
                            self.n_supertiles += 1
                            print 'M: checking supertile x(%d:%d) y(%d:%d)' % (
                                x0, x1, y0, y1)
                            if not self.should_try_supertile(st_bounds):
                                print 'M WARNING: skipping supertile %d as it would not generate any new tiles' % self.n_supertiles
                                continue

                            print '*' * 80
                            #print 'W%d: submit %s (%d / %d)' % (wi, repr(pair), pair_submit, n_pairs)
                            print "Creating supertile %d / %d with x%d:%d, y%d:%d" % (
                                self.n_supertiles, self.n_expected_sts, x0, x1,
                                y0, y1)
                            print 'W%d: submit' % (wi, )

                            worker.qi.put((st_bounds, ))
                            pair_submit += 1
                            break

                if progress:
                    last_progress = time.time()
                    idle = False
                else:
                    if not idle:
                        print 'M Server thread idle'
                    idle = True
                    # can take some time, but should be using smaller tiles now
                    if time.time() - last_progress > 4 * 60 * 60:
                        print 'M WARNING: server thread stalled'
                        last_progress = time.time()
                        time.sleep(0.1)

            bench.stop()
            print 'M Processed %d supertiles to generate %d new (%d total) tiles in %s' % (
                self.n_expected_sts, self.this_tiles_done, self.tiles_done(),
                str(bench))
            tiles_s = self.this_tiles_done / bench.delta_s()
            print 'M %f tiles / sec, %f pix / sec' % (tiles_s, tiles_s *
                                                      self.tw * self.th)

            if self.tiles_done() != self.net_expected_tiles:
                print 'M ERROR: expected to do %d basic tiles but did %d' % (
                    self.net_expected_tiles, self.tiles_done())
                self.dump_open_list()
                raise Exception('State mismatch')

            # Gather up supertile filenames generated by workers
            # xxx: maybe we should tell slaves the file they should use?
            for worker in self.workers:
                while True:
                    try:
                        st_fn = worker.st_fns.get(False)
                    except Queue.Empty:
                        break
                    self.st_fns.append(st_fn)

        finally:
            self.wkill()
            self.workers = None
Example #13
0
    def try_supertile(self, st_bounds):
        '''x0/1 and y0/1 are global absolute coordinates'''
        # First generate all of the valid tiles across this area to see if we can get any useful work done?
        # every supertile should have at least one solution or the bounds aren't good
        x0, x1, y0, y1 = st_bounds

        bench = Benchmark()
        try:
            if self.st_dir:
                # nah...tiff takes up too much space
                dst = os.path.join(self.st_dir,
                                   'st_%06dx_%06dy.jpg' % (x0, y0))
                if os.path.exists(dst):
                    # normally this is a .tif so slight loss in quality
                    img = PImage.from_file(dst)
                    print 'supertile short circuit on already existing: %s' % (
                        dst, )
                    return img

            # st_081357x_000587y.jpg
            temp_file = ManagedTempFile.get(None,
                                            '.tif',
                                            prefix_mangle='st_%06dx_%06dy_' %
                                            (x0, y0))

            stitcher = PartialStitcher(self.pto,
                                       st_bounds,
                                       temp_file.file_name,
                                       self.i,
                                       self.running,
                                       pprefix=self.pprefix)
            stitcher.enblend_lock = self.enblend_lock
            stitcher.nona_args = self.nona_args
            stitcher.enblend_args = self.enblend_args

            if self.dry:
                print 'dry: skipping partial stitch'
                stitcher = None
            else:
                stitcher.run()

            print
            print 'phase 3: loading supertile image'
            if self.dry:
                print 'dry: skipping loading PTO'
                img_fn = None
            else:
                if self.st_dir:
                    self.st_fns.put(dst)

                    #shutil.copyfile(temp_file.file_name, dst)
                    args = [
                        'convert', '-quality', '90', temp_file.file_name, dst
                    ]
                    print 'going to execute: %s' % (args, )
                    subp = subprocess.Popen(args,
                                            stdout=None,
                                            stderr=None,
                                            shell=False)
                    subp.communicate()
                    if subp.returncode != 0:
                        raise Exception('Failed to copy stitched file')

                    # having some problems that looks like file isn't getting written to disk
                    # monitoring for such errors
                    # remove if I can root cause the source of these glitches
                    for i in xrange(30):
                        if os.path.exists(dst):
                            break
                        if i == 0:
                            print 'WARNING: soften missing strong blur dest file name %s, waiting a bit...' % (
                                dst, )
                        time.sleep(0.1)
                    else:
                        raise Exception(
                            'Missing soften strong blur output file name %s' %
                            dst)

                # FIXME: was passing loaded image object
                # Directory should delete on exit
                # otherwise parent can delete it
                #img = PImage.from_file(temp_file.file_name)
                img_fn = temp_file.file_name
                # prevent deletion
                temp_file.file_name = ''

                #print 'supertile width: %d, height: %d' % (img.width(), img.height())
                print 'Supertile done w/ fn %s' % (img_fn, )
            return img_fn
        except:
            print 'supertile failed at %s' % (bench, )
            raise
Example #14
0
    def run(self):
        if self.dry:
            print 'Dry run abort'
            return

        bench = Benchmark()

        if not self.output_project_file_name:
            raise Exception("need project file")
        #if not self.output_project_file_name:
        #self.project_temp_file = ManagedTempFile.get()
        #self.output_project_file_name = self.project_temp_file.file_name
        print 'Beginning stitch'
        print 'output project file name: %s' % self.output_project_file_name

        #sys.exit(1)
        self.init_failures()

        # Generate control points and merge them into a master project
        self.control_point_gen = get_cp_engine()
        # How many rows and cols to go to each side
        # If you hand took the pictures, this might suit you
        self.project = PTOProject.from_blank()
        if self.output_project_file_name:
            self.project.set_file_name(self.output_project_file_name)
            if os.path.exists(self.output_project_file_name):
                # Otherwise, we merge into it
                print 'WARNING: removing old project file: %s' % self.output_project_file_name
                os.remove(self.output_project_file_name)
        else:
            self.project.get_a_file_name(None, "_master.pto")

        self.project.image_file_names = self.image_file_names

        try:
            '''
            Generate control points
            '''
            self.generate_control_points()
            print 'Soften try: %s' % (self.soften_try, )
            print 'Soften ok: %s' % (self.soften_ok, )

            print 'Post stitch fixup...'
            optimize_xy_only(self.project)
            fixup_i_lines(self.project)
            fixup_p_lines(self.project)

            print
            print '***PTO project baseline final (%s / %s) data length %d***' % (
                self.project.file_name, self.output_project_file_name,
                len(self.project.get_text()))
            print

            self.failure_json_w()
            print

            # Make dead sure its saved up to date
            self.project.save()
            # having issues with this..
            if self.output_project_file_name and not self.project.file_name == self.output_project_file_name:
                raise Exception('project file name changed %s %s' %
                                (self.project.file_name,
                                 self.output_project_file_name))

            # TODO: missing calc opt size/width/height/fov and crop

        except Exception as e:
            sys.stdout.flush()
            sys.stderr.flush()
            print
            print 'WARNING: stitch FAILED'
            traceback.print_exc()
            try:
                fn = self.project.file_name + ".failed"
                print 'Attempting to save intermediate result to %s' % fn
                self.project.save_as(fn)
            except:
                print 'WARNING: failed intermediate save'
            raise e
        finally:
            bench.stop()
            print 'Stitch done in %s' % bench
Example #15
0
    def run(self):
        bench = Benchmark()

        # The following will assume all of the images have the same size
        self.verify_images()

        # Copy project so we can trash it
        self.opt_project = self.project.copy()
        self.prepare_pto(self.opt_project)

        print 'Building image coordinate map'
        i_fns = []
        for il in self.opt_project.image_lines:
            i_fns.append(il.get_name())
        self.icm = ImageCoordinateMap.from_file_names(i_fns)
        print 'Built image coordinate map'

        if self.icm.width() <= self.tw:
            raise Exception('Decrease tile width')
        if self.icm.height() <= self.th:
            raise Exception('Decrease tile height')

        order = 2
        '''
        Phase 1: baseline
        Fully optimize a region in the center of our pano
        '''
        print 'Phase 1: baseline'
        x0 = (self.icm.width() - self.tw) / 2
        if x0 % order != 0:
            x0 += 1
        x1 = x0 + self.tw - 1
        y0 = (self.icm.height() - self.th) / 2
        if y0 % order != 0:
            y0 += 1
        y1 = y0 + self.th - 1
        (center_pto, center_cplis) = self.partial_optimize(x0, x1, y0, y1)
        merge_pto(center_pto, self.opt_project, center_cplis)
        '''
        Phase 2: predict
        Now use base center project to predict optimization positions for rest of project
        Assume that scanning left/right and that backlash will cause rows to alternate ("order 2")
        Note this will also fill in position estimates for unmatched images
        x = c0 * c + c1 * r + c2
        y = c3 * c + c4 * r + c5
        XXX: is there reason to have order 2 y coordinates?
        '''
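        # Note the loop below picks coefficient sets by parity: x uses index col % order,
        # y uses index row % order, so alternating columns / rows each get their own linear fit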
        print 'Phase 2: predict'
        ((c0s, c1s, c2s), (c3s, c4s, c5s)) = linearize(self.opt_project,
                                                       center_pto,
                                                       allow_missing=False,
                                                       order=order)
        # Exclude filenames directly optimized
        center_is = set()
        for il in center_pto.get_image_lines():
            center_is.add(self.opt_project.i2i(center_pto, il.get_index()))
        for row in xrange(self.icm.width()):
            for col in xrange(self.icm.height()):
                fn = self.icm.get_image(col, row)
                il = self.project.img_fn2l(fn)
                # Skip directly optimized lines
                if il.get_index() in center_is:
                    continue
                # Otherwise predict position
                x = c0s[col % order] * col + c1s[col % order] * row + c2s[
                    col % order]
                il.set_variable('d', x)
                y = c3s[row % order] * col + c4s[row % order] * row + c5s[
                    row % order]
                il.set_variable('e', y)
        '''
        Phase 3: optimize
        Moving out from center, optimize sub-sections based off of prediction
        Move in a cross pattern
            Left
            Right
            Up
            Down
            Expand scope
        '''
        '''
        x0 = self.icm.width() / 2
        if x0 % order != 0:
            x0 += 1
        x1 = x0 + self.tw - 1
        y0 = self.icm.height() / 2
        if y0 % order != 0:
            y0 += 1
        y1 = y0 + self.th - 1
        (center_pto, center_cplis) = self.partial_optimize(x0, x1, y0, y1)
        merge_pto(center_pto, self.opt_project, center_cplis)
        '''

        if self.debug:
            print self.project

        bench.stop()
        print 'Optimized project in %s' % bench