Example #1
def gen_bias(lg, hg):
    """Helper function - creates and returns a bias object for use when creating Glyphs. Basically weights each line with the amount of ink on it, so when a writter uses every other line it strongly biases towards letters being assigned to the lines they wrote on."""
    bias = defaultdict(float)

    # Transform the line graph to line space...
    ls_lg = LineGraph()
    ls_lg.from_many(lg)

    ihg = la.inv(hg)
    ls_lg.transform(ihg, True)

    # Add weight from all of the line segments...
    for ei in xrange(ls_lg.edge_count):
        edge = ls_lg.get_edge(ei)

        vf = ls_lg.get_vertex(edge[0])
        vt = ls_lg.get_vertex(edge[1])

        dx = vt[0] - vf[0]
        dy = vt[1] - vf[1]

        # Vertex index 5 is the radius, so weight is ink thickness at both ends times segment length,
        # binned by the mean y coordinate in line space (i.e. the text line the segment sits on)...
        mass = (vf[5] + vt[5]) * numpy.sqrt(dx * dx + dy * dy)
        line = int(numpy.floor(0.5 * (vt[1] + vf[1])))

        bias[line] += mass

    # Normalise and return...
    maximum = max(bias.values())

    for key in bias.keys():
        bias[key] /= maximum

    return bias
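
The helper above needs a homography inverse and per-vertex radii, but the weighting idea itself is simple: segment mass is thickness at both ends times length, binned by the text line it sits on and normalised to the maximum. Below is a minimal, self-contained sketch of that scheme using invented (x, y, radius) tuples in place of a real LineGraph, purely for illustration.

import math
from collections import defaultdict

# Hypothetical segments, already in line space: ((x, y, radius), (x, y, radius))...
segments = [((0.0, 0.45, 1.0), (3.0, 0.55, 1.0)),   # plenty of ink near line 0
            ((0.0, 2.40, 0.5), (1.0, 2.60, 0.5))]   # a little ink near line 2

bias = defaultdict(float)
for (xf, yf, rf), (xt, yt, rt) in segments:
    length = math.hypot(xt - xf, yt - yf)
    mass = (rf + rt) * length                  # ink thickness times segment length
    line = int(math.floor(0.5 * (yf + yt)))    # text line the segment belongs to
    bias[line] += mass

maximum = max(bias.values())
for key in bias:
    bias[key] /= maximum                       # the heaviest line ends up with weight 1.0

print(dict(bias))    # roughly {0: 1.0, 2: 0.17}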
Example #2
def combine_seperate(lg_layout):
    """Given a line graph layout (List of (homography, line graph) pairs) this merges them all together into a single LineGraph. This version doesn't do anything clever."""
    args = []
    for hg, lg in lg_layout:
        args.append(hg)
        args.append(lg)

    ret = LineGraph()
    ret.from_many(*args)
    return ret
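
The body of combine_seperate just flattens the list of (homography, line graph) pairs into the interleaved argument list (hg1, lg1, hg2, lg2, ...) that from_many expects. A self-contained sketch of that flattening, with placeholder strings standing in for the real homographies and LineGraph objects:

from itertools import chain

lg_layout = [('hg_0', 'lg_0'), ('hg_1', 'lg_1')]   # placeholders, not real objects
args = list(chain.from_iterable(lg_layout))
print(args)   # ['hg_0', 'lg_0', 'hg_1', 'lg_1'] - ready to pass as from_many(*args)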
Example #3
def stitch_connect(glyph_layout, soft=True, half=False, pair_base=0):
    """Converts a glyph layout to a linegraph layout. This stitches together the glyphs when it has sufficient information to do so."""
    ret = []

    # First copy over the actual glyphs...
    for pair in glyph_layout:
        if pair is not None:
            hg, glyph = pair
            ret.append((hg, glyph.lg))

    # Now loop through and identify all pairs that can be stitched together, and stitch them...
    pair_code = 0
    for i in xrange(len(glyph_layout) - 1):
        # Can't stitch spaces...
        if glyph_layout[i] is not None and glyph_layout[i + 1] is not None:
            l_hg, l_glyph = glyph_layout[i]
            r_hg, r_glyph = glyph_layout[i + 1]

            matches = costs.match_links(l_glyph, r_glyph)

            # Iterate and do each pairing in turn...
            for ml, mr in matches:
                # Calculate the homographies to put the two line graphs into position...
                lc_hg = numpy.dot(
                    l_hg, numpy.dot(l_glyph.transform,
                                    la.inv(ml[0].transform)))
                rc_hg = numpy.dot(
                    r_hg, numpy.dot(r_glyph.transform,
                                    la.inv(mr[0].transform)))

                # Copy the links, applying the homographies...
                lc = LineGraph()
                lc.from_many(lc_hg, ml[0].lg)

                rc = LineGraph()
                rc.from_many(rc_hg, mr[0].lg)

                # Extract the merge points...
                blend = [(ml[3], 0.0, mr[4]), (ml[4], 1.0, mr[3])]

                # Do the blending...
                lc.blend(rc, blend, soft)

                # Record via tagging that the two parts are the same entity...
                pair = 'duplicate:%i,%i' % (pair_base, pair_code)
                lc.add_tag(0, 0.5, pair)
                rc.add_tag(0, 0.5, pair)
                pair_code += 1

                # Store the pair of line graphs in the return, with identity homographies...
                ret.append((numpy.eye(3), lc))
                if not half: ret.append((numpy.eye(3), rc))

    return ret
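
The lc_hg / rc_hg calculation above is a composition of three 3x3 homographies, applied right to left: the inverse of the link's stored transform, the glyph's own transform, and the layout transform. A minimal numpy sketch of that composition, with made-up matrices (a translation, an identity and a scaling) rather than real glyph data:

import numpy
import numpy.linalg as la

layout_hg = numpy.array([[1.0, 0.0, 5.0],    # place the glyph at x = 5
                         [0.0, 1.0, 0.0],
                         [0.0, 0.0, 1.0]])
glyph_hg = numpy.eye(3)                      # glyph already in its reference frame
link_hg = numpy.array([[2.0, 0.0, 0.0],      # link stored at double scale
                       [0.0, 2.0, 0.0],
                       [0.0, 0.0, 1.0]])

combined = numpy.dot(layout_hg, numpy.dot(glyph_hg, la.inv(link_hg)))

point = numpy.array([1.0, 1.0, 1.0])         # homogeneous point in link space
print(numpy.dot(combined, point))            # maps to (5.5, 0.5) in the layout frame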
Example #4
  def convert(self, lg, choices=1, adv_match=False, textures=TextureCache(), memory=0):
    """Given a line graph this chops it into chunks, matches each chunk to the database of chunks and returns a new line graph with these chunks instead of the original. Output will involve heavy overlap requiring clever blending. choices is the number of options it select from the db - it grabs this many closest to the requirements and then randomly selects from them. If adv_match is True then instead of random selection from the choices it does a more advanced match, and select the best match in terms of colour distance from already-rendered chunks. This option is reasonably expensive. memory is how many recently use chunks to remember, to avoid repetition."""
    if memory > (choices - 1):
      memory = choices - 1

    # If we have no data just return the input...
    if self.empty(): return lg
    
    # Check if the indexing structure is valid - if not create it...
    if self.kdtree is None:
      data = numpy.array(map(lambda p: self.feature_vect(p[0], p[1]), self.chunks), dtype=numpy.float)
      self.kdtree = scipy.spatial.cKDTree(data, 4)
      
    # Calculate the radius scaler and distance for this line graph, by calculating the median radius...
    rads = map(lambda i: lg.get_vertex(i)[5], xrange(lg.vertex_count))
    rads.sort()
    median_radius = rads[len(rads)//2]
    radius_mult = 1.0 / median_radius
    
    dist = self.dist * median_radius
    
    # Create the list into which we dump all the chunks that will make up the return...
    chunks = []
    temp = LineGraph()
    
    # List of recently used chunks, to avoid obvious patterns...
    recent = []
    
    # If advanced match we need a Composite of the image thus far, to compare against...
    if adv_match:
      canvas = Composite()
      min_x, max_x, min_y, max_y = lg.get_bounds()
      canvas.set_size(int(max_x+8), int(max_y+8))
    
    # Iterate the line graph, chopping it into chunks and matching a chunk to each chop...
    for chain in lg.chains():
      head = 0
      tail = 0
      length = 0.0
        
      while True:
        # Move tail so it's long enough, or until it has reached the end...
        while length<dist and tail+1<len(chain):
          tail += 1
          v1 = lg.get_vertex(chain[tail-1])
          v2 = lg.get_vertex(chain[tail])
          length += numpy.sqrt((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2)

        # Extract a feature vector for this chunk...
        temp.from_vertices(lg, chain[head:tail+1])
        fv = self.feature_vect(temp, median_radius)
        
        # Select a chunk from the database...
        if choices==1:
          selected = self.kdtree.query(fv)[1]
          orig_chunk = self.chunks[selected]
        else:
          options = list(self.kdtree.query(fv, choices)[1])
          options = filter(lambda v: v not in recent, options)
          if not adv_match:
            selected = random.choice(options)
            orig_chunk = self.chunks[selected]
          else:
            cost = 1e64 * numpy.ones(len(options))
            
            for i, option in enumerate(options):
              fn = filter(lambda t: t[0].startswith('texture:'), self.chunks[option][0].get_tags())
              if len(fn)!=0:
                fn = fn[0][0][len('texture:'):]
                tex = textures[fn]
                
                chunk = LineGraph()
                chunk.from_many(self.chunks[option][0])
                chunk.morph_to(lg, chain[head:tail+1])
              
                part = canvas.draw_line_graph(chunk)
                cost[i] = canvas.cost_texture_nearest(tex, part)
            
            selected = options[numpy.argmin(cost)]
            orig_chunk = self.chunks[selected]
        
        # Update recent list...
        recent.append(selected)
        if len(recent)>memory:
          recent.pop(0)

        # Distort it to match the source line graph...
        chunk = LineGraph()
        chunk.from_many(orig_chunk[0])
        chunk.morph_to(lg, chain[head:tail+1])
        
        # Record it for output...
        chunks.append(chunk)
        
        # If advanced matching is on write it out to canvas, so future choices will take it into account...
        if adv_match:
          fn = filter(lambda t: t[0].startswith('texture:'), chunk.get_tags())
          if len(fn)!=0:
            fn = fn[0][0][len('texture:'):]
            tex = textures[fn]

            part = canvas.draw_line_graph(chunk)
            canvas.paint_texture_nearest(tex, part)
         
        # If tail is at the end exit the loop...
        if tail+1 >= len(chain): break
          
        # Move head along for the next chunk...
        to_move = dist * self.factor
        while to_move>0.0 and head+2<len(chain):
          head += 1
          v1 = lg.get_vertex(chain[head-1])
          v2 = lg.get_vertex(chain[head])
          offset = numpy.sqrt((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2)
          length -= offset
          to_move -= offset

    # Return the final line graph...
    ret = LineGraph()
    ret.from_many(*chunks)
    return ret
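
The chunk selection inside convert boils down to a k-nearest-neighbour query against a scipy cKDTree, optionally widened to several candidates with recently used indices filtered out before a random pick. A self-contained sketch of just that selection step, with random feature vectors standing in for the real chunk features (the sizes and values below are illustrative only):

import random
import numpy
import scipy.spatial

data = numpy.random.rand(100, 8)        # 100 chunk feature vectors, 8 features each
kdtree = scipy.spatial.cKDTree(data)

fv = numpy.random.rand(8)               # feature vector of the chunk being replaced
choices = 4
memory = 2
recent = []                             # indices of recently used chunks

options = list(kdtree.query(fv, choices)[1])        # indices of the k closest chunks
options = [v for v in options if v not in recent]   # drop anything used recently
selected = random.choice(options)

recent.append(selected)                 # remember the pick to avoid repetition
if len(recent) > memory:
    recent.pop(0)

print(selected)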