Example #1
  def _urls_and_domains(self, auth_entity, user_url):
    """Returns this user's valid (not webmention-blacklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: oauth_dropins.models.BaseAuth
      user_url: string, optional URL passed in when authorizing

    Returns: ([string url, ...], [string domain, ...])
    """
    actor = self.gr_source.user_to_actor(json.loads(auth_entity.user_json))
    logging.debug('Converted to actor: %s', json.dumps(actor, indent=2))

    urls = []
    for url in util.trim_nulls(util.uniquify(
        [user_url] + [actor.get('url')] +
        [u.get('value') for u in actor.get('urls', [])])):
      domain = util.domain_from_link(url)
      if domain and not util.in_webmention_blacklist(domain.lower()):
        urls.append(url)

    urls = util.dedupe_urls(urls)
    domains = [util.domain_from_link(url).lower() for url in urls]
    return urls, domains
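
All of these snippets revolve around a small uniquify helper. Judging from the call sites on this page and the unit test in Example #32 (order preserving, returns [] for None or an empty sequence, and in some projects accepting an optional key function as in Examples #19 and #20), a minimal sketch might look like the following. This is only an assumption for illustration; each project's actual util.uniquify may be implemented differently.

def uniquify(seq, key=None):
    # Sketch only: inferred from the call sites and the test in Example #32,
    # not any project's actual implementation.
    if not seq:
        return []
    seen = set()
    result = []
    for item in seq:
        k = key(item) if key else item
        if k not in seen:
            seen.add(k)
            result.append(item)
    return result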
Example #2
  def on_new_syndicated_post(self, syndpost):
    """If this source has no username, try to infer one from a syndication URL.

    Args:
      syndpost: SyndicatedPost
    """
    url = syndpost.syndication
    if self.username or not url:
      return

    # FB usernames only have letters, numbers, and periods:
    # https://www.facebook.com/help/105399436216001
    author_id = self.gr_source.base_object({'object': {'url': url}})\
                              .get('author', {}).get('id')
    if author_id:
      if author_id != self.inferred_username and not util.is_int(author_id):
        logging.info('Inferring username %s from syndication url %s', author_id, url)
        self.inferred_username = author_id
        self.put()
        syndpost.syndication = self.canonicalize_url(syndpost.syndication)
      elif author_id != self.key.id() and author_id not in self.inferred_user_ids:
        logging.info('Inferring app-scoped user id %s from syndication url %s', author_id, url)
        self.inferred_user_ids = util.uniquify(self.inferred_user_ids + [author_id])
        self.put()
        syndpost.syndication = self.canonicalize_url(syndpost.syndication)
Example #3
File: facebook.py  Project: v1cker/bridgy
  def on_new_syndicated_post(self, syndpost):
    """If this source has no username, try to infer one from a syndication URL.

    Args:
      syndpost: :class:`models.SyndicatedPost`
    """
    url = syndpost.syndication
    if self.username or not url:
      return

    # FB usernames only have letters, numbers, and periods:
    # https://www.facebook.com/help/105399436216001
    author_id = self.gr_source.base_object({'object': {'url': url}})\
                              .get('author', {}).get('id')
    if author_id:
      if author_id != self.inferred_username and not util.is_int(author_id):
        logging.info('Inferring username %s from syndication url %s', author_id, url)
        self.inferred_username = author_id
        self.put()
        syndpost.syndication = self.canonicalize_url(syndpost.syndication)
      elif author_id != self.key.id() and author_id not in self.inferred_user_ids:
        logging.info('Inferring app-scoped user id %s from syndication url %s', author_id, url)
        self.inferred_user_ids = util.uniquify(self.inferred_user_ids + [author_id])
        self.put()
        syndpost.syndication = self.canonicalize_url(syndpost.syndication)
Example #4
    def _urls_and_domains(self, auth_entity, user_url):
        """Returns this user's valid (not webmention-blacklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: :class:`oauth_dropins.models.BaseAuth`
      user_url: string, optional URL passed in when authorizing

    Returns:
      ([string url, ...], [string domain, ...])
    """
        actor = self.gr_source.user_to_actor(json.loads(auth_entity.user_json))
        logging.debug('Converted to actor: %s', json.dumps(actor, indent=2))

        candidates = util.trim_nulls(
            util.uniquify([user_url] + microformats2.object_urls(actor)))

        if len(candidates) > MAX_AUTHOR_URLS:
            logging.warning(
                'Too many profile links! Only resolving the first %s: %s',
                MAX_AUTHOR_URLS, candidates)

        urls = []
        for i, url in enumerate(candidates):
            url, domain, send = util.get_webmention_target(
                url, resolve=i < MAX_AUTHOR_URLS)
            if send:
                urls.append(url)

        urls = util.dedupe_urls(urls)  # normalizes domains to lower case
        domains = [util.domain_from_link(url) for url in urls]
        return urls, domains
Example #5
def superInterfaces(interface):
    """Given an interface, return list of super-interfaces (including itself)."""
    result = [interface]
    result.extend(reflect.allYourBase(interface, Interface))
    result = util.uniquify(result)
    result.remove(Interface)
    return result
Example #6
  def _urls_and_domains(self, auth_entity, user_url):
    """Returns this user's valid (not webmention-blacklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: oauth_dropins.models.BaseAuth
      user_url: string, optional URL passed in when authorizing

    Returns: ([string url, ...], [string domain, ...])
    """
    actor = self.gr_source.user_to_actor(json.loads(auth_entity.user_json))
    logging.debug('Converted to actor: %s', json.dumps(actor, indent=2))

    candidates = util.trim_nulls(util.uniquify(
        [user_url] + microformats2.object_urls(actor)))

    if len(candidates) > MAX_AUTHOR_URLS:
      logging.warning('Too many profile links! Only resolving the first %s: %s',
                      MAX_AUTHOR_URLS, candidates)

    urls = []
    for i, url in enumerate(candidates):
      url, domain, send = util.get_webmention_target(url, resolve=i < MAX_AUTHOR_URLS)
      if send:
        urls.append(url)

    urls = util.dedupe_urls(urls)  # normalizes domains to lower case
    domains = [util.domain_from_link(url) for url in urls]
    return urls, domains
Example #7
def extractIncludes(file):
	content = open(file, "r").read()
	includes = re.findall(r'(?m)^#include ["<]([^">]+)[">]', content)
	includes = prependPath(includes, file)
	
	for inc in includes:
		includes += extractIncludes(inc)
	return uniquify(includes)
Example #8
    def gen_track_infos(self, titles, durations):
        timedeltas = gen_timedeltas(durations)
        # join together two lists of form:
        # - ["title1", "title2"...]
        # - [("start1, end1", "start2, end2")]
        # -> [("title1, start1, end1"), ("title2, start2, end2")]
        return [(ti, ) + td
                for ti, td in zip(uniquify(list(titles)), timedeltas)]
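
gen_timedeltas() is not shown in this snippet; assuming it yields (start, end) pairs, the zip simply glues each title onto its pair, producing one flat tuple per track. A tiny illustration with hypothetical inputs:

# Hypothetical inputs; gen_timedeltas() itself is not part of this snippet.
titles = ['Intro', 'Outro']
timedeltas = [('0:00', '3:10'), ('3:10', '6:00')]
tracks = [(ti,) + td for ti, td in zip(titles, timedeltas)]
assert tracks == [('Intro', '0:00', '3:10'), ('Outro', '3:10', '6:00')]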
Example #9
def extractIncludes(file):
    content = open(file, "r").read()
    includes = re.findall(r'(?m)^#include ["<]([^">]+)[">]', content)
    includes = prependPath(includes, file)

    for inc in includes:
        includes += extractIncludes(inc)
    return uniquify(includes)
Example #10
def extractIncludes(file):
	content = open(file, "r").read()
	# filter out multi-line comments (could contain #include lines as examples)
	content = re.sub(r'(?s)/\*.*?\*/', '/* */', content)
	includes = re.findall(r'(?m)^#include ["<]([^">]+)[">]', content)
	includes = prependPath(includes, file)
	
	for inc in includes:
		includes += extractIncludes(inc)
	return uniquify(includes)
Example #11
def extract_strings_from_c_files(with_paths=False):
    strings = []
    for f in C_FILES_TO_PROCESS:
        file_content = open(f, "r").read()
        file_strings = re.findall(TRANSLATION_PATTERN, file_content)
        if with_paths:
            strings += [(s, os.path.basename(os.path.dirname(f))) for s in file_strings]
        else:
            strings += file_strings
    return util.uniquify(strings)
Example #12
def extract_strings_from_c_files(with_paths=False):
    strings = []
    for f in C_FILES_TO_PROCESS:
        file_content = open(f, "r").read()
        file_strings = re.findall(TRANSLATION_PATTERN, file_content)
        if with_paths:
            strings += [(s, os.path.basename(os.path.dirname(f)))
                        for s in file_strings]
        else:
            strings += file_strings
    return util.uniquify(strings)
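
TRANSLATION_PATTERN and C_FILES_TO_PROCESS are module-level globals that these snippets do not show. As a rough illustration of how such a setup could look (the _TR() macro name, the regex, and the file paths below are hypothetical, not taken from the actual project):

import re

# Hypothetical definitions; the real pattern and file list are defined elsewhere.
TRANSLATION_PATTERN = r'_TR\("((?:[^"\\]|\\.)*)"\)'
C_FILES_TO_PROCESS = ['src/Menu.cpp', 'src/Toolbar.cpp']

# With these, re.findall(TRANSLATION_PATTERN, 'menu = _TR("Open File")')
# returns ['Open File'].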
Example #13
def extractIncludes(file):
	content = open(file, "r").read()
	content = content.replace("\r\n", "\n")
	# filter out multi-line comments (could contain #include lines as examples)
	content = re.sub(r'(?s)/\*.*?\*/', '/* */', content)
	# try to filter out "#if 0 ... #endif" sections (hacky)
	content = re.sub(r'(?sm)^#if 0$.*?^#endif$', '', content)
	includes = re.findall(r'(?m)^#include ["<]([^">]+)[">]', content)
	includes = prependPath(includes, file)
	
	for inc in includes:
		includes += extractIncludes(inc)
	return uniquify(includes)
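
prependPath() is not shown in these extractIncludes() snippets; presumably it resolves the bare include names against the including file's directory so that the recursive calls can open them. A minimal sketch of that assumption:

import os

def prependPath(includes, file):
    # Assumed behavior: turn bare '#include' names into paths relative to the including file.
    base = os.path.dirname(file)
    return [os.path.join(base, inc) for inc in includes]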
Example #14
File: models.py  Project: mblaney/bridgy
    def _urls_and_domains(self, auth_entity, user_url):
        """Returns this user's valid (not webmention-blacklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: :class:`oauth_dropins.models.BaseAuth`
      user_url: string, optional URL passed in when authorizing

    Returns:
      ([string url, ...], [string domain, ...])
    """
        actor = self.gr_source.user_to_actor(json.loads(auth_entity.user_json))
        logging.debug('Converted to actor: %s', json.dumps(actor, indent=2))

        candidates = util.trim_nulls(
            util.uniquify([user_url] + microformats2.object_urls(actor)))

        if len(candidates) > MAX_AUTHOR_URLS:
            logging.info(
                'Too many profile links! Only resolving the first %s: %s',
                MAX_AUTHOR_URLS, candidates)

        urls = []
        for i, url in enumerate(candidates):
            final, domain, ok = util.get_webmention_target(
                url, resolve=i < MAX_AUTHOR_URLS)
            if ok:
                final = final.lower()
                if util.schemeless(final).startswith(
                        util.schemeless(url.lower())):
                    # redirected to a deeper path. use the original higher level URL. #652
                    final = url
                # If final has a path segment check if root has a matching rel=me.
                match = re.match(r'^(https?://[^/]+)/.+', final)
                if match and i < MAX_AUTHOR_URLS:
                    root = match.group(1)
                    resp = util.requests_get(root)
                    resp.raise_for_status()
                    data = util.mf2py_parse(resp.text, root)
                    me_urls = data.get('rels', {}).get('me', [])
                    if final in me_urls:
                        final = root
                urls.append(final)

        urls = util.dedupe_urls(urls)  # normalizes domains to lower case
        domains = [util.domain_from_link(url) for url in urls]
        return urls, domains
Example #15
  def urls_and_domains(self, auth_entity, user_url, actor=None,
                       resolve_source_domain=True):
    """Returns this user's valid (not webmention-blocklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: :class:`oauth_dropins.models.BaseAuth`
      user_url: string, optional URL passed in when authorizing
      actor: dict, optional AS actor for the user. If provided, overrides
        auth_entity
      resolve_source_domain: boolean, whether to follow redirects on URLs on
        this source's domain

    Returns:
      ([string url, ...], [string domain, ...])
    """
    if not actor:
      actor = self.gr_source.user_to_actor(json_loads(auth_entity.user_json))
    logger.debug(f'Extracting URLs and domains from actor: {json_dumps(actor, indent=2)}')

    candidates = util.trim_nulls(util.uniquify(
        [user_url] + microformats2.object_urls(actor)))

    if len(candidates) > MAX_AUTHOR_URLS:
      logger.info(f'Too many profile links! Only resolving the first {MAX_AUTHOR_URLS}: {candidates}')

    urls = []
    for i, url in enumerate(candidates):
      on_source_domain = util.domain_from_link(url) == self.gr_source.DOMAIN
      resolve = ((resolve_source_domain or not on_source_domain) and
                 i < MAX_AUTHOR_URLS)
      resolved = self.resolve_profile_url(url, resolve=resolve)
      if resolved:
        urls.append(resolved)

    final_urls = []
    domains = []
    for url in util.dedupe_urls(urls):  # normalizes domains to lower case
      # skip links on this source's domain itself. only currently needed for
      # Mastodon; the other silo domains are in the webmention blocklist.
      domain = util.domain_from_link(url)
      if domain != self.gr_source.DOMAIN:
        final_urls.append(url)
        domains.append(domain)

    return final_urls, domains
Example #16
def getInterfaces(obj, attr='__implements__'):
    """Return list of all interfaces an object implements, using a particular
    attribute name.  For example, if you wish to discover what interfaces a
    class implements directly, pass '__class_implements__' as the attribute
    name.
    """
    if not hasattr(obj, attr):
        return []

    result = []
    for i in tupleTreeToList(getattr(obj,attr,())):
        result.append(i)
        result.extend(reflect.allYourBase(i, Interface))
    result = util.uniquify(result)
    result.remove(Interface)
    return result
Example #17
    def _urls_and_domains(self, auth_entity, user_url):
        """Returns this user's valid (not webmention-blacklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: :class:`oauth_dropins.models.BaseAuth`
      user_url: string, optional URL passed in when authorizing

    Returns:
      ([string url, ...], [string domain, ...])
    """
        user = json_loads(auth_entity.user_json)
        actor = (
            user.get('actor')  # for Instagram; its user_json is IndieAuth
            or self.gr_source.user_to_actor(user))
        logging.debug('Extracting URLs and domains from actor: %s',
                      json_dumps(actor, indent=2))

        candidates = util.trim_nulls(
            util.uniquify([user_url] + microformats2.object_urls(actor)))

        if len(candidates) > MAX_AUTHOR_URLS:
            logging.info(
                'Too many profile links! Only resolving the first %s: %s',
                MAX_AUTHOR_URLS, candidates)

        urls = []
        for i, url in enumerate(candidates):
            resolved = self.resolve_profile_url(url,
                                                resolve=i < MAX_AUTHOR_URLS)
            if resolved:
                urls.append(resolved)

        final_urls = []
        domains = []
        for url in util.dedupe_urls(urls):  # normalizes domains to lower case
            # skip links on this source's domain itself. only currently needed for
            # Mastodon; the other silo domains are in the webmention blacklist.
            domain = util.domain_from_link(url)
            if domain != self.gr_source.DOMAIN:
                final_urls.append(url)
                domains.append(domain)

        return final_urls, domains
Example #18
File: models.py  Project: mblaney/bridgy
  def _urls_and_domains(self, auth_entity, user_url):
    """Returns this user's valid (not webmention-blacklisted) URLs and domains.

    Converts the auth entity's user_json to an ActivityStreams actor and uses
    its 'urls' and 'url' fields. May be overridden by subclasses.

    Args:
      auth_entity: :class:`oauth_dropins.models.BaseAuth`
      user_url: string, optional URL passed in when authorizing

    Returns:
      ([string url, ...], [string domain, ...])
    """
    actor = self.gr_source.user_to_actor(json.loads(auth_entity.user_json))
    logging.debug('Converted to actor: %s', json.dumps(actor, indent=2))

    candidates = util.trim_nulls(util.uniquify(
        [user_url] + microformats2.object_urls(actor)))

    if len(candidates) > MAX_AUTHOR_URLS:
      logging.info('Too many profile links! Only resolving the first %s: %s',
                   MAX_AUTHOR_URLS, candidates)

    urls = []
    for i, url in enumerate(candidates):
      final, domain, ok = util.get_webmention_target(url, resolve=i < MAX_AUTHOR_URLS)
      if ok:
        final = final.lower()
        if util.schemeless(final).startswith(util.schemeless(url.lower())):
          # redirected to a deeper path. use the original higher level URL. #652
          final = url
        # If final has a path segment check if root has a matching rel=me.
        match = re.match(r'^(https?://[^/]+)/.+', final)
        if match and i < MAX_AUTHOR_URLS:
          root = match.group(1)
          resp = util.requests_get(root)
          resp.raise_for_status()
          data = util.mf2py_parse(resp.text, root)
          me_urls = data.get('rels', {}).get('me', [])
          if final in me_urls:
            final = root
        urls.append(final)

    urls = util.dedupe_urls(urls)  # normalizes domains to lower case
    domains = [util.domain_from_link(url) for url in urls]
    return urls, domains
Example #19
    def find_largest_areas(self):
        """
        Finds the largest areas that can *currently* be built from each
        area-corner-cell in the grid.
        Returns a list of Areas.
        """
        areas = []

        for ypos in range(0, self.grid.height):
            for xpos in range(0, self.grid.width):
                cell = self.grid.get_cell(xpos, ypos)
                # Removing the is_corner() check below reduces final
                # keystroke count by ~3% but makes the routine ~12x slower
                if cell.plottable \
                    and self.grid.is_corner(xpos, ypos):
                    areas.append(self.find_largest_area_from(xpos, ypos))

        areas = util.uniquify(
            areas, lambda area: ''.join([str(c) for c in area.corners]))

        return areas
Example #20
    def find_largest_areas(self):
        """
        Finds the largest areas that can *currently* be built from each
        area-corner-cell in the grid.
        Returns a list of Areas.
        """
        areas = []

        for ypos in range(0, self.grid.height):
            for xpos in range(0, self.grid.width):
                cell = self.grid.get_cell(xpos, ypos)
                # Removing the is_corner() check below reduces final
                # keystroke count by ~3% but makes the routine ~12x slower
                if cell.plottable \
                    and self.grid.is_corner(xpos, ypos):
                    areas.append(self.find_largest_area_from(xpos, ypos))

        areas = util.uniquify(areas,
            lambda area: ''.join([str(c) for c in area.corners]))

        return areas
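
Examples #19 and #20 pass a key function as the second argument so that areas sharing the same corner signature collapse to a single entry. A small self-contained illustration of that pattern, assuming a key-aware uniquify like the sketch after Example #1 (the dict-based areas below are hypothetical stand-ins for the real Area objects):

# Hypothetical stand-ins for Area objects produced by find_largest_area_from().
areas = [{'corners': [(0, 0), (2, 3)]},
         {'corners': [(0, 0), (2, 3)]},  # same corner signature, gets dropped
         {'corners': [(1, 1), (4, 4)]}]
unique_areas = uniquify(areas, lambda area: ''.join(str(c) for c in area['corners']))
assert len(unique_areas) == 2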
Example #21
def IntersectionsTrim(inter, console):
    '''Removes intersections that do not have more than 1 connection (since they are redundant).
    Possibly will do more later, like searching for loops.
    Returns straight list of indexes.'''

    t0 = time.time()
    
    new = filter(lambda a: any((len(a[1])>2, a[1].isStart, a[1].isEnd)), inter.items()) #has more than two intersections
    series = []
    series_append = series.append
    for key, value in new:
        for intersection in value.values():
            '''tup = intersection[1:4]
            for pt in intersection[2]:
                series_append(tuple([pt])+)'''
            '''series += map(lambda a: a[0], intersection)'''
            series += [a[0] for a in intersection]

    console.add('Intersections Trim', error=': '+str(round(time.time()-t0, 3)))

    return sorted(util.uniquify(series))
Example #22
def IntersectionsTrim(inter, console):
    '''Removes intersections that do not have more than 1 connection (since they are redundant).
    Possibly will do more later, like searching for loops.
    Returns straight list of indexes.'''

    t0 = time.time()
    
    new = filter(lambda a: any((len(a[1])>2, a[1].isStart, a[1].isEnd)), inter.items()) #has more than two intersections
    series = []
    series_append = series.append
    for key, value in new:
        for intersection in value.values():
            '''tup = intersection[1:4]
            for pt in intersection[2]:
                series_append(tuple([pt])+)'''
            '''series += map(lambda a: a[0], intersection)'''
            series += [a[0] for a in intersection]

    console.add('Intersections Trim', error=': '+str(round(time.time()-t0, 3)))

    return sorted(util.uniquify(series))
Example #23
    def dec(self):
        '''The declaration of this module.

        Note:
            Because the user can define F90 TYPEs, we need to keep the correct order.

        Warning:
            If we uniquify, that can cause a problem:
            ```TYPE toto
                   INTEGER :: n
               END TYPE toto
               INTEGER :: n
            ```
        Fix:
            We need to support the TYPE keyword.
        '''

        l = [" %s" % line.text for _, line in self.residual_text_use_dec.dec]
        from util import uniquify
        if len(l) != len(uniquify(l)):
            raise NotImplementedError

        return l
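
The Warning in the docstring is exactly what the length check above guards against: a legal F90 module can contain the same declaration line both inside and outside a TYPE block, so naively uniquifying the line list would silently drop one of them. A small illustration, assuming an order-preserving uniquify like the sketch after Example #1:

# The two '   INTEGER :: n' lines are distinct declarations in F90,
# but string-wise they are duplicates, so uniquify would merge them.
l = ['TYPE toto', '   INTEGER :: n', 'END TYPE toto', '   INTEGER :: n']
assert len(l) != len(uniquify(l))  # the case dec() refuses to handle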
Example #24
    #get Overpass API data
    '''pts = Overpasser.overpass(pts, references, bounds, Console)'''
    #moved later on after elev_refs and interpol_refs were calculated to reduce number of refs to sift through

    #generate original intersections, without filtering based on connections
    intind = OriginalIntersections(pts, Console)
    net = IntersectionsJoin(pts, intind, Console)
    intersections = IntersectionsBuild(net, pts, Console)

    #calculate likely routes through intersections
    routes = OptimalDistance(intersections, Console)

    #filter intersections by being in calculated intersections already
    intsInRange = util.uniquify(
        util.flatten(map(lambda a: a[0], routes))
    )  #makes list of intersections within reasonable distance to start/end
    ultimate_trim = sorted(
        util.flatten(
            map(lambda a: intersections[a].references[0].references,
                intsInRange)))  #points included in intersections
    final_inter = IntersectionsBuild(
        IntersectionsJoin(pts, ultimate_trim, Console), pts, Console)
    '''final_refs = ValidReferences(final_inter)'''

    #get elevation data of relevant pts
    '''Add in support for interpolation'''
    elev_refs, interpol_refs = Elevator.getInterpolations(
        routes, final_inter, Console)
    final_refs = elev_refs + [ref for ref, ref1, ref2 in interpol_refs]
    ref_chunks = Overpasser.chunk(pts, final_refs)
Example #25
def get_untranslated_as_list(untranslated_dict):
    return uniquify(sum(untranslated_dict.values(), []))
Example #26
def extract_strings_from_c_files():
    strings = []
    for f in C_FILES_TO_PROCESS:
        file_content = open(f, "r").read()
        strings += re.findall(TRANSLATION_PATTERN, file_content)
    return uniquify(strings)
Example #27
def main():

    vim.install()

    if command_line.do_help:
        command_line.usage()
        return
    if command_line.do_version:
        from version import version
        print version
        return

    if command_line.do_init:
        from build_file import create_generalmakefile
        create_generalmakefile(command_line.do_ninja)
        return

    comm_world = Irpy_comm_world()

    if command_line.do_graph:
        # Create a dot representation of the dependency graph.
        # Entities that are provided together are merged inside a subgraph.

        def print_full_diagram(l_entity):

            l_entity_not_leaf = [e for e in l_entity if e.needs]
            print 'digraph Full { '
            for e in l_entity_not_leaf:
                print '   %s -> { %s } ' % (e.name, ' '.join(e.needs))
            print '}'

        def print_subgraph(l_tuple, name, color):
            for i, s in enumerate(l_tuple):
                print '   subgraph cluster_%s_%s {' % (name, i)
                print '       %s ' % ' '.join(s)
                print '       color = %s ' % color
                print '   }'

        comm_world.t_filename_parsed_text  # Initialize entity need. Dirty I know.

        print_full_diagram(comm_world.d_entity.values())

        print 'digraph Compact { '
        print '   graph [ordering="out" splines=true overlap=false];'

        l_main_usr = set([
            entity for entity in comm_world.d_entity.values() if entity.is_main
        ])
        l_main_head_usr = set(
            [entity for entity in l_main_usr if entity.l_others_name])
        l_set_main_head_name = [set(e.l_name) for e in l_main_head_usr]

        print_subgraph(l_set_main_head_name, 'usr', color='blue')

        from util import l_dummy_entity

        l_set_dummy_name = l_dummy_entity(comm_world.d_entity)
        print_subgraph(l_set_dummy_name, 'dummy', color='red')

        #~=~=~=~=
        # Create List Node Uniq
        #~=~=~=~=

        from util import split_l_set, flatten
        l_main_dummy_name, s_exculde_dummy_name = split_l_set(l_set_dummy_name)
        l_name_dummy_name_flatten = flatten(l_set_dummy_name)

        l_main_head_dummy = set(
            [comm_world.d_entity[name] for name in l_name_dummy_name_flatten])
        s_exculde_dummy = set(
            [comm_world.d_entity[name] for name in s_exculde_dummy_name])

        l_node_uniq = (l_main_usr | l_main_head_dummy) - s_exculde_dummy

        #~=~=~=~=
        # Create All edge
        #~=~=~=~=
        # We need to remove the spurious edges caused by the dummy multiple providers
        d_need = dict()
        for e in l_node_uniq:
            d_need[e.name] = set(e.needs)

    #~=~=~=~=
    # Create All edge
    #~=~=~=~=
    # Draw the edges.
    # If an arrow arrives at a multiple provider and is bold, it means it uses all the entities inside that provider.

        from util import uniquify
        l_set_multiple = uniquify(l_set_dummy_name + l_set_main_head_name)

        l_name_usr = [e.name for e in l_main_head_usr]
        for source, l_target in d_need.items():

            if source in l_name_usr:
                color = 'blue'
            elif source in l_name_dummy_name_flatten:
                color = 'red'
            else:
                color = 'black'

            for s in l_set_multiple:
                if s.issubset(l_target):
                    print ' %s -> %s [color="%s", penwidth=2]' % (
                        source, sorted(s).pop(), color)
                    l_target = l_target - s

            if l_target:
                print ' %s -> { %s } [color="%s"]' % (
                    source, '  '.join(l_target), color)

        print '   }'
        return

    if command_line.do_preprocess:
        for filename, text in comm_world.preprocessed_text:
            if filename in command_line.preprocessed:
                for line in text:
                    print line.text
        return

    if command_line.do_touch:
        for var in command_line.touched:
            if var not in comm_world.d_entity:
                print "%s is not an IRP entity" % var
            else:
                print "Touching %s invalidates the following entities:" % var
                for x in sorted(comm_world.d_entity[var].parents):
                    print "- %s" % (x, )
        return

    if command_line.do_codelet:
        import profile
        profile.build_rdtsc()
        import codelet
        codelet.run()

    if not command_line.do_run:
        return

    comm_world.create_buildfile(command_line.do_ninja)
    comm_world.write_modules()

    comm_world.create_touches()
    comm_world.create_man()

    if command_line.do_debug or command_line.do_assert:
        comm_world.create_stack()

    if command_line.do_profile:
        import profile
        profile.run(comm_world.d_entity)

    if command_line.do_openmp:
        comm_world.create_lock()
Example #28
def extract_strings_from_c_files():
    strings = []
    for f in C_FILES_TO_PROCESS:
        file_content = open(f, "r").read()
        strings += re.findall(TRANSLATION_PATTERN, file_content)
    return uniquify(strings)
Example #29
def EcoCartographer(args):
    '''Main function. Returns routes objects and saves files.'''

    start_time = time.time()

    #prepare files
    util.mkdir(args['id'])
    Console = ecio.Console(args['id']+'/console.html')

    start, success = Geocoder.geocode(args['start'], Console, 1, 2, 0)
    if not success:
        ecio.WriteFail(args['id']+'/output.json')
        return False
    end, success = Geocoder.geocode(args['end'], Console, 2, 2, 0)
    if not success:
        ecio.WriteFail(args['id']+'/output.json')
        return False
    vehicle = Vehicle(mass=args['mass'], drag_coefficient=args['cd'], area=args['area'], displacement=args['disp'])

    #generate pts and references
    '''Update shape of network generation'''
    pts, bounds, recom, success = GetPoints(start, end, Console)
    if not success:
        ecio.WriteFail(args['id']+'/output.json')
        return False
    references = References(pts, Console)

    #get Overpass API data
    '''pts = Overpasser.overpass(pts, references, bounds, Console)'''
    #moved later on after elev_refs and interpol_refs were calculated to reduce number of refs to sift through

    #generate original intersections, without filtering based on connections
    intind = OriginalIntersections(pts, Console)
    net = IntersectionsJoin(pts, intind, Console)
    intersections = IntersectionsBuild(net, pts, Console)

    #calculate likely routes through intersections
    routes = OptimalDistance(intersections, Console)

    #filter intersections by being in calculated intersections already
    intsInRange = util.uniquify(util.flatten(map(lambda a: a[0], routes))) #makes list of intersections within reasonable distance to start/end
    ultimate_trim = sorted(util.flatten(map(lambda a: intersections[a].references[0].references, intsInRange))) #points included in intersections
    final_inter = IntersectionsBuild(IntersectionsJoin(pts, ultimate_trim, Console), pts, Console)
    '''final_refs = ValidReferences(final_inter)'''

    #get elevation data of relevant pts
    '''Add in support for interpolation'''
    elev_refs, interpol_refs = Elevator.getInterpolations(routes, final_inter, Console)
    final_refs = elev_refs + [ref for ref, ref1, ref2 in interpol_refs]
    ref_chunks = Overpasser.chunk(pts, final_refs)
    pts = Overpasser.overpass(pts, final_refs, ref_chunks, bounds, Console)
    pts, elev_queries, success = Elevator.elevation(pts, elev_refs, interpol_refs, Console)
    if not success:
        ecio.WriteFail(args['id']+'/output.json')
        return False

    #calculate energy requirements
    ComputeEnergy(final_inter, vehicle)
    recalc = OptimalEnergy(final_inter, routes, args['routes'], vehicle, Console)
    recom = Micropath(recom, vehicle)
    recom_distance = sum([pt.pt.distanceTo(pt.next.pt) for pt in recom[:-1]])
    recom_instructions = Longpath(recom, vehicle, ('energy', recom.energy), ('distance', recom_distance), ('time', recom.time))

    #output instructions to files
    '''Output to JSON and HTML'''
    ecio.JSON(args['id']+'/output.json',recalc,recom_instructions)
    ecio.HTML(args['id']+'/output.html',recalc,recom_instructions)
    Console.add('Finished',error=': '+str(round(time.time()-start_time,3)))

    return recalc, recom #final routes
Example #30
def OriginalIntersections(pts, console):
    '''Get original pts that can be intersections. Returns straight list of indexes.'''
    '''ptcount = [pts[0].references]
    indexes = [0]
    ptcount_append = ptcount.append
    indexes_append = indexes.append
    frequencies = map(lambda a: len(a.references), pts)

    #add possible intersections, all occurrences and duplicates
    for index, i in enumerate(frequencies[2:-2]):
        if i > frequencies[index-2] and i > frequencies[index+2]:
            ptcount_append(pts[index].references)
            indexes_append(index)

    ptcount_append(pts[-1].references)
    indexes_append(len(pts)-1)

    print len(ptcount), ptcount[:10]

    #strip duplicates from the array
    ptcount = util.uniquify(ptcount)

    #get indexes in pts of possible intersections
    indexes = reduce(list.__add__, (zip([index]*len(mi),mi) for index, mi in enumerate(ptcount))) #TypeError: reduce() of empty sequence with no initial value

    #filter duplicates
    new_ptcount = []
    new_ptcount_append = new_ptcount.append
    for i in enumerate(ptcount):
        if ptcount.indexof(i[1]) == i[0]:
            new_ptcount_append(i[1])

    return ptcount, indexes'''

    '''#data structure: pt_index, string_of_coords, references, intersection_id

    ptcount = [(0, pts[0].pt.toStringURL(), pts[0].references, 0)]
    ptcount_append = ptcount.append
    frequencies = map(lambda a: len(a.references), pts)
    counter = 1

    for index, i in enumerate(frequencies[2:-2]):
        if frequencies[index] > frequencies[index-2] or frequencies[index] > frequencies[index+2]: #stands out in frequency, probable intersection
            ptcount_append((index, pts[index].pt.toStringURL(), pts[index].references, counter))
            counter += 1

    ptcount_append((len(pts)-1, pts[-1].pt.toStringURL(), pts[-1].references, counter))

    return ptcount'''

    t0 = time.time()

    freqs = [len(a.references) for a in pts]

    indexes = [0] + filter(lambda a: freqs[a]>freqs[a-1] or freqs[a]>freqs[a+1], range(1,len(freqs)-1))
    if len(pts[-1].references) > 1: indexes += [len(pts)-1]
    else: indexes += [len(pts)-2]
    '''all_indexes = map(lambda a: pts[a].references, indexes)'''
    all_indexes = [pts[a].references for a in indexes] #list comp version

    console.add('Original Intersections', error=': '+str(round(time.time()-t0, 3)))
    
    return sorted(util.uniquify(util.flatten(all_indexes)))
Example #31
    references = References(pts, Console)

    #get Overpass API data
    '''pts = Overpasser.overpass(pts, references, bounds, Console)'''
    #moved later on after elev_refs and interpol_refs were calculated to reduce number of refs to sift through

    #generate original intersections, without filtering based on connections
    intind = OriginalIntersections(pts, Console)
    net = IntersectionsJoin(pts, intind, Console)
    intersections = IntersectionsBuild(net, pts, Console)

    #calculate likely routes through intersections
    routes = OptimalDistance(intersections, Console)

    #filter intersections by being in calculated intersections already
    intsInRange = util.uniquify(util.flatten(map(lambda a: a[0], routes))) #makes list of intersections within reasonable distance to start/end
    ultimate_trim = sorted(util.flatten(map(lambda a: intersections[a].references[0].references, intsInRange))) #points included in intersections
    final_inter = IntersectionsBuild(IntersectionsJoin(pts, ultimate_trim, Console), pts, Console)
    '''final_refs = ValidReferences(final_inter)'''

    #get elevation data of relevant pts
    '''Add in support for interpolation'''
    elev_refs, interpol_refs, used_cons = Elevator.getInterpolations(routes, final_inter, Console) #connections used in interpolation harvested to use in energy calculation
    final_refs = elev_refs + [ref for ref, ref1, ref2 in interpol_refs]
    ref_chunks = Overpasser.chunk(pts, final_refs)
    pts = Overpasser.overpass(pts, final_refs, ref_chunks, bounds, Console)
    pts, elev_queries, success = Elevator.elevation(pts, elev_refs, interpol_refs, Console)

    #calculate energy requirements
    ComputeEnergy(final_inter, used_cons, vehicle)
    recalc = OptimalEnergy(final_inter, routes, args['routes'], vehicle, Console)
Example #32
  def test_uniquify(self):
    self.assertEqual([], util.uniquify(None))
    self.assertEqual([], util.uniquify([]))
    self.assertEqual([3], util.uniquify((3,)))
    self.assertEqual([3, 2, 4, 5, 9],
                     util.uniquify([3, 3, 2, 3, 4, 3, 5, 9, 9, 9, 3]))
Example #33
def get_untranslated_as_list(untranslated_dict):
    return util.uniquify(sum(untranslated_dict.values(), []))
Example #34
def EcoCartographer(args):
    '''Main function. Returns routes objects and saves files.'''

    start_time = time.time()

    #prepare files
    util.mkdir(args['id'])
    Console = ecio.Console(args['id'] + '/console.html')

    start, success = Geocoder.geocode(args['start'], Console, 1, 2, 0)
    if not success:
        ecio.WriteFail(args['id'] + '/output.json')
        return False
    end, success = Geocoder.geocode(args['end'], Console, 2, 2, 0)
    if not success:
        ecio.WriteFail(args['id'] + '/output.json')
        return False
    vehicle = Vehicle(mass=args['mass'],
                      drag_coefficient=args['cd'],
                      area=args['area'],
                      displacement=args['disp'])

    #generate pts and references
    '''Update shape of network generation'''
    pts, bounds, recom, success = GetPoints(start, end, Console)
    if not success:
        ecio.WriteFail(args['id'] + '/output.json')
        return False
    references = References(pts, Console)

    #get Overpass API data
    '''pts = Overpasser.overpass(pts, references, bounds, Console)'''
    #moved later on after elev_refs and interpol_refs were calculated to reduce number of refs to sift through

    #generate original intersections, without filtering based on connections
    intind = OriginalIntersections(pts, Console)
    net = IntersectionsJoin(pts, intind, Console)
    intersections = IntersectionsBuild(net, pts, Console)

    #calculate likely routes through intersections
    routes = OptimalDistance(intersections, Console)

    #filter intersections by being in calculated intersections already
    intsInRange = util.uniquify(
        util.flatten(map(lambda a: a[0], routes))
    )  #makes list of intersections within reasonable distance to start/end
    ultimate_trim = sorted(
        util.flatten(
            map(lambda a: intersections[a].references[0].references,
                intsInRange)))  #points included in intersections
    final_inter = IntersectionsBuild(
        IntersectionsJoin(pts, ultimate_trim, Console), pts, Console)
    '''final_refs = ValidReferences(final_inter)'''

    #get elevation data of relevant pts
    '''Add in support for interpolation'''
    elev_refs, interpol_refs = Elevator.getInterpolations(
        routes, final_inter, Console)
    final_refs = elev_refs + [ref for ref, ref1, ref2 in interpol_refs]
    ref_chunks = Overpasser.chunk(pts, final_refs)
    pts = Overpasser.overpass(pts, final_refs, ref_chunks, bounds, Console)
    pts, elev_queries, success = Elevator.elevation(pts, elev_refs,
                                                    interpol_refs, Console)
    if not success:
        ecio.WriteFail(args['id'] + '/output.json')
        return False

    #calculate energy requirements
    ComputeEnergy(final_inter, vehicle)
    routes_force = OptimalEnergyInitial(final_inter, routes, args['routes'],
                                        Console)
    recalc = OptimalEnergyDetailed(final_inter, routes_force, vehicle,
                                   args['routes'], Console)
    recom = Path(recom, vehicle)

    #make instructions
    for route in recalc:
        route.getInstructions(vehicle)

    #output instructions to files
    '''Output to JSON and HTML'''
    ecio.JSON(args['id'] + '/output.json', recalc, recom)
    ecio.HTML(args['id'] + '/output.html', recalc, recom)
    Console.add('Finished', error=': ' + str(time.time() - start_time))

    return recalc, recom  #final routes
Example #35
def OriginalIntersections(pts, console):
    '''Get original pts that can be intersections. Returns straight list of indexes.'''
    '''ptcount = [pts[0].references]
    indexes = [0]
    ptcount_append = ptcount.append
    indexes_append = indexes.append
    frequencies = map(lambda a: len(a.references), pts)

    #add possible intersections, all occurrences and duplicates
    for index, i in enumerate(frequencies[2:-2]):
        if i > frequencies[index-2] and i > frequencies[index+2]:
            ptcount_append(pts[index].references)
            indexes_append(index)

    ptcount_append(pts[-1].references)
    indexes_append(len(pts)-1)

    print len(ptcount), ptcount[:10]

    #strip duplicates from the array
    ptcount = util.uniquify(ptcount)

    #get indexes in pts of possible intersections
    indexes = reduce(list.__add__, (zip([index]*len(mi),mi) for index, mi in enumerate(ptcount))) #TypeError: reduce() of empty sequence with no initial value

    #filter duplicates
    new_ptcount = []
    new_ptcount_append = new_ptcount.append
    for i in enumerate(ptcount):
        if ptcount.indexof(i[1]) == i[0]:
            new_ptcount_append(i[1])

    return ptcount, indexes'''
    '''#data structure: pt_index, string_of_coords, references, intersection_id

    ptcount = [(0, pts[0].pt.toStringURL(), pts[0].references, 0)]
    ptcount_append = ptcount.append
    frequencies = map(lambda a: len(a.references), pts)
    counter = 1

    for index, i in enumerate(frequencies[2:-2]):
        if frequencies[index] > frequencies[index-2] or frequencies[index] > frequencies[index+2]: #stands out in frequency, probable intersection
            ptcount_append((index, pts[index].pt.toStringURL(), pts[index].references, counter))
            counter += 1

    ptcount_append((len(pts)-1, pts[-1].pt.toStringURL(), pts[-1].references, counter))

    return ptcount'''

    t0 = time.time()

    freqs = [len(a.references) for a in pts]

    indexes = [0] + filter(
        lambda a: freqs[a] > freqs[a - 1] or freqs[a] > freqs[a + 1],
        range(1,
              len(freqs) - 1))
    if len(pts[-1].references) > 1: indexes += [len(pts) - 1]
    else: indexes += [len(pts) - 2]
    '''all_indexes = map(lambda a: pts[a].references, indexes)'''
    all_indexes = [pts[a].references for a in indexes]  #list comp version

    console.add('Original Intersections', error=': ' + str(time.time() - t0))

    return sorted(util.uniquify(util.flatten(all_indexes)))
Example #36
  def test_uniquify(self):
    self.assertEqual([], util.uniquify(None))
    self.assertEqual([], util.uniquify([]))
    self.assertEqual([3], util.uniquify((3,)))
    self.assertEqual([3, 2, 4, 5, 9],
                     util.uniquify([3, 3, 2, 3, 4, 3, 5, 9, 9, 9, 3]))