Example No. 1
File: UI.py Project: nsmoooose/csp
 def GetItemPath(self, item):
     path = []
     while item.IsOk():
         path.append(self._tree.GetPyData(item))
         item = self._tree.GetItemParent(item)
     path.reverse()
     return path
Example No. 2
def extend_path(path, name):
    """Simpler version of the stdlib's :obj:`pkgutil.extend_path`.

    It does not support ".pkg" files, and it does not require an
    __init__.py (this is important: we want only one thing (pkgcore
    itself) to install the __init__.py to avoid name clashes).

    It also modifies the "path" list in place (and returns C{None})
    instead of copying it and returning the modified copy.
    """
    if not isinstance(path, list):
        # This could happen e.g. when this is called from inside a
        # frozen package.  Return the path unchanged in that case.
        return
    # Reconstitute as relative path.
    pname = os.path.join(*name.split('.'))

    for entry in sys.path:
        if not isinstance(entry, basestring) or not os.path.isdir(entry):
            continue
        subdir = os.path.join(entry, pname)
        # XXX This may still add duplicate entries to path on
        # case-insensitive filesystems
        if subdir not in path:
            path.append(subdir)
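A minimal usage sketch for the simplified extend_path above (the starting directory and package name are illustrative, and `os`/`sys` are assumed to be imported at module level as the excerpt expects); it shows that the list is mutated in place and nothing is returned:

pkg_path = ['/usr/lib/python/site-packages/pkgcore']  # illustrative starting __path__
extend_path(pkg_path, 'pkgcore')  # mutates pkg_path in place, returns None
print(pkg_path)  # original entry plus any existing <sys.path entry>/pkgcore directories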
Example No. 3
File: build.py Project: darjus/mypy
def default_lib_path(data_dir: str, pyversion: Tuple[int, int],
        python_path: bool) -> List[str]:
    """Return default standard library search paths."""
    # IDEA: Make this more portable.
    path = []  # type: List[str]

    auto = os.path.join(data_dir, 'stubs-auto')
    if os.path.isdir(auto):
        data_dir = auto

    # We allow a module for e.g. version 3.5 to be in 3.4/. The assumption
    # is that a module added with 3.4 will still be present in Python 3.5.
    versions = ["%d.%d" % (pyversion[0], minor)
                for minor in reversed(range(pyversion[1] + 1))]
    # E.g. for Python 3.5, try 2and3/, then 3/, then 3.5/, then 3.4/, 3.3/, ...
    for v in ['2and3', str(pyversion[0])] + versions:
        for lib_type in ['stdlib', 'builtins', 'third_party']:
            stubdir = os.path.join(data_dir, 'typeshed', lib_type, v)
            if os.path.isdir(stubdir):
                path.append(stubdir)

    # Add fallback path that can be used if we have a broken installation.
    if sys.platform != 'win32':
        path.append('/usr/local/lib/mypy')

    # Contents of Python's sys.path go last, to prefer the stubs
    # TODO: To more closely model what Python actually does, builtins should
    #       go first, then sys.path, then anything in stdlib and third_party.
    if python_path:
        path.extend(sys.path)

    return path
Example No. 4
    def _extract_system_path(self, script):
        """
        Extracts and normalizes additional paths for code execution.
        For now, there's a default path of data/course/code; this may be removed
        at some point.

        script : ?? (TODO)
        """

        DEFAULT_PATH = ["code"]

        # Separate paths by :, like the system path.
        raw_path = script.get("system_path", "").split(":") + DEFAULT_PATH

        # find additional comma-separated modules search path
        path = []

        for dir in raw_path:
            if not dir:
                continue

            # path is an absolute path or a path relative to the data dir
            dir = os.path.join(self.capa_system.filestore.root_path, dir)
            # Check that we are within the filestore tree.
            reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)
            if ".." in reldir:
                log.warning("Ignoring Python directory outside of course: %r", dir)
                continue

            abs_dir = os.path.normpath(dir)
            path.append(abs_dir)

        return path
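The key safety check above is the relpath test that rejects directories escaping the filestore root. A self-contained sketch of just that test, with an illustrative root:

import os

def is_within_root(candidate, root):
    # Mirrors the check above: join onto the root, recompute the relative
    # path, and reject anything that escapes via a ".." component.
    reldir = os.path.relpath(os.path.join(root, candidate), root)
    return ".." not in reldir

print(is_within_root("code", "/data/course"))        # True
print(is_within_root("../outside", "/data/course"))  # False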
Example No. 5
def test_func_dir(tmpdir):
    # Test the creation of the memory cache directory for the function.
    memory = Memory(location=tmpdir.strpath, verbose=0)
    path = __name__.split('.')
    path.append('f')
    path = tmpdir.join('joblib', *path).strpath

    g = memory.cache(f)
    # Test that the function directory is created on demand
    func_id = _build_func_identifier(f)
    location = os.path.join(g.store_backend.location, func_id)
    assert location == path
    assert os.path.exists(path)
    assert memory.location == os.path.dirname(g.store_backend.location)
    with warns(DeprecationWarning) as w:
        assert memory.cachedir == g.store_backend.location
    assert len(w) == 1
    assert "The 'cachedir' attribute has been deprecated" in str(w[-1].message)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store
    _FUNCTION_HASHES.clear()
    assert not g._check_previous_func_code()
    assert os.path.exists(os.path.join(path, 'func_code.py'))
    assert g._check_previous_func_code()

    # Test the robustness to failure of loading previous results.
    func_id, args_id = g._get_output_identifiers(1)
    output_dir = os.path.join(g.store_backend.location, func_id, args_id)
    a = g(1)
    assert os.path.exists(output_dir)
    os.remove(os.path.join(output_dir, 'output.pkl'))
    assert a == g(1)
Example No. 6
def extend_path(path, name):
    if not isinstance(path, list):
        return path
    pname = os.path.join(*name.split('.'))
    sname = os.extsep.join(name.split('.'))
    sname_pkg = sname + os.extsep + 'pkg'
    init_py = '__init__' + os.extsep + 'py'
    path = path[:]
    for dir in sys.path:
        if not isinstance(dir, basestring) or not os.path.isdir(dir):
            continue
        subdir = os.path.join(dir, pname)
        initfile = os.path.join(subdir, init_py)
        if subdir not in path and os.path.isfile(initfile):
            path.append(subdir)
        pkgfile = os.path.join(dir, sname_pkg)
        if os.path.isfile(pkgfile):
            try:
                f = open(pkgfile)
            except IOError as msg:
                sys.stderr.write("Can't open %s: %s\n" % (pkgfile, msg))
            else:
                for line in f:
                    line = line.rstrip('\n')
                    if not line or line.startswith('#'):
                        continue
                    path.append(line)

                f.close()

    return path
Example No. 7
def _read_xml(stream):
    document = XmlNode()
    current_node = document
    path = []

    while not stream.atEnd():
        stream.readNext()

        if stream.isStartElement():
            node = XmlNode()
            attrs = stream.attributes()

            for i in xrange(attrs.count()):
                attr = attrs.at(i)
                node.attribs[_node_name(attr.name())] = unicode(attr.value())

            current_node.append_child(_node_name(stream.name()), node)
            path.append(current_node)
            current_node = node

        elif stream.isEndElement():
            current_node = path.pop()

        elif stream.isCharacters():
            current_node.text += unicode(stream.text())

    return document
Example No. 8
def main():
    output_path = "/Users/ramapriyasridharan/Documents/clients_file.txt"
    path = []
    with open(output_path) as f:
        for exptini_path_raw in f:
            exptini_path = exptini_path_raw.strip()
            path.append(exptini_path)
            print path

    for i in range(6,11):
        print i
        for j in range(0,len(path)):
            print j
            if i < 10:
                p = "/%s/0%d"%(path[j],i)
            else:
                p = "/%s/%d"%(path[j],i)
            print p
            for root, _, files in os.walk(p):
                for f in files:
                    if f.endswith('.tgz'):
                        print 'going to extract %s'%f
                        f1 = os.path.join(p,f)
                        print f1
                        tar = tarfile.open(f1)
                        tar.extractall(p)
                        tar.close()
Example No. 9
  def random_walk(self, path_length, alpha=0, rand=random.Random(), start=None):
    """ Returns a truncated random walk.

        path_length: Length of the random walk.
        alpha: probability of restarts.
        start: the start node of the random walk.
    """

    G = self
    if start:
      path = [start]
    else:
      # Sampling is uniform w.r.t V, and not w.r.t E
      path = [rand.choice(G.keys())]

    while len(path) < path_length:
      cur = path[-1]
      if len(G[cur]) > 0:
        if rand.random() >= alpha:
          path.append(rand.choice(G[cur]))
        else:
          path.append(path[0])
      else:
        break
    return path
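A self-contained sketch of the same truncated random walk with restarts, written over a plain adjacency-dict graph (the toy graph and parameters are made up for illustration):

import random

def truncated_random_walk(graph, path_length, alpha=0.0, rand=None, start=None):
    # graph: dict mapping node -> list of neighbour nodes.
    # With probability alpha the walk jumps back to its starting node.
    rand = rand or random.Random(0)
    path = [start if start is not None else rand.choice(list(graph))]
    while len(path) < path_length:
        cur = path[-1]
        if not graph[cur]:
            break
        if rand.random() >= alpha:
            path.append(rand.choice(graph[cur]))
        else:
            path.append(path[0])
    return path

toy_graph = {'a': ['b', 'c'], 'b': ['a'], 'c': ['a', 'b']}
print(truncated_random_walk(toy_graph, path_length=5, alpha=0.1, start='a'))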
Example No. 10
    def findBestPathFromWordToWord( self, word, toWord ):
        """
        Do a breadth-first search, which will find the shortest
        path between the nodes we are interested in.
        This will only find a path if the words are
        linked by a parent relationship;
        you won't be able to find your cousins.
        """
        queue = [word]
        previous = { word: word }
        while queue:
            node = queue.pop(0)
            if node == toWord: # If we've found our target word.
                # Work out how we got here.
                path = [node]
                while previous[node] != node:
                    node = previous[node]
                    path.append( node )
                return path

            # Continue on up the tree.
            for parent in self.getParentRelationships(node):
                # unless we've been to this parent before.
                if parent not in previous and parent not in queue:
                    previous[parent] = node
                    queue.append( parent )

        # We didn't find a path to our target word
        return None
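The same breadth-first pattern reduced to a self-contained sketch over a plain parent-lookup dict (the data is illustrative): expand the frontier, record where each node was reached from, then unwind the `previous` map once the target is found.

from collections import deque

def shortest_parent_path(parents, word, to_word):
    # parents: dict mapping node -> iterable of parent nodes.
    queue = deque([word])
    previous = {word: word}
    while queue:
        node = queue.popleft()
        if node == to_word:
            path = [node]
            while previous[node] != node:
                node = previous[node]
                path.append(node)
            return path  # runs from the target back to the start word
        for parent in parents.get(node, ()):
            if parent not in previous:
                previous[parent] = node
                queue.append(parent)
    return None

toy = {'cat': ['feline'], 'feline': ['mammal'], 'mammal': ['animal']}
print(shortest_parent_path(toy, 'cat', 'mammal'))  # ['mammal', 'feline', 'cat']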
Example No. 11
def mapping_to_alignment(mapping, sam_file, region_fetcher):
    ''' Convert a mapping represented by a pysam.AlignedRead into an alignment. '''
    # The read indices in AlignedRead's aligned_pairs are relative to qstart.
    # Convert them to be absolute indices in seq.
    path = []
    mismatches = set()
    deletions = set()
    rname = sam_file.getrname(mapping.tid)
    for read_i, ref_i in mapping.aligned_pairs:
        if read_i != None:
            read_i = read_i + mapping.qstart
            if ref_i == None:
                ref_i = sw.GAP
            else:
                read_base = mapping.seq[read_i]
                ref_base = region_fetcher(rname, ref_i, ref_i + 1).upper()
                if read_base != ref_base:
                    mismatches.add((read_i, ref_i))

            path.append((read_i, ref_i))
        else:
            deletions.add(ref_i)

    alignment = {'path': path,
                 'XO': mapping.opt('XO'),
                 'XM': mapping.opt('XM'),
                 'is_reverse': mapping.is_reverse,
                 'mismatches': mismatches,
                 'deletions': deletions,
                 'rname': rname,
                 'query': mapping.seq,
                }
    return alignment
Example No. 12
  def url_to_filename(self, url):
    """ Map url to a unique file name/path. Register the directories in the path. """
    filename = re.sub(r'^https?://', '', url)
    filename = os.path.normpath(filename)
    
    path = []
    path_parts = filename.split("/")

    for idx, part in enumerate(path_parts):
      tries = 0
      unique_part = part
      new_path = "/".join(path + [part])

      while new_path in self.path_types and (self.path_types[new_path] == "file" or len(path_parts)-1 == idx):
        tries += 1
        unique_part = "%s.%d" % (part, tries)
        new_path = "/".join(path + [unique_part])

      if len(path_parts)-1 == idx:
        self.path_types[new_path] = "file"
      else:
        self.path_types[new_path] = "dir"

      path.append(unique_part)

    return "/".join(path)
Example No. 13
def test_func_dir():
    # Test the creation of the memory cache directory for the function.
    memory = Memory(cachedir=env["dir"], verbose=0)
    memory.clear()
    path = __name__.split(".")
    path.append("f")
    path = os.path.join(env["dir"], "joblib", *path)

    g = memory.cache(f)
    # Test that the function directory is created on demand
    yield nose.tools.assert_equal, g._get_func_dir(), path
    yield nose.tools.assert_true, os.path.exists(path)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store
    _FUNCTION_HASHES.clear()
    yield nose.tools.assert_false, g._check_previous_func_code()
    yield nose.tools.assert_true, os.path.exists(os.path.join(path, "func_code.py"))
    yield nose.tools.assert_true, g._check_previous_func_code()

    # Test the robustness to failure of loading previous results.
    dir, _ = g.get_output_dir(1)
    a = g(1)
    yield nose.tools.assert_true, os.path.exists(dir)
    os.remove(os.path.join(dir, "output.pkl"))
    yield nose.tools.assert_equal, a, g(1)
Example No. 14
 def _gen(self, node, path, prefix):
     global args
     tail, heads = self._name_node(path), []
     edges = node.sufs.keys()
     sort = args.sort == "all" or (args.sort == "root" and not path)
     if sort:
         edges.sort()
     for i, edge in enumerate(edges):
         child = node.sufs[edge]
         path.append(i)
         prefix.append(edge)
         self._gen_node(child, path, "".join(prefix))
         head = self._name_node(path)
         if heads and sort:
             self._gen_edge(heads[-1], head, style="invisible", dir="none")
         heads.append(head)
         edge_label = [cgi.escape(edge)]
         edge_attrs = {'label': edge_label, 'tooltip': edge_label}
         if len(path) == 1:
             root = self._name_node([i + len(node.sufs)])
             self._print(root, self._stringify_attrs(style="invisible"))
             self._gen_edge(root + ":e", head + ":w", **edge_attrs)
         else:
             self._gen_edge(tail, head, **edge_attrs)
         self._gen(child, path, prefix)
         path.pop()
         prefix.pop()
     if sort and heads:
         self._print("{ rank=same", " ".join(head for head in heads), "}")
Example No. 15
def default_lib_path(data_dir: str, target: int, pyversion: int) -> List[str]:
    """Return default standard library search paths."""
    # IDEA: Make this more portable.
    path = List[str]()

    # Add MYPYPATH environment variable to library path, if defined.
    path_env = os.getenv('MYPYPATH')
    if path_env is not None:
        path[:0] = path_env.split(os.pathsep)

    if target in [ICODE, C]:
        # Add C back end library directory.
        path.append(os.path.join(data_dir, 'lib'))
    else:
        # Add library stubs directory. By convention, they are stored in the
        # stubs/x.y directory of the mypy installation.
        version_dir = '3.2'
        if pyversion < 3:
            version_dir = '2.7'
        path.append(os.path.join(data_dir, 'stubs', version_dir))
        path.append(os.path.join(data_dir, 'stubs-auto', version_dir))
        #Add py3.3 and 3.4 stubs
        if sys.version_info.major == 3:
            versions = ['3.' + str(x) for x in range(3, sys.version_info.minor + 1)]
            for v in versions:
                path.append(os.path.join(data_dir, 'stubs', v))

    # Add fallback path that can be used if we have a broken installation.
    if sys.platform != 'win32':
        path.append('/usr/local/lib/mypy')

    return path
Example No. 16
def evaluateTopicPaths(inputFl, pathFl, outputFl, topicId, metric, cutoff):
    profiles = ranking.readInputFl.readInputFl(inputFl, topicId)
    # Path files can be quite large, so it opens and processes
    # the path file line-by-line.
    with open(pathFl) as pthFl:
        metricFun = ranking.calcUtilities.getMetricFunction(metric)
        with tempfile.NamedTemporaryFile("r+", delete=False) as tmpF:
            for line in pthFl:
                # 15017299068   1   5  d4 : 0 d4:0 d1:0 d3:1 d2:0
                (usell, ll) = utils.utils.stripComments(line)
                if usell:
                    lline = re.sub(r"\s:\s", ":", ll)
                    lst = lline.split(None)
                    instId = lst[0]
                    tTopicId = lst[1]
                    if topicId == tTopicId:
                        profileId = lst[2]
                        path = []

                        for dc in lst[3 : len(lst)]:
                            (d, _, c) = dc.partition(":")
                            path.append(d)
                            if profiles.getDocRelMap(profileId) == None:
                                raise Exception("Profile id not found.")
                        docsToRels = dict(profiles.getDocRelMap(profileId))
                        util = metricFun(path, docsToRels, cutoff)
                        tmpF.write(instId + " " + topicId + " " + profileId + " " + str(util) + "\n")
            tmpF.flush()
            tmpF.close()
            shutil.move(tmpF.name, outputFl)
            if os.path.exists(tmpF.name):
                os.remove(tmpF.name)
    return outputFl
Example No. 17
    def GetCertificatePath(self, cert):
        """
        Return the certification path for cert.
        """

        path = []
        repo = self.GetCertificateRepository()
        c = cert
        checked = {}
        while 1:
            subj = str(c.GetSubject())
            path.append(c)
            if c.GetSubject().as_der() == c.GetIssuer().as_der():
                break

            # If we come back to a place we've been before, we're in a cycle
            # and won't get anywhere. Bail.

            if subj in checked:
                return ""
            checked[subj] = 1
            
            issuers = repo.FindCertificatesWithSubject(str(c.GetIssuer()))

            validIssuers = filter(lambda x: not x.IsExpired(), issuers)

            # Find an issuer to return. If none is valid, pick one to return.
            if len(validIssuers) == 0:
                if len(issuers) > 0:
                    path.append(issuers[0])
                break
            
            c = issuers[0]

        return path
Example No. 18
def searchPath(goal, path, graph):
    if goal == path[-1]:
        return
    for key in graph[path[-1]]:
        if key not in path:
            path.append(key)
            searchPath(goal, path, graph)
Example No. 19
def test_func_dir(tmpdir):
    # Test the creation of the memory cache directory for the function.
    memory = Memory(cachedir=tmpdir.strpath, verbose=0)
    memory.clear()
    path = __name__.split('.')
    path.append('f')
    path = tmpdir.join('joblib', *path).strpath

    g = memory.cache(f)
    # Test that the function directory is created on demand
    assert g._get_func_dir() == path
    assert os.path.exists(path)

    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store
    _FUNCTION_HASHES.clear()
    assert not g._check_previous_func_code()
    assert os.path.exists(os.path.join(path, 'func_code.py'))
    assert g._check_previous_func_code()

    # Test the robustness to failure of loading previous results.
    dir, _ = g.get_output_dir(1)
    a = g(1)
    assert os.path.exists(dir)
    os.remove(os.path.join(dir, 'output.pkl'))
    assert a == g(1)
Example No. 20
def chain_wires(wires):
    assert wires.num_vertices > 0
    visited = np.zeros(wires.num_vertices, dtype=bool)
    path = [0]
    visited[0] = True
    while not np.all(visited):
        front = path[0]
        front_neighbors = wires.get_vertex_neighbors(int(front))
        for v in front_neighbors:
            if visited[v]:
                continue
            path.insert(0, v)
            visited[v] = True
            break
        end = path[-1]
        end_neighbors = wires.get_vertex_neighbors(int(end))
        for v in end_neighbors:
            if visited[v]:
                continue
            visited[v] = True
            path.append(v)
            break

    first_neighbors = wires.get_vertex_neighbors(int(path[0])).squeeze()
    if len(path) > 2 and path[-1] in first_neighbors:
        # Close the loop.
        path.append(path[0])

    path = wires.vertices[path]
    return path
Example No. 21
def test_func_dir():
    # Test the creation of the memory cache directory for the function.
    memory = Memory(cachedir=env['dir'], verbose=0)
    path = __name__.split('.')
    path.append('f')
    path = os.path.join(env['dir'], 'joblib', *path)

    g = memory.cache(f)
    # Test that the function directory is created on demand
    yield nose.tools.assert_equal, g._get_func_dir(), path
    yield nose.tools.assert_true, os.path.exists(path)

    # Test that the code is stored.
    yield nose.tools.assert_false, \
        g._check_previous_func_code()
    yield nose.tools.assert_true, \
            os.path.exists(os.path.join(path, 'func_code.py'))
    yield nose.tools.assert_true, \
        g._check_previous_func_code()

    # Test the robustness to failure of loading previous results.
    dir, _ = g.get_output_dir(1)
    a = g(1)
    yield nose.tools.assert_true, os.path.exists(dir)
    os.remove(os.path.join(dir, 'output.pkl'))
    yield nose.tools.assert_equal, a, g(1)
Example No. 22
def _read_structure_lazy(infile=None, include_hosts=True):
    '''Determine and return the organizational structure from a given
    host file.
    '''

    path, hosts, structure = [], [], []

    lines = _read_lines_lazy(infile)
    for line in lines:
        if not _iscomment(line):
            if include_hosts:
                hosts.append(line)
            continue

        match = _match_open.search(line)
        if match:
            path.append(match.group(1))
            continue

        match = _match_close.search(line)
        if match:
            if include_hosts:
                yield (list(path), list(hosts))
                hosts = []
            else:
                yield list(path)

            path.pop()
    return
Example No. 23
    def _find_path(self, src, tgt):
        visited = set()
        to_visit = [src]
        came_from = {}
        while len(to_visit) > 0:
            elem = to_visit[0]
            del to_visit[0]

            if elem in visited:
                continue
            # if path found, redo path and return
            if elem is tgt and src in visited:  # elem in visited for the case where src = tgt
                path = []
                prev_elem = elem
                while prev_elem is not src:
                    path.append(prev_elem)
                    prev_elem = came_from[prev_elem]
                path.append(src)
                path.reverse()
                return path

            visited.add(elem)
            for outelem in elem.outset:
                came_from[outelem] = elem
                to_visit.append(outelem)
        return []  # if no path found, return an empty path
Example No. 24
 def read_mda(self, attribute):
     lines = attribute.split('\n')
     mda = {}
     current_dict = mda
     path = []
     for line in lines:
         if not line:
             continue
         if line == 'END':
             break
         key, val = line.split('=')
         key = key.strip()
         val = val.strip()
         try:
             val = eval(val)
         except NameError:
             pass
         if key in ['GROUP', 'OBJECT']:
             new_dict = {}
             path.append(val)
             current_dict[val] = new_dict
             current_dict = new_dict
         elif key in ['END_GROUP', 'END_OBJECT']:
             if val != path[-1]:
                 raise SyntaxError
             path = path[:-1]
             current_dict = mda
             for item in path:
                 current_dict = current_dict[item]
         elif key in ['CLASS', 'NUM_VAL']:
             pass
         else:
             current_dict[key] = val
     return mda
Example No. 25
    def build_model_results_path(self, model):
        opt = self.process[model]
        K_part = "K-{0}".format(self.process["options"]["K"])
        path = [opt["init"].upper(), model, K_part]
        for key in ["burnIn", "maxIter", "sampleLag", "alpha", "beta"]:
            path.append("{0}-{1}".format(self.argmap(key), opt[key]))

        return "_".join(path)
Example No. 26
 def get_node_path(self):
     node = self
     path = []
     while node:
         if not node._key:
             break
         path.append(node._key)
         node = node._parent
     return '.'.join(path)
Example No. 27
def dp(dist_mat):
    """
    Find minimum-cost path through matrix `dist_mat` using dynamic programming.

    The cost of a path is defined as the sum of the matrix entries on that
    path. See the following for details of the algorithm:

    - http://en.wikipedia.org/wiki/Dynamic_time_warping
    - http://www.ee.columbia.edu/ln/labrosa/matlab/dtw/dp.m

    The notation in the first reference was followed, while Dan Ellis's code
    (second reference) was used to check for correctness. Return a list path 
    of indices and the cost matrix.
    """

    N, M = dist_mat.shape
    
    # Initialize the cost matrix
    cost_mat = np.zeros((N + 1, M + 1))
    for i in range(1, N + 1):
        cost_mat[i, 0] = np.inf
    for i in range(1, M + 1):
        cost_mat[0, i] = np.inf

    # Fill the cost matrix while keeping traceback information
    traceback_mat = np.zeros((N, M))
    for i in range(N):
        for j in range(M):
            penalty = [
                cost_mat[i, j],      # match (0)
                cost_mat[i, j + 1],  # insertion (1)
                cost_mat[i + 1, j]]  # deletion (2)
            i_penalty = np.argmin(penalty)
            cost_mat[i + 1, j + 1] = dist_mat[i, j] + penalty[i_penalty]
            traceback_mat[i, j] = i_penalty

    # Traceback from bottom right
    i = N - 1
    j = M - 1
    path = [(i, j)]
    while i > 0 or j > 0:
        tb_type = traceback_mat[i, j]
        if tb_type == 0:
            # Match
            i = i - 1
            j = j - 1
        elif tb_type == 1:
            # Insertion
            i = i - 1
        elif tb_type == 2:
            # Deletion
            j = j - 1
        path.append((i, j))

    # Strip infinity edges from cost_mat before returning
    cost_mat = cost_mat[1:, 1:]
    return (path[::-1], cost_mat)
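A short usage sketch for dp above, on a made-up distance matrix (the excerpt assumes numpy is imported as np):

import numpy as np

# Toy pairwise-distance matrix between two short sequences.
dist_mat = np.array([[0.0, 2.0, 3.0],
                     [2.0, 0.0, 1.0],
                     [3.0, 1.0, 0.0]])
path, cost_mat = dp(dist_mat)
print(path)              # list of (i, j) index pairs, starting at (0, 0)
print(cost_mat[-1, -1])  # total alignment cost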
Example No. 28
def load_path_file(filename):
    path = []
    with open(filename, "r") as fin:
        for line in fin:
            fields = line.split()
            if len(fields) == 0 or line[0] == "#":
                continue
            path.append([float(x) for x in fields])
    return np.array(path, dtype=float)
Example No. 29
def gen_refs(o, path=None):
    path = path or ['']

    path.append(o.xpathname())
    yield (o.id, '/'.join(path))
    if hasattr(o, 'items'):
        for child in o.items:
            for entry in gen_refs(child, list(path)):
                yield entry
Example No. 30
File: tree.py Project: fhopecc/stxt
 def path(self):
     '''Nodes from root to self; every tree node has a unique path.
        A path identifies a tree node uniquely, so we use
        a path to represent a file in the tree file system.
     '''
     if self.isRoot(): return [self]
     path = self.parent.path()
     path.append(self)
     return path
Example No. 31
def make_process(*args, **kwargs):
    """Choose whether to use python built in process or jasper."""
    process_cls = process.Process
    if config.SPAWN_USING == "jasper":
        process_cls = jasper_process.Process

    # Add the current working directory and /data/multiversion to the PATH.
    env_vars = kwargs.get("env_vars", {}).copy()
    path = [
        os.getcwd(),
        config.DEFAULT_MULTIVERSION_DIR,
    ]

    # If installDir is provided, add it early to the path
    if config.INSTALL_DIR is not None:
        path.append(config.INSTALL_DIR)
        env_vars["INSTALL_DIR"] = config.INSTALL_DIR

    path.append(env_vars.get("PATH", os.environ.get("PATH", "")))

    env_vars["PATH"] = os.pathsep.join(path)
    kwargs["env_vars"] = env_vars
    return process_cls(*args, **kwargs)
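The PATH assembly above reduces to joining directories with os.pathsep, preferred directories first; a tiny self-contained sketch (the extra directory is illustrative):

import os

path_dirs = [os.getcwd(), "/data/multiversion"]   # preferred lookup directories first
path_dirs.append(os.environ.get("PATH", ""))      # then whatever PATH already held
combined = os.pathsep.join(path_dirs)
print(combined.split(os.pathsep)[0])              # the working directory wins lookups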
Example No. 32
    def find_shortest_path(self, src, dest):
        # does not need to be recalculated for all

        src = self.Cities.inverse[src]
        dest = self.Cities.inverse[dest]
        a, pr = shortest_path(self.DistanceMap,
                              directed=False,
                              return_predecessors=True)
        path = [dest]
        k = dest
        while pr[src, k] != -9999:
            path.append(pr[src, k])
            k = pr[src, k]
        if path[-1] == src:
            path = path[::-1]
            res = [(self.Cities[path[0]], self.Cities[path[1]], a[path[0],
                                                                  path[1]])]
            for i in range(len(path) - 2):
                res.append((self.Cities[path[i + 1]], self.Cities[path[i + 2]],
                            res[-1][2] + a[path[i + 1], path[i + 2]]))
            return res
        else:
            return np.zeros(1, int)
Example No. 33
def move_up_tree(graph, hpo, move_up, min_dist):
    c = hpo
    d = 0
    path = [hpo]
    try:
        while graph.nodes[c]['name'] != 'All':
            c = graph.nodes[c]['is_a'][0]
            d += 1
            path.append(c)
    except KeyError:
        print('Key Error ' + c + ' dist: ' + str(d))
        return None
    if min_dist == None or d > min_dist:
        if len(path) - move_up > min_dist:
            return path[move_up]
        else:
            # return the item at min_dist
            print('Cannot move ' + str(move_up) +
                  ' spaces up the tree. Returning node at minimum distance')
            return path[-(min_dist + 1)]
    else:
        print('Path shorter than min distance. Returning starting point')
        return hpo
Example No. 34
    def on_click(event, x, y, flags, param):
        if event != cv2.EVENT_LBUTTONUP:
            return

        # kp = clostest_key_points(key_points, (x,y), 1)[0]

        res_frame = resize_frame(frame)
        kp_grid = key_point_grid(orb, res_frame)
        print("len(kp_grid)", len(kp_grid))

        pos = (y, x)

        grid_offset_x = ((frame.shape[0] - 32) % stride) / 2.0 + 16
        grid_offset_y = ((frame.shape[1] - 32) % stride) / 2.0 + 16
        g_pos = (int(math.floor((pos[0] - grid_offset_x) / stride)),
                 int(math.floor((pos[1] - grid_offset_y) / stride)))

        print("g_pos", g_pos)
        path = []

        for i in range(playback_random_walk_length):
            g_pos, pos = next_pos_play(kp_grid, res_frame.shape, g_pos)
            print("g_pos, pos", g_pos, pos)
            if g_pos is None:
                break
            path.append(pos)

        path = list(set(path))

        windows = np.array([extract_window(res_frame, p) for p in path])

        preprocess_input(windows)
        features = model.predict(windows)
        features = features.reshape((windows.shape[0], 512))

        print("windows.shape, feats.shape", windows.shape, features.shape)
        show_patches(windows, features, path, frame.shape, memory_graph)
Example No. 35
def A_star(start, goal, MyWorld):
    # frontier of points that haven't been searched
    frontier = PriorityQueue()
    # put a point in frontier
    frontier.put(start, 0)
    # dictionary of squares that other squares came from
    came_from = {}
    came_from[start] = None
    cost_so_far = {}
    cost_so_far[start] = 0
    while not frontier.empty():
        current = frontier.get()
        print("Searching {0}".format(current))
        # If the place we are at is the goal end the search
        if current == goal:
            break
        # Search for each point that is next to current
        for next in MyWorld.neighbors(current):
            new_cost = cost_so_far[current] + distance(
                current, next) + angle_cost(came_from[current], current, next)
            if new_cost < cost_so_far.get(next, math.inf):
                cost_so_far[next] = new_cost
                priority = new_cost + distance(next, goal)
                frontier.put(next, priority)
                came_from[next] = current

    # Find the path from the start to end
    current = goal
    path = []
    while current != start:
        print("Current: {0}".format(current))
        path.append(current)
        current = came_from[current]

    path.append(start)
    path.reverse()
    return path
Example No. 36
    def random_walk(self,
                    path_length,
                    alpha=0,
                    rand=random.Random(),
                    cumulated_cache=None,
                    nodes=None,
                    q=None,
                    p=None):
        """ Returns a truncated random walk.

        path_length: Length of the random walk.
        alpha: probability of restarts.
        nodes: start nodes; one truncated walk is generated per node.
        """
        G = self
        paths = []

        for node in nodes:
            if node:
                path = [node]
            else:
                # Sampling is uniform w.r.t V, and not w.r.t E
                path = [rand.choice(G.keys())]
            while len(path) < path_length:
                cur = path[-1]
                if len(G[cur]) > 0:
                    if cur == path[0] or rand.random() >= alpha:
                        #path.append(rand.choice(G[cur].keys()))
                        path.append(
                            weighted_choice(G[cur], cumulated_cache[cur]))
                        #path.append(choice(G[cur].keys(), p=weights[cur]))
                    else:
                        path.append(path[0])
                else:
                    break
            paths.append(path)
        q.put(paths)
Example No. 37
def default_lib_path(data_dir: str, pyversion: Tuple[int, int],
                     python_path: bool) -> List[str]:
    """Return default standard library search paths."""
    # IDEA: Make this more portable.
    path = []  # type: List[str]

    # Add MYPYPATH environment variable to library path, if defined.
    path.extend(mypy_path())

    auto = os.path.join(data_dir, 'stubs-auto')
    if os.path.isdir(auto):
        data_dir = auto

    # We allow a module for e.g. version 3.5 to be in 3.4/. The assumption
    # is that a module added with 3.4 will still be present in Python 3.5.
    versions = [
        "%d.%d" % (pyversion[0], minor)
        for minor in reversed(range(pyversion[1] + 1))
    ]
    # E.g. for Python 3.5, try 2and3/, then 3/, then 3.5/, then 3.4/, 3.3/, ...
    for v in ['2and3', str(pyversion[0])] + versions:
        for lib_type in ['stdlib', 'builtins', 'third_party']:
            stubdir = os.path.join(data_dir, 'typeshed', lib_type, v)
            if os.path.isdir(stubdir):
                path.append(stubdir)

    # Add fallback path that can be used if we have a broken installation.
    if sys.platform != 'win32':
        path.append('/usr/local/lib/mypy')

    # Contents of Python's sys.path go last, to prefer the stubs
    # TODO: To more closely model what Python actually does, builtins should
    #       go first, then sys.path, then anything in stdlib and third_party.
    if python_path:
        path.extend(sys.path)

    return path
Example No. 38
    def viterbi(self, sentence):
        pmv = np.zeros(shape=(len(self.states), len(sentence)))
        bp = np.zeros(shape=(len(self.states), len(sentence)))
        for i in range(len(self.states)):
            pmv[i][0] = self.start_probas[self.states[
                i]] * self.emission_probas[self.states[i]][sentence[0]]
            bp[i][0] = 0
        for i in range(1, len(sentence)):
            for j in range(len(self.states)):
                li = []
                li1 = []
                for k in range(len(self.states)):
                    li.append(
                        pmv[k][i - 1] *
                        self.emission_probas[self.states[j]][sentence[i]] *
                        self.transition_probas[self.states[k]][self.states[j]])
                    li1.append(
                        pmv[k][i - 1] *
                        self.transition_probas[self.states[k]][self.states[j]])
                pmv[j][i] = max(li)
                bp[j][i] = li1.index(max(li1))
        li2 = []
        for k in range(len(self.states) - 1):
            li2.append(pmv[k][len(sentence) - 1])
        self.proba = max(li2)
        bptr = li2.index(max(li2))
        li3 = [bptr]
        for i in range(len(sentence) - 1, 0, -1):
            a = bp[int(bptr)][i]
            li3.append(int(a))
            bptr = a
        li3 = li3[::-1]
        path = []
        for i in li3:
            path.append(self.states[i])

        return path, self.proba
Example No. 39
  def random_walk(self, path_length, alpha=0, rand=random.Random(), start=None):
    """ Returns a truncated random walk.

        path_length: Length of the random walk.
        alpha: probability of restarts.
        start: the start node of the random walk.
    """
    G = self
    if start:
      path = [start]
    else:
      # Sampling is uniform w.r.t V, and not w.r.t E
      path = [rand.choice(G.keys())]

    while len(path) < path_length:
      cur = path[-1]
      if len(G[cur]) > 0:
        if rand.random() >= alpha:
          path.append(rand.choice(G[cur]))
        else:
          path.append(path[0])
      else:
        break
    return path
Example No. 40
def nestTree(parent_node, path, export_data, count_nodes):
    ### export_data,count_nodes are used for QC only
    children = edges[parent_node]
    path.append(0)
    for child in children.keys():
        tuple_path = tuple(path)
        #count_nodes+=1
        #try: temp = string.join(edges[child].keys(),'|')
        #except Exception: temp = ''
        #export_data.write(str(tuple_path)+'\t'+child+'\t'+temp+'\n')
        p = list(
            path
        )  ### Otherwise, the same path somehow gets used (alternative to copy.deepcopy())
        if child in edges:
            count_nodes = nestTree(child, p, export_data, count_nodes)
        #if count_nodes==1000: kill

        path_ontology_db[tuple_path] = child
        if child not in built_ontology_paths:
            built_ontology_paths[child] = [tuple_path]
        elif tuple_path not in built_ontology_paths[child]:
            built_ontology_paths[child].append(tuple_path)
        path[-1] += 1
    return count_nodes
Example No. 41
def restore_path(grid, start, end, geo_ref):
    increments = [(1, 0), (1, 1), (0, 1), (0, -1), (-1, -1), (-1, 0), (-1, 1),
                  (1, -1)]
    path = []
    current = start
    time_start = timeit.default_timer()
    while True:
        path.append(current)
        neighbours = [(current[0] + dx, current[1] + dy)
                      for dx, dy in increments]
        vals = []
        for x, y in neighbours:
            try:
                val = grid[x, y]
                if val == 0 and not ((x, y) == end): val = 10000000000000
                vals.append(val)
            except IndexError:
                vals.append(10000000000000000000)
        minimum = min(vals)
        current = neighbours[vals.index(minimum)]
        if current == end: break
        #if grid[current] <2: break
        if timeit.default_timer() - time_start > 0.1: return None
    return path[::-1]
Example No. 42
 def normal_random_walk(self,
                        path_length,
                        rand=random.Random(),
                        start=None):
     """
     Define a normal random walk without restart to generate positive training pairs.
     """
     G = self
     pairs = []
     if start:
         path = [start]
     else:
         # Sampling is uniform w.r.t V, and not w.r.t E
         path = [rand.choice(G.keys())]
     while len(path) < path_length:
         cur = path[-1]
         if len(G[cur]) > 0:
             next_node = rand.choice(G[cur])
             path.append(next_node)
             if path[0] != next_node:
                 pairs.append((path[0], next_node))
         else:
             break
     return pairs
Example No. 43
def get_lowest_latency_path(start, end):
    # disjktra's algorithm
    distances = {}
    predecessors = {}
    to_assess = partial_latencies.keys()

    for node in partial_latencies:
        distances[node] = float("inf")
        predecessors[node] = None

    sp_set = []
    distances[start] = 0

    while len(sp_set) < len(to_assess):
        still_in = {
            node: distances[node]
            for node in [node for node in to_assess if node not in sp_set]
        }
        closest = min(still_in, key=distances.get)
        sp_set.append(closest)

        for node in partial_latencies[closest]:
            if distances[node] > distances[closest] + partial_latencies[
                    closest][node]:
                distances[node] = distances[closest] + partial_latencies[
                    closest][node]
                predecessors[node] = closest

    path = [end]
    while start not in path:
        path.append(predecessors[path[-1]])

    path_ordered = path[::-1]
    path_latency = distances[end]

    return {"path": path_ordered, "latency": path_latency}
Example No. 44
def deserialize_lms_sig(buffer):
    q = deserialize_u32(buffer[0:4])
    # print "q: " + str(q)
    lmots_type = typecode_peek(buffer[4:8])
    # print "lmots_type: " + str(lmots_type)
    if lmots_type in lmots_params:
        pos = 4 + LmotsSignature.bytes(lmots_type)
    else:
        raise ValueError(err_unknown_typecode, str(lmots_type))
    lmots_sig = buffer[4:pos]
    lms_type = typecode_peek(buffer[pos:pos + 4])
    if lms_type in lms_params:
        m, h, LenI = lms_params[lms_type]
    else:
        raise ValueError(err_unknown_typecode, str(lms_type))
    if (q >= 2**h):
        raise ValueError(err_bad_value)
    pos = pos + 4
    path = list()
    for i in xrange(0, h):
        path.append(buffer[pos:pos + m])
        pos = pos + m
    # PrintUtl.print_hex("buffer tail", buffer[pos:])
    return lms_type, q, lmots_sig, path
Example No. 45
def a_star(grid, start, goal):

    path = []
    path_cost = 0

    if (start == goal):
        return path, path_cost

    queue = PriorityQueue()
    queue.put((0, start))
    visited = set(start)

    branch = {}
    found = False

    while not queue.empty():
        item = queue.get()
        current_node = item[1]
        if current_node == start:
            current_cost = 0.0
        else:
            current_cost = branch[current_node][0]

        if current_node == goal:
            print('Found a path.')
            found = True
            break
        else:
            for action in valid_actions(grid, current_node):
                # get the tuple representation
                da = action.delta
                next_node = (current_node[0] + da[0], current_node[1] + da[1])
                branch_cost = current_cost + action.cost
                queue_cost = branch_cost + heuristic(next_node, goal)

                if next_node not in visited:
                    visited.add(next_node)
                    branch[next_node] = (branch_cost, current_node, action)
                    queue.put((queue_cost, next_node))

    if found:
        # retrace steps
        n = goal
        path_cost = branch[n][0]
        path.append(goal)
        while branch[n][1] != start:
            path.append(branch[n][1])
            n = branch[n][1]
        path.append(branch[n][1])
    else:
        print('**********************')
        print('Failed to find a path!')
        print('**********************')
        return None

    return path[::-1], path_cost
Example No. 46
    def show_result(self, initial, goal, duration, length, term, potarr):
        line = dict()
        line['initial'] = [self.int_float(x) for x in initial]
        line['goal'] = [self.int_float(x) for x in goal]
        # Omit duration for reproducibility
        # line['duration'] = duration
        line['length'] = int(length) if length % 1 == 0.0 else float(f"{length:.5f}")

        path = list()
        # Iterable of poses, handles unitary paths
        if term == self.noPath:
            poses = []
        elif term.getSort() == self.m.findSort('Pose'):
            poses = [term] 
        else: 
            poses = term.arguments()
            
        for pose in poses:
            x, y, t = self.destruct_pose(pose)
            path.append([self.int_float(x), self.int_float(y)])
        line['path'] = path
        if potarr is not None:
            line['navfn'] = [float(entry) for row in potarr.arguments() for entry in row.arguments()]
        print(json.dumps(line))
Example No. 47
def __random_walk__(G, path_length, start, alpha=0, rand=random.Random()):
    '''
    Returns a truncated random walk.
    :param G: networkx graph
    :param path_length: Length of the random walk.
    :param alpha: probability of restarts.
    :param rand: random number generator
    :param start: the start node of the random walk.
    :return:
    '''

    path = [start]


    while len(path) < path_length:
        cur = path[-1]
        if len(G.neighbors(cur)) > 0:
            if rand.random() >= alpha:
                path.append(rand.choice(G.neighbors(cur)))
            else:
                path.append(path[0])
        else:
            break
    return path
Example No. 48
def dsp(n, s, t, C):
    '''
    DSP algorithm using label correction.
    '''
    openList = [s]
    g = [float('inf') for i in range(n)]
    g[s] = 0
    parent = [-1 for i in range(n)]
    while (len(openList) > 0):
        i = openList.pop()
        for j in range(n):
            if (g[i] + C[i, j]) < g[j] and (g[i] + C[i, j]) < g[t]:
                g[j] = g[i] + C[i, j]
                parent[j] = i
                if j != t:
                    openList = [j] + openList
    end = int(t)
    path, cost = [], []

    while (end != -1):
        path.append(int(end))
        cost.append(g[end])
        end = parent[end]
    return path[::-1], cost
Example No. 49
def make_path(fid, sfid=None, cid=None, subid=None):
    path = [urllib.unquote(fid)]
    if sfid is not None:
        path.append(urllib.unquote(sfid))
        if cid is not None:
            if subid is not None:
                path.append(cid + '@' + subid)
            else:
                path.append(cid)
    return '/'.join(path)
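make_path above relies on Python 2's urllib.unquote; a Python 3 rendering of the same joining logic (the function name is only for this sketch):

from urllib.parse import unquote  # Python 3 home of urllib.unquote

def make_path_py3(fid, sfid=None, cid=None, subid=None):
    # Same nesting as above: cid and subid only matter when sfid is present.
    parts = [unquote(fid)]
    if sfid is not None:
        parts.append(unquote(sfid))
        if cid is not None:
            parts.append(cid + '@' + subid if subid is not None else cid)
    return '/'.join(parts)

print(make_path_py3('top%20level', 'sub', 'comment', '42'))  # top level/sub/comment@42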
Example No. 50
def a_starGraph(graph, start, goal):
    """Modified A* to work with NetworkX graphs."""

    path = []
    path_cost = 0

    if (start == goal):
        return path, path_cost

    queue = PriorityQueue()
    queue.put((0, start))
    visited = set(start)

    branch = {}
    found = False

    while not queue.empty():
        item = queue.get()
        current_node = item[1]
        if current_node == start:
            current_cost = 0.0
        else:
            current_cost = branch[current_node][0]

        if current_node == goal:
            print('Found a path.')
            found = True
            break
        else:
            for next_node in graph[current_node]:
                cost = graph.edges[current_node, next_node]['weight']
                branch_cost = current_cost + cost
                queue_cost = branch_cost + heuristic(next_node, goal)

                if next_node not in visited:
                    visited.add(next_node)
                    branch[next_node] = (branch_cost, current_node)
                    queue.put((queue_cost, next_node))

    if found:
        # retrace steps
        n = goal
        path_cost = branch[n][0]
        path.append(goal)
        while branch[n][1] != start:
            path.append(branch[n][1])
            n = branch[n][1]
        path.append(branch[n][1])
    else:
        print('**********************')
        print('Failed to find a path in graph!')
        print('**********************')
        return None, None

    return path[::-1], path_cost
Example No. 51
def _extract_path(tpl: BIP32Template, want_nomatch: bool = False) -> List[int]:

    path = []
    have_nomatch = False
    for s in tpl.sections:
        for start, end in s:
            if want_nomatch and not have_nomatch:
                if (start & HARDENED_INDEX_MASK) != 0:
                    path.append(start-1)
                    have_nomatch = True
                    break
                if (end | HARDENED_INDEX_START) != 0xFFFFFFFF:
                    path.append(end+1)
                    have_nomatch = True
                    break
            elif random.choice((True, False)):
                path.append(random.randint(start, end))
                break
        else:
            path.append(s[0][0])

    if want_nomatch and not have_nomatch:
        # Could not put non-matching value in any position, that means that
        # all sections contain wildcard match. To make a non-matching path,
        # just flip the last hardened section to unhardened.
        # If there's no hardened sections, flip first section to hardened
        have_flipped = False
        for i, s in enumerate(tpl.sections):
            assert len(s) == 1
            start, end = s[0]
            assert (start & HARDENED_INDEX_MASK) == 0
            assert (end | HARDENED_INDEX_START) == 0xFFFFFFFF
            if start >= HARDENED_INDEX_START and not have_flipped:
                # Found the hardened section, flip
                path[i] = start ^ HARDENED_INDEX_START
                have_flipped = True
                # do not break so all sections are checked with asserts

        if not have_flipped:
            # All sections were unhardened, make first section hardened
            assert tpl.sections[0][0][0] < HARDENED_INDEX_START
            path[0] = tpl.sections[0][0][0] | HARDENED_INDEX_START

    return path
Example No. 52
    def compute_path(cls, col, row, depth):
        """Constructor that takes a col,row of a tile and computes the path. 

        """
        assert col % 2 == 0
        assert row % 4 == 0

        xradius = 2**depth
        yradius = 2 * 2**depth

        colbounds = [-xradius, xradius]
        rowbounds = [-yradius, yradius]

        path = []

        for level in xrange(depth):
            # Strategy: Find the midpoint of this level, and determine which
            # quadrant this row/col is in. Then set the bounds to that level
            # and repeat

            xmid = (colbounds[1] + colbounds[0]) // 2
            ymid = (rowbounds[1] + rowbounds[0]) // 2

            if col < xmid:
                if row < ymid:
                    path.append(0)
                    colbounds[1] = xmid
                    rowbounds[1] = ymid
                else:
                    path.append(2)
                    colbounds[1] = xmid
                    rowbounds[0] = ymid
            else:
                if row < ymid:
                    path.append(1)
                    colbounds[0] = xmid
                    rowbounds[1] = ymid
                else:
                    path.append(3)
                    colbounds[0] = xmid
                    rowbounds[0] = ymid

        return cls(col, row, path)
Example No. 53
  def random_walk(self, path_length, alpha=0, rand=random.Random(), start=None):
    """ Returns a truncated random walk.

        path_length: Length of the random walk.
        alpha: probability of restarts.
        start: the start node of the random walk.
    """
    G = self
    if start:
      path = [start]
    else:
      # Sampling is uniform w.r.t V, and not w.r.t E
      path = [rand.choice(list(G.keys()))]

    while len(path) < path_length:
      cur = path[-1]
      if len(G[cur]) > 0:
        if rand.random() >= alpha:
          ###############
          # RANDOM WALK #
          ###############
          # path.append(rand.choice(G[cur]))
          ###############
          # RANDOM WALK #
          ###############

          ################################
          # NON-BACKTRACKING RANDOM WALK #
          ################################
          neighbors = list(G[cur])  # copy, so removing the previous node does not mutate the graph
          if len(path) > 1:
            previous = path[-2]
            try:
              neighbors.remove(previous)
            except ValueError:
              pass
          if neighbors:
            path.append(rand.choice(neighbors))
          else:
            path.append(path[0])
          ################################
          # NON-BACKTRACKING RANDOM WALK #
          ################################
        else:
          path.append(path[0])
      else:
        break
    return [str(node) for node in path]
Example No. 54
    def get_resource(self, action=None, token=True):
        path = []
        if self.settings.prefix is not None:
            path.append(self.settings.prefix)
            if action is None:
                path.append('')
        if action is not None:
            path.append(action)

        if token == 'one-time':
            query = {'token': self.gen_one_time_token()}
        elif token:
            query = {'token': self.auth_token}
        else:
            query = None
        return url.Resource('/'.join(path), query)
Example No. 55
    def bgpa_customers_routes(self, AS, br_tuples):
        if self.paths_from_customers == {}:
            return

        for br_id, br_addr in br_tuples:
            nn = br_addr[0]
            msg_dst = br_addr[0] + ' ' + br_addr[1]
            for path_dst, path_tuple in self.paths_from_customers:
                path = path_tuple[0]
                path.append(self.AS)

                networks = self.networks_in_as[path[-1]]
                networks = ' '.join(networks)

                path = ' '.join(path)

                for ip_addr in self.addrs:
                    if ip_addr[0] == nn:
                        msg_src = ip_addr[0] + ' ' + ip_addr[1]
                        bgpa_msg = msg_src + ' ' + msg_dst + ' ' + self.ID + ' PATHADV ' + path + ' NETWKS ' + networks

                        print bgpa_msg
Example No. 56
def get_parameter(param_path, template_path, db):
    """returns string value of parameter, given its path, which can contain
    wildcards,template path, from which wildcards will be substituted and
    structure, from which it should be fetched"""
    # path to parameter to be built
    path = []
    # TODO optimize this
    if param_path.startswith('.'):
        path = template_path[:-1] + [param_path[1:]]
    else:
        paths_diverged = False
        all_steps = param_path.split('.')
        for j in range(len(all_steps)):
            step = all_steps[j]
            # we need to substitute all asterisks for their values
            if step == '*':
                if not paths_diverged:
                    path.append(template_path[j])
                else:
                    sys.stderr.write("""
                    Paths already diverged, wildcard can't be
                    substituted {}""".format(path))
            # special case if we want to return key not value of dict
            # only for assurance that ^ is at the end and paths has not
            # diverged
            elif step == '^':
                if j == len(all_steps) - 1 and not paths_diverged:
                    path.append('^')
                else:
                    sys.stderr.write("""
                    Paths already diverged or ^ in parameter path\n""")
            else:
                if step != template_path[j]:
                    paths_diverged = True
                path.append(step)
    # now we get value for built path
    val = db
    for i in range(len(path)):
        step = path[i]
        if step == '^':
            val = template_path[i]
        else:
            val = val[step]
    return str(val)
Example No. 57
    def get_blocks(self):
        '''
            Compares two binary strings under the assumption that y is the result of
            applying the following transformations onto x:

             * change single bytes in x (likely)
             * expand single bytes in x to two bytes (less likely)
             * drop single bytes in x (even less likely)

            Returns a generator that yields elements of the form (unmodified, xdiff, ydiff),
            where each item represents a binary chunk with "unmodified" denoting whether the
            chunk is the same in both strings, "xdiff" denoting the size of the chunk in x
            and "ydiff" denoting the size of the chunk in y.

            Example:
            >>> x = "abcdefghijklm"
            >>> y = "mmmcdefgHIJZklm"
            >>> list(MemoryComparator(x, y).get_blocks())
            [(False, 2, 3), (True, 5, 5),
             (False, 3, 4), (True, 3, 3)]
        '''
        x, y = self.x, self.y
        _, moves = self.get_grid()

        # walk the grid
        path = []
        i, j = 0, 0
        while True:
            dy, dx = self.move_to_gradient[moves[j][i]]
            if dy == dx == 0: break
            path.append((dy == 1 and x[i] == y[j], dy, dx))
            j, i = j + dy, i + dx

        for i, j in zip(range(i, len(x)), itertools.count(j)):
            if j < len(y): path.append((x[i] == y[j], 1, 1))
            else: path.append((False, 0, 1))

        i = j = 0
        for unmodified, subpath in itertools.groupby(path, itemgetter(0)):
            ydiffs = list(map(itemgetter(1), subpath))
            dx, dy = len(ydiffs), sum(ydiffs)
            yield unmodified, dx, dy
            i += dx
            j += dy
Example No. 58
def find_best_path(k, n, curr_path, actual_path, min_cost, cost):
    if k == n:
        c = 0
        path = []
        path.append(0)
        for i in curr_path:
            path.append(i)
        path.append(n + 1)
        for j in range(len(path) - 1):
            c += cost[path[j], path[j + 1]]
        if min_cost[0] == -1 or c < min_cost[0]:
            while len(actual_path) > 0:
                actual_path.pop()
            for tmp in path:
                actual_path.append(tmp)
            min_cost[0] = c  
    else:
        for i in range(k -1, n):
            curr_path[k - 1], curr_path[i] = curr_path[i], curr_path[k - 1]
            find_best_path(k + 1, n, curr_path, actual_path, min_cost, cost)
            curr_path[k - 1], curr_path[i] = curr_path[i], curr_path[k - 1]
Example No. 59
def random_walk(G, path_length, alpha=0, rand=random.Random(), start=None, temporal=False):
    """ Returns a truncated random walk.
        path_length: Length of the random walk.
        alpha: probability of restarts.
        start: the start node of the random walk.
        temporal: whether to use "time" edge attribute to only generate a temporal walk
    """
    if start:
        path = [start]
    else:
        # Sampling is uniform w.r.t V, and not w.r.t E
        path = [rand.choice(G.nodes())]

    if temporal:
        u = path[-1]
        times = [G[u][v]['time'] for v in G.neighbors(u)]
        if times:
            t = rand.choice(times)
        else:
            return path

    while len(path) < path_length:
        cur = path[-1]
        if len(G[cur]) > 0:
            if rand.random() >= alpha:
                if temporal:
                    node_and_time = [(v,G[cur][v]['time']) for v in G.neighbors(cur) if G[cur][v]['time'] > t]
                    if node_and_time:
                        next_node, t = rand.choice(node_and_time)
                        path.append(next_node)
                    else:
                        break
                else:
                    path.append(rand.choice(list(G[cur].keys())))
            else:
                path.append(path[0])
        else:
            break
    return path
Example No. 60
def run_trial(policy,
              problem_server,
              limit=1000,
              det_sample=False,
              show_time=False):
    """Run policy on problem. Returns (cost, path), where cost may be None if
    goal not reached before horizon."""
    env = RemoteEnv(problem_server)
    obs = env.reset()
    # total cost of this run
    cost = 0
    path = []
    if det_sample:
        bak_sample = policy.action_space.weighted_sample
        policy.action_space.weighted_sample = det_sampler
    try:
        for action_num in range(1, limit):
            if show_time:
                start = time()
            action, _ = policy.get_action(obs)
            if show_time:
                elapsed = time() - start
                print('get_action took %fs' % elapsed)
            path.append(env.action_name(action))
            obs, reward, done, step_info = env.step(action)
            cost += step_info['step_cost']
            if step_info['goal_reached']:
                path.append('GOAL! :D')
                return cost, True, path
            # we can run out of time or run out of actions to take
            if done:
                break
        path.append('FAIL! D:')
    finally:
        if det_sample:
            policy.action_space.weighted_sample = bak_sample
    return cost, False, path