Code Example #1
 def request_states(self):
     ret = OrderedSet()
     for node in self.req_endnodes:
         state = node.request_state
         assert isinstance(state, str)
         ret.add(state)
     return ret
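All of the examples on this page assume an OrderedSet class in the spirit of the orderedset / ordered-set packages on PyPI: a deduplicating container that preserves insertion order. As a rough mental model, here is a minimal sketch (not any particular package's implementation) built on a dict, whose keys keep insertion order on Python 3.7+:

class MiniOrderedSet:
    """Minimal sketch: dict keys give O(1) membership plus insertion order."""

    def __init__(self, iterable=()):
        self._items = dict.fromkeys(iterable)

    def add(self, item):
        self._items[item] = None   # no-op if the item is already present

    def discard(self, item):
        self._items.pop(item, None)

    def __contains__(self, item):
        return item in self._items

    def __iter__(self):
        return iter(self._items)

    def __len__(self):
        return len(self._items)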
Code Example #2
    def test_add_and_discard(self):
        numbers = OrderedSet([1, 2, 3])
        numbers.add(3)
        self.assertEqual(len(numbers), 3)
        numbers.add(4)
        self.assertEqual(len(numbers), 4)
        numbers.discard(4)
        self.assertEqual(len(numbers), 3)
        numbers.discard(4)
        self.assertEqual(len(numbers), 3)

        # Make sure the add method is efficient too!
        setup = "numbers = OrderedSet([])"
        time = partial(timeit, setup=setup, globals=globals(), number=100)
        small_set_time = time(
            dedent("""
            add = numbers.add
            for n in [9999 for _ in range(1000)]:
                add(n)
        """))
        large_set_time = time(
            dedent("""
            add = numbers.add
            for n in [9999 + i for i in range(1000)]:
                add(n)
        """))
        self.assertGreater(small_set_time * 30, large_set_time)
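The test above relies on a few names that are not shown in the snippet; presumably the module imports something like the following (an assumption, since the import block is not part of the example):

from functools import partial
from textwrap import dedent
from timeit import timeit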
Code Example #3
def vertices_and_indices():

    icosahedron = mesh.Mesh.from_file(output('subdivided-5.stl'))

    vertices = OrderedSet()
    
    for face in icosahedron:

        v1 = face[0:3]
        v2 = face[3:6]
        v3 = face[6:9]

        vertices.add(tuple(v1))
        vertices.add(tuple(v2))
        vertices.add(tuple(v3))

    with open(output("vertices"), 'w') as vertices_output:

        for vertex in vertices: 

            theta, phi = spherical(vertex)
            vertices_output.write(f"{theta}, {phi},\n")

    with open(output("indices"), 'w') as indices_output:

        for face in icosahedron:

            v1 = vertices.index(tuple(face[0:3]))
            v2 = vertices.index(tuple(face[3:6]))
            v3 = vertices.index(tuple(face[6:9]))

            indices_output.write(f"{v1}, {v2}, {v3},\n")

    return vertices
Code Example #4
File: raw_fact_entry.py Project: landonb/hamster-gtk
    def _populate_stores(self, evt):
        activities, categories = OrderedSet(), OrderedSet()

        self._activities_with_categories_model.clear()
        self._activities_model.clear()
        self._categories_model.clear()

        for activity in self._get_activities():
            activities.add(text_type(activity.name))
            if activity.category:
                categories.add(text_type(activity.category.name))

            # While we iterate over all activities anyway, we use this to
            # populate the 'activity+category' store right away.
            if activity.category:
                text = '{activity}@{category}'.format(
                    activity=activity.name, category=activity.category.name)
            else:
                text = activity.name
            self._activities_with_categories_model.append([text])

        for activity in activities:
            self._activities_model.append([activity])

        for category in categories:
            self._categories_model.append([category])
Code Example #5
        def union(self, iterable):
            result = OrderedSet(self)

            for key in iterable:
                result.add(key)

            return result
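A quick usage sketch of this union method: elements of the receiver keep their positions, and unseen elements from the iterable are appended in encounter order.

s = OrderedSet([3, 1, 2])
print(list(s.union([2, 4, 1, 5])))   # [3, 1, 2, 4, 5]: order kept, duplicates dropped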
Code Example #6
    def handle_irrd_routes_for_as_set(self, set_name: str) -> str:
        """
        !a query - find all originating prefixes for all members of an AS-set, e.g. !a4AS-FOO or !a6AS-FOO
        """
        if set_name.startswith('4'):
            set_name = set_name[1:]
            object_classes = ['route']
        elif set_name.startswith('6'):
            set_name = set_name[1:]
            object_classes = ['route6']
        else:
            object_classes = ['route', 'route6']

        self._current_set_priority_source = None
        self._current_set_root_object_class = 'as-set'

        members = self._recursive_set_resolve({set_name})
        asns = {int(member[2:]) for member in members}

        query = self._prepare_query(column_names=['parsed_data'], ordered_by_sources=False)
        query = query.object_classes(object_classes).asns_first(asns)
        query_result = self.database_handler.execute_query(query)

        prefixes = OrderedSet()
        for result in query_result:
            for object_class in object_classes:
                prefix = result['parsed_data'].get(object_class)
                if prefix:
                    prefixes.add(prefix)

        return ' '.join(prefixes)
Code Example #7
File: sched.py Project: mefyl/drake
class RoundRobin(SchedulingPolicy):

  def __init__(self):
    self.__coroutines = OrderedSet()

  @property
  def busy(self):
    return bool(self.__coroutines)

  def add(self, coroutine):
    self.__coroutines.add(coroutine)

  def remove(self, coroutine):
    self.__coroutines.remove(coroutine)

  def freeze(self, coroutine):
    self.__coroutines.remove(coroutine)

  def unfreeze(self, coroutine):
    self.__coroutines.add(coroutine)

  def round(self):
    for coro in list(self.__coroutines):
      assert coro is not None
      yield coro

  def dump(self):
    def dump(c, idt = 0):
      print('{}{}{}'.format('  ' * idt, c, ' (frozen)' if c.frozen else ''))
      for child in self.__hierarchy.get(c, []):
        dump(child, idt + 1)
    for root in self.__hierarchy.get(None, []):
      dump(root)
Code Example #8
class RoundRobin(SchedulingPolicy):
    def __init__(self):
        self.__coroutines = OrderedSet()

    @property
    def busy(self):
        return bool(self.__coroutines)

    def add(self, coroutine):
        self.__coroutines.add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)

    def freeze(self, coroutine):
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        for coro in list(self.__coroutines):
            assert coro is not None
            yield coro

    def dump(self):
        def dump(c, idt=0):
            print('{}{}{}'.format('  ' * idt, c,
                                  ' (frozen)' if c.frozen else ''))
            for child in self.__hierarchy.get(c, []):
                dump(child, idt + 1)

        for root in self.__hierarchy.get(None, []):
            dump(root)
Code Example #9
    def _query(self, num_solutions, *atoms: Atom):
        # find variables
        vars = OrderedSet()
        for atom in atoms:
            for var in atom.get_variables():
                if var not in vars:
                    vars.add(var)

        if len(vars) == 0:
            for atom in atoms:
                for term in atom.get_terms():
                    if term not in vars:
                        vars.add(term)

        ori_vars = list(vars)
        vars = [
            x.as_kanren() if getattr(x, "as_kanren", None) else x
            for x in ori_vars
        ]
        goals = [
            x.as_kanren() if getattr(x, "as_kanren", None) else x
            for x in atoms
        ]

        return kanren.run(num_solutions, vars, *goals), ori_vars
Code Example #10
    def test_memory_and_time_efficient(self):
        with Timer() as small_set_timer:
            numbers = OrderedSet([])
            for n in [9999 for _ in range(2000)]:
                numbers.add(n)
        with Timer() as large_set_timer:
            numbers2 = OrderedSet([])
            for n in [9999 + i for i in range(2000)]:
                numbers2.add(n)

        # Time efficient construction
        self.assertGreater(
            small_set_timer.elapsed * 5,
            large_set_timer.elapsed,
        )

        # Memory efficient
        self.assertLess(get_size(numbers) * 100, get_size(numbers2))
        self.assertLess(get_size(numbers), 2000)

        with Timer() as beginning_lookup:
            9999 in numbers2
        with Timer() as end_lookup:
            -1 in numbers2

        # Time efficient lookups
        self.assertGreater(
            beginning_lookup.elapsed * 1.5,
            end_lookup.elapsed,
        )
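This test assumes a Timer context manager exposing an elapsed attribute, plus a get_size helper that measures an object's memory footprint; neither is shown in the example. A minimal Timer sketch under those assumptions:

from time import perf_counter

class Timer:
    """Context manager that records wall-clock duration in .elapsed."""

    def __enter__(self):
        self._start = perf_counter()
        return self

    def __exit__(self, *exc_info):
        self.elapsed = perf_counter() - self._start

get_size would plausibly be a recursive wrapper around sys.getsizeof, but its exact definition is not given here.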
Code Example #11
File: hetrpasses.py Project: ami-GS/ngraph
    def do_pass(self, ops, transformer):

        ops = OrderedSet(op.forwarded for op in ops)

        for op in reversed(Op.ordered_ops(ops)):
            if op.metadata.get('marker') == 'gather':
                # op is GatherRecvOp
                self.parallel_axes = op.metadata['parallel']

                gather_send_op = op.send_nodes[0]

                # clone nodes for each device_id
                replaced_send_ops = OrderedSet()
                new_gather_send_nodes = OrderedSet()
                for i, id in enumerate(op.from_id):
                    new_gather_send_op, new_sends, replaced_sends = clone_graph(
                        root=gather_send_op,
                        clone_id=id,
                        shared_queues_idx=i,
                        parallel_axis=self.parallel_axes,
                        num_clones=len(op.from_id))

                    new_gather_send_nodes.add(new_gather_send_op)

                    new_sends.add(new_gather_send_op)
                    for o in new_sends:
                        self.send_nodes.add(o)

                    replaced_send_ops |= replaced_sends

                op.send_nodes = new_gather_send_nodes

                replaced_send_ops.add(gather_send_op)
                for o in replaced_send_ops:
                    self.send_nodes.remove(o)
Code Example #12
File: base.py Project: stjordanis/ngraph-neon
    def allocate(self):
        """
        Allocate storage and then initializes constants.

        Will finalize if not already done.
        """
        if self.allocated:
            return

        if not self.finalized:
            logging.info("Finalizing transformer.")
            self._transform_computations()

        self.allocate_storage()

        init_states = OrderedSet()
        for op in OrderedSet(self.ops):
            op = op.forwarded
            states = op.states_read | op.states_written
            for state in states:
                state = state.forwarded
                if state.initial_value is not None:
                    init_states.add(state)
        for state in init_states:
            tensor_description = state.tensor.tensor_description()
            self.get_tensor_description_tensor_view(tensor_description)[
                ...] = state.initial_value

        self.allocated = True
Code Example #13
def unit_propogation():
  '''
  Returns:
      {int}: conflict clause
  '''  
  global assignment, propogation_history, implications
  # Keep iterating as long as unit clauses are present
  while True:
    # Collect every unit clause encountered in an OrderedSet.
    # OrderedSet preserves insertion order and supports fast membership tests
    propogation = OrderedSet()
    for clause in [x for x in clauses.union(learnings)]:
      # Evaluate each clause: if satisfied, continue; if unsatisfied, report it
      # as the conflict clause; otherwise add it to the propagation list
      result = evaluate_clause(clause)
      if result == 1:
        continue
      elif result == 0:
        return clause
      elif result == -1:
        propogation.add((get_first_literal(clause),clause))
    # If no unit clause is encountered return None
    if len(propogation) == 0:
      return None
    # Assign each implied variable, update the implication graph, and record
    # the assignment in the current level's history
    for literal,clause in propogation:
      # As this assignment is implied
      implications = implications + 1
      assignment[literal_value(literal)] = literal_sign(literal)
      update_graph(literal_value(literal),clause=clause)
      if current_level != 0:
        propogation_history[current_level].add(literal)
Code Example #14
class OrderedSetQueue(Queue):
    """
        https://stackoverflow.com/questions/16506429/check-if-element-is-already-in-a-queue
    """
    def _init(self, maxsize):
        # print("b" * 200)
        self.cacheLock = Lock()
        self.queue = OrderedSet()
    def _put(self, item):
        with self.cacheLock:
            self.queue.add(item)
    def _get(self):
        with self.cacheLock:
            return self.queue.pop(last=False)
    def __contains__(self, item):
        with self.cacheLock:
            with self.mutex:
                return item in self.queue
    def _qsize(self):
        with self.cacheLock:
            return len(self.queue)
    def size(self):
        return self.qsize()
    def toList(self):
        return queueToList(self, maxsize=self.maxsize)
Code Example #15
class GlutSpyInputs(Thread):
    def __init__(self, app, *args, **kwargs):
        super().__init__(name="SpyInputs", daemon=True)
        self._app = app
        self._key_pressed = OrderedSet()

        glutInit(*args, *kwargs)
        glutInitDisplayMode(GLUT_SINGLE)

        glutInitWindowSize(800, 400)
        glutInitWindowPosition(100, 100)
        glutCreateWindow("SpyInputs")

        glutKeyboardFunc(self.on_key_press)
        glutKeyboardUpFunc(self.on_key_up)

    def on_key_press(self, key, x, y):
        self._key_pressed.add(key)
        self.on_key_update()

    def on_key_up(self, key, x, y):
        self._key_pressed.remove(key)
        self.on_key_update()

    def on_key_update(self):
        print(" | ".join([v.decode() for v in self._key_pressed]))

    def run(self):
        glutMainLoop()
Code Example #16
File: statistics.py Project: hamidgh09/npf
    def buildDataset(cls, all_results: Dataset, testie: Testie) -> List[tuple]:
        dtype = testie.variables.dtype()
        y = OrderedDict()
        dataset = []
        for i, (run, results_types) in enumerate(all_results.items()):
            vars = list(run.variables.values())
            if results_types is not None and len(results_types) > 0:
                dataset.append(vars)
                for result_type, results in results_types.items():
                    r = np.mean(results)
                    y.setdefault(result_type, []).append(r)

        dtype['values'] = [None] * len(dtype['formats'])
        for i, f in enumerate(dtype['formats']):
            if f is str:
                dtype['formats'][i] = int
                values = OrderedSet()
                for row in dataset:
                    values.add(row[i])
                    row[i] = values.index(row[i])
                dtype['values'][i] = list(values)
        X = np.array(dataset, ndmin=2)

        lset = []
        for result_type, v in y.items():
            lset.append((result_type, X, np.array(v), dtype))
        return lset
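The string-to-int conversion above amounts to label encoding: OrderedSet.index hands each distinct string a stable integer in first-seen order. A tiny trace, assuming an OrderedSet with an index method as in the orderedset package:

values = OrderedSet()
encoded = []
for label in ["tcp", "udp", "tcp", "icmp"]:
    values.add(label)                    # duplicates are ignored
    encoded.append(values.index(label))
print(encoded, list(values))             # [0, 1, 0, 2] ['tcp', 'udp', 'icmp']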
Code Example #17
def put_into_pool(
        candidate_pool: OrderedSet,
        candidates: typing.Union[Clause, Procedure, typing.Sequence]) -> None:
    if isinstance(candidates, Clause):
        candidate_pool.add(candidates)
    else:
        candidate_pool |= candidates
Code Example #18
    def _get_persona_pool(self, opt, remove_duplicate=True):
        print("[loading persona pool from convai2 training data]")
        # Get episodes from training dataset
        datapath = make_path(opt, 'train.txt')
        episodes = []
        eps = []
        with open(datapath) as read:
            for line in read:
                msg = str_to_msg(line.rstrip('\n'))
                if msg:
                    # self.num_exs += 1
                    eps.append(msg)
                    if msg.get('episode_done', False):
                        episodes.append(eps)
                        eps = []
        if len(eps) > 0:
            # add last episode
            eps[-1].force_set('episode_done', True)
            episodes.append(eps)

        # Extract personas from episodes
        persona_set = OrderedSet()
        for episode in episodes:
            first_turn = episode[0]
            text = first_turn['text']
            persona, _ = _split_persona_and_context(text)
            persona_set.add(persona)

        # Remove duplicate
        if remove_duplicate:
            train_persona_fname = os.path.join(__PATH__,
                                               'train_persona_map.pkl')
            with open(train_persona_fname, 'rb') as fp:
                _train_personas = pickle.load(fp)
            train_personas = []
            for personas in _train_personas.values():
                longest_idx = 0
                longest_length = -1
                for idx, persona in enumerate(personas):
                    if len(persona) > longest_length:
                        longest_idx = idx
                        longest_length = len(persona)
                selected_persona = map(lambda x: f"your persona: {x}.",
                                       personas[longest_idx])
                selected_persona = '\n'.join(selected_persona)
                train_personas.append(selected_persona)
            persona_set = OrderedSet()
            for train_persona in train_personas:
                persona_set.add(train_persona)

        all_personas = []
        persona_to_idx = {}
        for i, persona in enumerate(persona_set):
            all_personas.append(persona)
            persona_to_idx[persona] = i

        print(f"Total {len(all_personas)} personas in dataset")

        return all_personas, persona_to_idx
Code Example #19
File: main.py Project: usafak/acipdt
def findKeys(ws, rows):
    func_list = OrderedSet()
    for i in range(2, rows):
        if (ws.cell(i, 0)).value:
            func_list.add((ws.cell(i, 0)).value)
        else:
            i += 1
    return func_list
Code Example #20
File: execution.py Project: QiJune/ngraph
 def get_copied_params(graph, input_params):
     placeholders = ExecutorFactory.get_all_placeholders(graph)
     copied_params = OrderedSet()
     for i in input_params:
         for p in placeholders:
             if i.name == p.tensor.name:
                 copied_params.add(p.tensor)
     return tuple(copied_params)
Code Example #21
File: main.py Project: carlniger/acitool
def findKeys(ws, rows):
    func_list = OrderedSet()
    for i in range(2, rows):
        if (ws.cell(i, 0)).value:
            func_list.add((ws.cell(i, 0)).value)
        else:
            i += 1
    return func_list
Code Example #22
File: hetr_utils.py Project: ami-GS/ngraph
def clone_graph(root, clone_id, shared_queues_idx, parallel_axis, num_clones):
    """
    clone graph with serde (serialization)
    input:
    output: new_root of the cloned graph
    """
    # clone nodes with GatherSendOp as root using serde
    ser_cloned_nodes = deserialize_graph(serialize_graph([root]))
    new_root = next((o for o in ser_cloned_nodes if o.uuid == root.uuid), None)

    orig_ops = {op.uuid: op for op in Op.ordered_ops([root])}
    # Prune ops that are not control_deps of new_gather_send_op
    # deserialize includes extra referenced nodes
    cloned_graph = Op.ordered_ops([new_root])

    new_send_nodes = OrderedSet()
    replaced_send_nodes = OrderedSet()

    # update newly cloned op metadata, generate new UUIDs
    for op in cloned_graph:
        op.metadata['transformer'] = op.metadata['device'] + str(clone_id)
        op.metadata['device_id'] = str(clone_id)

        if isinstance(op, (ScatterRecvOp, GatherSendOp)):
            op._shared_queues = orig_ops[op.uuid]._shared_queues
            op.idx = shared_queues_idx
            if isinstance(op, ScatterRecvOp):
                op._send_node = orig_ops[op.uuid].send_node()
        elif isinstance(op, (CPUQueueRecvOp, GPUQueueRecvOp)):
            # Cloning a recv node means we need a broadcast, so simulate one by adding an
            # additional sender with the same input data as the original sender.
            # TODO replace with real broadcast #1398 #1399
            send_op = CPUQueueSendOp(orig_ops[op.uuid].send_node().args[0])
            op._queue = send_op.queue
            op._send_node = send_op
            new_send_nodes.add(send_op)
            replaced_send_nodes.add(orig_ops[op.uuid].send_node())

        if hasattr(op, '_axes') and parallel_axis in op._axes:
            op._axes = calculate_scatter_axes(op.axes, parallel_axis,
                                              num_clones)
            # TODO: Revisit to handle axes updation better. Github Ticket #1355
            if isinstance(op, DotOp):
                if parallel_axis in op.x_out_axes:
                    op.x_out_axes = calculate_scatter_axes(
                        op.x_out_axes, parallel_axis, num_clones)
                elif parallel_axis in op.y_out_axes:
                    op.y_out_axes = calculate_scatter_axes(
                        op.y_out_axes, parallel_axis, num_clones)
                else:
                    raise ValueError(
                        "Missing parallel_axis in Op's x_out_axes or y_out_axes"
                    )
        op.uuid = uuid.uuid4()

    return new_root, new_send_nodes, replaced_send_nodes
Code Example #23
class Poller(object):
    def __init__(self):
        rospy.init_node('poller_node')
        self.rate = rospy.Rate(3)   # 3hz
        self.extractBasestationFromParams()
        self.createCommunicators()
        self.request_list = OrderedSet([])

    def createCommunicators(self):
        self.client = Client(10019)
        self.measurements_publisher = rospy.Publisher('measurements', MeasurementList, queue_size=10)
        self.request_subscriber = rospy.Subscriber("measurements_request", String, self.pushbackRequest)

    def extractBasestationFromParams(self):
        stations = rospy.get_param("/poller_node/basestations")
        self.storeBasestation(stations)

    def storeBasestation(self, stations):
        self.basestations = []
        for station in stations:
            self.basestations.append(Basestation(station[0], float(station[1]), float(station[2])))

    def pushbackRequest(self, msg):
        self.request_list.add(msg.data)

    def measurementsLoop(self):
        while not rospy.is_shutdown():
            while not self.request_list.isEmpty():
                station_address = self.request_list.pop()
                self.serveRequest(station_address)
            self.rate.sleep()

    def pollStation(self, station_address):
        return self.client.pollBasestation(station_address)

    def serveRequest(self, station_address):
        try:
            data = self.pollStation(station_address)
            if containsMeasurements(data):
                self.publishMeasuements(extractJson(data), station_address)
        except socket.error:
            pass

    def publishMeasuements(self, measurs, station):
        msg = MeasurementList()
        for el in measurs:
            msg.data.append(self.generateMeasurement(el))
        msg.basestation = station
        msg.header.stamp = rospy.Time.now()
        self.measurements_publisher.publish(msg)

    def generateMeasurement(self, element):
        tmp = Measurement()
        tmp.tag = element['id_tag'].encode('utf-8')
        tmp.measurement = int(element['rssid'])
        return tmp
Code Example #24
 def test_add_and_discard(self):
     numbers = OrderedSet([1, 2, 3])
     numbers.add(3)
     self.assertEqual(len(numbers), 3)
     numbers.add(4)
     self.assertEqual(len(numbers), 4)
     numbers.discard(4)
     self.assertEqual(len(numbers), 3)
     numbers.discard(4)
     self.assertEqual(len(numbers), 3)
Code Example #25
def getAllMins(infofile):
    r = r"^\d{2}:\d{2},\b"
    mins = OrderedSet()
    f = open(infofile)
    for line in f.readlines():
        search = re.search(r, str(line))
        if search is not None:
            search = search.group()[:-1]
            search = search[:2] + search[3:]
            mins.add(int(search))
    return mins
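For clarity, a quick trace of the parsing above on a hypothetical input line (note the \b requires a word character right after the comma):

import re

line = "12:34,foo"
m = re.search(r"^\d{2}:\d{2},\b", line)   # matches "12:34,"
stamp = m.group()[:-1]                    # "12:34"
print(int(stamp[:2] + stamp[3:]))         # 1234: hour and minute digits concatenated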
Code Example #26
def get_sorted_matrix_labels(label_list, matrix, descending=False, file_path=None, word_pair_limit=None,
                             non_zeros_only=False):
    """
    Method to get list of matrix labels based on sorted cell values.
    While adding the labels, row label is added prior to corresponding column label.

    parameters
    -----------
    :param label_list: list of str
        List of labels
    :param matrix: matrix
    :param descending: boolean, optional
        Boolean to indicate the sorting order of matrix cell values in final nd-array
    :param file_path: str, optional
        .tsv file path to save cell values with row and column labels for analysis purpose
    :param word_pair_limit: int, optional
        Limits the number of sorted cells need to be considered.
    :param non_zeros_only: boolean, optional
        Boolean to indicate the inclusion of non-zero values
    :return: list of str
    """
    sorted_labels = OrderedSet()

    # sort matrix
    if descending:
        sorted = np.argsort(matrix, axis=None)[::-1]
    else:
        sorted = np.argsort(matrix, axis=None)
    rows, cols = np.unravel_index(sorted, matrix.shape)
    matrix_sorted = matrix[rows, cols]

    # save data for analysis purpose
    if file_path:
        # create folder if not exist
        create_folder_if_not_exist(file_path, is_file_path=True)
        result_file = open(file_path, 'a', newline='', encoding='utf-8')
        result_writer = csv.writer(result_file, delimiter='\t')

    i = 0
    for r, c, v in zip(rows, cols, matrix_sorted):
        i = i + 1
        if non_zeros_only and v == 0:
            break

        sorted_labels.add(label_list[r])
        sorted_labels.add(label_list[c])

        if file_path:
            result_writer.writerow([v, label_list[r], label_list[c]])
        if word_pair_limit and i == word_pair_limit:
            break

    if file_path:
        result_file.close()
    return list(sorted_labels)
Code Example #27
 def update_rows(self):
     if not self.cols:
         self.rows = OrderedSet()
         return
     rows = OrderedSet()
     for i in range(len(self.rows)):
         row = Row()
         for col in self.cols:
             row.append(col[i])
         rows.add(row)
     self.rows = rows
Code Example #28
File: comm_nodes.py Project: leonllm/ngraph
 def all_deps(self):
     """
     Returns:
         All dependencies except the dependency on the send nodes
     """
     deps = super(RecvOp, self).all_deps
     remove_deps = OrderedSet()
     for dep in deps:
         if isinstance(dep, SendOp):
             remove_deps.add(dep)
     return deps - remove_deps
Code Example #29
class SetQueue(queue.Queue):
    def _init(self, maxsize):
        self.queue = OrderedSet()

    def _put(self, item):
        self.queue.add(item)

    def _get(self):
        head = self.queue.__getitem__(0)
        self.queue.remove(head)
        return head
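A short usage sketch: because the backing store is an OrderedSet, putting a duplicate item does not grow the queue, while FIFO order of first insertion is preserved. (queue.Queue still counts every put toward unfinished_tasks, so this pattern suits deduplicating work items more than strict task accounting.)

q = SetQueue()
q.put('a')
q.put('b')
q.put('a')               # duplicate: the OrderedSet ignores it
print(q.qsize())         # 2
print(q.get(), q.get())  # a b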
Code Example #30
File: torrents.py Project: seonar22/nyaa-1
def get_default_trackers():
    trackers = OrderedSet()

    # Our main one first
    main_announce_url = app.config.get('MAIN_ANNOUNCE_URL')
    if main_announce_url:
        trackers.add(main_announce_url)

    # and finally our tracker list
    trackers.update(default_trackers())

    return list(trackers)
Code Example #31
class DepthFirst(SchedulingPolicy):
    def __init__(self):
        self.__coroutines = OrderedSet()
        self.__hierarchy = {}

    @property
    def busy(self):
        return bool(self.__coroutines)

    def add(self, coroutine):
        parent = coroutine.parent
        self.__coroutines.add(coroutine)
        self.__hierarchy.setdefault(parent, OrderedSet()).add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)
        children = self.__hierarchy.pop(coroutine, None)
        if children is not None:
            assert len(children) == 0
        self.__hierarchy.get(coroutine.parent).remove(coroutine)

    def freeze(self, coroutine):
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        c = self.__round(self.__hierarchy.get(None, ()))
        assert c is not None
        return (c, )

    def __round(self, coroutines):
        for coroutine in coroutines:
            assert coroutines is not None
            active = coroutine in self.__coroutines
            if active and coroutine.exception:
                return coroutine
            sub = self.__round(self.__hierarchy.get(coroutine, ()))
            if sub is not None:
                return sub
            if active:
                return coroutine

    def dump(self):
        def dump(c, idt=0):
            print('{}{}{}'.format('  ' * idt, c,
                                  ' (frozen)' if c.frozen else ''))
            for child in self.__hierarchy.get(c, []):
                dump(child, idt + 1)

        for root in self.__hierarchy.get(None, []):
            dump(root)
Code Example #32
    def get_domains(self, domain):
        result = OrderedSet()

        req = self.session.get(self.base_url.format(domain),
                               headers=self.headers)

        for entry in req.json():
            name_value = entry['name_value'].strip().lower()

            result.add(name_value)

        return list(result)
Code Example #33
File: fixture.py Project: tobyk100/slash
class Fixture(FixtureBase):
    def __init__(self, store, fixture_func):
        super(Fixture, self).__init__()
        self.fixture_func = fixture_func
        self.info = self.fixture_func.__slash_fixture__
        self.scope = self.info.scope
        self.namespace = Namespace(store, store.get_current_namespace())

    def __repr__(self):
        return '<Function Fixture around {0}>'.format(self.fixture_func)

    def get_value(self, kwargs, active_fixture):
        if self.info.needs_this:
            assert 'this' not in kwargs
            kwargs['this'] = active_fixture
        return self.fixture_func(**kwargs)

    def _resolve(self, store):
        assert self.fixture_kwargs is None

        assert self.parametrization_ids is None
        self.parametrization_ids = OrderedSet()

        kwargs = OrderedDict()
        parametrized = set()

        for name, param in iter_parametrization_fixtures(self.fixture_func):
            store.register_fixture_id(param)
            parametrized.add(name)
            self.parametrization_ids.add(param.info.id)

        for param_name, arg in self.info.required_args.items():
            if param_name in parametrized:
                continue
            try:
                needed_fixture = self.namespace.get_fixture_by_name(
                    get_real_fixture_name_from_argument(arg))

                if needed_fixture.scope < self.scope:
                    raise InvalidFixtureScope(
                        'Fixture {0} is dependent on {1}, which has a smaller scope ({2} > {3})'
                        .format(self.info.name, param_name, self.scope,
                                needed_fixture.scope))

                if needed_fixture is self:
                    raise CyclicFixtureDependency(
                        'Cyclic fixture dependency detected in {0}: {1} depends on itself'
                        .format(self.info.func.__code__.co_filename,
                                self.info.name))
                kwargs[param_name] = needed_fixture.info.id
            except LookupError:
                raise UnknownFixtures(param_name)
        return kwargs
Code Example #34
File: sched.py Project: mefyl/drake
class DepthFirst(SchedulingPolicy):

  def __init__(self):
    self.__coroutines = OrderedSet()
    self.__hierarchy = {}

  @property
  def busy(self):
    return bool(self.__coroutines)

  def add(self, coroutine):
    parent = coroutine.parent
    self.__coroutines.add(coroutine)
    self.__hierarchy.setdefault(parent, OrderedSet()).add(coroutine)

  def remove(self, coroutine):
    self.__coroutines.remove(coroutine)
    children = self.__hierarchy.pop(coroutine, None)
    if children is not None:
      assert len(children) == 0
    self.__hierarchy.get(coroutine.parent).remove(coroutine)

  def freeze(self, coroutine):
    self.__coroutines.remove(coroutine)

  def unfreeze(self, coroutine):
    self.__coroutines.add(coroutine)

  def round(self):
    c = self.__round(self.__hierarchy.get(None, ()))
    assert c is not None
    return (c,)

  def __round(self, coroutines):
    for coroutine in coroutines:
      assert coroutines is not None
      active = coroutine in self.__coroutines
      if active and coroutine.exception:
        return coroutine
      sub = self.__round(self.__hierarchy.get(coroutine, ()))
      if sub is not None:
        return sub
      if active:
        return coroutine

  def dump(self):
    def dump(c, idt = 0):
      print('{}{}{}'.format('  ' * idt, c, ' (frozen)' if c.frozen else ''))
      for child in self.__hierarchy.get(c, []):
        dump(child, idt + 1)
    for root in self.__hierarchy.get(None, []):
      dump(root)
Code Example #35
File: fixture.py Project: Guy-Lev/slash
class Fixture(FixtureBase):

    def __init__(self, store, fixture_func):
        super(Fixture, self).__init__()
        self.fixture_func = fixture_func
        self.info = self.fixture_func.__slash_fixture__
        self.scope = self.info.scope
        self.namespace = Namespace(store, store.get_current_namespace())

    def __repr__(self):
        return '<Function Fixture around {0}>'.format(self.fixture_func)

    def get_value(self, kwargs, active_fixture):
        if self.info.needs_this:
            assert 'this' not in kwargs
            kwargs['this'] = active_fixture
        return self.fixture_func(**kwargs)

    def _resolve(self, store):
        assert self.fixture_kwargs is None

        assert self.parametrization_ids is None
        self.parametrization_ids = OrderedSet()

        kwargs = OrderedDict()
        parametrized = set()

        for name, param in iter_parametrization_fixtures(self.fixture_func):
            store.register_fixture_id(param)
            parametrized.add(name)
            self.parametrization_ids.add(param.info.id)

        for param_name, arg in self.info.required_args.items():
            if param_name in parametrized:
                continue
            try:
                needed_fixture = self.namespace.get_fixture_by_name(get_real_fixture_name_from_argument(arg))

                if needed_fixture.scope < self.scope:
                    raise InvalidFixtureScope('Fixture {0} is dependent on {1}, which has a smaller scope ({2} > {3})'.format(
                        self.info.name, param_name, self.scope, needed_fixture.scope))

                if needed_fixture is self:
                    raise CyclicFixtureDependency('Cyclic fixture dependency detected in {0}: {1} depends on itself'.format(
                        self.info.func.__code__.co_filename,
                        self.info.name))
                kwargs[param_name] = needed_fixture.info.id
            except LookupError:
                raise UnknownFixtures(param_name)
        return kwargs
Code Example #36
    def post(self, request):
        if request.data.get('flag') == 'tag':
            tag_list = request.data.getlist('tag_list[]')
            rules = cache.get("recommend_tag_list")

            recommend_list = OrderedSet()
            for rule, confidence in rules:
                tags, results = rule
                if tags and tag_list:
                    if set(tags) == set(tag_list):
                        for result in results:
                            recommend_list.add(result)
                else:
                    return Response({'recommend_tag_list': list(recommend_list)})

            return Response({'recommend_tag_list': list(recommend_list[:5])})
Code Example #37
File: samprocessor.py Project: bm2-lab/cage
def __Dedupe(items, key=None):
    seen = OrderedSet()
    num_seen = list()
    gn_item = (item for item in items)
    while True:
        try:
            item = next(gn_item)  # the original's gn_item.next() is Python 2-only
        except StopIteration:
            yield (None, num_seen)
            break
        else:
            val = item if key is None else key(item)
            if val not in seen:
                yield (item, None)
                seen.add(val)
                num_seen.append(1)
            else:
                num_seen[seen.index(val)] += 1
Code Example #38
File: webdata.py Project: jaredsethnz/web_scraper
 def filter_by_children(self, *args):
     obj_attrs = []
     data = args[0]
     for d in data:
         names = OrderedSet()
         attrs = {}
         try:
             for dc in d.find_all('div'):
                 name = dc.find('span')
                 value = dc.find('div')
                 if value and name is not None:
                     if name.text not in names:
                         names.add(name.text)
                         attrs[name.text] = value.text
             obj_attrs.append(attrs)
         except AttributeError:
             self.view.display_item('Error filtering data from children.....')
     web_objs = self.sanitise_attributes(obj_attrs)
     return web_objs
Code Example #39
File: pattern.py Project: petervaro/argon
    class UNIQUE_ARRAY(_ObjectHook):

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
        def __init__(self, name, flag, is_required):
            self._name        = name
            self._flag        = flag
            self._values      = OrderedSet()
            self._is_required = is_required

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
        def add_value(self, value):
            self._values.add(value)

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
        def close(self, name, flag):
            if (self._is_required and
                not self._values):
                    raise Pattern.UnfinishedPattern(
                        Pattern.UNIQUE_ARRAY, self._flag,
                        Pattern.EOL() if name is NotImplemented else flag) from None
            return self._values
Code Example #40
File: molecule.py Project: evanfeinberg/InterMol
class Molecule(object):
    """An abstract molecule object.
    """
    __slots__ = ['name', '_atoms']
    def __init__(self, name = None):
        """Initialize the molecule

        Args:
            name (str): name of the molecule
        """
        if name is not None:
            self.name = name
        else:
            # TODO Fix the naming resolution
            self.name = "Untitled"
        self._atoms = OrderedSet()

    def addAtom(self, atom):
        """Add and atom

        Args:
            atom (atom): the atom to add into the molecule
        """
        self._atoms.add(atom)

    def removeAtom(self, atom):
        """Remove Atom

        Args:
            atom (atom): the atom to remove from the molecule
        """
        self._atoms.remove(atom)

    def getAtoms(self):
        """Return an orderedset of atoms
        """
        return self._atoms

    def __repr__(self):
        return self.name
Code Example #41
def merge_ordered_set(diff, base, mine, theirs):
    mine_merged = mine - diff.deleted
    theirs_merged = theirs - diff.deleted
    mine_added = mine_merged - theirs_merged
    merged = OrderedSet()

    theirs_merged_iter = iter(theirs_merged)
    mine_merged_iter = iter(mine_merged)

    for theirs_el in theirs_merged_iter:
        for mine_el in mine_merged_iter:
            if mine_el in mine_added:
                merged.add(mine_el)
            else:
                break
        merged.add(theirs_el)

    for mine_el in mine_merged_iter:
        merged.add(mine_el)

    return merged
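A worked example may help. The diff argument is hedged here as any object with a deleted attribute holding the removed elements (base is accepted but unused by the body above); the result interleaves my local additions ahead of the shared and incoming elements:

from collections import namedtuple

Diff = namedtuple('Diff', ['deleted'])   # hypothetical shape; only .deleted is used

base   = OrderedSet([1, 2, 3])
mine   = OrderedSet([1, 2, 4, 3])        # locally added 4
theirs = OrderedSet([1, 3, 5])           # deleted 2, added 5
diff   = Diff(deleted={2})

print(list(merge_ordered_set(diff, base, mine, theirs)))   # [1, 4, 3, 5]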
Code Example #42
File: manager.py Project: gooddata/sgmanager
    def update_remote_groups(self, dry_run=True, threshold=None, remove=True):
        '''Update remote configuration with the local one.'''
        # Copy those so that we can modify them even with dry-run
        local = OrderedSet(self.local)
        remote = OrderedSet(self.remote)

        validate_groups(local)

        def parse_groups(groups, remote):
            if remote:
                self._process_remote_groups(groups)
            groups = {group.name: group for group in groups if group.name != 'default'}
            keys = OrderedSet(groups.keys())
            return groups, keys

        lgroups, lkeys = parse_groups(local, False)
        rgroups, rkeys = parse_groups(remote, True)

        changes = 0
        unchanged = 0
        groups_added = OrderedSet()
        groups_updated = OrderedSet()
        groups_removed = OrderedSet()
        rules_added = OrderedSet()
        rules_removed = OrderedSet()

        # Added groups
        for group in (lgroups[name] for name in lkeys - rkeys):
            grp = Group(group.name, group.description)
            groups_added.add(grp)
            rgroups[group.name] = grp
            rkeys.add(grp.name)
            changes += 1

        # Changed groups
        for rgroup, lgroup in ((rgroups[name], lgroups[name])
                               for name in rkeys & lkeys):
            if rgroup not in groups_added:
                unchanged += 1

            if rgroup.description != lgroup.description:
                # XXX: https://review.openstack.org/596609
                # groups_updated.add((rgroup, lgroup))
                pass

            # FIXME: when comparing using OrderedSet, added rules part contains
            #        all elements rather than different ones.
            lrules, rrules = set(lgroup.rules), set(rgroup.rules)

            if rrules != lrules:
                # Added rules
                for rule in lrules - rrules:
                    rules_added.add((rgroup.name, rule))
                    changes += 1

                # Removed rules
                for rule in rrules - lrules:
                    if remove:
                        rules_removed.add((rgroup.name, rule))
                        changes += 1
                    else:
                        unchanged += 1
            unchanged += len(rrules & lrules)

        # Removed groups
        for group in (rgroups[name] for name in rkeys - lkeys):
            if remove:
                if group._project is None:
                    continue
                groups_removed.add(group)
                changes += len(group.rules) + 1
            else:
                unchanged += len(group.rules) + 1

        if changes == 0 and not groups_updated:
            return

        # Report result
        logger.info(f'{changes:d} changes to be made:')
        for group in groups_added:
            logger.info(f'  - Create group {group.name!r}')
        for rgroup, lgroup in groups_updated:
            logger.info(f'  - Update description for {rgroup.name!r}:'
                        f' {rgroup.description!r} → {lgroup.description!r}')
        for group_name, rule in rules_added:
            logger.info(f'  - Create {rule!r} in group {group_name!r}')
        for group_name, rule in rules_removed:
            logger.info(f'  - Remove {rule!r} from group {group_name!r}')
        for group in groups_removed:
            logger.info(f'  - Remove group {group.name!r} with {len(group.rules)} rules')

        if threshold is not None:
            changes_percentage = changes / (unchanged + changes) * 100
            if changes_percentage > threshold:
                raise ThresholdException(f'Amount of changes is {changes_percentage:f}%'
                                         f' which is more than allowed ({threshold:f}%)')

        if dry_run:
            return

        # We've modified 'remote', so copy it again
        remote = OrderedSet(self.remote)
        rgroups, rkeys = parse_groups(remote, True)

        # Added groups
        for group in groups_added:
            ginfo = self.connection.create_security_group(
                name=group.name,
                description=group.description)
            remote.add(Group.from_remote(**ginfo))
        rgroups, rkeys = parse_groups(remote, True)

        # Updated groups
        for rgroup, lgroup in groups_updated:
            self.connection.update_security_group(
                name_or_id=rgroup._id,
                description=lgroup.description)
            # Updating group should not change its ID
            rgroup.description = lgroup.description

        # Added rules
        for group_name, rule in rules_added:
            rgroup = rgroups[group_name]
            cidr = str(rule.cidr) if rule.cidr is not None else None
            group_id = rgroups[rule.group]._id if rule.group is not None else None
            rinfo = self.connection.create_security_group_rule(
                secgroup_name_or_id=rgroup._id,
                port_range_min=rule.port_min,
                port_range_max=rule.port_max,
                protocol=rule.protocol.value,
                remote_ip_prefix=cidr,
                remote_group_id=group_id,
                direction=rule.direction.value,
                ethertype=rule.ethertype.value)
            rgroup.rules.add(Rule.from_remote(**rinfo))

        if remove:
            # Removed rules
            for group_name, rule in rules_removed:
                rgroup = rgroups[group_name]
                self.connection.delete_security_group_rule(
                    rule_id=rule._id)
                rgroup.rules.remove(rule)

            # Removed groups
            for group in groups_removed:
                self.connection.delete_security_group(
                    name_or_id=group._id)
                remote.remove(group)

        self.remote = remote
Code Example #43
class Propagator:
    MAX_REQUESTED_KEYS_TO_KEEP = 1000

    def __init__(self, metrics: MetricsCollector = NullMetricsCollector()):
        self.requests = Requests()
        self.requested_propagates_for = OrderedSet()
        self.metrics = metrics

    # noinspection PyUnresolvedReferences
    def propagate(self, request: Request, clientName):
        """
        Broadcast a PROPAGATE to all other nodes

        :param request: the REQUEST to propagate
        """
        if self.requests.has_propagated(request, self.name):
            logger.trace("{} already propagated {}".format(self, request))
        else:
            with self.metrics.measure_time(MetricsName.SEND_PROPAGATE_TIME):
                self.requests.add_propagate(request, self.name)
                propagate = self.createPropagate(request, clientName)
                logger.debug("{} propagating request {} from client {}".format(self, request.key, clientName),
                             extra={"cli": True, "tags": ["node-propagate"]})
                self.send(propagate)

    @staticmethod
    def createPropagate(
            request: Union[Request, dict], client_name) -> Propagate:
        """
        Create a new PROPAGATE for the given REQUEST.

        :param request: the client REQUEST
        :return: a new PROPAGATE msg
        """
        if not isinstance(request, (Request, dict)):
            logger.error("{}Request not formatted properly to create propagate"
                         .format(THREE_PC_PREFIX))
            return
        logger.trace("Creating PROPAGATE for REQUEST {}".format(request))
        request = request.as_dict if isinstance(request, Request) else \
            request
        if isinstance(client_name, bytes):
            client_name = client_name.decode()
        return Propagate(request, client_name)

    # noinspection PyUnresolvedReferences
    def canForward(self, request: Request):
        """
        Determine whether to forward client REQUESTs to replicas, based on the
        following logic:

        - If exactly f+1 PROPAGATE requests are received, then forward.
        - If less than f+1 of requests then probably there's no consensus on the
            REQUEST, don't forward.
        - If more than f+1 then already forwarded to replicas, don't forward

        Even if the node hasn't received the client REQUEST itself, if it has
        received enough number of PROPAGATE messages for the same, the REQUEST
        can be forwarded.

        :param request: the client REQUEST
        """

        if self.requests.forwarded(request):
            return 'already forwarded'

        # If not enough Propagates, don't bother comparing
        if not self.quorums.propagate.is_reached(self.requests.votes(request)):
            return 'not finalised'

        req = self.requests.req_with_acceptable_quorum(request,
                                                       self.quorums.propagate)
        if req:
            self.requests.set_finalised(req)
            return None
        else:
            return 'not finalised'

    # noinspection PyUnresolvedReferences
    def forward(self, request: Request):
        """
        Forward the specified client REQUEST to the other replicas on this node

        :param request: the REQUEST to propagate
        """
        key = request.key
        num_replicas = self.replicas.num_replicas
        logger.debug('{} forwarding request {} to {} replicas'
                     .format(self, key, num_replicas))
        self.replicas.pass_message(ReqKey(key))
        self.monitor.requestUnOrdered(key)
        self.requests.mark_as_forwarded(request, num_replicas)

    # noinspection PyUnresolvedReferences
    def recordAndPropagate(self, request: Request, clientName):
        """
        Record the request in the list of requests and propagate.

        :param request:
        :param clientName:
        """
        self.requests.add(request)
        self.propagate(request, clientName)
        self.tryForwarding(request)

    def tryForwarding(self, request: Request):
        """
        Try to forward the request if the required conditions are met.
        See the method `canForward` for the conditions to check before
        forwarding a request.
        """
        cannot_reason_msg = self.canForward(request)
        if cannot_reason_msg is None:
            # If haven't got the client request(REQUEST) for the corresponding
            # propagate request(PROPAGATE) but have enough propagate requests
            # to move ahead
            self.forward(request)
        else:
            logger.trace("{} not forwarding request {} to its replicas "
                         "since {}".format(self, request, cannot_reason_msg))

    def request_propagates(self, req_keys):
        """
        Request PROPAGATEs for the given request keys. Since replicas can
        request PROPAGATEs independently of each other, check if it has
        been requested recently
        :param req_keys:
        :return:
        """
        i = 0
        for digest in req_keys:
            if digest not in self.requested_propagates_for:
                self.request_msg(PROPAGATE, {f.DIGEST.nm: digest})
                self._add_to_recently_requested(digest)
                i += 1
            else:
                logger.debug('{} already requested PROPAGATE recently for {}'.
                             format(self, digest))
        return i

    def _add_to_recently_requested(self, key):
        while len(
                self.requested_propagates_for) > self.MAX_REQUESTED_KEYS_TO_KEEP:
            self.requested_propagates_for.pop(last=False)
        self.requested_propagates_for.add(key)
Code Example #44
File: graph.py Project: carvey/PageRank
class Graph:

    def __init__(self, name):
        self.name = name
        self.node_set = OrderedSet()
        self.edge_set = OrderedSet()

        self.h_matrix = None
        self.s_matrix = None
        self.g_matrix = None
        self.pi_vector = None

    def create_graph_from_file(self, file):
        for line in file.read().splitlines():

            if "NodeName" in line:
                node_name = line.split(" = ")[1]
                node = Node(node_name)

                self.node_set.add(node)

            if "EdgeName" in line:
                nodes = line.split(" = ")[1].split("->")
                starting_node_name = nodes[0]
                target_node_name = nodes[1]

                starting_node = self.find_node(starting_node_name)
                target_node = self.find_node(target_node_name)

                if starting_node is None or target_node is None:
                    raise RuntimeError

                starting_node.add_edge(target_node)

    def find_node(self, name):
        """
        Since sets do not support getting an item out, loop over and compare each node
        for equality in order to find the desired node

        :param name: the name of the node to search for
        :return: the node with `name` if it is present in the graphs node_set
        """
        target_node = Node(name)

        if target_node in self.node_set:
            for node in self.node_set:
                if node == target_node:
                    return node

        return None

    def create_h_matrix(self, df=None, store=True):
        """
        Adjacency matrix
        :return:
        """
        # set up default matrix (n x n with all 0 values)
        if df is None:
            df = self.h_matrix

        df = self.dataframe_nxn()

        for node in self.node_set:
            adjacency_map = node.compute_adjacency()

            for adjacent_node, probability in adjacency_map.items():
                df.set_value(node, adjacent_node, probability)

        if store:
            self.h_matrix = df

        return df


    def create_s_matrix(self, df=None, store=True):
        """
        Transition Stochastic Matrix
        :return:
        """
        if df is None:
            df = self.create_h_matrix(store=False)

        def correct_dangling(row):
            if row.sum() == 0:
                return [1/len(row) for node in self.node_set]

            else:
                return row

        df = df.apply(correct_dangling, axis=1, reduce=False)


        if store:
            self.s_matrix = df

        return df


    def create_g_matrix(self, df=None, store=False):
        """
        Google Matrix
        :return:
        """
        scaling_param = .9
        if df is None:
            df = self.create_s_matrix(store=False)

        df = np.dot(scaling_param, df)
        df += np.dot((1-scaling_param), 1/len(self.node_set))

        return df

    def compute_page_rank(self, df=None):
        if df is None:
            df = self.create_g_matrix()

        vector = [1/len(self.node_set) for x in self.node_set]

        for node in self.node_set:
            vector = np.dot(vector, df)

        vector = vector.round(3)
        page_map = {}
        for i, node in enumerate(self.node_set):
            page_map[node] = vector[i]

        return page_map


    def describe_graph(self):
        print("Details for graph: %s" % self.name)
        print("++++ Nodes in the graph")
        for node in self.node_set:
            print("NodeName = %s" % node.name)

        print("++++ Edges in the Graph")
        for node in self.node_set:
            node.describe()

        print("\n\nAdjency Matrix:\n")
        print(self.create_h_matrix().round(3))

        print("\n\nTransition Stochastic Matrix:\n")
        print(self.create_s_matrix().round(3))

        print("\n\nGoogle Matrix:\n")
        print(self.create_g_matrix().round(3))

        print("\n\nRankings:\n")

        data = self.compute_page_rank()

        sorted_nodes = sorted(data, key=data.get, reverse=True)
        for i, node in enumerate(sorted_nodes):
            print("Rank #%s: %s - %s" % (i+1, node, data[node]))


    def dataframe_nxn(self):
        structure = OrderedDict()
        blank_data = [0.0 for node in self.node_set]

        for node in self.node_set:
            structure[node] = pd.Series(blank_data, index=self.node_set)

        return pd.DataFrame(structure)
Code Example #45
File: solve_buzz.py Project: buzzgeek/RepoBuzz
bestslices = []

### read input

f = open(ifile, "r")

info = [int(v) for v in f.readline().split(" ")]
rows = info[0]
cols = info[1]
mintps = info[2]
maxtiles = info[3]

set_indicies = OrderedSet()
for r in range(rows):
    for c in range(cols):
        set_indicies.add('{0:04d} {1:04d}'.format(c, r))

print(set_indicies)

shapes = list()
if not args.only_lines:
    for l in range(2 * mintps, maxtiles+1):
        for i in range(1, l):
            if l % i == 0:
                shapes.append((i, int(l/i)))
                shapes.append((int(l/i), i))
else:
    for l in range(2 * mintps, maxtiles+1):
        shapes.append((1, l))
        shapes.append((l, 1))
Code Example #46
File: moleculetype.py Project: evanfeinberg/InterMol
class MoleculeType(object):
    """An abstract container for molecules of one type
    """
    def __init__(self, name):
        """Initialize the MoleculeType container

        Args:
            name (str): the name of the moleculetype to add
        """
        self.name = name
        self.moleculeSet = OrderedSet()
        self.bondForceSet = HashMap()
        self.pairForceSet = HashMap()
        self.angleForceSet = HashMap()
        self.dihedralForceSet = HashMap()
        self.torsiontorsionForceSet = HashMap()
        self.constraints = HashMap()
        self.exclusions = HashMap()
        self.settles = None
        self.nrexcl = None

    def add_molecule(self, molecule):
        """Add a molecule into the moleculetype container

        Args:
            molecule (Molecule): the molecule to append
        """
        self.moleculeSet.add(molecule)

    def remove_molecule(self, molecule):
        """Remove a molecule from the system.

        Args:
            molecule (Molecule): remove a molecule from the moleculeType
        """
        self.moleculeSet.remove(molecule)

    def getMolecule(self, molecule):
        """Get a molecule from the system

        Args:
            molecule (Molecule): retrieve an equivalent molecule from the moleculetype
        """
        return get_equivalent(self.moleculeSet, molecule, False)

    def addForce(self, force):
        """Add a force to the moleculeType

        Args:
            force (AbstractForce): the force or constraint to add
        """
        # Note: `forceSet` is not initialized in __init__ above; it is assumed
        # to be provided elsewhere on the instance.
        self.forceSet.add(force)

    def removeForce(self, force):
        """Remove a force from the moleculeType

        Args:
            force (AbstractForce): the force to remove
        """
        self.forceSet.remove(force)

    def getForce(self, force):
        """Get a force from the moleculeType

        Args:
            force (AbstractForce): retrieve an equivalent force from the moleculeType
        """
        return get_equivalent(self.forceSet, force, False)

    def setNrexcl(self, nrexcl):
        """Set the nrexcl

        Args:
            nrexcl (int): the value for nrexcl
        """
        self.nrexcl = nrexcl

    def getNrexcl(self):
        """Gets the nrexcl
        """
        return self.nrexcl
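
A minimal usage sketch under stated assumptions: `Molecule` below is a hypothetical stand-in (the real class ships with InterMol), and `MoleculeType` is assumed importable together with the project's `HashMap` helper:

class Molecule(object):  # hypothetical stand-in for InterMol's Molecule
    def __init__(self, name):
        self.name = name

mt = MoleculeType('SOL')
mt.setNrexcl(3)

mt.add_molecule(Molecule('water0'))
mt.add_molecule(Molecule('water1'))

# moleculeSet is an OrderedSet, so iteration follows insertion order.
for mol in mt.moleculeSet:
    print(mol.name)   # water0, then water1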
Code example #47
File: video_.py Project: deKross/web.component.page
# encoding: cinje

: from orderedset import OrderedSet
: from marrow.util.url import QueryString

: def render_video_block block
: """Render a YouTube video embed."""

: classes = OrderedSet(block.properties.get('cls', '').split() + ['block', 'video'])

: if 'width' in block.properties
	: classes.add('col-md-' + str(block.properties.width))
: end

<div&{id=block.properties.get('id', block.id), class_=classes}>
	<iframe width="560" height="315" src="https://www.youtube.com/embed/${block.video}" frameborder="0" allowfullscreen></iframe>
</div>
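
The OrderedSet here de-duplicates CSS classes while keeping a stable order. A plain-Python sketch of that behavior (property values hypothetical):

from orderedset import OrderedSet

props_cls = 'video featured video'   # hypothetical block.properties['cls']
classes = OrderedSet(props_cls.split() + ['block', 'video'])
classes.add('col-md-6')              # as when 'width' is present

print(' '.join(classes))             # video featured block col-md-6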

Code example #48
File: replica.py Project: evernym/plenum
class Replica(HasActionQueue, MessageProcessor):
    def __init__(self, node: 'plenum.server.node.Node', instId: int,
                 isMaster: bool = False):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        super().__init__()
        self.stats = Stats(TPCStat)

        self.config = getConfig()

        routerArgs = [(ReqDigest, self._preProcessReqDigest)]

        for r in [PrePrepare, Prepare, Commit]:
            routerArgs.append((r, self.processThreePhaseMsg))

        routerArgs.append((Checkpoint, self.processCheckpoint))
        routerArgs.append((ThreePCState, self.process3PhaseState))

        self.inBoxRouter = Router(*routerArgs)

        self.threePhaseRouter = Router(
                (PrePrepare, self.processPrePrepare),
                (Prepare, self.processPrepare),
                (Commit, self.processCommit)
        )

        self.node = node
        self.instId = instId

        self.name = self.generateName(node.name, self.instId)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self.isMaster = isMaster

        # Indicates name of the primary replica of this protocol instance.
        # None in case the replica does not know who the primary of the
        # instance is
        self._primaryName = None    # type: Optional[str]

        # Requests waiting to be processed once the replica is able to decide
        # whether it is primary or not
        self.postElectionMsgs = deque()

        # PRE-PREPAREs that are waiting to be processed but do not have the
        # corresponding request digest. Happens when replica has not been
        # forwarded the request by the node but is getting 3 phase messages.
        # The value is a list since a malicious entity might send a PRE-PREPARE
        # with a different digest, and since we don't have the request finalised,
        # we store all PRE-PREPAREs
        self.prePreparesPendingReqDigest = {}   # type: Dict[Tuple[str, int], List]

        # PREPAREs that are stored by non primary replica for which it has not
        #  got any PRE-PREPARE. Dictionary that stores a tuple of view no and
        #  prepare sequence number as key and a deque of PREPAREs as value.
        # This deque is attempted to be flushed on receiving every
        # PRE-PREPARE request.
        self.preparesWaitingForPrePrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # COMMITs that are stored for which there are no PRE-PREPARE or PREPARE
        # received
        self.commitsWaitingForPrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # Dictionary of sent PRE-PREPARE that are stored by primary replica
        # which it has broadcasted to all other non primary replicas
        # Key of dictionary is a 2 element tuple with elements viewNo,
        # pre-prepare seqNo and value is a tuple of Request Digest and time
        self.sentPrePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
        # element tuple with elements viewNo, pre-prepare seqNo and value is
        # a tuple of Request Digest and time
        self.prePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received Prepare requests. Key of dictionary is a 2
        # element tuple with elements viewNo, seqNo and value is a 2 element
        # tuple containing request digest and set of sender node names(sender
        # replica names in case of multiple protocol instances)
        # (viewNo, seqNo) -> ((identifier, reqId), {senders})
        self.prepares = Prepares()
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]

        self.commits = Commits()    # type: Dict[Tuple[int, int],
        # Tuple[Tuple[str, int], Set[str]]]

        # Set of tuples to keep track of ordered requests. Each tuple is
        # (viewNo, ppSeqNo)
        self.ordered = OrderedSet()        # type: OrderedSet[Tuple[int, int]]

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = {}  # type: Dict[int, str]

        # Holds msgs that are for later views
        self.threePhaseMsgsForLaterView = deque()
        # type: deque[(ThreePhaseMsg, str)]

        # Holds tuple of view no and prepare seq no of 3-phase messages it
        # received while it was not participating
        self.stashingWhileCatchingUp = set()       # type: Set[Tuple]

        # Commits which are not being ordered since commits with lower view
        # numbers and sequence numbers have not been ordered yet. Key is the
        # viewNo and value a map of pre-prepare sequence number to commit
        self.stashedCommitsForOrdering = {}         # type: Dict[int,
        # Dict[int, Commit]]

        self.checkpoints = SortedDict(lambda k: k[0])

        self.stashingWhileOutsideWaterMarks = deque()

        # Low water mark
        self._h = 0              # type: int

        # High water mark
        self.H = self._h + self.config.LOG_SIZE   # type: int

        self.lastPrePrepareSeqNo = self.h  # type: int

    @property
    def h(self) -> int:
        return self._h

    @h.setter
    def h(self, n):
        self._h = n
        self.H = self._h + self.config.LOG_SIZE

    @property
    def requests(self):
        return self.node.requests

    def shouldParticipate(self, viewNo: int, ppSeqNo: int):
        # The replica should participate only if the node is participating in
        # consensus and none of this request's 3-phase messages were stashed
        # while catching up
        return self.node.isParticipating and (viewNo, ppSeqNo) \
                                             not in self.stashingWhileCatchingUp

    @staticmethod
    def generateName(nodeName: str, instId: int):
        """
        Create and return the name for a replica using its nodeName and
        instanceId.
         Ex: Alpha:1
        """
        return "{}:{}".format(nodeName, instId)

    @staticmethod
    def getNodeName(replicaName: str):
        return replicaName.split(":")[0]

    @property
    def isPrimary(self):
        """
        Is this node primary?

        :return: True if this node is primary, False otherwise
        """
        return self._primaryName == self.name if self._primaryName is not None \
            else None

    @property
    def primaryName(self):
        """
        Name of the primary replica of this replica's instance

        :return: Returns name if primary is known, None otherwise
        """
        return self._primaryName

    @primaryName.setter
    def primaryName(self, value: Optional[str]) -> None:
        """
        Set the value of isPrimary.

        :param value: the value to set isPrimary to
        """
        if not value == self._primaryName:
            self._primaryName = value
            self.primaryNames[self.viewNo] = value
            logger.debug("{} setting primaryName for view no {} to: {}".
                         format(self, self.viewNo, value))
            logger.debug("{}'s primaryNames for views are: {}".
                         format(self, self.primaryNames))
            self._stateChanged()

    def _stateChanged(self):
        """
        A series of actions to be performed when the state of this replica
        changes.

        - UnstashInBox (see _unstashInBox)
        """
        self._unstashInBox()
        if self.isPrimary is not None:
            # TODO handle suspicion exceptions here
            self.process3PhaseReqsQueue()
            # TODO handle suspicion exceptions here
            try:
                self.processPostElectionMsgs()
            except SuspiciousNode as ex:
                self.outBox.append(ex)
                self.discard(ex.msg, ex.reason, logger.warning)

    def _stashInBox(self, msg):
        """
        Stash the specified message into the inBoxStash of this replica.

        :param msg: the message to stash
        """
        self.inBoxStash.append(msg)

    def _unstashInBox(self):
        """
        Append the inBoxStash to the right of the inBox.
        """
        self.inBox.extend(self.inBoxStash)
        self.inBoxStash.clear()

    def __repr__(self):
        return self.name

    @property
    def f(self) -> int:
        """
        Return the number of Byzantine Failures that can be tolerated by this
        system. Equal to (N - 1)/3, where N is the number of nodes in the
        system.
        """
        return self.node.f

    @property
    def viewNo(self):
        """
        Return the current view number of this replica.
        """
        return self.node.viewNo

    def isPrimaryInView(self, viewNo: int) -> Optional[bool]:
        """
        Return whether a primary has been selected for this view number.
        """
        return self.primaryNames[viewNo] == self.name

    def isMsgForLaterView(self, msg):
        """
        Return whether this request's view number is greater than the current
        view number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo > self.viewNo

    def isMsgForCurrentView(self, msg):
        """
        Return whether this request's view number is equal to the current view
        number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo == self.viewNo

    def isMsgForPrevView(self, msg):
        """
        Return whether this request's view number is less than the current view
        number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo < self.viewNo

    def isPrimaryForMsg(self, msg) -> Optional[bool]:
        """
        Return whether this replica is primary if the request's view number is
        equal this replica's view number and primary has been selected for
        the current view.
        Return None otherwise.

        :param msg: message
        """
        if self.isMsgForLaterView(msg):
            self.discard(msg,
                         "Cannot get primary status for a request for a later "
                         "view {}. Request is {}".format(self.viewNo, msg),
                         logger.error)
        else:
            return self.isPrimary if self.isMsgForCurrentView(msg) \
                else self.isPrimaryInView(msg.viewNo)

    def isMsgFromPrimary(self, msg, sender: str) -> bool:
        """
        Return whether this message was from primary replica
        :param msg:
        :param sender:
        :return:
        """
        if self.isMsgForLaterView(msg):
            logger.error("{} cannot get primary for a request for a later "
                         "view. Request is {}".format(self, msg))
        else:
            return self.primaryName == sender if self.isMsgForCurrentView(
                msg) else self.primaryNames[msg.viewNo] == sender

    def _preProcessReqDigest(self, rd: ReqDigest) -> None:
        """
        Process request digest if this replica is not a primary, otherwise stash
        the message into the inBox.

        :param rd: the client Request Digest
        """
        if self.isPrimary is not None:
            self.processReqDigest(rd)
        else:
            logger.debug("{} stashing request digest {} since it does not know "
                         "its primary status".
                         format(self, (rd.identifier, rd.reqId)))
            self._stashInBox(rd)

    def serviceQueues(self, limit=None):
        """
        Process `limit` number of messages in the inBox.

        :param limit: the maximum number of messages to process
        :return: the number of messages successfully processed
        """
        # TODO should handle SuspiciousNode here
        r = self.inBoxRouter.handleAllSync(self.inBox, limit)
        r += self._serviceActions()
        return r
        # Messages that cannot be processed right now need to be added back to
        # the queue; they might be processable later

    def processPostElectionMsgs(self):
        """
        Process messages waiting for the election of a primary replica to
        complete.
        """
        while self.postElectionMsgs:
            msg = self.postElectionMsgs.popleft()
            logger.debug("{} processing pended msg {}".format(self, msg))
            self.dispatchThreePhaseMsg(*msg)

    def process3PhaseReqsQueue(self):
        """
        Process the 3 phase requests from the queue whose view number is equal
        to the current view number of this replica.
        """
        unprocessed = deque()
        while self.threePhaseMsgsForLaterView:
            request, sender = self.threePhaseMsgsForLaterView.popleft()
            logger.debug("{} processing pended 3 phase request: {}"
                         .format(self, request))
            # If the request is for a later view, don't try to process it;
            # add it back to the queue.
            if self.isMsgForLaterView(request):
                unprocessed.append((request, sender))
            else:
                self.processThreePhaseMsg(request, sender)
        self.threePhaseMsgsForLaterView = unprocessed

    @property
    def quorum(self) -> int:
        r"""
        Return the quorum of this RBFT system. Equal to :math:`2f + 1`.
        Return None if `f` is not yet determined.
        """
        return self.node.quorum

    def dispatchThreePhaseMsg(self, msg: ThreePhaseMsg, sender: str) -> Any:
        """
        Create a three phase request to be handled by the threePhaseRouter.

        :param msg: the ThreePhaseMsg to dispatch
        :param sender: the name of the node that sent this request
        """
        senderRep = self.generateName(sender, self.instId)
        if self.isPpSeqNoAcceptable(msg.ppSeqNo):
            try:
                self.threePhaseRouter.handleSync((msg, senderRep))
            except SuspiciousNode as ex:
                self.node.reportSuspiciousNodeEx(ex)
        else:
            logger.debug("{} stashing 3 phase message {} since ppSeqNo {} is "
                         "not between {} and {}".
                         format(self, msg, msg.ppSeqNo, self.h, self.H))
            self.stashingWhileOutsideWaterMarks.append((msg, sender))

    def processReqDigest(self, rd: ReqDigest):
        """
        Process a request digest. Works only if this replica has decided its
        primary status.

        :param rd: the client request digest to process
        """
        self.stats.inc(TPCStat.ReqDigestRcvd)
        if self.isPrimary is False:
            self.dequeuePrePrepare(rd.identifier, rd.reqId)
        else:
            self.doPrePrepare(rd)

    def processThreePhaseMsg(self, msg: ThreePhaseMsg, sender: str):
        """
        Process a 3-phase (pre-prepare, prepare and commit) request.
        Dispatch the request only if primary has already been decided, otherwise
        stash it.

        :param msg: the Three Phase message, one of PRE-PREPARE, PREPARE,
            COMMIT
        :param sender: name of the node that sent this message
        """
        # Can only proceed further if it knows whether it's primary or not
        if self.isMsgForLaterView(msg):
            self.threePhaseMsgsForLaterView.append((msg, sender))
            logger.debug("{} pended received 3 phase request for a later view: "
                         "{}".format(self, msg))
        else:
            if self.isPrimary is None:
                self.postElectionMsgs.append((msg, sender))
                logger.debug("Replica {} pended request {} from {}".
                             format(self, msg, sender))
            else:
                self.dispatchThreePhaseMsg(msg, sender)

    def processPrePrepare(self, pp: PrePrepare, sender: str):
        """
        Validate and process the PRE-PREPARE specified.
        If validation is successful, create a PREPARE and broadcast it.

        :param pp: a prePrepareRequest
        :param sender: name of the node that sent this message
        """
        key = (pp.viewNo, pp.ppSeqNo)
        logger.debug("{} Receiving PRE-PREPARE{} at {} from {}".
                     format(self, key, time.perf_counter(), sender))
        if self.canProcessPrePrepare(pp, sender):
            if not self.node.isParticipating:
                self.stashingWhileCatchingUp.add(key)
            self.addToPrePrepares(pp)
            logger.info("{} processed incoming PRE-PREPARE{}".
                        format(self, key))

    def tryPrepare(self, pp: PrePrepare):
        """
        Try to send the Prepare message if the PrePrepare message is ready to
        be passed into the Prepare phase.
        """
        if self.canSendPrepare(pp):
            self.doPrepare(pp)
        else:
            logger.debug("{} cannot send PREPARE".format(self))

    def processPrepare(self, prepare: Prepare, sender: str) -> None:
        """
        Validate and process the PREPARE specified.
        If validation is successful, create a COMMIT and broadcast it.

        :param prepare: a PREPARE msg
        :param sender: name of the node that sent the PREPARE
        """
        # TODO move this try/except up higher
        logger.debug("{} received PREPARE{} from {}".
                     format(self, (prepare.viewNo, prepare.ppSeqNo), sender))
        try:
            if self.isValidPrepare(prepare, sender):
                self.addToPrepares(prepare, sender)
                self.stats.inc(TPCStat.PrepareRcvd)
                logger.debug("{} processed incoming PREPARE {}".
                             format(self, (prepare.viewNo, prepare.ppSeqNo)))
            else:
                # TODO let's have isValidPrepare throw an exception that gets
                # handled and possibly logged higher
                logger.warning("{} cannot process incoming PREPARE".
                               format(self))
        except SuspiciousNode as ex:
            self.node.reportSuspiciousNodeEx(ex)

    def processCommit(self, commit: Commit, sender: str) -> None:
        """
        Validate and process the COMMIT specified.
        If validation is successful, return the message to the node.

        :param commit: an incoming COMMIT message
        :param sender: name of the node that sent the COMMIT
        """
        logger.debug("{} received COMMIT {} from {}".
                     format(self, commit, sender))
        if self.isValidCommit(commit, sender):
            self.stats.inc(TPCStat.CommitRcvd)
            self.addToCommits(commit, sender)
            logger.debug("{} processed incoming COMMIT{}".
                         format(self, (commit.viewNo, commit.ppSeqNo)))

    def tryCommit(self, prepare: Prepare):
        """
        Try to commit if the Prepare message is ready to be passed into the
        commit phase.
        """
        if self.canCommit(prepare):
            self.doCommit(prepare)
        else:
            logger.debug("{} not yet able to send COMMIT".format(self))

    def tryOrder(self, commit: Commit):
        """
        Try to order if the Commit message is ready to be ordered.
        """
        canOrder, reason = self.canOrder(commit)
        if canOrder:
            logger.debug("{} returning request to node".format(self))
            self.tryOrdering(commit)
        else:
            logger.trace("{} cannot return request to node: {}".
                         format(self, reason))

    def doPrePrepare(self, reqDigest: ReqDigest) -> None:
        """
        Broadcast a PRE-PREPARE to all the replicas.

        :param reqDigest: a tuple with elements identifier, reqId, and digest
        """
        if not self.node.isParticipating:
            logger.error("Non participating node is attempting PRE-PREPARE. "
                         "This should not happen.")
            return

        if self.lastPrePrepareSeqNo == self.H:
            logger.debug("{} stashing PRE-PREPARE {} since outside greater "
                         "than high water mark {}".
                         format(self, (self.viewNo, self.lastPrePrepareSeqNo+1),
                                self.H))
            self.stashingWhileOutsideWaterMarks.append(reqDigest)
            return
        self.lastPrePrepareSeqNo += 1
        tm = time.time()*1000
        logger.debug("{} Sending PRE-PREPARE {} at {}".
                     format(self, (self.viewNo, self.lastPrePrepareSeqNo),
                            time.perf_counter()))
        prePrepareReq = PrePrepare(self.instId,
                                   self.viewNo,
                                   self.lastPrePrepareSeqNo,
                                   *reqDigest,
                                   tm)
        self.sentPrePrepares[self.viewNo, self.lastPrePrepareSeqNo] = (reqDigest.key,
                                                                       tm)
        self.send(prePrepareReq, TPCStat.PrePrepareSent)

    def doPrepare(self, pp: PrePrepare):
        logger.debug("{} Sending PREPARE {} at {}".
                     format(self, (pp.viewNo, pp.ppSeqNo), time.perf_counter()))
        prepare = Prepare(self.instId,
                          pp.viewNo,
                          pp.ppSeqNo,
                          pp.digest,
                          pp.ppTime)
        self.send(prepare, TPCStat.PrepareSent)
        self.addToPrepares(prepare, self.name)

    def doCommit(self, p: Prepare):
        """
        Create a commit message from the given Prepare message and trigger the
        commit phase
        :param p: the prepare message
        """
        logger.debug("{} Sending COMMIT{} at {}".
                     format(self, (p.viewNo, p.ppSeqNo), time.perf_counter()))
        commit = Commit(self.instId,
                        p.viewNo,
                        p.ppSeqNo,
                        p.digest,
                        p.ppTime)
        self.send(commit, TPCStat.CommitSent)
        self.addToCommits(commit, self.name)

    def canProcessPrePrepare(self, pp: PrePrepare, sender: str) -> bool:
        """
        Decide whether this replica is eligible to process a PRE-PREPARE,
        based on the following criteria:

        - this replica is a non-primary replica
        - the request isn't in its list of received PRE-PREPAREs
        - the request is finalised and its digest matches the PRE-PREPARE's

        :param pp: a PRE-PREPARE msg to process
        :param sender: the name of the node that sent the PRE-PREPARE msg
        :return: True if processing is allowed, False otherwise
        """
        # TODO: Check whether it is rejecting PRE-PREPARE from previous view
        # PRE-PREPARE should not be sent from non primary
        if not self.isMsgFromPrimary(pp, sender):
            raise SuspiciousNode(sender, Suspicions.PPR_FRM_NON_PRIMARY, pp)

        # A PRE-PREPARE is being sent to primary
        if self.isPrimaryForMsg(pp) is True:
            raise SuspiciousNode(sender, Suspicions.PPR_TO_PRIMARY, pp)

        # A PRE-PREPARE is sent that has already been received
        if (pp.viewNo, pp.ppSeqNo) in self.prePrepares:
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_PPR_SENT, pp)

        key = (pp.identifier, pp.reqId)
        if not self.requests.isFinalised(key):
            self.enqueuePrePrepare(pp, sender)
            return False

        # A PRE-PREPARE is sent that does not match request digest
        if self.requests.digest(key) != pp.digest:
            raise SuspiciousNode(sender, Suspicions.PPR_DIGEST_WRONG, pp)

        return True

    def addToPrePrepares(self, pp: PrePrepare) -> None:
        """
        Add the specified PRE-PREPARE to this replica's list of received
        PRE-PREPAREs.

        :param pp: the PRE-PREPARE to add to the list
        """
        key = (pp.viewNo, pp.ppSeqNo)
        self.prePrepares[key] = \
            ((pp.identifier, pp.reqId), pp.ppTime)
        self.dequeuePrepares(*key)
        self.dequeueCommits(*key)
        self.stats.inc(TPCStat.PrePrepareRcvd)
        self.tryPrepare(pp)

    def hasPrepared(self, request) -> bool:
        return self.prepares.hasPrepareFrom(request, self.name)

    def canSendPrepare(self, request) -> bool:
        """
        Return whether the request identified by (identifier, requestId) can
        proceed to the Prepare step.

        :param request: any object with identifier and requestId attributes
        """
        return self.shouldParticipate(request.viewNo, request.ppSeqNo) \
            and not self.hasPrepared(request) \
            and self.requests.isFinalised((request.identifier,
                                           request.reqId))

    def isValidPrepare(self, prepare: Prepare, sender: str) -> bool:
        """
        Return whether the PREPARE specified is valid.

        :param prepare: the PREPARE to validate
        :param sender: the name of the node that sent the PREPARE
        :return: True if PREPARE is valid, False otherwise
        """
        key = (prepare.viewNo, prepare.ppSeqNo)
        primaryStatus = self.isPrimaryForMsg(prepare)

        ppReqs = self.sentPrePrepares if primaryStatus else self.prePrepares

        # If a non primary replica and receiving a PREPARE request before a
        # PRE-PREPARE request, then proceed

        # PREPARE should not be sent from primary
        if self.isMsgFromPrimary(prepare, sender):
            raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)

        # If non primary replica
        if primaryStatus is False:
            if self.prepares.hasPrepareFrom(prepare, sender):
                raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT, prepare)
            # If PRE-PREPARE not received for the PREPARE, might be slow network
            if key not in ppReqs:
                self.enqueuePrepare(prepare, sender)
                return False
            elif prepare.digest != self.requests.digest(ppReqs[key][0]):
                raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
            elif prepare.ppTime != ppReqs[key][1]:
                raise SuspiciousNode(sender, Suspicions.PR_TIME_WRONG,
                                     prepare)
            else:
                return True
        # If primary replica
        else:
            if self.prepares.hasPrepareFrom(prepare, sender):
                raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT, prepare)
            # If PRE-PREPARE was not sent for this PREPARE, certainly
            # malicious behavior
            elif key not in ppReqs:
                raise SuspiciousNode(sender, Suspicions.UNKNOWN_PR_SENT, prepare)
            elif prepare.digest != self.requests.digest(ppReqs[key][0]):
                raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
            elif prepare.ppTime != ppReqs[key][1]:
                raise SuspiciousNode(sender, Suspicions.PR_TIME_WRONG,
                                     prepare)
            else:
                return True

    def addToPrepares(self, prepare: Prepare, sender: str):
        self.prepares.addVote(prepare, sender)
        self.tryCommit(prepare)

    def hasCommitted(self, request) -> bool:
        return self.commits.hasCommitFrom(ThreePhaseKey(
            request.viewNo, request.ppSeqNo), self.name)

    def canCommit(self, prepare: Prepare) -> bool:
        """
        Return whether the specified PREPARE can proceed to the Commit
        step.

        Decision criteria:

        - If this replica has received exactly 2f PREPARE requests, then commit.
        - If fewer than 2f PREPARE requests, there is probably no consensus on
            the request yet; don't commit
        - If more than 2f, a COMMIT has already been sent; don't commit

        :param prepare: the PREPARE
        """
        return self.shouldParticipate(prepare.viewNo, prepare.ppSeqNo) and \
            self.prepares.hasQuorum(prepare, self.f) and \
            not self.hasCommitted(prepare)

    def isValidCommit(self, commit: Commit, sender: str) -> bool:
        """
        Return whether the COMMIT specified is valid.

        :param commit: the COMMIT to validate
        :return: True if `commit` is valid, False otherwise
        """
        primaryStatus = self.isPrimaryForMsg(commit)
        ppReqs = self.sentPrePrepares if primaryStatus else self.prePrepares
        key = (commit.viewNo, commit.ppSeqNo)
        if key not in ppReqs:
            self.enqueueCommit(commit, sender)
            return False

        if (key not in self.prepares and
                key not in self.preparesWaitingForPrePrepare):
            logger.debug("{} rejecting COMMIT{} due to lack of prepares".
                         format(self, key))
            # raise SuspiciousNode(sender, Suspicions.UNKNOWN_CM_SENT, commit)
            return False
        elif self.commits.hasCommitFrom(commit, sender):
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit)
        elif commit.digest != self.getDigestFor3PhaseKey(ThreePhaseKey(*key)):
            raise SuspiciousNode(sender, Suspicions.CM_DIGEST_WRONG, commit)
        elif key in ppReqs and commit.ppTime != ppReqs[key][1]:
            raise SuspiciousNode(sender, Suspicions.CM_TIME_WRONG,
                                 commit)
        else:
            return True

    def addToCommits(self, commit: Commit, sender: str):
        """
        Add the specified COMMIT to this replica's list of received
        commit requests.

        :param commit: the COMMIT to add to the list
        :param sender: the name of the node that sent the COMMIT
        """
        self.commits.addVote(commit, sender)
        self.tryOrder(commit)

    def hasOrdered(self, viewNo, ppSeqNo) -> bool:
        return (viewNo, ppSeqNo) in self.ordered

    def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]:
        """
        Return whether the specified commitRequest can be returned to the node.

        Decision criteria:

        - If exactly 2f+1 COMMIT requests have been received, return the
            request to the node
        - If fewer than 2f+1 COMMIT requests, there is probably no consensus
            on the request yet; don't return it to the node
        - If more than 2f+1, the request was already returned to the node;
            don't return it again

        :param commit: the COMMIT
        """
        if not self.commits.hasQuorum(commit, self.f):
            return False, "no quorum: {} commits where f is {}".\
                          format(commit, self.f)

        if self.hasOrdered(commit.viewNo, commit.ppSeqNo):
            return False, "already ordered"

        if not self.isNextInOrdering(commit):
            viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
            if viewNo not in self.stashedCommitsForOrdering:
                self.stashedCommitsForOrdering[viewNo] = {}
            self.stashedCommitsForOrdering[viewNo][ppSeqNo] = commit
            # self._schedule(self.orderStashedCommits, 2)
            self.startRepeating(self.orderStashedCommits, 2)
            return False, "stashing {} since out of order".\
                format(commit)

        return True, None

    def isNextInOrdering(self, commit: Commit):
        viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
        if self.ordered and self.ordered[-1] == (viewNo, ppSeqNo-1):
            return True
        for (v, p) in self.commits:
            if v < viewNo:
                # Have commits from previous view that are unordered.
                # TODO: Question: would commits be always ordered, what if
                # some are never ordered and its fine, go to PBFT.
                return False
            if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
                # If unordered commits are found with lower ppSeqNo then this
                # cannot be ordered.
                return False

        # TODO: Revisit PBFT paper, how to make sure that last request of the
        # last view has been ordered? Need change in `VIEW CHANGE` mechanism.
        # Somehow view change needs to communicate what the last request was.
        # Also what if some COMMITs were completely missed in the same view
        return True

    def orderStashedCommits(self):
        # TODO: What if the first few commits were out of order and stashed?
        # `self.ordered` would be empty
        if self.ordered:
            lastOrdered = self.ordered[-1]
            vToRemove = set()
            for v in self.stashedCommitsForOrdering:
                if v < lastOrdered[0] and self.stashedCommitsForOrdering[v]:
                    raise RuntimeError("{} found commits from previous view {}"
                                       " that were not ordered but last ordered"
                                       " is {}".format(self, v, lastOrdered))
                pToRemove = set()
                for p, commit in self.stashedCommitsForOrdering[v].items():
                    if (v == lastOrdered[0] and lastOrdered == (v, p - 1)) or \
                            (v > lastOrdered[0] and
                                self.isLowestCommitInView(commit)):
                        logger.debug("{} ordering stashed commit {}".
                                     format(self, commit))
                        if self.tryOrdering(commit):
                            lastOrdered = (v, p)
                            pToRemove.add(p)

                for p in pToRemove:
                    del self.stashedCommitsForOrdering[v][p]
                if not self.stashedCommitsForOrdering[v]:
                    vToRemove.add(v)

            for v in vToRemove:
                del self.stashedCommitsForOrdering[v]

            # if self.stashedCommitsForOrdering:
            #     self._schedule(self.orderStashedCommits, 2)
            if not self.stashedCommitsForOrdering:
                self.stopRepeating(self.orderStashedCommits)

    def isLowestCommitInView(self, commit):
        # TODO: Assumption: This assumes that at least one commit that was sent
        #  for any request by any node has been received in the view of this
        # commit
        ppSeqNos = []
        for v, p in self.commits:
            if v == commit.viewNo:
                ppSeqNos.append(p)
        return min(ppSeqNos) == commit.ppSeqNo if ppSeqNos else True

    def tryOrdering(self, commit: Commit) -> bool:
        """
        Attempt to send an ORDERED request for the specified COMMIT to the
        node.

        :param commit: the COMMIT message
        """
        key = (commit.viewNo, commit.ppSeqNo)
        logger.debug("{} trying to order COMMIT{}".format(self, key))
        reqKey = self.getReqKeyFrom3PhaseKey(key)   # type: Tuple
        digest = self.getDigestFor3PhaseKey(key)
        if not digest:
            logger.error("{} did not find digest for {}, request key {}".
                         format(self, key, reqKey))
            return False
        self.doOrder(*key, *reqKey, digest, commit.ppTime)
        return True

    def doOrder(self, viewNo, ppSeqNo, identifier, reqId, digest, ppTime):
        key = (viewNo, ppSeqNo)
        self.addToOrdered(*key)
        ordered = Ordered(self.instId,
                          viewNo,
                          identifier,
                          reqId,
                          ppTime)
        # TODO: Should not order or add to checkpoint while syncing
        # 3 phase state.
        self.send(ordered, TPCStat.OrderSent)
        if key in self.stashingWhileCatchingUp:
            self.stashingWhileCatchingUp.remove(key)
        logger.debug("{} ordered request {}".format(self, (viewNo, ppSeqNo)))
        self.addToCheckpoint(ppSeqNo, digest)

    def processCheckpoint(self, msg: Checkpoint, sender: str):
        if self.checkpoints:
            seqNo = msg.seqNo
            _, firstChk = self.firstCheckPoint
            if firstChk.isStable:
                if firstChk.seqNo == seqNo:
                    self.discard(msg, reason="Checkpoint already stable",
                                 logMethod=logger.debug)
                    return
                if firstChk.seqNo > seqNo:
                    self.discard(msg, reason="Higher stable checkpoint present",
                                 logMethod=logger.debug)
                    return
            for state in self.checkpoints.values():
                if state.seqNo == seqNo:
                    if state.digest == msg.digest:
                        state.receivedDigests[sender] = msg.digest
                        break
                    else:
                        logger.error("{} received an incorrect digest {} for "
                                     "checkpoint {} from {}".format(self,
                                                                    msg.digest,
                                                                    seqNo,
                                                                    sender))
                        return
            if len(state.receivedDigests) == 2*self.f:
                self.markCheckPointStable(msg.seqNo)
        else:
            self.discard(msg, reason="No checkpoints present to tally",
                         logMethod=logger.warn)

    def _newCheckpointState(self, ppSeqNo, digest) -> CheckpointState:
        s, e = ppSeqNo, ppSeqNo + self.config.CHK_FREQ - 1
        logger.debug("{} adding new checkpoint state for {}".
                     format(self, (s, e)))
        state = CheckpointState(ppSeqNo, [digest, ], None, {}, False)
        self.checkpoints[s, e] = state
        return state

    def addToCheckpoint(self, ppSeqNo, digest):
        for (s, e) in self.checkpoints.keys():
            if s <= ppSeqNo <= e:
                state = self.checkpoints[s, e]  # type: CheckpointState
                state.digests.append(digest)
                state = updateNamedTuple(state, seqNo=ppSeqNo)
                self.checkpoints[s, e] = state
                break
        else:
            state = self._newCheckpointState(ppSeqNo, digest)
            s, e = ppSeqNo, ppSeqNo + self.config.CHK_FREQ - 1  # match the key used in _newCheckpointState

        if len(state.digests) == self.config.CHK_FREQ:
            state = updateNamedTuple(state, digest=serialize(state.digests),
                                     digests=[])
            self.checkpoints[s, e] = state
            self.send(Checkpoint(self.instId, self.viewNo, ppSeqNo,
                                 state.digest))

    def markCheckPointStable(self, seqNo):
        previousCheckpoints = []
        for (s, e), state in self.checkpoints.items():
            if e == seqNo:
                state = updateNamedTuple(state, isStable=True)
                self.checkpoints[s, e] = state
                break
            else:
                previousCheckpoints.append((s, e))
        else:
            logger.error("{} could not find {} in checkpoints".
                         format(self, seqNo))
            return
        self.h = seqNo
        for k in previousCheckpoints:
            logger.debug("{} removing previous checkpoint {}".format(self, k))
            self.checkpoints.pop(k)
        self.gc(seqNo)
        logger.debug("{} marked stable checkpoint {}".format(self, (s, e)))
        self.processStashedMsgsForNewWaterMarks()

    def gc(self, tillSeqNo):
        logger.debug("{} cleaning up till {}".format(self, tillSeqNo))
        tpcKeys = set()
        reqKeys = set()
        for (v, p), (reqKey, _) in self.sentPrePrepares.items():
            if p <= tillSeqNo:
                tpcKeys.add((v, p))
                reqKeys.add(reqKey)
        for (v, p), (reqKey, _) in self.prePrepares.items():
            if p <= tillSeqNo:
                tpcKeys.add((v, p))
                reqKeys.add(reqKey)

        logger.debug("{} found {} 3 phase keys to clean".
                     format(self, len(tpcKeys)))
        logger.debug("{} found {} request keys to clean".
                     format(self, len(reqKeys)))

        for k in tpcKeys:
            self.sentPrePrepares.pop(k, None)
            self.prePrepares.pop(k, None)
            self.prepares.pop(k, None)
            self.commits.pop(k, None)
            if k in self.ordered:
                self.ordered.remove(k)

        for k in reqKeys:
            self.requests.pop(k, None)

    def processStashedMsgsForNewWaterMarks(self):
        while self.stashingWhileOutsideWaterMarks:
            item = self.stashingWhileOutsideWaterMarks.pop()
            logger.debug("{} processing stashed item {} after new stable "
                         "checkpoint".format(self, item))

            if isinstance(item, ReqDigest):
                self.doPrePrepare(item)
            elif isinstance(item, tuple) and len(item) == 2:
                self.dispatchThreePhaseMsg(*item)
            else:
                logger.error("{} cannot process {} "
                             "from stashingWhileOutsideWaterMarks".
                             format(self, item))

    @property
    def firstCheckPoint(self) -> Tuple[Tuple[int, int], CheckpointState]:
        if not self.checkpoints:
            return None
        else:
            return self.checkpoints.peekitem(0)

    @property
    def lastCheckPoint(self) -> Tuple[Tuple[int, int], CheckpointState]:
        if not self.checkpoints:
            return None
        else:
            return self.checkpoints.peekitem(-1)

    def isPpSeqNoAcceptable(self, ppSeqNo: int):
        return self.h < ppSeqNo <= self.H

    def addToOrdered(self, viewNo: int, ppSeqNo: int):
        self.ordered.add((viewNo, ppSeqNo))

    def enqueuePrePrepare(self, request: PrePrepare, sender: str):
        logger.debug("Queueing pre-prepares due to unavailability of finalised "
                     "Request. Request {} from {}".format(request, sender))
        key = (request.identifier, request.reqId)
        if key not in self.prePreparesPendingReqDigest:
            self.prePreparesPendingReqDigest[key] = []
        self.prePreparesPendingReqDigest[key].append((request, sender))

    def dequeuePrePrepare(self, identifier: int, reqId: int):
        key = (identifier, reqId)
        if key in self.prePreparesPendingReqDigest:
            pps = self.prePreparesPendingReqDigest[key]
            for (pp, sender) in pps:
                logger.debug("{} popping stashed PRE-PREPARE{}".
                             format(self, key))
                if pp.digest == self.requests.digest(key):
                    self.prePreparesPendingReqDigest.pop(key)
                    self.processPrePrepare(pp, sender)
                    logger.debug(
                        "{} processed {} PRE-PREPAREs waiting for finalised "
                        "request for identifier {} and reqId {}".
                        format(self, pp, identifier, reqId))
                    break

    def enqueuePrepare(self, request: Prepare, sender: str):
        logger.debug("Queueing prepares due to unavailability of PRE-PREPARE. "
                     "Request {} from {}".format(request, sender))
        key = (request.viewNo, request.ppSeqNo)
        if key not in self.preparesWaitingForPrePrepare:
            self.preparesWaitingForPrePrepare[key] = deque()
        self.preparesWaitingForPrePrepare[key].append((request, sender))

    def dequeuePrepares(self, viewNo: int, ppSeqNo: int):
        key = (viewNo, ppSeqNo)
        if key in self.preparesWaitingForPrePrepare:
            i = 0
            # Process every pending PREPARE stashed for this key
            while self.preparesWaitingForPrePrepare[key]:
                prepare, sender = self.preparesWaitingForPrePrepare[
                    key].popleft()
                logger.debug("{} popping stashed PREPARE{}".format(self, key))
                self.processPrepare(prepare, sender)
                i += 1
            self.preparesWaitingForPrePrepare.pop(key)
            logger.debug("{} processed {} PREPAREs waiting for PRE-PREPARE for"
                         " view no {} and seq no {}".
                         format(self, i, viewNo, ppSeqNo))

    def enqueueCommit(self, request: Commit, sender: str):
        logger.debug("Queueing commit due to unavailability of PREPARE. "
                     "Request {} from {}".format(request, sender))
        key = (request.viewNo, request.ppSeqNo)
        if key not in self.commitsWaitingForPrepare:
            self.commitsWaitingForPrepare[key] = deque()
        self.commitsWaitingForPrepare[key].append((request, sender))

    def dequeueCommits(self, viewNo: int, ppSeqNo: int):
        key = (viewNo, ppSeqNo)
        if key in self.commitsWaitingForPrepare:
            i = 0
            # Process every pending COMMIT stashed for this key
            while self.commitsWaitingForPrepare[key]:
                commit, sender = self.commitsWaitingForPrepare[
                    key].popleft()
                logger.debug("{} popping stashed COMMIT{}".format(self, key))
                self.processCommit(commit, sender)
                i += 1
            self.commitsWaitingForPrepare.pop(key)
            logger.debug("{} processed {} COMMITs waiting for PREPARE for"
                         " view no {} and seq no {}".
                         format(self, i, viewNo, ppSeqNo))

    def getDigestFor3PhaseKey(self, key: ThreePhaseKey) -> Optional[str]:
        reqKey = self.getReqKeyFrom3PhaseKey(key)
        digest = self.requests.digest(reqKey)
        if not digest:
            logger.debug("{} could not find digest in sent or received "
                         "PRE-PREPAREs or PREPAREs for 3 phase key {} and req "
                         "key {}".format(self, key, reqKey))
            return None
        else:
            return digest

    def getReqKeyFrom3PhaseKey(self, key: ThreePhaseKey):
        reqKey = None
        if key in self.sentPrePrepares:
            reqKey = self.sentPrePrepares[key][0]
        elif key in self.prePrepares:
            reqKey = self.prePrepares[key][0]
        elif key in self.prepares:
            reqKey = self.prepares[key][0]
        else:
            logger.debug("Could not find request key for 3 phase key {}".
                         format(key))
        return reqKey

    @property
    def threePhaseState(self):
        # TODO: This method is incomplete
        # Gets the current stable and unstable checkpoints and creates digest
        # of unstable checkpoints
        if self.checkpoints:
            pass
        else:
            state = []
        return ThreePCState(self.instId, state)

    def process3PhaseState(self, msg: ThreePCState, sender: str):
        # TODO: This is not complete
        pass

    def send(self, msg, stat=None) -> None:
        """
        Send a message to the node on which this replica resides.

        :param msg: the message to send
        """
        logger.display("{} sending {}".format(self, msg.__class__.__name__),
                       extra={"cli": True})
        logger.trace("{} sending {}".format(self, msg))
        if stat:
            self.stats.inc(stat)
        self.outBox.append(msg)
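
To make the quorum arithmetic used throughout this class concrete, a minimal standalone sketch (the helper below is hypothetical, not part of plenum's API): with N nodes the replica tolerates f = (N - 1) // 3 faults, gathers 2f PREPAREs before committing, and 2f + 1 COMMITs before ordering.

def max_faulty(n):
    # Byzantine fault tolerance: f = (N - 1) // 3 faulty nodes tolerated.
    return (n - 1) // 3

for n in (4, 7, 10):
    f = max_faulty(n)
    print("N=%2d  f=%d  PREPARE quorum=2f=%d  COMMIT quorum=2f+1=%d"
          % (n, f, 2 * f, 2 * f + 1))
# N= 4  f=1  PREPARE quorum=2f=2  COMMIT quorum=2f+1=3
# N= 7  f=2  PREPARE quorum=2f=4  COMMIT quorum=2f+1=5
# N=10  f=3  PREPARE quorum=2f=6  COMMIT quorum=2f+1=7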
Code example #49
File: assembly.py Project: souravsingh/pydna
    def _assemble(self):

        for dr in self.dsrecs:
            if dr.name in ("",".", "<unknown name>", None):
                dr.name = "frag{}".format(len(dr))

        if self.only_terminal_overlaps:
            algorithm = terminal_overlap
        else:
            algorithm = common_sub_strings

        # analyze_overlaps
        cols = {}
        for dsrec in self.dsrecs:
            dsrec.features = [f for f in dsrec.features if f.type!="overlap"]
            dsrec.seq = Dseq(dsrec.seq.todata)
        rcs = {dsrec:dsrec.rc() for dsrec in self.dsrecs}
        matches=[]
        dsset=OrderedSet()

        for a, b in itertools.combinations(self.dsrecs, 2):
            match = algorithm( str(a.seq).upper(),
                               str(b.seq).upper(),
                               self.limit)
            if match:
                matches.append((a, b, match))
                dsset.add(a)
                dsset.add(b)
            match = algorithm( str(a.seq).upper(),
                               str(rcs[b].seq).upper(),
                               self.limit)
            if match:
                matches.append((a, rcs[b], match))
                dsset.add(a)
                dsset.add(rcs[b])
                matches.append((rcs[a], b, [(len(a)-sa-le,len(b)-sb-le,le) for sa,sb,le in match]))
                dsset.add(b)
                dsset.add(rcs[a])

        self.no_of_olaps=0

        for a, b, match in matches:
            for start_in_a, start_in_b, length in match:
                self.no_of_olaps+=1
                chksum = a[start_in_a:start_in_a+length].seguid()
                #assert chksum == b[start_in_b:start_in_b+length].seguid()

                try:
                    fcol, rcol = cols[chksum]  # rcol, since that name is used in qual below
                except KeyError:
                    fcol = '#%02X%02X%02X' % (random.randint(175,255),random.randint(175,255),random.randint(175,255))
                    rcol = '#%02X%02X%02X' % (random.randint(175,255),random.randint(175,255),random.randint(175,255))
                    cols[chksum] = fcol,rcol

                qual      = {"note"             : ["olp_{}".format(chksum)],
                             "chksum"           : [chksum],
                             "ApEinfo_fwdcolor" : [fcol],
                             "ApEinfo_revcolor" : [rcol]}

                if not chksum in [f.qualifiers["chksum"][0] for f in a.features if f.type == "overlap"]:
                    a.features.append( SeqFeature( FeatureLocation(start_in_a,
                                                                   start_in_a + length),
                                                                   type = "overlap",
                                                                   qualifiers = qual))
                if not chksum in [f.qualifiers["chksum"][0] for f in b.features if f.type == "overlap"]:
                    b.features.append( SeqFeature( FeatureLocation(start_in_b,
                                                                   start_in_b + length),
                                                                   type = "overlap",
                                                                   qualifiers = qual))
        for ds in dsset:
            ds.features = sorted([f for f in ds.features], key = operator.attrgetter("location.start"))

        self.analyzed_dsrecs = list(dsset)


        # Create graph

        self.G=nx.MultiDiGraph(multiedges=True, name ="original graph" , selfloops=False)
        self.G.add_node( '5' )
        self.G.add_node( '3' )

        for i, dsrec in enumerate(self.analyzed_dsrecs):

            overlaps = sorted( {f.qualifiers['chksum'][0]:f for f in dsrec.features
                                if f.type=='overlap'}.values(),
                               key = operator.attrgetter('location.start'))

            if overlaps:
                overlaps = ([SeqFeature(FeatureLocation(0, 0),
                             type = 'overlap',
                             qualifiers = {'chksum':['5']})]+
                             overlaps+
                            [SeqFeature(FeatureLocation(len(dsrec),len(dsrec)),
                                        type = 'overlap',
                                        qualifiers = {'chksum':['3']})])

                for olp1, olp2 in itertools.combinations(overlaps, 2):

                    n1 = olp1.qualifiers['chksum'][0]
                    n2 = olp2.qualifiers['chksum'][0]

                    if n1 == '5' and n2=='3':
                        continue

                    s1,e1,s2,e2 = (olp1.location.start.position,
                                   olp1.location.end.position,
                                   olp2.location.start.position,
                                   olp2.location.end.position,)

                    source_fragment = Fragment(dsrec,s1,e1,s2,e2,i)

                    self.G.add_edge( n1, n2,
                                     frag=source_fragment,
                                     weight = s1-e1,
                                     i = i)

        #linear assembly

        linear_products=defaultdict(list)

        for path in all_simple_paths_edges(self.G, '5', '3', data=True, cutoff=self.max_nodes):

            pred_frag = copy(path[0][2].values().pop()['frag'])
            source_fragments = [pred_frag, ]

            if pred_frag.start2<pred_frag.end1:
                result=pred_frag[pred_frag.start2+(pred_frag.end1-pred_frag.start2):pred_frag.end2]
            else:
                result=pred_frag[pred_frag.end1:pred_frag.end2]

            for first_node, second_node, edgedict in path[1:]:

                edgedict = edgedict.values().pop()

                f  = copy(edgedict['frag'])

                f.alignment =  pred_frag.alignment + pred_frag.start2- f.start1
                source_fragments.append(f)

                if f.start2>f.end1:
                    result+=f[f.end1:f.end2]
                else:
                    result+=f[f.start2+(f.end1-f.start2):f.end2]

                pred_frag = f

            add = True
            for lp in linear_products[len(result)]:
                if (str(result.seq).lower() == str(lp.seq).lower() or
                        str(result.seq).lower() == str(lp.seq.reverse_complement()).lower()):
                    add = False
            for dsrec in self.dsrecs:
                if (str(result.seq).lower() == str(dsrec.seq).lower() or
                        str(result.seq).lower() == str(dsrec.seq.reverse_complement()).lower()):
                    add = False
            if add:
                linear_products[len(result)].append(Contig(result, source_fragments))

        self.linear_products = list(itertools.chain.from_iterable(linear_products[size] for size in sorted(linear_products, reverse=True)))


        # circular assembly

        self.cG = self.G.copy()
        self.cG.remove_nodes_from(('5', '3'))
        circular_products = {}

        for pth in all_circular_paths_edges(self.cG):

            # rotate the path so it starts at the edge with the lowest
            # fragment index, giving a deterministic starting edge
            ns = min(enumerate(pth), key=lambda x: x[1][2]['i'])[0]

            path = pth[ns:] + pth[:ns]

            pred_frag = copy(path[0][2]['frag'])

            source_fragments = [pred_frag, ]

            if pred_frag.start2 < pred_frag.end1:
                result = pred_frag[pred_frag.start2 + (pred_frag.end1 - pred_frag.start2):pred_frag.end2]
            else:
                result = pred_frag[pred_frag.end1:pred_frag.end2]

            result.seq = Dseq(str(result.seq))

            for first_node, second_node, edgedict in path[1:]:

                f = copy(edgedict['frag'])

                f.alignment = pred_frag.alignment + pred_frag.start2 - f.start1
                source_fragments.append(f)

                if f.start2 > f.end1:
                    nxt = f[f.end1:f.end2]
                else:
                    nxt = f[f.start2 + (f.end1 - f.start2):f.end2]
                nxt.seq = Dseq(str(nxt.seq))
                result += nxt

                pred_frag = f

            # key each circular product on its cseguid (a rotation- and
            # strand-invariant checksum), so every unique circle is kept once
            r = Dseqrecord(result, circular=True)
            circular_products[r.cseguid()] = Contig(r, source_fragments)


        self.circular_products = sorted(circular_products.values(), key=len, reverse=True)
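
The circular branch above deduplicates by keying each product on cseguid(), a
rotation- and strand-invariant checksum of a circular sequence, so every unique
circular assembly is stored exactly once no matter where the path started. A
minimal sketch of the idea; canonical_key below is a hypothetical stand-in for
the real checksum, which also hashes the canonical form:

def canonical_key(seq):
    """Rotation- and strand-invariant key for a circular DNA sequence."""
    seq = seq.lower()
    rc = seq[::-1].translate(str.maketrans("acgt", "tgca"))
    # the lexicographically smallest rotation of either strand is canonical
    rotations = [s[i:] + s[:i] for s in (seq, rc) for i in range(len(s))]
    return min(rotations)

products = {}
for circ in ("atgc", "tgca", "gcat"):  # three rotations of the same circle
    products[canonical_key(circ)] = circ

assert len(products) == 1  # all rotations collapse to a single key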
Code Example #50
class Node(object):
    UNKNOWN_STATE = "_UNKNOWN_"

    def __init__(self, id_, master_graph):
        self.id_ = id_
        self.master_graph = master_graph
        self.edges = OrderedSet()
        self._state = None

        # determine ownership
        self.determined = False
        self.graph = None

    def __repr__(self):
        if self.determined:
            if self.graph:
                if self.state is not self.UNKNOWN_STATE:
                    return "<Node#%s: %s %r>" \
                           % (self.id_, self.graph.name, self.state)
                else:
                    return "<Node#%s: %s>" % (self.id_, self.graph.name)
            else:
                return "<!Node#%s: DETERMINE NONE!>" % self.id_
        else:
            if self.graph is self.master_graph:
                return "<!Node#%s: GUESS MASTER!>" % self.id_
            elif self.graph:
                if self.state is not self.UNKNOWN_STATE:
                    return "<Node#%s: ?%s %r>" \
                           % (self.id_, self.graph.name, self.state)
                else:
                    return "<Node#%s: ?%s>" % (self.id_, self.graph.name)
            else:
                return "<!Node#%s: NEW!>" % self.id_

    @property
    def state(self):
        if self._state is None:
            return self.UNKNOWN_STATE
        else:
            return self._state

    @state.setter
    def state(self, val):
        self._state = val

    @property
    def correct(self):
        # Allowed combinations:
        # 1. determined = True, graph is master
        # 2. determined = True, graph is not master
        # 3. determined = False, graph is not master
        if self.determined and self.graph is not None:
            return True
        if self.determined is False and isinstance(self.graph, LeafGraph):
            return True
        print("%s is not correct!" % self)
        return False

    @property
    def guessed(self):
        return self.determined is False and self.graph is not None

    @property
    def determined_master(self):
        return self.determined and self.graph is self.master_graph

    @property
    def is_new(self):
        return self.determined is False and self.graph is None

    def add_edge(self, edge):
        # NOTE: Order matters
        self.edges.add(edge)

    def guess_graph(self, graph):
        assert graph is not self.master_graph
        if self.determined:
            pass
        elif self.graph is None:
            self.graph = graph
        elif self.graph is graph:
            pass
        else:
            # conflicting guesses: escalate ownership to the master graph
            self.determine_graph(self.master_graph)

    def determine_graph(self, graph):
        if not self.determined:
            if self.graph is graph:
                graph.determine_node(self)
            else:
                self.master_graph.determine_node(self)
        elif self.determined_master is False and self.graph is not graph:
            raise RuntimeError("Node#%s is determined %s, but assigned %s"
                               % (self, self.graph.name, graph.name))

    def decide_edge(self, log):
        for edge in self.edges:
            if edge.accept(log):
                return edge
        return None

    def accept_edge(self, edge):
        return edge in self.edges
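
Since decide_edge() returns the first edge whose accept() matches, the insertion
order preserved by the OrderedSet in self.edges is load-bearing: edges added
earlier take priority. A small sketch of that behaviour; StubEdge is an
illustrative stand-in, not part of the original project:

class StubEdge(object):
    def __init__(self, name, pattern):
        self.name, self.pattern = name, pattern

    def accept(self, log):
        return self.pattern in log

node = Node("n1", master_graph=None)
node.add_edge(StubEdge("specific", "ERROR timeout"))
node.add_edge(StubEdge("generic", "ERROR"))

# the first accepting edge in insertion order wins
assert node.decide_edge("ERROR timeout on connect").name == "specific"
assert node.decide_edge("ERROR disk full").name == "generic"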
Code Example #51
File: bob.py Project: robochat/buildbit
 def calc_build(self, _seen=None):
     """Decides whether this rule needs to be built by recursively asking its
     prerequisites the same question.
     _seen is an internal variable (a set) for optimising the search. I'll be relying
     on the set being a mutable container in order to not have to pass it explicitly
     back up the call stack."""
     # There is an opportunity to optimise calculations to occur only once for rules
     # that are called multiple times, by using a shared (global) buildseq +
     # _already_seen set, or by passing those structures into the calc_build method
     # call, i.e. if (self in buildseq) or (self in _already_seen): return buildseq
     # Or we can memoize this method.

     # updated_only should be calculated at build-calculation time (rather than
     # build time) for consistency.
     self.updated_only  # force evaluation of lazy property

     buildseq = OrderedSet()
     _seen = set() if not _seen else _seen
     _seen.add(self)  # this will also solve any circular dependency issues!
     
     for req in self.order_only:
         if not os.path.exists(req):
             reqrule = Rule.get(req, None)  # super(ExplicitRule, self).get(req, None)
             if reqrule:
                 if reqrule not in _seen:
                     # pass _seen down so the shared set actually guards against cycles
                     buildseq.update(reqrule.calc_build(_seen))
                 else:
                     warnings.warn('rule for %r has already been processed' % req, stacklevel=2)
             else:
                 warnings.warn('%r has an order_only prerequisite with no rule' % self, stacklevel=2)
     
     for req in self.reqs:
         reqrule = Rule.get(req, None)  # super(ExplicitRule, self).get(req, None)
         if reqrule:
             if reqrule not in _seen:
                 buildseq.update(reqrule.calc_build(_seen))
             else:
                 warnings.warn('rule for %r has already been processed' % req, stacklevel=2)
         else:  # perform checks
             try:
                 self.get_mtime(req)  # get_mtime is cached to reduce file accesses
             except OSError:
                 raise AssertionError("No rule or file found for %r for targets: %r" % (req, self.targets))
         
     if len(buildseq) == 0:
         if self.PHONY or any(not os.path.exists(target) for target in self.targets):
             buildseq.add(self)
         else:
             oldest_target = self._oldest_target

             # Since none of the prerequisites have rules that need to update, we can
             # assume that all prerequisites should be real files (phony rules always
             # update, which should skip this section of code). Hence non-existing
             # files imply a malformed build file.
             for req in self.reqs:
                 try:
                     req_mtime = self.get_mtime(req)
                     if req_mtime > oldest_target:
                         buildseq.add(self)
                         break

                 except OSError:
                     raise AssertionError("A non-file prerequisite was found (%r) for targets %r in wrong code path" % (req, self.targets))
     else:
         buildseq.add(self)
     
     return buildseq
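
A condensed, self-contained sketch of the traversal above, using a hypothetical
rule table (a dict of name -> prerequisites) instead of the project's Rule
objects and omitting the mtime checks. It keeps the two properties the method
relies on: the shared mutable _seen set visits each rule at most once, and the
OrderedSet emits prerequisites before the rules that depend on them:

from ordered_set import OrderedSet

def calc_build(name, rules, _seen=None):
    _seen = set() if _seen is None else _seen
    _seen.add(name)
    buildseq = OrderedSet()
    for req in rules.get(name, ()):
        if req not in _seen:
            buildseq.update(calc_build(req, rules, _seen))
    buildseq.add(name)  # prerequisites first, then the rule itself
    return buildseq

rules = {"app": ["lib", "util"], "lib": ["util"], "util": []}
assert list(calc_build("app", rules)) == ["util", "lib", "app"]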
Code Example #52
def genetic(players=None,
            population_size=600,
            iterations_limit=240,
            retain_parents=.1,
            mutation_rate=.7,
            radiation_amount=50):

    # Logging for debug purposes
    if DEBUG:
        genetic_log = open('genetic.log', 'w')
        def log_genetic_data(string):
            genetic_log.write(string + "\n")
    
    # If needed, generate random players
    if players is None:
        first_player = random.choice(PLAYERS)
        second_player = random.choice(PLAYERS)
    else:
        first_player, second_player = players
    
    # Create the evaluation function
    simulation_results_cache = {}
    prior = time.time()
    def evaluate_population(population):
        """
        Given a list of move lists (the population), 
        Returns a list of (move list, value of move list, corresponding battle_id)
        that is sorted by the values.
        """
        population_values = []
        duplicates = 0
        for move_list in population:
            if move_list.short_string() in simulation_results_cache:
                value, battle_id = simulation_results_cache[move_list.short_string()]
                duplicates += 1
            else:
                value, battle_id = battle_simulation(move_list, 
                                                     first_player(move_list),
                                                     second_player(move_list))
                simulation_results_cache[move_list.short_string()] = (value, battle_id)
            population_values.append((move_list, value, battle_id))
        population_values.sort(key=lambda item: -item[1])  # sort by descending value
        
        if DEBUG:
            log_genetic_data("\tDuplicates: %d" % (duplicates,))
            
        return population_values
        
    # Generate the initial population
    population = [MoveList() for _ in range(population_size)]
        
    # Iterate through the Genetic Algorithm
    for iteration in range(iterations_limit):
    
        # Log this iteration
        if DEBUG: log_genetic_data("Iteration: %d" % (1+iteration,))
        
        # Calculate the size of the segments of our new population
        parents_retained = int(round(retain_parents * population_size))
        mutants_generated = int(round(mutation_rate * population_size))
        
        # Run the simulation on each move_list, and sort by best
        population_values = evaluate_population(population)
            
        # Log the values and battle ids
        if DEBUG:
            for move_list, value, battle_id in population_values:
                log_genetic_data("\tValue: %d, Battle: %d, Move List: %s" % 
                                 (value, battle_id, move_list.short_string()))
            
        # create our new population
        population = OrderedSet()
        top_performers = population_values[:parents_retained]

        # Retain the top performers of the old generation
        for move_list, value, battle_id in top_performers:
            population.add(move_list)

        # Add in the mutants!
        while len(population) - parents_retained < mutants_generated:
            mutant, value, battle_id = random.choice(top_performers)
            for _ in range(radiation_amount):
                mutant = mutant.mutate()
            population.add(mutant)
        
        # Add in the children!
        while len(population) < population_size:
            dad, mom = random.sample(population, 2)
            child = dad.cross_over(mom).mutate()
            population.add(child)
        
        # Report to the user that we've finished an iteration!
        print("Iteration", iteration + 1, "Time:", round(time.time() - prior, 3))
        prior = time.time()
    
    # Calculate the final resulting population
    results = evaluate_population(population)
    
    # Close up the log
    if DEBUG:
        log_genetic_data("Final Results")
        # log the final evaluation, not the stale population_values from the loop
        for move_list, value, battle_id in results:
            log_genetic_data("\tValue: %d, Battle: %d, Move List: %s" %
                             (value, battle_id, move_list.short_string()))
        genetic_log.close()
    
    # Return the best state
    return results[0][0]
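
Worth noting why the new population is an OrderedSet rather than a list: add()
silently drops a mutant or child that duplicates an existing member, so
len(population) only counts distinct genomes, while insertion order keeps the
retained parents at the front. This assumes MoveList instances are hashable. A
toy illustration:

from ordered_set import OrderedSet

population = OrderedSet()
population.add("parent-1")
population.add("parent-2")
population.add("parent-1")  # duplicate: ignored, length unchanged

assert list(population) == ["parent-1", "parent-2"]
assert len(population) == 2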