Example #1
    def set_transaction_execution_result(self,
                                         txn_signature,
                                         is_valid,
                                         context_id,
                                         state_changes=None,
                                         events=None,
                                         data=None,
                                         error_message="",
                                         error_data=b""):
        with self._condition:
            if txn_signature not in self._scheduled:
                raise SchedulerError(
                    "transaction not scheduled: {}".format(txn_signature))
            self._set_least_batch_id(txn_signature=txn_signature)
            if not is_valid:
                self._remove_subsequent_result_because_of_batch_failure(
                    txn_signature)
            is_rescheduled = self._reschedule_if_outstanding(txn_signature)

            if not is_rescheduled:
                self._txn_results[txn_signature] = TxnExecutionResult(
                    signature=txn_signature,
                    is_valid=is_valid,
                    context_id=context_id if is_valid else None,
                    state_hash=self._first_state_hash if is_valid else None,
                    state_changes=state_changes,
                    events=events,
                    data=data,
                    error_message=error_message,
                    error_data=error_data)

            self._condition.notify_all()
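The result object above nulls out context_id and state_hash when the transaction is invalid while keeping the error fields. The following standalone sketch is not the Sawtooth API (TxnResult is a stand-in namedtuple, make_result a hypothetical helper); it only shows that conditional-field rule:

# Minimal sketch, assuming nothing beyond the standard library.
from collections import namedtuple

TxnResult = namedtuple(
    "TxnResult", "signature is_valid context_id state_hash error_message")

def make_result(signature, is_valid, context_id, first_state_hash,
                error_message=""):
    # Mirrors the conditional fields used when building TxnExecutionResult above:
    # invalid transactions keep their error info but drop context id and state hash.
    return TxnResult(
        signature=signature,
        is_valid=is_valid,
        context_id=context_id if is_valid else None,
        state_hash=first_state_hash if is_valid else None,
        error_message=error_message)

print(make_result("abc123", True, "ctx-1", "state-0"))
print(make_result("def456", False, "ctx-2", "state-0",
                  error_message="invalid payload"))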
    def add_batch(self, batch, state_hash=None, required=False):
        with self._condition:
            if self._final:
                raise SchedulerError(
                    "Scheduler is finalized. Cannot take new batches")

            preserve = required
            if not required:
                # If this is the first non-required batch, it is preserved for
                # the schedule to be completed (i.e. no empty schedules in the
                # event of unschedule_incomplete_batches being called before
                # the first batch is completed).
                preserve = _first(
                    filterfalse(lambda sb: sb.required,
                                self._batch_by_id.values())) is None

            batch_signature = batch.header_signature
            self._batch_by_id[batch_signature] = _AnnotatedBatch(
                batch, required=required, preserve=preserve)

            if state_hash is not None:
                self._required_state_hashes[batch_signature] = state_hash
            batch_length = len(batch.transactions)
            LOGGER.debug(
                "SerialScheduler::add_batch: batch=%s tnxs=%s added=%s STATE=%s",
                batch_signature[:8],
                [t.header_signature[:8] for t in batch.transactions],
                len(self._batch_by_id),
                state_hash[:10] if state_hash is not None else None)
            for idx, txn in enumerate(batch.transactions):
                if idx == batch_length - 1:
                    self._last_in_batch.append(txn.header_signature)
                self._txn_to_batch[txn.header_signature] = batch_signature
                self._txn_queue.append(txn)
            self._condition.notify_all()
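The preserve check above relies on _first(filterfalse(...)) is None to detect whether any non-required batch has already been added. Below is a self-contained sketch of that idiom, assuming _first simply returns the first element of an iterable (or None when empty) and using a stand-in _AnnotatedBatch namedtuple rather than the real class:

from collections import namedtuple
from itertools import filterfalse

_AnnotatedBatch = namedtuple("_AnnotatedBatch", "batch required preserve")

def _first(iterable):
    # Assumed behavior of the helper used above: first item or None.
    return next(iter(iterable), None)

batch_by_id = {
    "b1": _AnnotatedBatch(batch=None, required=True, preserve=True),
}

# True: no non-required batch exists yet, so the next optional batch is preserved.
preserve = _first(
    filterfalse(lambda sb: sb.required, batch_by_id.values())) is None
print(preserve)

batch_by_id["b2"] = _AnnotatedBatch(batch=None, required=False, preserve=preserve)

# False: a non-required batch is now present, so later optional batches
# are not preserved.
print(_first(
    filterfalse(lambda sb: sb.required, batch_by_id.values())) is None)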
    def add_batch(self, batch, state_hash=None):
        with self._condition:
            if self._final:
                raise SchedulerError('Invalid attempt to add batch to '
                                     'finalized scheduler; batch: {}'.format(
                                         batch.header_signature))
            if not self._batches:
                self._least_batch_id_wo_results = batch.header_signature

            self._batches.append(batch)
            self._batches_by_id[batch.header_signature] = batch
            for txn in batch.transactions:
                self._batches_by_txn_id[txn.header_signature] = batch
                self._txns_available.append(txn)
                self._transactions[txn.header_signature] = txn

            if state_hash is not None:
                b_id = batch.header_signature
                self._batches_with_state_hash[b_id] = state_hash

            # For dependency handling: First, we determine our dependencies
            # based on the current state of the predecessor tree.  Second,
            # we update the predecessor tree with reader and writer
            # information based on input and outputs.
            for txn in batch.transactions:
                header = TransactionHeader()
                header.ParseFromString(txn.header)

                # Calculate predecessors (transaction ids which must come
                # prior to the current transaction).
                predecessors = self._find_input_dependencies(header.inputs)
                predecessors.extend(
                    self._find_output_dependencies(header.outputs))

                txn_id = txn.header_signature
                # Update our internal state with the computed predecessors.
                self._txn_predecessors[txn_id] = list(set(predecessors))

                # Update the predecessor tree.
                #
                # Order of reader/writer operations is relevant.  A writer
                # may overshadow a reader.  For example, if the transaction
                # has the same input/output address, the end result will be
                # this writer (txn.header_signature) stored at the address of
                # the predecessor tree.  The reader information will have been
                # discarded.  Write operations to partial addresses will also
                # overshadow entire parts of the predecessor tree.
                #
                # Thus, the order here (inputs then outputs) will cause the
                # minimal amount of relevant information to be stored in the
                # predecessor tree, with duplicate information being
                # automatically discarded by the set_writer() call.
                for address in header.inputs:
                    self._predecessor_tree.add_reader(address, txn_id)
                for address in header.outputs:
                    self._predecessor_tree.set_writer(address, txn_id)

            self._condition.notify_all()
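The reader/writer ordering described in the comments can be illustrated with a deliberately simplified predecessor tree. ToyPredecessorTree below is a flat, exact-address map (an assumption for illustration; the real tree also handles address prefixes), so it only demonstrates why readers are recorded before the writer is set:

class ToyPredecessorTree:
    """Toy stand-in for the predecessor tree; exact addresses only."""

    def __init__(self):
        # address -> {"writer": txn_id or None, "readers": [txn_id, ...]}
        self._nodes = {}

    def add_reader(self, address, txn_id):
        self._nodes.setdefault(
            address, {"writer": None, "readers": []})["readers"].append(txn_id)

    def set_writer(self, address, txn_id):
        # A new writer supersedes the accumulated reader information.
        self._nodes[address] = {"writer": txn_id, "readers": []}

    def find_write_predecessors(self, address):
        # A transaction writing this address must follow its current
        # readers and writer.
        node = self._nodes.get(address)
        if node is None:
            return set()
        preds = set(node["readers"])
        if node["writer"] is not None:
            preds.add(node["writer"])
        return preds

tree = ToyPredecessorTree()
# txn "A" both reads and writes the same address: inputs are added first,
# then the output overwrites the node, leaving only the writer entry.
tree.add_reader("1a2b", "A")
tree.set_writer("1a2b", "A")

# A later txn "B" writing the same address therefore needs only "A"
# as a predecessor.
print(tree.find_write_predecessors("1a2b"))  # {'A'}

Because A writes the address it also reads, only the writer entry survives; the discarded reader entry carried no additional ordering information, which is the point of processing inputs before outputs.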
    def add_batch(self, batch, state_hash=None):
        with self._condition:
            if self._final:
                raise SchedulerError("Scheduler is finalized. Cannot take"
                                     " new batches")

            batch_signature = batch.header_signature
            batch_length = len(batch.transactions)
            for idx, txn in enumerate(batch.transactions):
                if idx == batch_length - 1:
                    self._last_in_batch.append(txn.header_signature)
                self._txn_to_batch[txn.header_signature] = batch_signature
                self._txn_queue.put(txn)

            self._condition.notify_all()
Example #5
    def set_transaction_execution_result(self, txn_signature, is_valid,
                                         context_id):
        with self._condition:
            if txn_signature not in self._scheduled:
                raise SchedulerError(
                    "transaction not scheduled: {}".format(txn_signature))
            if not is_valid:
                self._remove_subsequent_result_because_of_batch_failure(
                    txn_signature)
            is_rescheduled = self._reschedule_if_outstanding(txn_signature)

            if not is_rescheduled:
                txn_result = TransactionExecutionResult(
                    is_valid=is_valid,
                    context_id=context_id if is_valid else None,
                    state_hash=self._first_state_hash if is_valid else None)

                self._txn_results[txn_signature] = txn_result

            self._condition.notify_all()
    def add_batch(self, batch, state_hash=None, required=False):
        with self._condition:
            if self._final:
                raise SchedulerError(
                    'Invalid attempt to add batch to finalized scheduler; batch: {}'
                    .format(batch.header_signature))
            if not self._batches:
                self._least_batch_id_wo_results = batch.header_signature

            preserve = required
            if not required:
                # If this is the first non-required batch, it is preserved for
                # the schedule to be completed (i.e. no empty schedules in the
                # event of unschedule_incomplete_batches being called before
                # the first batch is completed).
                preserve = _first(
                    filterfalse(lambda sb: sb.required,
                                self._batches_by_id.values())) is None

            self._batches.append(batch)
            self._batches_by_id[batch.header_signature] = _AnnotatedBatch(
                batch, required=required, preserve=preserve)
            for txn in batch.transactions:
                self._batches_by_txn_id[txn.header_signature] = batch
                self._txns_available.append(txn)
                self._transactions[txn.header_signature] = txn

            if state_hash is not None:
                b_id = batch.header_signature
                self._batches_with_state_hash[b_id] = state_hash

            LOGGER.debug("add_batch: batch=%s tnxs=%s added=%s STATE=%s",
                         batch.header_signature[:8],
                         [t.header_signature[:8] for t in batch.transactions],
                         len(self._batches_by_id),
                         state_hash[:8] if state_hash is not None else None)
            # For dependency handling: First, we determine our dependencies
            # based on the current state of the predecessor tree.  Second,
            # we update the predecessor tree with reader and writer
            # information based on input and outputs.
            for txn in batch.transactions:
                header = TransactionHeader()
                header.ParseFromString(txn.header)

                # Calculate predecessors (transaction ids which must come
                # prior to the current transaction).
                predecessors = self._find_input_dependencies(header.inputs)
                predecessors.extend(
                    self._find_output_dependencies(header.outputs))

                txn_id = txn.header_signature
                # Update our internal state with the computed predecessors.
                self._txn_predecessors[txn_id] = list(set(predecessors))

                # Update the predecessor tree.
                #
                # Order of reader/writer operations is relevant.  A writer
                # may overshadow a reader.  For example, if the transaction
                # has the same input/output address, the end result will be
                # this writer (txn.header_signature) stored at the address of
                # the predecessor tree.  The reader information will have been
                # discarded.  Write operations to partial addresses will also
                # overshadow entire parts of the predecessor tree.
                #
                # Thus, the order here (inputs then outputs) will cause the
                # minimal amount of relevant information to be stored in the
                # predecessor tree, with duplicate information being
                # automatically discarded by the set_writer() call.
                for address in header.inputs:
                    self._predecessor_tree.add_reader(address, txn_id)
                for address in header.outputs:
                    self._predecessor_tree.set_writer(address, txn_id)

            self._condition.notify_all()
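How _find_input_dependencies and _find_output_dependencies resolve addresses is internal to the scheduler (they consult the predecessor tree). The sketch below uses hypothetical stand-ins that return canned transaction ids, only to show how the two lists are merged and then de-duplicated by list(set(predecessors)):

def find_input_dependencies(inputs):
    # Hypothetical stand-in: pretend each input depends on a prior writer.
    return ["txn_w"] if "1a2b" in inputs else []

def find_output_dependencies(outputs):
    # Hypothetical stand-in: pretend each output depends on prior readers
    # and the prior writer.
    return ["txn_w", "txn_r"] if "1a2b" in outputs else []

inputs = ["1a2b"]
outputs = ["1a2b"]

predecessors = find_input_dependencies(inputs)
predecessors.extend(find_output_dependencies(outputs))

# "txn_w" appears from both lookups; the duplicate collapses, as in
# self._txn_predecessors[txn_id] = list(set(predecessors)) above.
print(sorted(set(predecessors)))  # ['txn_r', 'txn_w']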