    def decrement_num_pairs(self, aid):
        """
        Decrements the remaining number of pairs of the request with the given absolute queue ID
        :param aid: tuple(int, int)
            The absolute queue ID
        :return: None
        """
        try:
            queue_item = self.distQueue.local_peek(aid)
        except LinkLayerException as err:
            logger.warning(
                "Could not find queue item with aid = {}, when trying to decrement number of remaining pairs."
                .format(aid))
            raise err

        if queue_item.num_pairs_left > 1:
            logger.debug("Decrementing number of remaining pairs")
            queue_item.num_pairs_left -= 1

        elif queue_item.num_pairs_left == 1:
            logger.debug("Generated final pair, removing request")
            self.clear_request(aid=aid)
        else:
            raise LinkLayerException(
                "Current request with aid = {} has invalid number of remaining pairs."
                .format(aid))
Example #2
    def _handle_cq(self, classical, qubit, sender):
        """
        Handles the Classical/Quantum messages from either end of the connection
        :param classical: obj any
            Classical data sent by sender
        :param qubit: list `~netsquid.qubits.qubit.Qubit`
            The qubits sent by the sender
        :param sender: int
            NodeID of the sender of the information
        :return: None
        """
        logger.debug(
            "Handling CQ from {}, got classical: {} and qubit {}".format(
                sender, classical, qubit))

        # Check whether we are in time window
        if not self._in_window and qubit:
            logger.warning("Received CQ out of detection time window")
            # Outside window, drop qubit
            self._drop_qubit(qubit)

            # Notify out of window error
            self._send_notification_to_one(self.ERR_OUT_OF_WINDOW, sender)
            self._reset_incoming()
            return

        if classical is not None:
            incoming_request = self._construct_request(sender, classical,
                                                       qubit)
            self._process_incoming_request(sender, incoming_request)

        else:
            if qubit is not None:
                self._drop_qubit(qubit)
def set_datacollection_version(results_path, max_tries=1000):
    """
    Writes the current datacollection version to a file.
    :param results_path: str
        Path to the results file; the versions file is written to the same data folder
    :param max_tries: int
        Maximum number of attempts to read an existing versions file
    :return: None
    """
    data_folder_path = os.path.split(results_path)[0]
    version_path = os.path.join(data_folder_path, "versions.json")
    if os.path.exists(version_path):
        with open(version_path, 'r') as f:
            tries = 0
            while tries < max_tries:
                tries += 1
                try:
                    versions = json.load(f)
                except json.decoder.JSONDecodeError:
                    # File is probably open elsewhere, try again
                    f.seek(0)
                else:
                    break
            else:
                logger.warning("Did not manage to open versions file, will not write to it")
                versions = None
    else:
        versions = {}
    if versions is not None:
        versions["datacollection"] = current_version
        with open(version_path, 'w') as f:
            json.dump(versions, f)
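For reuse outside of this module, the same read-retry/update/write pattern can be factored into a standalone helper. The sketch below is illustrative only: the name update_versions_file and the component argument are not part of the code above, and the retry loop gives up instead of overwriting a file it could not parse.

import json
import os


def update_versions_file(data_folder_path, component, version, max_tries=1000):
    """
    Hypothetical standalone variant of the pattern above: read versions.json
    (retrying while another process may be writing it), update one entry and
    write the file back. Returns True on success, False if the file could not be read.
    """
    version_path = os.path.join(data_folder_path, "versions.json")
    versions = {}
    if os.path.exists(version_path):
        with open(version_path, 'r') as f:
            for _ in range(max_tries):
                try:
                    versions = json.load(f)
                    break
                except json.decoder.JSONDecodeError:
                    # File is probably open elsewhere, try again
                    f.seek(0)
            else:
                # Never managed to parse the file; do not overwrite it
                return False
    versions[component] = version
    with open(version_path, 'w') as f:
        json.dump(versions, f)
    return True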
Example #4
    def produce_entanglement(self, sender):
        """
        Handler for a message requesting the production of entangled pairs. Only performs a swap
        if the heralding station has received a qubit from both peers.
        :param sender: int
            NodeID of the sender of the request
        :return: None
        """
        logger.debug("Producing entanglement")
        qubit = self.node_requests[sender].quantum_data
        # Check if we have a qubit from other end of connection
        if self._has_both_qubits():
            # There is a qubit available from Bob already to swap with
            logger.debug(
                "Have qubits from both A and B with request for production")

            # Check the absolute queue id's from both ends of the connection
            if not self._has_same_aid():
                logger.warning("Absolute queue IDs don't match!")
                self._send_notification_to_both(self.ERR_QUEUE_MISMATCH)
                self._drop_qubit(qubit)
                self._reset_incoming()
                return
 def update_mhp_cycle_number(self, current_cycle, max_cycle):
     """
     Updates the current MHP cycle number. Updates ready accordingly and triggers a timeout.
     :param current_cycle: int
         The current MHP cycle
     :param max_cycle: int
         The max MHP cycle
     :return: None
     """
     if self.timeout_cycle is not None:
         if check_schedule_cycle_bounds(current_cycle, max_cycle,
                                        self.timeout_cycle):
             logger.debug("Item timed out, calling callback")
             if not self.acked:
                 logger.warning("Item timed out before being acknowledged.")
             self.timeout_callback(self)
     if self.acked:
         if not self.ready:
             if self.schedule_cycle is not None:
                 if check_schedule_cycle_bounds(current_cycle, max_cycle,
                                                self.schedule_cycle):
                     self.ready = True
                     logger.debug("Item is ready to be scheduled")
             else:
                 self.ready = True
                 logger.debug("Item is ready to be scheduled")
Example #6
def add_quberr(quberr_data, creates_and_oks_by_create_id, ok_keys_by_timestamp):
    """
    Attaches successful QBER datapoints to the corresponding OK entries in
    creates_and_oks_by_create_id, matching them to OK keys by timestamp.
    """
    max_timestamp = -1
    for raw_datapoint in quberr_data:
        quberr_datapoint = EGPQubErrDataPoint(raw_datapoint)
        if quberr_datapoint.timestamp > max_timestamp:
            max_timestamp = quberr_datapoint.timestamp
    for raw_datapoint in quberr_data:
        quberr_datapoint = EGPQubErrDataPoint(raw_datapoint)
        if quberr_datapoint.success:
            timestamp = quberr_datapoint.timestamp
            try:
                ok_keys = ok_keys_by_timestamp[timestamp]
            except KeyError:
                logger.warning("No OK corresponding to QBER datapoint with timestamp {}".format(timestamp))
                ok_keys = None
            if ok_keys is not None:
                if len(ok_keys) != 1:
                    # Check if both nodes received OK for same create ID at the same time
                    if len(ok_keys) == 2:
                        if ok_keys[0][0] != ok_keys[1][0]:
                            raise RuntimeError("The timestamp {} of this QBER datapoint"
                                               "does not have a unique corresponding ok datapoint".format(timestamp))
                    else:
                        raise RuntimeError("The timestamp {} of this QBER datapoint"
                                           "does not have a unique corresponding ok datapoint".format(timestamp))
                absolute_create_id, node_id, mhp_seq = ok_keys[0]

                # Add this QBER datapoint to the data structure
                if "QBER" in creates_and_oks_by_create_id[absolute_create_id]["oks"][node_id][mhp_seq]:
                    raise RuntimeError("OK for Absolute Create ID {},"
                                       "Node ID {} and MHP Seq {} already has a QBER".format(absolute_create_id,
                                                                                             node_id,
                                                                                             mhp_seq))
                creates_and_oks_by_create_id[absolute_create_id]["oks"][node_id][mhp_seq]["QBER"] = quberr_datapoint
    def remove_item(self, seq):
        """
        Removes the queue item corresponding to the provided sequence number from the queue
        :param seq: int
            Identifier of the queue item we wish to remove
        :return: obj `~qlinklayer.localQueue.LocalQueueItem`
            The queue item that we removed if any, else None
        """
        if seq in self.sequence_to_item:
            item = self.sequence_to_item.pop(seq)
            self.queue.remove(item)
            logger.debug(
                "Removing item with seq={} from local queue".format(seq))

            if self.throw_events:
                logger.debug("Scheduling item removed event now.")
                self._schedule_now(self._EVT_ITEM_REMOVED)
                self._seqs_removed.append(seq)

            return item

        else:
            logger.warning(
                "Sequence number {} not found in local queue".format(seq))
            return None
 def ack(self, seq):
     """
     Mark the queue item with queue sequence number seq as acknowledged by remote node
     """
     try:
         self.sequence_to_item[seq].acked = True
         logger.debug("Item with seq {} is acknowledged".format(seq))
     except KeyError:
         logger.warning(
             "Sequence number {} not found in local queue".format(seq))
         # Not in queue
         return
Example #9
def parse_thoughput(creates_and_oks_by_create_id, max_time, num_points=10000, time_window=1e9, min_time=0,
                    in_seconds=False):
    """
    Computes the throughput (number of OKs per time window) for each priority, using a sliding
    window of width time_window evaluated at num_points positions between min_time and max_time.
    Times are in ns; if in_seconds is True, the returned (time, throughput) tuples are in seconds.
    """

    priorities = list(range(3))

    if max_time == 0:
        return {p: [(0, -1)] * num_points for p in priorities}

    timestamps_per_prio = {p: [] for p in priorities}
    for create_id, create_data in creates_and_oks_by_create_id.items():
        if not create_data["expired"]:
            priority = create_data["create"].priority
            for node_id, node_data in create_data["oks"].items():
                # Don't count OKs double
                if node_id == 0:
                    for mhp_seq, mhp_data in node_data.items():
                        ok = mhp_data["ok"]
                        timestamps_per_prio[priority].append(ok.timestamp)

    throughputs_per_prio = {}
    for priority, timestamps in timestamps_per_prio.items():
        timestamps = sorted(timestamps)
        time_diff = max_time - min_time
        shift = (time_diff - time_window) / (num_points - 1)
        if shift > time_window:
            logger.warning("Got to short time-window {} (s), making it {} (s)".format(time_window * 1e-9, shift * 1e-9))
            time_window = shift

        left_side = min_time
        position = 0
        throughputs = []
        for _ in range(num_points):
            right_side = left_side + time_window
            i = position
            num_oks_in_window = 0
            while i < len(timestamps):
                t = timestamps[i]
                if t < left_side:
                    position += 1
                elif t >= right_side:
                    break
                else:
                    num_oks_in_window += 1
                i += 1
            if in_seconds:
                throughputs.append((left_side * 1e-9, num_oks_in_window / time_window * 1e9))
            else:
                throughputs.append((left_side, num_oks_in_window / time_window))
            left_side += shift

        throughputs_per_prio[priority] = throughputs

    return throughputs_per_prio
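A small usage sketch of parse_thoughput on synthetic data. The namedtuple fields below only mimic the attributes the function reads (priority on the create, timestamp on the OK); the concrete values and the (node_id, create_id) key are made up for illustration.

from collections import namedtuple

Create = namedtuple("Create", ["priority"])
Ok = namedtuple("Ok", ["timestamp"])

# One non-expired create with priority 0 and ten OKs at node 0, 0.1 s apart (timestamps in ns)
creates_and_oks = {
    (0, 0): {
        "expired": False,
        "create": Create(priority=0),
        "oks": {0: {seq: {"ok": Ok(timestamp=seq * 1e8)} for seq in range(10)}},
    },
}

throughputs = parse_thoughput(creates_and_oks, max_time=1e9, num_points=5, in_seconds=True)
# throughputs[0] is a list of (window start (s), OKs per second) tuples for priority 0
print(throughputs[0])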
Example #10
def get_creates_and_oks_by_create_id(filename, expired_create_ids):
    """
    Reads the EGP_Creates and EGP_OKs tables from the given SQL file and groups the OK datapoints
    under their corresponding Create, keyed by absolute create ID (origin node ID, create ID).
    Also returns a mapping from OK timestamp to the (absolute create ID, node ID, MHP seq) keys.
    """
    creates_data = parse_table_data_from_sql(filename, "EGP_Creates")
    oks_data = parse_table_data_from_sql(filename, "EGP_OKs")

    creates_and_oks_by_create_id = {}
    ok_keys_by_timestamp_and_node_id = {}

    # Get the creates
    for raw_datapoint in creates_data:
        datapoint = EGPCreateDataPoint(raw_datapoint)
        create_id = datapoint.create_id
        node_id = datapoint.node_id
        absolute_create_id = node_id, create_id
        expired = (create_id in expired_create_ids)
        if absolute_create_id in creates_and_oks_by_create_id:
            raise RuntimeError("Duplicate Absolute Create ID = {} for Creates".format(absolute_create_id))
        else:
            creates_and_oks_by_create_id[absolute_create_id] = {"create": datapoint, "oks": {}, "expired": expired}

    # Get the oks
    for raw_datapoint in oks_data:
        datapoint = EGPOKDataPoint(raw_datapoint)
        create_id = datapoint.create_id
        origin_id = datapoint.origin_id
        absolute_create_id = origin_id, create_id
        node_id = datapoint.node_id
        mhp_seq = datapoint.mhp_seq
        timestamp = datapoint.timestamp
        try:
            oks_dct = creates_and_oks_by_create_id[absolute_create_id]["oks"]
        except KeyError:
            logger.warning("OK with Absolute Create ID {} with no corresponding Create".format(absolute_create_id))
            oks_dct = None
        if oks_dct is not None:
            if node_id not in oks_dct:
                # We will later add state and quberr so make this a dict
                oks_dct[node_id] = {mhp_seq: {"ok": datapoint}}
            elif mhp_seq in oks_dct[node_id]:
                raise RuntimeError("Duplicate entry for"
                                   "Absolute Create ID = {}, Node ID = {} and MHP Seq = {}".format(absolute_create_id,
                                                                                                   node_id,
                                                                                                   mhp_seq))
            else:
                # We will later add state and quberr so make this a dict
                oks_dct[node_id][mhp_seq] = {"ok": datapoint}

            if timestamp in ok_keys_by_timestamp_and_node_id:
                ok_keys_by_timestamp_and_node_id[timestamp].append([absolute_create_id, node_id, mhp_seq])
            else:
                ok_keys_by_timestamp_and_node_id[timestamp] = [[absolute_create_id, node_id, mhp_seq]]

    return creates_and_oks_by_create_id, ok_keys_by_timestamp_and_node_id
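For reference, the nesting of the first returned dictionary looks as follows (the angle-bracket values are placeholders):

# creates_and_oks_by_create_id = {
#     (origin_node_id, create_id): {
#         "create": <EGPCreateDataPoint>,
#         "expired": False,
#         "oks": {
#             node_id: {
#                 mhp_seq: {"ok": <EGPOKDataPoint>},  # "state" and "QBER" entries are added later
#             },
#         },
#     },
# }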
Example #11
 def free_qubit(self, id):
     """
     Frees the locally reserved qubit space
     :param id: int
         Address of the qubit to free
     """
     self.vacate_qubit(qid=id)
     if self.node.qmemory.position_in_use(id):
         q = self.node.qmemory.pop(id)[0]
         if q is not None:
             qapi.discard(q)
         else:
             logger.warning("Trying to free a non-existing qubit")
Example #12
    def _do_swap(self):
        # Performs entanglement swapping, if two qubits are available
        node_reqs = [
            r.request_data for r in self.node_requests.values()
            if r is not None
        ]
        num_production_requests = node_reqs.count(self.CMD_PRODUCE)

        logger.debug(
            "Have {} production requests".format(num_production_requests))

        # Don't bother swapping because neither party requested entanglement
        if num_production_requests == 0:
            pass

        # Error if we only received a production request from one node this cycle
        elif num_production_requests != 2:
            for _, qubit in self.qubits.items():
                if qubit:
                    self._drop_qubit(qubit)

            dataA, dataB = self._get_error_data(self.ERR_NO_CLASSICAL_OTHER)
            if self.node_requests[self.nodeA.nodeID] is not None:
                self._send_to_node(self.nodeA, dataA)
            elif self.node_requests[self.nodeB.nodeID] is not None:
                self._send_to_node(self.nodeB, dataB)

            logger.warning(
                "Midpoint only received entanglement generation data from one node"
            )

        else:
            for (id, q) in self.qubits.items():
                if q is None:
                    q = create_qubits(1)[0]
                    q.is_number_state = True
                    self.qubits[id] = q

            outcome = self.midPoint.measure(self.qubits[self.idA],
                                            self.qubits[self.idB])
            self.last_outcome = outcome
            logger.debug("Scheduling entanglement event now.")
            self._schedule_now(self._EVT_ENTANGLE_ATTEMPT)

            self._send_notification_to_both(outcome)

        # Reset incoming data
        self._reset_incoming()
    def _update_virtual_finish(self, aid):
        """
        Updates the virtual finish upon success
        :param aid: tuple of  int, int
            Contains the aid used for the generation
        :return: None
        """
        try:
            queue_item = self.distQueue.local_peek(aid)
        except LinkLayerException as err:
            logger.warning(
                "Could not find queue item with aid = {}, when trying to update virtual finish."
                .format(aid))
            raise err

        queue_item.update_virtual_finish()
    def ready(self, seq):
        """
        Mark the queue item with queue sequence number seq as ready
        """
        try:
            queue_item = self.sequence_to_item[seq]
        except KeyError:
            logger.warning(
                "Sequence number {} not found in local queue".format(seq))
            return

        if queue_item.acked:
            queue_item.ready = True
            logger.debug(
                "Item with seq {} is ready to be scheduled".format(seq))
        else:
            logger.warning(
                "Sequence number {} is not acked and cannot be made ready yet".
                format(seq))
            return
    def add_request(self, egp_request, create_id=0):
        """
        Adds a request to the distributed queue
        :param egp_request: obj `~qlinklayer.egp.EGPRequest`
            The request to be added
        :param create_id: int
            The assigned create ID of this request
        :return: bool
            Whether the request was successfully added to the queue
        """
        # Decide which cycle the request should begin processing
        schedule_cycle = self.get_schedule_cycle(egp_request)

        # Store the request into the queue
        try:
            qid = self.choose_queue(egp_request)
        except LinkLayerException:
            logger.warning(
                "Scheduler could not get a valid queue ID for request.")
            return False

        try:
            timeout_cycle = self.get_timeout_cycle(egp_request)
        except LinkLayerException:
            logger.warning(
                "Specified timeout ({}) is longer then the mhp_cycle_period * max_mhp_cycle_number = {}"
                .format(egp_request.max_time,
                        self.mhp_cycle_period * self.max_mhp_cycle_number))
            return False

        if timeout_cycle is not None:
            if self._compare_mhp_cycle(timeout_cycle, schedule_cycle) < 1:
                logger.warning(
                    "Request had to short max time to be satisfied"
                    ", sched_cycle = {} and timeout_cycle = {}".format(
                        schedule_cycle, timeout_cycle))
                return False

        scheduler_request = self._get_scheduler_request(
            egp_request,
            create_id,
            schedule_cycle,
            timeout_cycle,
            master_request=self.distQueue.master)

        try:
            self._add_to_queue(scheduler_request, qid)
        except LinkLayerException:
            logger.warning("Could not add request to queue.")
            return False

        return True
    def mark_gen_completed(self, aid):
        """
        Marks a generation performed by the EGP as completed and cleans up any remaining state.
        :param aid: tuple of  int, int
            Contains the aid used for the generation
        :return: None
        """
        logger.debug("Marking aid {} as completed".format(aid))
        if self.is_generating_aid(aid):
            # Get the used qubit info and free unused resources
            comm_q = self.curr_gen.comm_q
            storage_q = self.curr_gen.storage_q
            if comm_q != storage_q:
                self.qmm.vacate_qubit(comm_q)

            # TODO shouldn't remove current gen right? If MD
            # self.curr_gen = None

        else:
            request = self.get_request(aid)
            if not request.measure_directly:
                logger.warning("Marking gen completed for inactive request")

        self._post_process_success(aid)
def main(high_loss_path, other_path, tex_path):
    """
    Compares metrics from the high classical-loss runs in high_loss_path against the corresponding
    no-loss runs in other_path and prints (and optionally writes to tex_path) a LaTeX table of the
    maximum relative differences per classical loss probability.
    """
    prio_names = ["NL", "CK", "MD"]

    c_loss_metrics = {p_loss: {} for p_loss in range(4, 11)}
    diffs = {
        p_loss: {m: {}
                 for m in ["F", "T", "L", "O"]}
        for p_loss in range(4, 11)
    }

    # Get the metrics from the high loss simulation
    for entry in sorted(os.listdir(high_loss_path)):
        if entry.endswith(".db"):
            scenario_key = entry.split("_key_")[1].split("_run_")[0]
            if "HIGH_C_LOSS" in scenario_key:
                if "_mix_" in scenario_key:
                    p_loss = int(
                        scenario_key.split("_HIGH_C_LOSS_")[1].split("_mix_")
                        [0][3:])
                    prio = "mix"
                    phys_setup, rest = scenario_key.split("_mix_")
                    mix, scheduler = rest.split("_weights_")
                    prios_in_file = prio_names
                    metric_key = (phys_setup, mix, scheduler)
                else:
                    for p in prio_names:
                        if p in scenario_key:
                            prio = p
                    p_loss = int(
                        scenario_key.split("_HIGH_C_LOSS_")[1].split(
                            "_{}_".format(prio))[0][3:])
                    phys_setup, rest = scenario_key.split("_{}_".format(prio))
                    num_pairs, rest = rest.split("_req_frac_")
                    req_frac, rest = rest.split("_origin_")
                    origin, scheduler = rest.split("_weights_")
                    prios_in_file = [prio]
                    metric_key = (phys_setup, prio, num_pairs, req_frac,
                                  origin, scheduler)

                print("Getting loss metric form key {}".format(metric_key))
                metrics = get_metrics_from_single_file(
                    os.path.join(high_loss_path, entry))
                total_matrix_time = metrics["TotalMatrixT (s)"] * 1e9

                fidelity = {
                    p: metrics["AvgFid_Prio{}".format(p)]
                    for p in prios_in_file
                }
                throughput = {
                    p: metrics["AvgThroughp_Prio{} (1/s)".format(p)]
                    for p in prios_in_file
                }
                latency = {
                    p: metrics["AvgReqLaten_Prio{}_NodeID0 (s)".format(p)]
                    for p in prios_in_file
                }
                nr_oks = {
                    p: metrics["NrOKs_Prio{}".format(p)]
                    for p in prios_in_file
                }

                c_loss_metrics[p_loss][metric_key] = {
                    "F": fidelity,
                    "T": throughput,
                    "L": latency,
                    "O": nr_oks,
                    "maxtime": total_matrix_time
                }

    other_timestamp = os.path.split(other_path)[1].split('_')[0]
    other_basename = os.path.join(other_path,
                                  "{}_key_".format(other_timestamp))
    for p_loss, metric_per_p_loss in c_loss_metrics.items():
        for metric_key, metric_per_key in metric_per_p_loss.items():
            if len(metric_key) == 3:
                phys_setup, mix, scheduler = metric_key
                no_loss_setup = phys_setup.split("_HIGH_C_LOSS")[0]
                no_loss_file = other_basename + "{}_mix_{}_weights_{}_run_0.db".format(
                    no_loss_setup, mix, scheduler)
                prios_in_file = prio_names
            else:
                phys_setup, prio, num_pairs, req_frac, origin, scheduler = metric_key
                no_loss_setup = phys_setup.split("_HIGH_C_LOSS")[0]
                no_loss_file = other_basename + "{}_{}_{}_req_frac_{}_origin_{}_weights_{}_run_0.db".format(
                    no_loss_setup, prio, num_pairs, req_frac, origin,
                    scheduler)
                prios_in_file = [prio]
            print("Getting no loss metrics from file {}".format(no_loss_file))

            metrics = get_metrics_from_single_file(
                no_loss_file, max_simulated_time=metric_per_key["maxtime"])
            total_matrix_time = metrics["TotalMatrixT (s)"] * 1e9
            if total_matrix_time < metric_per_key["maxtime"]:
                logger.warning("no loss has shorter matrix time")

            f_diffs_per_prio = {}
            t_diffs_per_prio = {}
            l_diffs_per_prio = {}
            o_diffs_per_prio = {}
            abs_func = np.abs
            for p in prios_in_file:
                f1 = metric_per_key["F"][p]
                f2 = metrics["AvgFid_Prio{}".format(p)]
                f_rel_diff = abs_func(f1 - f2) / max(f1, f2)
                f_diffs_per_prio[p] = f_rel_diff

                t1 = metric_per_key["T"][p]
                t2 = metrics["AvgThroughp_Prio{} (1/s)".format(p)]
                t_rel_diff = abs_func(t1 - t2) / max(t1, t2)
                t_diffs_per_prio[p] = t_rel_diff

                l1 = metric_per_key["L"][p]
                l2 = metrics["AvgReqLaten_Prio{}_NodeID0 (s)".format(p)]
                l_rel_diff = abs_func(l1 - l2) / max(l1, l2)
                l_diffs_per_prio[p] = l_rel_diff

                o1 = metric_per_key["O"][p]
                o2 = metrics["NrOKs_Prio{}".format(p)]
                o_rel_diff = abs_func(o1 - o2) / max(o1, o2)
                o_diffs_per_prio[p] = o_rel_diff

            diffs[p_loss]["F"][metric_key[1:]] = f_diffs_per_prio
            diffs[p_loss]["T"][metric_key[1:]] = t_diffs_per_prio
            diffs[p_loss]["L"][metric_key[1:]] = l_diffs_per_prio
            diffs[p_loss]["O"][metric_key[1:]] = o_diffs_per_prio

    latex_begin = r"""
        \begin{tabular}{|l|cccc|}
            \hline
            $p_\mathrm{loss}$ & Max Rel. Diff. Fid. & Max Rel. Diff. Throughp. & Max Rel. Diff. Laten. & Max Rel. Diff. Nr pairs \\ \hline
"""
    latex_end = r"""
       \end{tabular}"""

    latex_middle = ""
    for m_k_len, type in zip([5], ["single"]):
        print("{}: ".format(type))
        for p_loss, diffs_per_p_loss in diffs.items():
            max_m_diffs = {m: -1 for m in ["F", "T", "L", "O"]}
            max_m_scenarios = {m: None for m in ["F", "T", "L", "O"]}
            print("  p_loss = {}".format(p_loss))
            for m, diffs_per_m in diffs_per_p_loss.items():
                for metric_key, diffs_per_metric_key in diffs_per_m.items():
                    if len(metric_key) == m_k_len:
                        max_m_tmp = max(diffs_per_metric_key.values())
                        if max_m_tmp > max_m_diffs[m]:
                            max_m_diffs[m] = max_m_tmp
                            max_m_scenarios[m] = metric_key

            print("     {}".format(max_m_diffs))
            max_diff_values = [max_m_diffs[m] for m in ["F", "T", "L", "O"]]
            row_name = "\t" * 3 + r"$10^{-" + str(p_loss) + "}$"
            latex_middle += row_name + "".join(
                [" & {0:.3f}".format(d) for d in max_diff_values]) + r" \\ \hline" + "\n"
            print("")
        print("")

    latex_code = latex_begin + latex_middle[:-1] + latex_end
    print(latex_code)
    if tex_path is not None:
        table_name = "high_c_loss"
        with open(os.path.join(tex_path, "{}.tex".format(table_name)), 'w') as f:
            f.write(latex_code)