Example #1
    def _periodic_index_update_executor(self):
        """
        Update the index in periodic intervals.

        Executor thread.

        """
        global LI_LOCK
        global LOCAL_INDEX

        while True:

            get_index_event = self._comm_dict["get_index_event"]
            receive_index_data_queue = self._comm_dict["get_index_data_queue"]

            get_index_event.set()

            with LI_LOCK:

                try:
                    bl.debug("Waiting for index")
                    index = receive_index_data_queue.get(True, 100)

                except queue.Empty as e:
                    bl.warning("Took more than 100 seconds to wait for "
                               "index ({}). There will be nothing on display "
                               "here.".format(e))

                else:
                    LOCAL_INDEX = index["index"]

            # if self._shutdown_event.wait(120):  # update every 2 minutes
            if self._shutdown_event.wait():  # wait forever, do not periodically update the index
                return
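
The executor above only implements one side of a simple event/queue handshake: it sets `get_index_event` and then blocks on `get_index_data_queue`. A minimal sketch of the producing side is shown below, assuming the same `comm_dict` keys; the `index_provider` function, the `build_index` callable and the `shutdown_event` argument are illustrative names, not taken from the source.

def index_provider(comm_dict, shutdown_event, build_index):
    # Illustrative counterpart: answer index requests signalled via comm_dict.
    get_index_event = comm_dict["get_index_event"]
    index_data_queue = comm_dict["get_index_data_queue"]

    while not shutdown_event.is_set():
        # wait (with a timeout) until the consumer asks for a fresh index
        if not get_index_event.wait(timeout=1):
            continue
        get_index_event.clear()

        # build_index() stands in for however the index is actually produced
        index_data_queue.put({"index": build_index()})
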
Example #2
    async def _file_download_coro(self, reader, writer):
        """
        Respond to download requests.

        """
        # only handle the request if the connection is still open ...
        if not reader.at_eof():

            self._cancel_file_download_executor_event = threading.Event()
            download_connection_watchdog = self._loop.create_task(
                self._check_download_connection(reader, writer))

            # wait for incoming traffic from the client
            res = await self.read_data(reader, writer)

            if not res:
                await self.send_nack(writer)
                return

            res = res["requested_file"]
            namespace = res["namespace"]
            key = res["key"]
            request_json = {"namespace": namespace, "key": key}

            bl.debug("Request for {}/{} received".format(namespace, key))

            await self.send_ack(writer)

            # check if the file is already in the ceph_data_dict
            object_descriptor = "{}/{}".format(namespace, key)

            with self._ceph_data_lock:
                if object_descriptor in self._ceph_data_dict:
                    bl.debug("Found {} in ceph data, updating "
                             "timestamp".format(object_descriptor))
                    self._ceph_data_dict[object_descriptor]["timestamp"] = (
                        time.time())
                else:
                    bl.debug("Getting {}".format(object_descriptor))
                    # drop the request in the queue for the proxy manager
                    self._file_name_request_server_queue.put(request_json)

            # keep track how often we try to get data from the dictionary
            counter = 0

            # wait until we have everything downloaded
            while True:

                # wait a fraction of a second (rate throttling) without
                # blocking the event loop
                await asyncio.sleep(.1)

                # get a list of keys in the GATEWAY_DATA
                with self._ceph_data_lock:
                    keys = list(self._ceph_data_dict.keys())

                if object_descriptor in keys:
                    with self._ceph_data_lock:
                        self._ceph_data_dict[object_descriptor][
                            "timestamp"] = time.time()
                        send_this = self._ceph_data_dict[object_descriptor][
                            "request_dict"]
                        break

                counter += 1
                if (counter > 100):
                    bl.warning(
                        "Too many iterations. Could not get data from ceph.")
                    return

            bl.debug("Got file contents from queue")

            await self._send_file_to_client(reader, writer, send_this)
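
For context, a handler like `_file_download_coro` receives the (reader, writer) pair that asyncio passes to a connection callback. Below is a minimal sketch of wiring such a coroutine to a TCP server; the `serve_downloads` helper, the host/port values, and the `instance` object in the usage comment are assumptions for illustration, not part of the source.

import asyncio

async def serve_downloads(handler, host="127.0.0.1", port=8900):
    # attach a (reader, writer) coroutine such as _file_download_coro to a TCP server
    server = await asyncio.start_server(handler, host, port)
    async with server:
        await server.serve_forever()

# usage (hypothetical): asyncio.run(serve_downloads(instance._file_download_coro))
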
Example #3
def simulation_file(source_dict=None, namespace=None, object_key_list=None):
    """
    Obtain a simulation file from the ceph cluster.

    Note: this function blocks while it waits for the gateway client, so it is
    meant to be run in parallel with the rest of the program (e.g. in a
    separate thread or executor).

    """
    if object_key_list is None:
        object_key_list = []

    bl.debug("Requesting {} in namespace {}".format(object_key_list,
                                                    namespace))

    expectation_list = list()
    for item in object_key_list:
        expectation_list.append("{}/{}".format(namespace, item))

    comm_dict = source_dict["external"]["comm_dict"]
    file_request_queue = comm_dict["file_request_queue"]
    file_request_answer_queue = comm_dict["file_contents_name_hash_queue"]

    before_qsize = file_request_answer_queue.qsize()
    if (before_qsize > 0):
        bl.debug_warning("Data return queue is not empty, contains {} "
                         "objects".format(before_qsize))

    # see if we have the data downloaded already, if not make the gateway client get it
    for obj in object_key_list:

        object_descriptor = "{}/{}".format(namespace, obj)

        with GW_LOCK:
            if object_descriptor in GATEWAY_DATA:
                bl.debug(
                    "Found {} in downloaded data, updating timestamp".format(
                        object_descriptor))
                GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            else:
                bl.debug("Downloading {}".format(object_descriptor))
                req = {"namespace": namespace, "key": obj}
                file_request_queue.put(req)

    # keep track how often we try to get data from the dictionary
    counter = 0

    # wait until we have everything downloaded
    while True:

        # wait a fraction of a second (rate throttling)
        time.sleep(.1)

        # do we have every file?
        all_present = True

        # get a list of keys in the GATEWAY_DATA
        with GW_LOCK:
            keys = list(GATEWAY_DATA.keys())

        for object_descriptor in expectation_list:

            if object_descriptor in keys:
                # update timestamp
                with GW_LOCK:
                    GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            else:
                all_present = False

        # break the loop
        if all_present:
            bl.debug("Data complete")
            break

        counter += 1
        if (counter > 1000):  # very large meshes take some time
            bl.warning("Too many iterations. Could not get data from gateway.")
            return

    # prepare output of function
    res_bin = [None] * len(object_key_list)

    for object_descriptor in expectation_list:
        with GW_LOCK:
            GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            request_dict = GATEWAY_DATA[object_descriptor]["request_dict"]

        obj_namespace = request_dict["namespace"]
        obj_key = request_dict["object"]

        bl.debug("Loading {}/{}".format(obj_namespace, obj_key))

        index = object_key_list.index(obj_key)
        res_bin[index] = request_dict

    return res_bin
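
A minimal sketch of how a caller might assemble the `source_dict` that `simulation_file` reads is shown below, based on the keys used above. The queue wiring and the namespace/key values are illustrative; the gateway client that consumes `file_request_queue` and populates `GATEWAY_DATA` is not shown here.

import queue

# hypothetical wiring; a gateway client must service file_request_queue
# and fill GATEWAY_DATA for simulation_file to return data
comm_dict = {
    "file_request_queue": queue.Queue(),
    "file_contents_name_hash_queue": queue.Queue(),
}
source_dict = {"external": {"comm_dict": comm_dict}}

# result = simulation_file(source_dict=source_dict,
#                          namespace="some_namespace",
#                          object_key_list=["mesh.bin", "field.bin"])
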