async def rd_data(self, reader, length):
        """
        Read exactly the specified number of bytes.

        """
        bl.debug("Called 'rd_data(reader, length)'")
        return await reader.readexactly(length)
    def _periodic_index_update_executor(self):
        """
        Update the index.

        Executor thread. Note: the periodic re-run is currently disabled
        below, so the index is only fetched once.

        """
        global LI_LOCK
        global LOCAL_INDEX

        while True:

            get_index_event = self._comm_dict["get_index_event"]
            receive_index_data_queue = self._comm_dict["get_index_data_queue"]

            get_index_event.set()

            with LI_LOCK:

                try:
                    # NOTE: the lock is held while we wait, so readers of
                    # LOCAL_INDEX can stall for up to 100 seconds here
                    bl.debug("Waiting for index")
                    index = receive_index_data_queue.get(True, 100)

                except queue.Empty as e:
                    bl.warning("Took more than 100 seconds to wait for "
                               "index ({}). There will be nothing on display "
                               "here.".format(e))

                else:
                    LOCAL_INDEX = index["index"]

            # if self._shutdown_event.wait(120):  # update every 2 minutes
            if self._shutdown_event.wait():  # wait forever, do not periodically update the index
                return
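
# A minimal sketch (not part of the original source) of the serving side that
# this executor talks to: something on the far end of the comm_dict
# presumably waits on "get_index_event" and answers on "get_index_data_queue"
# with a dictionary that carries the index under the "index" key, since the
# consumer above unpacks index["index"]. Everything except the two comm_dict
# keys is a hypothetical name.
def _index_serving_side_sketch(comm_dict, build_index, shutdown_event):
    while not shutdown_event.is_set():
        # wake up when the gateway requests a fresh index
        if comm_dict["get_index_event"].wait(1):
            comm_dict["get_index_event"].clear()
            comm_dict["get_index_data_queue"].put({"index": build_index()})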
    async def _send_file_to_client(self, reader, writer, file_dictionary):
        """
        Answer file requests.

        Encode the binary data in the file_dictionary as a base64 string. This
        has to be reversed on the other side.

        """
        connection_info = writer.get_extra_info('peername')
        p_host = connection_info[0]
        p_port = connection_info[1]

        out_dict = dict()
        out_dict["namespace"] = file_dictionary["namespace"]
        out_dict["object"] = file_dictionary["object"]
        out_dict["contents"] = base64.b64encode(
            file_dictionary["value"]).decode()
        out_dict["tags"] = file_dictionary["tags"]

        bl.debug("Sending {}/{} to client [{}]".format(out_dict["namespace"],
                                                       out_dict["object"],
                                                       p_port))

        todo_val = "file_request"
        request_answer_dictionary = {"todo": todo_val, todo_val: out_dict}

        await self._send_dictionary(reader, writer, request_answer_dictionary)
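
# Client-side sketch (assumed, not shown in the original source): the
# "contents" field arrives as a base64 string and has to be decoded back into
# bytes, reversing the b64encode above.
def _decode_file_request_answer_sketch(request_answer_dictionary):
    file_dict = request_answer_dictionary["file_request"]
    return {
        "namespace": file_dict["namespace"],
        "object": file_dict["object"],
        "contents": base64.b64decode(file_dict["contents"]),  # bytes again
        "tags": file_dict["tags"],
    }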
    def _periodic_ceph_file_deletion_executor(self):
        """
        Periodically delete old data in the ceph data dictionary.

        The executor.

        """
        while True:

            # wait for 1 second, this is essentially a 1 second interval timer
            if self._shutdown_backend_manager_event.wait(1):
                return

            current_time = time.time()

            with self._ceph_data_lock:
                for object_descriptor in list(self._ceph_data_dict.keys()):

                    timestamp = self._ceph_data_dict[object_descriptor][
                        "timestamp"]
                    elapsed_time = current_time - timestamp

                    if (elapsed_time > 60):

                        bl.debug("Removing {} after 60 seconds".format(
                            object_descriptor))
                        del self._ceph_data_dict[object_descriptor]
    def __init__(self, event_loop, comm_dict):
        bl.debug("Starting ProxyIndex")

        self._loop = event_loop
        self._comm_dict = comm_dict

        self._shutdown_event = comm_dict["shutdown_platt_gateway_event"]
    async def _new_file_information_coro(self, reader, writer):
        """
        Coroutine for sending information about new files to the client.

        Checks the queue for new files in a separate executor; when the
        executor returns a dictionary, it is sent to the client over the
        registered connection.

        """
        self._cancel_new_file_executor_event = threading.Event()

        new_file_watchdog = self._loop.create_task(
            self._check_file_connection(reader, writer))

        # while the connection is open ...
        while not reader.at_eof():
            # check the queue in a separate executor
            new_file_in_queue = await self._loop.run_in_executor(
                None, self._check_file_queue)

            if not new_file_in_queue:
                return

            bl.debug("Received info for {} for sending via socket".format(
                new_file_in_queue))

            await self._inform_client_new_file(reader, writer,
                                               new_file_in_queue)
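
    # _check_file_queue is not shown in this snippet; judging from the caller
    # above it blocks until a new-file dictionary arrives and returns a falsy
    # value on shutdown. A minimal sketch under those assumptions (the
    # attribute name _new_file_queue is hypothetical):
    def _check_file_queue_sketch(self):
        while not self._cancel_new_file_executor_event.is_set():
            try:
                # block briefly so the cancel event is polled regularly
                return self._new_file_queue.get(True, .1)
            except queue.Empty:
                continue
        return None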
    def _periodic_file_deletion_executor(self):
        """
        Periodically delete old data in the GATEWAY_DATA dictionary.

        The executor.

        """
        while True:

            # wait for 1 second, this is essentially a 1 second interval timer
            if self._shutdown_event.wait(1):
                return

            current_time = time.time()

            with GW_LOCK:
                for object_descriptor in list(GATEWAY_DATA.keys()):

                    timestamp = GATEWAY_DATA[object_descriptor]["timestamp"]
                    elapsed_time = current_time - timestamp

                    if (elapsed_time > 60):

                        bl.debug("Removing {} after 60 seconds".format(
                            object_descriptor))
                        del GATEWAY_DATA[object_descriptor]
    async def _index_request_coro(self, reader, writer):
        """
        Listens for index requests and answers them.

        Listens for incoming data. If a request for the index is spotted, we
        obtain the index from the local data copy and return it to the client.

        """
        # while the connection is open ...
        while not reader.at_eof():

            # wait for incoming data from the client
            res = await self.read_data(reader, writer)
            if not res:
                await self.send_nack(writer)
                return
            await self.send_ack(writer)

            if res["todo"] == "index":

                bl.debug("Received index request")

                # tell the local data copy that we request the index (index
                # event)
                self._get_index_server_event.set()

                try:
                    # blocking call; wait up to 10 seconds for the index
                    index = self._index_data_queue.get(True, 10)
                except queue.Empty:
                    bl.warning("Did not receive an index within 10 seconds")
                    continue

                await self._send_index_to_client(reader, writer, index)
    def _watch_incoming_files_executor(self):
        """
        Enter new files into the global data dictionary.

        """
        while True:

            if self._shutdown_event.is_set():
                return

            try:
                ans = self._file_request_answer_queue.get(True, .1)

            except queue.Empty:
                pass

            else:
                request_dict = ans["file_request"]
                obj_key = request_dict["object"]
                obj_namespace = request_dict["namespace"]

                object_descriptor = "{}/{}".format(obj_namespace, obj_key)
                bl.debug("Reading {} and making available".format(
                    object_descriptor))

                occurrence_key = object_descriptor
                occurrence_dict = {
                    "timestamp": time.time(),
                    "request_dict": request_dict
                }

                with GW_LOCK:
                    GATEWAY_DATA[occurrence_key] = occurrence_dict
    def _ceph_data_executor(self):
        """
        Run this in a separate executor.

        """
        while True:

            if self._shutdown_backend_manager_event.is_set():
                return

            try:
                ans = self._file_content_name_hash_server_queue.get(True, .1)

            except queue.Empty:
                pass

            else:
                request_dict = ans
                obj_key = request_dict["object"]
                obj_namespace = request_dict["namespace"]

                object_descriptor = "{}/{}".format(obj_namespace, obj_key)
                bl.debug("Reading {} and making available".format(
                    object_descriptor))

                occurrence_key = object_descriptor
                occurrence_dict = {
                    "timestamp": time.time(),
                    "request_dict": request_dict
                }

                with self._ceph_data_lock:
                    self._ceph_data_dict[occurrence_key] = occurrence_dict
    async def rd_len(self, reader):
        """
        Read 8 bytes for length information.

        """
        bl.debug("Called 'rd_len(reader)'")
        # readexactly avoids short reads that would truncate the length field
        return await reader.readexactly(8)
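
    # A sketch (assumption, not original code) of how rd_len and rd_data
    # combine into reading one length-prefixed message. read_data elsewhere in
    # this file unpacks the length with the native "L" struct format, which is
    # 8 bytes on common 64-bit platforms and thus matches the field rd_len
    # reads:
    async def _read_framed_message_sketch(self, reader):
        length_b = await self.rd_len(reader)        # 8 byte length field
        length = struct.unpack("L", length_b)[0]    # native unsigned long
        return await self.rd_data(reader, length)   # exactly `length` bytes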
    def __init__(self, comm_dict):
        bl.debug("Starting ProxyServices")

        self._comm_dict = comm_dict

        self._loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self._loop)

        self._pi = pi.ProxyIndex(self._loop, self._comm_dict)
        self._pd = pd.ProxyData(self._loop, self._comm_dict)

        watch_incoming_files_task = self._loop.create_task(
            self._pd._watch_incoming_files_coro())
        periodically_delete_files_task = self._loop.create_task(
            self._pd._periodic_file_deletion_coro())

        periodically_update_index_task = self._loop.create_task(
            self._pi._periodic_index_update_coro())
        watch_new_files_task = self._loop.create_task(
            self._pi._watch_new_files_coro())

        self._shutdown_event = self._comm_dict["shutdown_platt_gateway_event"]

        subscription_crawler_task = self._loop.create_task(
            pi._subscription_crawler_coro(self._shutdown_event))

        self._tasks = [
            watch_incoming_files_task,
            periodically_delete_files_task,
            periodically_update_index_task,
            watch_new_files_task,
            subscription_crawler_task
        ]

        try:
            # start the tasks
            self._loop.run_until_complete(asyncio.wait(self._tasks))

        except KeyboardInterrupt:
            pass

        finally:

            self._loop.stop()

            all_tasks = asyncio.Task.all_tasks()

            for task in all_tasks:
                task.cancel()
                with suppress(asyncio.CancelledError):
                    self._loop.run_until_complete(task)

            self._loop.close()

            bl.debug("ProxyServices is shut down")
    async def _send_index_to_client(self, reader, writer, index):
        """
        Prepare a dictionary with the requested index and send it to the
        client.

        """
        bl.debug("Sending index to client")

        todo_val = "index"
        index_dictionary = {"todo": todo_val, todo_val: index}

        await self._send_dictionary(reader, writer, index_dictionary)
def _unsubscribe(dataset_hash):
    """
    Unsubscribe from timestep updates.

    """
    with SD_LOCK:
        try:
            subscription = SUBSCRIPTION_DICT[dataset_hash]
        except KeyError:
            pass
        else:
            bl.debug("Setting delete flag")
            subscription["delete"] = True
    def start(self):
        try:
            bl.info('Starting BackendManager on port {}'.format(self.port))
            bl.info("\tConnect the platt backend to {}:{}".format(self.host, self.port))

            self.loop.run_forever()

        except KeyboardInterrupt:
            self.stop()

        finally:
            bl.debug('BackendManager closed')
            self.loop.close()
    async def _inform_client_new_file(self, reader, writer, new_file):
        """
        Prepares a dictionary with information about the new file at the ceph
        cluster and sends it out via the socket connection.

        """
        bl.debug("Sending information about new file to client ({})".format(
            new_file))

        todo_val = "new_file"
        new_file_dictionary = {"todo": todo_val, todo_val: new_file}

        await self._send_dictionary(reader, writer, new_file_dictionary)
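
    # All outgoing messages in this class share the same envelope: the "todo"
    # key names the message type and doubles as the key that carries the
    # payload. A new-file message therefore serializes as
    #
    #     {"todo": "new_file", "new_file": {...}}
    #
    # and an index answer as {"todo": "index", "index": {...}}.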
    async def read_data(self, reader, writer):
        """
        Read data from the connection.

        NOTE: Do not forget to send an ACK or NACK after using this method.
        Otherwise the connection might hang up.

        await self.send_ack(writer)
        await self.send_nack(writer)

        """
        # read the length field that precedes the payload (the sender
        # transmits a struct-packed unsigned long)
        length_b = await reader.read(1024)

        if reader.at_eof():
            return

        try:
            # try and parse it as an int (expecting the length of the data)
            length = struct.unpack("L", length_b)[0]
        except Exception as e:
            # if something goes wrong send a nack and start anew
            await self.send_nack(writer)
            bl.error("An Exception occurred: {}".format(e))
            raise
        else:
            # otherwise send the ack
            await self.send_ack(writer)

        try:
            # try and read exactly the length of the data
            data = await reader.readexactly(length)
            res = data.decode("UTF-8")
            res = json.loads(res)
        except json.decoder.JSONDecodeError:
            # if we can not parse the json send a nack and start from the
            # beginning
            bl.debug("Parsing {} as json failed".format(res))
            await self.send_nack(writer)
            raise
        except Exception as e:
            # if ANYTHING else goes wrong send a nack and start from the
            # beginning
            await self.send_nack(writer)
            bl.error("An Exception occured: {}".format(e))
            raise
        else:
            # otherwise return the received data
            return res
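
# Client-side sketch of the framing that read_data expects, pieced together
# from the code above (an assumption, not original code): first a
# struct-packed length field, then, after the server acknowledged it, the
# UTF-8 encoded JSON payload. _wait_for_ack is a hypothetical helper, since
# the ACK wire format is not shown in this file.
async def _send_dictionary_sketch(reader, writer, dictionary):
    payload = json.dumps(dictionary).encode("UTF-8")
    writer.write(struct.pack("L", len(payload)))   # length field first
    await writer.drain()
    await _wait_for_ack(reader)                    # hypothetical ACK check
    writer.write(payload)                          # then the JSON payload
    await writer.drain()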
    def _create_index_entry_from_new_file_dict(self, new_file_dict):
        """
        Create a dictionary entry from the new file dictionary.

        TODO: Refactor this.

        """
        key = new_file_dict["key"]
        namespace = new_file_dict["namespace"]
        sha1sum = new_file_dict["sha1sum"]

        key_dict = self._create_dict_from_key(key, sha1sum=sha1sum)

        if key_dict is not None:

            return_dict = dict()
            return_dict[namespace] = key_dict

        else:
            bl.debug("Can not add file {}/{}".format(namespace, key))
            return

        return return_dict
    async def _file_download_coro(self, reader, writer):
        """
        Respond to download requests.

        """
        # if the connection is open ...
        if not reader.at_eof():

            self._cancel_file_download_executor_event = threading.Event()
            download_connection_watchdog = self._loop.create_task(
                self._check_download_connection(reader, writer))

            # wait for incoming traffic from the client
            res = await self.read_data(reader, writer)

            if not res:
                await self.send_nack(writer)
                return

            res = res["requested_file"]
            namespace = res["namespace"]
            key = res["key"]
            request_json = {"namespace": namespace, "key": key}

            bl.debug("Request for {}/{} received".format(namespace, key))

            await self.send_ack(writer)

            # check if the file is already in the ceph_data_dict
            object_descriptor = "{}/{}".format(namespace, key)

            with self._ceph_data_lock:
                if object_descriptor in self._ceph_data_dict:
                    bl.debug("Found {} in ceph data, updating "
                             "timestamp".format(object_descriptor))
                    self._ceph_data_dict[object_descriptor]["timestamp"] = (
                        time.time())
                else:
                    bl.debug("Getting {}".format(object_descriptor))
                    req = {"namespace": namespace, "key": key}
                    # drop the request in the queue for the proxy manager
                    self._file_name_request_server_queue.put(request_json)

            # keep track how often we try to get data from the dictionary
            counter = 0

            # wait until we have everything downloaded
            while True:

                # wait a fraction of a second (rate throttling); asyncio.sleep
                # keeps the event loop responsive, time.sleep would block it
                await asyncio.sleep(.1)

                # get a list of keys in the ceph data dictionary
                with self._ceph_data_lock:
                    keys = list(self._ceph_data_dict.keys())

                if object_descriptor in keys:
                    with self._ceph_data_lock:
                        self._ceph_data_dict[object_descriptor][
                            "timestamp"] = time.time()
                        send_this = self._ceph_data_dict[object_descriptor][
                            "request_dict"]
                        break

                counter += 1
                if (counter > 100):
                    bl.warning(
                        "Too many iterations. Could not get data from ceph.")
                    return

            bl.debug("Got file contents from queue")

            await self._send_file_to_client(reader, writer, send_this)
async def _subscription_crawler_coro(shutdown_event):
    """
    Iterate over all subscriptions and see if the required files are available.

    """
    global LI_LOCK
    global LOCAL_INDEX
    global SUBSCRIPTION_DICT
    global SD_LOCK

    # done here because of circular import stuff
    import backend.global_settings as gloset

    while True:

        await asyncio.sleep(1)  # wait one second

        with SD_LOCK:

            for subscription in list(SUBSCRIPTION_DICT.keys()):  # make a list so we can modify the original dictionary
                if "delete" in SUBSCRIPTION_DICT[subscription]:
                    del SUBSCRIPTION_DICT[subscription]
                    bl.debug("Deleted {} from subscription dict".format(subscription))

        with SD_LOCK:

            for subscription in SUBSCRIPTION_DICT.keys():

                bl.debug("Checking subscription {}".format(subscription))

                value = SUBSCRIPTION_DICT[subscription]

                try:
                    _ = value["delete"]
                    bl.debug("Skipping {}, delete flag detected".format(subscription))

                except KeyError:

                    bl.debug("Handling subscription {}".format(subscription))

                    namespace = value["namespace"]

                    scene_hash = value["scene_hash"]
                    dataset_hash = subscription

                    with LI_LOCK:
                        bl.debug("Obtaining available timesteps")
                        avail_timesteps = list(LOCAL_INDEX[namespace].keys())
                        bl.debug("... found {}".format(len(avail_timesteps)))

                    # sorting from...
                    # https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
                    # neat.
                    convert = lambda text: int(text) if text.isdigit() else text.lower()
                    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
                    sorted_timesteps = sorted(avail_timesteps, key=alphanum_key)

                    bl.debug("Timesteps sorted")

                    # if current timestep is in sorted_timesteps  ...
                    current_timestep = value["dataset_object"].timestep()

                    try:
                        index = sorted_timesteps.index(current_timestep)
                        # index = sorted_timesteps.index(value["timestep"])
                        bl.debug("Found {} in timestep list at position {}".format(current_timestep, index))

                    except ValueError:
                        # current timestep is not in list... weird
                        # go back to start of loop
                        bl.debug("Could not find {} in timestep list".format(current_timestep))
                        continue

                    # check the last and second to last timestep
                    last_timestep = sorted_timesteps[-1]
                    bl.debug("Last timestep is {}".format(last_timestep))

                    # ... and not the last position
                    if sorted_timesteps[index] == last_timestep:
                        # is last in timestep list, nothing to do
                        bl.debug("Index position {} is the last timestep, no update required".format(index))
                        continue

                    check_dicts = list()

                    # check if the files we need are in the most recent timestep
                    for object_dict in value["object_dicts"]:
                        target = {namespace: {last_timestep: object_dict}}
                        check_dicts.append(target)

                    data_avail = True

                    with LI_LOCK:
                        for check_dict in check_dicts:
                            bl.debug("Checking for {} in local index".format(check_dict))
                            avail = ndc.contains(LOCAL_INDEX, check_dict)
                            if not avail:
                                bl.debug_warning("Not found, can't update to most recent timestep")
                                data_avail = False

                    if data_avail:
                        # set the timestep
                        bl.debug("Found all necessary files for most recent timestep")
                        dataset_timesteps = gloset.scene_manager.dataset_timesteps(
                            scene_hash, dataset_hash, set_timestep=last_timestep)
                        continue

                    else:

                        bl.debug("Did not find all necessary files for most recent timestep, checking for files in second to last timestep")

                        try:
                            second_last_timestep = sorted_timesteps[-2]
                        except IndexError:
                            bl.debug_warning("Could not find second to last timestep")
                            continue

                        # ... and not the second to last position
                        if sorted_timesteps[index] == second_last_timestep:
                            # is second to last in timestep list, nothing to do
                            bl.debug("We are already at the second to last timestep, nothing to do")
                            continue

                        check_dicts = list()

                        # check if the files we need are in the second to last timestep
                        for object_dict in value["object_dicts"]:
                            check_dicts.append({namespace: {second_last_timestep: object_dict}})

                        second_data_avail = True

                        with LI_LOCK:
                            for check_dict in check_dicts:
                                bl.debug("Checking for {} in local index".format(check_dict))
                                avail = ndc.contains(LOCAL_INDEX, check_dict)
                                if not avail:
                                    bl.debug_warning("Not found, can't update to most recent timestep")
                                    second_data_avail = False

                        if second_data_avail:
                            bl.debug("Found all necessary files for second to last timestep")
                            dataset_timesteps = gloset.scene_manager.dataset_timesteps(
                                scene_hash, dataset_hash, set_timestep=second_last_timestep)
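
# The natural ("human") sort used above matters because the timestep keys are
# strings; plain lexicographic sorting would misplace multi-digit steps. A
# quick illustration with hypothetical timestep names:
#
#     >>> sorted(["ts10", "ts2", "ts1"])
#     ['ts1', 'ts10', 'ts2']
#     >>> sorted(["ts10", "ts2", "ts1"], key=alphanum_key)
#     ['ts1', 'ts2', 'ts10']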
def simulation_file(source_dict=None, namespace=None, object_key_list=None):
    """
    Obtain a simulation file from the ceph cluster.

    Note: this is a blocking function; run it in a separate executor so that
    we can perform this action in parallel.

    """
    # avoid the mutable default argument pitfall
    if object_key_list is None:
        object_key_list = []

    bl.debug("Requesting {} in namespace {}".format(object_key_list,
                                                    namespace))

    expectation_list = list()
    for item in object_key_list:
        expectation_list.append("{}/{}".format(namespace, item))

    comm_dict = source_dict["external"]["comm_dict"]
    file_request_queue = comm_dict["file_request_queue"]
    file_request_answer_queue = comm_dict["file_contents_name_hash_queue"]

    before_qsize = file_request_answer_queue.qsize()
    if (before_qsize > 0):
        bl.debug_warning("Data return queue is not empty, contains {} "
                         "objects".format(before_qsize))

    # see if we have the data downloaded already, if not make the gateway client get it
    for obj in object_key_list:

        object_descriptor = "{}/{}".format(namespace, obj)

        with GW_LOCK:
            if object_descriptor in GATEWAY_DATA:
                bl.debug(
                    "Found {} in downloaded data, updating timestamp".format(
                        object_descriptor))
                GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            else:
                bl.debug("Downloading {}".format(object_descriptor))
                req = {"namespace": namespace, "key": obj}
                file_request_queue.put(req)

    # keep track how often we try to get data from the dictionary
    counter = 0

    # wait until we have everything downloaded
    while True:

        # wait a fraction of a second (rate throttling)
        time.sleep(.1)

        # do we have every file?
        all_present = True

        # get a list of keys in the GATEWAY_DATA
        with GW_LOCK:
            keys = list(GATEWAY_DATA.keys())

        for object_descriptor in expectation_list:

            if object_descriptor not in keys:
                all_present = False
            else:
                # update the timestamp on files that are already present
                with GW_LOCK:
                    GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()

        # break the loop
        if all_present:
            bl.debug("Data complete")
            break

        counter += 1
        if (counter > 1000):  # very large meshes take some time
            bl.warning("Too many iterations. Could not get data from gateway.")
            return

    # prepare output of function
    res_bin = [None] * len(object_key_list)

    for object_descriptor in expectation_list:
        with GW_LOCK:
            GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            request_dict = GATEWAY_DATA[object_descriptor]["request_dict"]

        obj_namespace = request_dict["namespace"]
        obj_key = request_dict["object"]

        bl.debug("Loading {}/{}".format(obj_namespace, obj_key))

        index = object_key_list.index(obj_key)
        res_bin[index] = request_dict

    return res_bin
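
# A minimal usage sketch (all wiring hypothetical): when the requested objects
# are already cached in GATEWAY_DATA, simulation_file returns without touching
# the request queue, so prepopulating the cache is enough to exercise it.
def _simulation_file_usage_sketch():
    source_dict = {"external": {"comm_dict": {
        "file_request_queue": queue.Queue(),
        "file_contents_name_hash_queue": queue.Queue(),
    }}}
    with GW_LOCK:
        GATEWAY_DATA["ns/mesh.bin"] = {
            "timestamp": time.time(),
            "request_dict": {"namespace": "ns", "object": "mesh.bin"},
        }
    return simulation_file(source_dict=source_dict, namespace="ns",
                           object_key_list=["mesh.bin"])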
    def __init__(self,
                 ceph_conf,
                 ceph_pool,
                 ceph_user,

                 event_ceph_shutdown,

                 queue_datacopy_ceph_request_hash_for_new_file,
                 queue_datacopy_ceph_answer_hash_for_new_file,

                 queue_backend_ceph_request_file,
                 queue_backend_ceph_answer_file_name_contents_hash,

                 event_datacopy_ceph_update_index,
                 queue_datacopy_ceph_filename_and_hash,

                 lock_datacopy_ceph_filename_and_hash
    ):

        self._ceph_conf = ceph_conf
        self._ceph_pool = ceph_pool
        self._ceph_user = ceph_user

        self._event_ceph_shutdown = event_ceph_shutdown

        self._queue_datacopy_ceph_request_hash_for_new_file = (
            queue_datacopy_ceph_request_hash_for_new_file
        )
        self._queue_datacopy_ceph_answer_hash_for_new_file = (
            queue_datacopy_ceph_answer_hash_for_new_file
        )

        self._queue_backend_ceph_request_file = queue_backend_ceph_request_file
        self._queue_backend_ceph_answer_file_name_contents_hash = (
            queue_backend_ceph_answer_file_name_contents_hash
        )

        self._event_datacopy_ceph_update_index = event_datacopy_ceph_update_index
        self._queue_datacopy_ceph_filename_and_hash = (
            queue_datacopy_ceph_filename_and_hash
        )

        self._lock_datacopy_ceph_filename_and_hash = lock_datacopy_ceph_filename_and_hash

        # inter process communication between ceph manager and ceph connections
        self._queue_ceph_process_new_task = multiprocessing.Queue()
        self._queue_ceph_process_new_task_data = multiprocessing.Queue()
        self._queue_ceph_process_new_task_hashes = multiprocessing.Queue()
        self._queue_ceph_process_new_task_index_hashes = multiprocessing.Queue()
        self._queue_ceph_process_new_task_index_namespaces = multiprocessing.Queue()
        self._queue_ceph_process_new_task_index = multiprocessing.Queue()
        self._event_ceph_process_shutdown = multiprocessing.Event()

        self._queue_ceph_process_index = multiprocessing.Queue()  # gets a list of namespaces
        self._queue_ceph_process_namespace_index = multiprocessing.Queue()  # gets an index for a namespace
        self._queue_ceph_process_object_tags = multiprocessing.Queue()
        self._queue_ceph_process_object_data = multiprocessing.Queue()
        self._queue_ceph_process_object_hash = multiprocessing.Queue()

        # start the ceph connections
        self._start_ceph_connections()

        self._loop = asyncio.get_event_loop()

        ceph_tasks_loop_task = self._loop.create_task(
            self._ceph_task_coro())

        self._tasks = [
            ceph_tasks_loop_task
        ]

        try:
            # start the tasks
            self._loop.run_until_complete(asyncio.wait(self._tasks))

        except KeyboardInterrupt:
            pass

        finally:

            self._loop.stop()

            all_tasks = asyncio.Task.all_tasks()

            for task in all_tasks:
                task.cancel()
                with suppress(asyncio.CancelledError):
                    self._loop.run_until_complete(task)

            self._loop.close()

            bl.debug("CephManager is shut down")
    async def _rw_handler(self, reader, writer):
        """
        This gets called when a connection is established.

        """
        connection_info = writer.get_extra_info('peername')
        p_host = connection_info[0]
        p_port = connection_info[1]

        bl.info("Connection open from {}/{}".format(p_host, p_port))

        # perform a handshake with the new connection
        task_dict = await self.read_data(reader, writer)
        if not task_dict:
            await self.send_nack(writer)
            return
        await self.send_ack(writer)
        task = task_dict["task"]

        bl.info("Connection from port {} is tasked with {}".format(
            p_port, task))

        try:

            # watch the connection
            self.connection_active_task = self._loop.create_task(
                self._connection_active_coro(reader, writer))

            # depending on the task that is to be performed this creates one
            # of the following tasks
            #
            # push information about new files to the client
            if task == "new_file_message":
                self._new_file_connection_active = True

                self.send_new_files_task = self._loop.create_task(
                    self._new_file_information_coro(reader, writer))

                await self.send_new_files_task

                # watch the connection
                conn_active = await self.connection_active_task
                if not conn_active:
                    bl.info("Connection to {}/{} lost".format(p_host, p_port))
                    bl.debug("Cancelling send_new_files_task")
                    self.send_new_files_task.cancel()

                self._new_file_connection_active = False

            # manage requests for the complete index from the client
            if task == "index":
                self._index_connection_active = True
                self.get_index_task = self._loop.create_task(
                    self._index_request_coro(reader, writer))

                await self.get_index_task

                # watch the connection
                conn_active = await self.connection_active_task
                if not conn_active:
                    bl.info("Connection to {}/{} lost".format(p_host, p_port))
                    bl.debug("Cancelling get_index_task")
                    self.get_index_task.cancel()

                self._index_connection_active = False

            # manage requests for file data from the ceph cluster
            if task == "file_download":

                self.file_download_task = self._loop.create_task(
                    self._file_download_coro(reader, writer))
                # optional?
                await self.file_download_task

        except Exception as e:
            bl.error("Exception: {}".format(e))
            raise

        finally:
            writer.close()
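
# Opening handshake from the client's point of view (assumed from the handler
# above): the first message on a fresh connection is a dictionary naming the
# connection's task, after which the matching coroutine takes over.
# _send_dictionary_sketch refers to the hypothetical framing helper sketched
# earlier in this file.
async def _client_handshake_sketch(reader, writer, task="index"):
    # task is one of "new_file_message", "index" or "file_download"
    await _send_dictionary_sketch(reader, writer, {"task": task})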
    def start(self):
        """
        Start the web server with the parameters that were set upon
        initialization.

        This mounts four different servers:

        * one that serves the configuration menu on ``http://HOST:PORT/``. The
          server class rests in ``backend.web_server_control``.
        * one that serves the visualization on ``http://HOST:PORT/scenes``. The
          server class rests in ``backend.web_server_display``.
        * one for the API endpoint on ``http://HOST:PORT/api``. The server
          class rests in ``backend.web_server_api``.
        * one for the WebSocket endpoint on ``http://HOST:PORT/websocket``,
          handled by the ``WebSocketAPI`` class.

        Args:
         None: No parameters.

        Returns:
         None: Nothing.

        Notes:
         After this method is called, no further commands will be evaluated
         until after the backend is shut down.

        """
        # Initialise the global variables. For later use just import the
        # backend.global_settings and use the scene manager from there.
        global_settings.init(source_dict=self._source_dict)

        # Set the port
        cherrypy.config.update({
            'server.socket_port': self.port,
            'server.socket_host': '0.0.0.0'  # Can be reached from everywhere
        })

        bl.debug("Subscribing to SceneManagerPlugin")
        # Add the SceneManagerPlugin to the server bus
        SceneManagerPlugin(cherrypy.engine).subscribe()

        bl.debug("Adding WebSocketTool")
        cherrypy.tools.websocket = WebSocketTool()

        # Load the server class for displaying fem data
        cherrypy.tree.mount(ServerRoot(), '/', self._root_conf)
        cherrypy.tree.mount(ServerScenesDispatcher(), '/scenes',
                            self._scenes_conf)
        cherrypy.tree.mount(ServerAPI(), '/api', self._api_conf)
        cherrypy.tree.mount(WebSocketAPI(), '/websocket', self._websocket_conf)

        cherrypy.config.update({
            'log.screen': True,
            'log.access_file': '',
            'log.error_file': ''
        })
        cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
        cherrypy.engine.subscribe('exit', self._stop)

        # Start the server
        bl.debug("Starting engine")
        cherrypy.engine.start()
        bl.debug("Blocking engine")
        cherrypy.engine.block()

        return None