Code example #1
    def delete_dataset(self, dataset_hash):
        """
        Remove one dataset from the scene.

        Args:
         dataset_hash (str): The unique identifier of the dataset we want to
          delete.

        Returns:
         list: The remaining datasets in the scene.

        Raises:
         TypeError: If ``type(dataset_hash)`` is not `str`.
         ValueError: If `dataset_hash` does not fit any dataset in the scene.

        """
        if not isinstance(dataset_hash, str):
            raise TypeError('dataset_hash is {}, expected str'.format(
                type(dataset_hash).__name__))

        try:
            self._dataset_list.pop(dataset_hash)

            # Delegate returning of the remainder to the standard method
            return self.list_datasets()
        except KeyError as e:
            bl.debug_warning(
                'dataset_hash does not fit any dataset in scene: {}'.format(e))
            raise ValueError('dataset_hash does not fit any dataset in scene')
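
A usage sketch (hedged: `scene` stands for any object exposing `delete_dataset`, and the hash value is made up):

    # Hypothetical usage of delete_dataset
    try:
        remaining = scene.delete_dataset('a1b2c3d4')  # made-up hash
    except TypeError:
        print('dataset_hash must be a string')
    except ValueError:
        print('no dataset with that hash in this scene')
    else:
        print('datasets still in the scene:', remaining)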
Code example #2
    def dataset(self, dataset_hash):
        """
        Return a dataset object to work with.

        Args:
         dataset_hash (str): The unique identifier of the dataset we want to
          access.

        Returns:
         _DatasetPrototype: The dataset that we want to access.

        Raises:
         TypeError: If ``type(dataset_hash)`` is not `str`.
         ValueError: If `dataset_hash` does not fit any dataset in the scene.

        """
        if not isinstance(dataset_hash, str):
            raise TypeError('dataset_hash is {}, expected str'.format(
                type(dataset_hash).__name__))

        try:
            # Return the object
            return self._dataset_list[dataset_hash]
        except KeyError as e:
            bl.debug_warning(
                'dataset_hash does not fit any dataset in scene: {}'.format(e))
            raise ValueError('dataset_hash does not fit any dataset in scene')
Code example #3
    def new_scene(self, dataset_list):
        """
        Create a new scene from a list of datasets.

        This adds a `_ScenePrototype` to `self._scene_list`.

        Args:
         dataset_list (list (of str)): The paths to the datasets we want to
          instantiate a new scene with.

        Returns:
         None or dict: `None` if no dataset could be added to a new scene,
         otherwise a dict listing which datasets could be added and which
         could not.

        Raises:
         TypeError: If ``type(dataset_list)`` is not `list`.

        Todo:
         Make it impossible to create an empty scene.

        """
        # Type checking for dataset_list
        if not isinstance(dataset_list, list):
            raise TypeError('dataset_list is {}, expected list'.format(
                type(dataset_list).__name__))

        # Do nothing if the dataset list is empty
        if len(dataset_list) == 0:
            return None

        # See which datasets are valid
        valid_datasets = []
        available_datasets = (
            self.list_available_datasets()['availableDatasets'])
        for dataset in dataset_list:
            if dataset in available_datasets:
                valid_datasets.append(dataset)

        # If there are no valid datasets to be added return None
        if len(valid_datasets) == 0:
            return None

        try:
            # Get a new instance of a scene
            new_scene = _ScenePrototype(source_dict=self.source)
            new_scene_hash = new_scene.name()
            self._scene_list[new_scene_hash] = new_scene
            # Pass the full dataset_list (not just valid_datasets) so that
            # invalid entries show up in the addDatasetsFail entry
            return_dict = self.add_datasets(new_scene_hash, dataset_list)
            return return_dict
        except (ValueError, TypeError) as e:
            bl.debug_warning("Exception when creating new scene: {}".format(e))
            return None
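
A sketch of how a caller might interpret the return value (assuming a `manager` object exposing `new_scene`; the dataset names are made up):

    # Hypothetical usage of new_scene
    result = manager.new_scene(['dataset_a', 'dataset_b'])
    if result is None:
        print('no scene was created (empty list or no valid datasets)')
    else:
        print('added:', result['addDatasetsSuccess'])
        # 'addDatasetsFail' is only present if something could not be added
        print('failed:', result.get('addDatasetsFail', []))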
Code example #4
    def delete_loaded_dataset(self, scene_hash, dataset_hash):
        """
        Remove a dataset from a scene.

        If no datasets remain afterwards, the scene itself is deleted.

        Args:
         scene_hash (str): The hash of the scene from which we want to delete
          a dataset.
         dataset_hash (str): The hash of the dataset we want to delete.

        Returns:
         None or dict: `None` if the scene or dataset does not exist, or if
         the scene was deleted because it became empty; otherwise a dict
         describing the deleted dataset.

        Raises:
         TypeError: If ``type(scene_hash)`` or ``type(dataset_hash)`` is not
          `str`.

        """
        if not isinstance(scene_hash, str):
            raise TypeError('scene_hash is {}, expected str'.format(
                type(scene_hash).__name__))

        if not isinstance(dataset_hash, str):
            raise TypeError('dataset_hash is {}, expected str'.format(
                type(dataset_hash).__name__))

        # If the scene does not exist
        if scene_hash not in self._scene_list:
            return None

        target_scene = self.scene(scene_hash)

        try:
            remaining_datasets = target_scene.delete_dataset(dataset_hash)
        except ValueError as e:
            bl.debug_warning(
                "Exception in delete_loaded_dataset: {}".format(e))
            # The dataset does not exist
            return None

        # If there are no more datasets left delete the scene
        if remaining_datasets == []:
            self.delete_scene(scene_hash)

            # We should probably return something else so we can distinguish
            # between errors and deleted scenes.
            return None

        return_dict = {
            'datasetDeleted': dataset_hash,
            'href': '/scenes/{}'.format(scene_hash)
        }

        return return_dict
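
As the comment above notes, `None` is ambiguous here: it can mean a missing scene, a missing dataset, or a scene that was deleted because it became empty. One way to disambiguate, sketched with hypothetical sentinel objects that are not part of the original API:

    # Hypothetical sentinels the method could return instead of None
    SCENE_NOT_FOUND = object()    # scene_hash unknown
    DATASET_NOT_FOUND = object()  # dataset_hash unknown in that scene
    SCENE_DELETED = object()      # last dataset removed, scene deleted too

    def describe(result):
        """Turn a sentinel result into a human-readable outcome."""
        if result is SCENE_DELETED:
            return 'dataset removed; the now-empty scene was deleted'
        if result in (SCENE_NOT_FOUND, DATASET_NOT_FOUND):
            return 'nothing deleted'
        return 'dataset removed: {}'.format(result['datasetDeleted'])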
Code example #5
    def scene(self, scene_hash):
        """
        Return a scene object.

        Args:
         scene_hash (str): The unique identifier of the scene that we want to
          return.

        Returns:
         None or _ScenePrototype object: None if no scene with a matching hash
         could be found, otherwise return the scene object.

        Raises:
         TypeError: If ``type(scene_hash)`` is not `str`.

        See Also:
         :py:class:`backend.scenes_scene_prototype._ScenePrototype`

        """
        if not isinstance(scene_hash, str):
            raise TypeError('scene_hash is {}, expected str'.format(
                type(scene_hash).__name__))

        try:
            # Look the scene up directly by its hash
            return self._scene_list[scene_hash]

        except KeyError as e:
            bl.debug_warning("Scene with hash {} not found: {}".format(
                scene_hash, e))
            return None
Code example #6
    def timestep_data(self, timestep, field, elementset, hash_dict=None):
        """
        Return the data for a given timestep and field and save it.

        Args:
         timestep (str): The timestep from which we want to get data.
         field (dict): The field from which we want to get data. Structure is
          {'type': TYPE (str), 'name': NAME (str)}.
         elementset (dict): The elementset we want to parse. Can be empty. If
          empty we just parse everything.
         hash_dict (dict, optional, defaults to None): The hashes of the data
          we already have, with keys 'mesh' and 'field'. If the stored data
          still matches these hashes it is not read again.

        Returns:
         dict: The data for the timestep; individual entries stay `None` when
         no (new) data could be found for them.

        """
        return_dict = {
            'hash_dict': {
                'mesh': None,
                'field': None
            },
            'nodes': {
                'data': None
            },
            'tets': {
                'data': None
            },
            'nodes_center': None,
            'field': {
                'data': None
            },
            'wireframe': {
                'data': None
            },
            'free_edges': {
                'data': None
            }
        }

        try:
            mesh_dict = self._geometry_data(timestep,
                                            field,
                                            elementset,
                                            current_hash=hash_dict['mesh'])

        except (TypeError, KeyError) as e:
            bl.debug_warning("No mesh for given hash_dict found: {}".format(e))
            mesh_dict = self._geometry_data(timestep,
                                            field,
                                            elementset,
                                            current_hash=None)

        if field is not None:
            try:
                field_dict = self._field_data(timestep,
                                              field,
                                              elementset,
                                              current_hash=hash_dict['field'])

            except (TypeError, KeyError) as e:
                bl.debug_warning(
                    "No field for given hash_dict found: {}".format(e))
                field_dict = self._field_data(timestep,
                                              field,
                                              elementset,
                                              current_hash=None)

        else:
            # Corner case for unsetting the fields once they were set
            field_dict = None

        # no field_dict means we are showing a blank field
        if field_dict is None:
            field_type = 'nodal'
        else:
            field_type = field_dict['type']

        return_dict['hash_dict']['mesh'] = mesh_dict['hash']

        mesh_nodes = mesh_dict['nodes']
        mesh_elements = mesh_dict['elements']
        mesh_skins = mesh_dict["skins"]

        # may be extended below if a field is also loaded
        return_object_keys = mesh_dict["object_key_list"]

        if mesh_elements is not None:
            self._mesh_elements = mesh_elements

        if mesh_nodes is not None:

            elementset_data = self._elementset_data(elementset)

            self._compressed_model_surface = dm.model_surface(
                mesh_elements, mesh_nodes, mesh_skins, elementset_data)

            self._nodal_field_map_dict[mesh_dict[
                'hash']] = self._compressed_model_surface['nodal_field_map']
            self._blank_field_node_count_dict[mesh_dict[
                'hash']] = self._compressed_model_surface['old_max_node_index']
            self._surface_triangulation_dict[
                mesh_dict['hash']] = self._compressed_model_surface[
                    'surface_triangulation']

            return_dict['nodes'] = self._compressed_model_surface['nodes']
            return_dict['nodes_center'] = self._compressed_model_surface[
                'nodes_center']
            return_dict['tets'] = self._compressed_model_surface['triangles']
            return_dict['wireframe'] = self._compressed_model_surface[
                'wireframe']
            return_dict['free_edges'] = self._compressed_model_surface[
                'free_edges']

        # field does not exist
        if field_dict is None:

            node_count = self._blank_field_node_count_dict[mesh_dict['hash']]

            field_values = self._blank_field(node_count)['data']['nodal']

            # this is necessary so we parse a new field when we need it
            field_hash = None
            field_hash = self._string_hash(str(node_count), update=field_hash)

            # update the mesh hash with the selected elementset
            if self.source_type == 'local':
                for element_type in elementset:  # add every elementset
                    elementset_path = elementset[element_type]
                    field_hash = self._file_hash(elementset_path,
                                                 update=field_hash)
            if self.source_type == 'external':
                for element_type in elementset:  # add every elementset
                    elementset_sha1 = elementset[element_type]['sha1sum']
                    field_hash = self._string_hash(elementset_sha1,
                                                   update=field_hash)
            return_dict['hash_dict']['field'] = field_hash

        # field dict is not None
        else:

            return_object_keys += field_dict["object_key_list"]

            if field_type == 'nodal':
                field_values = field_dict['data']['nodal']

            if field_type == 'elemental':
                elemental_field_dict = field_dict['data']['elemental']
                field_values = dm.expand_elemental_fields(
                    elemental_field_dict, self._mesh_elements,
                    self._surface_triangulation_dict[mesh_dict['hash']])
            return_dict['hash_dict']['field'] = field_dict['hash']

        if field_values is not None:
            if field_type == 'nodal':
                return_dict['field'] = dm.model_surface_fields_nodal(
                    self._nodal_field_map_dict[mesh_dict['hash']],
                    field_values)

            if field_type == 'elemental':
                return_dict['field'] = dm.model_surface_fields_elemental(
                    field_values)

        return_dict["object_key_list"] = return_object_keys

        return return_dict
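
A sketch of the caching pattern this method implements (assuming a `dataset` object exposing `timestep_data`; the timestep and field values are made up). Handing the `hash_dict` of a previous call back in avoids re-reading unchanged mesh or field data:

    # Hypothetical usage of timestep_data
    field = {'type': 'nodal', 'name': 'temperature'}

    # First call: no hashes yet, everything is read
    first = dataset.timestep_data('000.01', field, elementset={})

    # Later call: pass the returned hashes; matching data is not re-read
    later = dataset.timestep_data('000.01', field, elementset={},
                                  hash_dict=first['hash_dict'])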
Code example #7
    def _geometry_data_external(self,
                                timestep,
                                field,
                                elementset,
                                current_hash=None):
        """
        Get data from the gateway.

        Procedure is as follows:
         * try to calculate the hash from the index (in fixed order)
          * if this fails (e.g. a single hash is missing) we download every file
            from the gateway
          * if this succeeds and the hash is identical to current_hash return
            None -> no action necessary
          * if this succeeds and the hash is NOT identical to current_hash we
            download every file from the gateway
         * we downloaded all files -> calculate the hash (in fixed order)
          * if the hash is identical to current_hash return None -> no action
            necessary
          * if the hash is NOT identical to current_hash return downloaded data

        The fixed order for the hash calculation is (where present):
        * nodes
        * elements (sorted by dictionary)
        * skins (sorted by dictionary)

        We don't download elementsets here.

        """
        # Avoid a mutable default argument
        if current_hash is None:
            current_hash = list()

        return_dict = dict()

        # Imported here to avoid a circular import
        import backend.global_settings as gloset
        ext_index = gloset.scene_manager.ext_src_dataset_index(
            update=False, dataset=self._dataset_name)

        try:
            dest_field_name = field["name"]
        except TypeError:
            dest_field_name = "__no__field__"

        ta_ma = list(ext_index[self._dataset_name][timestep].keys())

        if len(ta_ma) == 1:
            timestep_dict = ext_index[self._dataset_name][timestep]["ta"]
        elif "ma" in ta_ma:
            if dest_field_name in ext_index[
                    self._dataset_name][timestep]["ma"]:
                timestep_dict = ext_index[self._dataset_name][timestep]["ma"]
            else:
                timestep_dict = ext_index[self._dataset_name][timestep]["ta"]
        else:
            timestep_dict = ext_index[self._dataset_name][timestep]["ta"]

        # parse nodes
        nodes_key = timestep_dict['nodes']['object_key']
        nodes_hash = timestep_dict['nodes']['sha1sum']
        nodes_format = binary_formats.nodes()

        # parse elements
        elements = {}
        elements_types = list(timestep_dict['elements'].keys())
        for elements_type in elements_types:
            if elements_type in binary_formats.valid_element_types():

                current_elem = timestep_dict['elements']

                elements_format = getattr(binary_formats, elements_type)()

                elements[elements_type] = {}
                elements[elements_type]['key'] = current_elem[elements_type][
                    'object_key']
                elements[elements_type]['hash'] = current_elem[elements_type][
                    'sha1sum']
                elements[elements_type]['fmt'] = elements_format

        skins = {}
        try:
            skin_element_types = list(timestep_dict["skin"].keys())
            current_skin = timestep_dict['skin']
            skin_format = binary_formats.skin()

            for element_type in skin_element_types:
                if element_type in binary_formats.valid_element_types():
                    skins[element_type] = {}
                    skins[element_type]['key'] = current_skin[element_type][
                        'object_key']
                    skins[element_type]['hash'] = current_skin[element_type][
                        'sha1sum']
                    skins[element_type]['fmt'] = skin_format

        except KeyError as e:
            bl.debug_warning("No skins found: {}".format(e))

        # calculate the hash if we have the sha1sums in the index, else just
        # get everything for every timestep
        hash_list = list()
        hash_list.append(nodes_hash)
        for element in elements:
            hash_list.append(elements[element]['hash'])
        # for element_type in elementset:
        #     hash_list.append(elementset[element_type]['sha1sum'])
        for skin in skins:
            hash_list.append(skins[skin]['hash'])

        calc_hashes = True
        for one_hash in hash_list:
            if one_hash == "":
                calc_hashes = False

        # init to None
        mesh_checksum = None

        if calc_hashes:

            for one_hash in hash_list:
                mesh_checksum = self._string_hash(one_hash,
                                                  update=mesh_checksum)

        object_key_list = []
        fmt_list = []

        object_key_list.append(nodes_key)
        fmt_list.append(nodes_format)

        for element in elements:
            object_key_list.append(elements[element]['key'])
            fmt_list.append(elements[element]['fmt'])
        for skin in skins:
            object_key_list.append(skins[skin]['key'])
            fmt_list.append(skins[skin]['fmt'])

        return_dict["object_key_list"] = object_key_list

        if (not current_hash or mesh_checksum is None
                or mesh_checksum not in current_hash):

            # reset mesh checksum
            mesh_checksum = None

            geom_dict_data = self._read_binary_data_external(
                object_key_list, fmt_list)

            geom_data = list()
            for d in geom_dict_data:
                geom_data.append(d["contents"])
                mesh_checksum = self._string_hash(d["sha1sum"],
                                                  update=mesh_checksum)

            if mesh_checksum in current_hash:
                return_dict['hash'] = mesh_checksum
                return_dict['nodes'] = None
                return_dict['elements'] = None
                return_dict['skins'] = None
                return return_dict

            return_dict['hash'] = mesh_checksum
            return_dict['nodes'] = {}
            return_dict['nodes']['fmt'] = nodes_format
            return_dict['nodes']['data'] = geom_data[0]

            return_dict['elements'] = {}
            for it, element in enumerate(elements):
                return_dict['elements'][element] = {}
                return_dict['elements'][element]['fmt'] = elements[element][
                    'fmt']
                return_dict['elements'][element]['data'] = geom_data[it + 1]

            return_dict["skins"] = {}
            for it, skin in enumerate(skins):
                return_dict["skins"][skin] = {}
                return_dict["skins"][skin]['fmt'] = skins[skin]['fmt']
                return_dict["skins"][skin]['data'] = geom_data[it + 1 +
                                                               len(elements)]

        else:
            return_dict['hash'] = mesh_checksum
            return_dict['nodes'] = None
            return_dict['elements'] = None
            return_dict['skins'] = None

        return return_dict
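
A minimal stand-alone sketch of the fixed-order checksum described in the docstring, assuming `_string_hash` chains SHA1 updates over the per-file sha1sums (this version uses `hashlib` directly; the input hashes are made up):

    import hashlib

    def chained_checksum(part_hashes):
        """Fold the per-file sha1sums into one mesh checksum, in fixed order."""
        digest = hashlib.sha1()
        for part in part_hashes:  # nodes first, then elements, then skins
            digest.update(part.encode('utf-8'))
        return digest.hexdigest()

    # The same parts in the same order always give the same mesh checksum
    checksum = chained_checksum(['9f2c0e1a', '11ab42ff', '73de9b00'])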
Code example #8
    def add_datasets(self, scene_hash, dataset_list):
        """
        Add one or multiple dataset(s) to the scene.

        Args:
         scene_hash (str): The hash of the scene to which we want to add
          datasets.
         dataset_list (list (of str)): The paths to the datasets we want to
          add, relative to `data_dir`.

        Raises:
         TypeError: If ``type(scene_hash)`` is not `str`.
         TypeError: If an entry in `dataset_list` is not `str`.
         TypeError: If ``type(dataset_list)`` is not `list`.
         ValueError: If ``len(dataset_list)`` is `0`.

        Returns:
         None or dict: None if we could not add any datasets to the scene, or
         a dict if some or all datasets could be added.

        Notes:
         See FIXME in code.

        """
        if not isinstance(scene_hash, str):
            raise TypeError('scene_hash is {}, expected str'.format(
                type(scene_hash).__name__))

        target_scene = self.scene(scene_hash)

        if not isinstance(dataset_list, list):
            raise TypeError('dataset_list is {}, expected list'.format(
                type(dataset_list).__name__))

        if len(dataset_list) == 0:
            raise ValueError('dataset_list is empty')

        # Encode the scene hash into the return_dict
        return_dict = {
            'sceneHash': '{}'.format(scene_hash),
            'href': '/scenes/{}'.format(scene_hash),
            'addDatasetsSuccess': []
        }

        # Add each object
        for one_dataset in dataset_list:
            try:
                if not isinstance(one_dataset, str):
                    raise TypeError('one_dataset is {}, expected str'.format(
                        type(one_dataset).__name__))

                dataset_hash = target_scene.add_dataset(one_dataset)
                dataset_meta = target_scene._dataset_list[dataset_hash].meta()
                return_dict['addDatasetsSuccess'].append(dataset_meta)
                self.ext_src_dataset_index(update=True, dataset=one_dataset)

            # Catch everything that could have gone wrong and just report
            # that the dataset could not be added. NOTE: This also catches
            # non-string entries in the list; they are reported back as-is.
            # FIXME: decide whether non-string entries should abort instead.
            except (TypeError, ValueError) as e:
                bl.debug_warning("Exception when adding dataset {}: {}".format(
                    one_dataset, e))
                # Create the failure list on first use, then record the entry
                return_dict.setdefault('addDatasetsFail', []).append(
                    one_dataset)

        # If nothing could be added return None
        if len(return_dict['addDatasetsSuccess']) == 0:
            return None
        else:
            return return_dict
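
The failure list above is created lazily on the first failure; `dict.setdefault` expresses that in one line. A small stand-alone demonstration:

    report = {'addDatasetsSuccess': []}
    for bad_dataset in ['ds1', 'ds2']:
        # creates the list on the first failure, reuses it afterwards
        report.setdefault('addDatasetsFail', []).append(bad_dataset)
    print(report['addDatasetsFail'])  # ['ds1', 'ds2']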
Code example #9
    def elementset_dict(self):
        """
        Get the elementsets for the selected timestep.

        Args:
         None: No args.

        Returns:
         dict: A dict mapping each elementset name to its files by element
          type, plus an 'elementsets' key holding the sorted list of all
          names (including the '__all__' dummy entry).

        Todo:
         Make this more resilient against non-existing directories via
         try/except.

        """
        # results
        return_dict = {}

        if self.source_type == 'local':
            timestep_dir = self.dataset_path / 'fo' / self._selected_timestep

            all_elsets = timestep_dir.glob('*.elset.*.bin')
            elset_groups = []

            for elset in all_elsets:
                filename = elset.parts[-1]
                elset_name = re.match(r'(.+)\.elset\.', filename).group(1)
                return_dict[elset_name] = {}

            for elset_name_key in return_dict:
                elset_groups.append(
                    timestep_dir.glob('{}.elset.*.bin'.format(elset_name_key)))

            for elset_group in elset_groups:
                for elset in elset_group:
                    filename = elset.parts[-1]
                    elset_type = re.search(r'elset\.(.+)\.bin',
                                           filename).group(1)
                    elset_name = re.search(r'(.+)\.elset\.',
                                           filename).group(1)
                    return_dict[elset_name][elset_type] = elset

        if self.source_type == 'external':
            import backend.global_settings as gloset
            ext_index = gloset.scene_manager.ext_src_dataset_index(
                update=False, dataset=self.dataset_name)
            try:
                return_dict = ext_index[self.dataset_name][
                    self._selected_timestep]['elset']
            except KeyError as e:
                bl.debug_warning(
                    "KeyError in field_dict (return_dict): {}".format(e))
                return_dict = dict()

        elset_keys = list(return_dict.keys())

        # dummy for parsing it all
        return_dict['__all__'] = {}
        elset_keys.append('__all__')

        # return_dict = {'elementsets': [key1, key2, ...], 'key1': {...}, 'key2': {...}}
        return_dict['elementsets'] = sorted(elset_keys)

        return return_dict
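
For orientation, a sketch of the shape this method returns in the local case (the elementset name, element type, and path are made up for illustration):

    from pathlib import PosixPath

    # Hypothetical return value for a dataset with one elementset 'core'
    example = {
        'core': {'c3d8': PosixPath('fo/000.01/core.elset.c3d8.bin')},
        '__all__': {},                       # dummy entry used to parse everything
        'elementsets': ['__all__', 'core'],  # sorted list of all names
    }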
Code example #10
    def field_dict(self):
        """
        Get a list of fields for the selected timestep.

        Args:
         None: No args.

        Returns:
         dict: A dict with two lists of fields, one for elemental and one for
          nodal fields.

        Todo:
         Make this more resilient against non-existing directories via
         try/except.

        """
        elemental_fields = []
        nodal_fields = []

        if self.source_type == 'local':
            timestep_dir = self.dataset_path / 'fo' / self._selected_timestep

            elemental_field_dir = timestep_dir / 'eo'
            elemental_field_paths = sorted(elemental_field_dir.glob('*.bin'))
            for field in elemental_field_paths:
                elemental_fields.append(
                    field.stem)  # just append the file name

            # cut the element type from the field name
            try:
                elemental_fields = [
                    field.rsplit('.', 1) for field in elemental_fields
                ]
                elemental_fields = sorted(
                    np.unique(np.asarray(elemental_fields)[:, 0]))
            except IndexError as e:
                bl.debug_warning("IndexError in field_dict: {}".format(e))
                raise

            nodal_field_dir = timestep_dir / 'no'
            nodal_field_paths = sorted(nodal_field_dir.glob('*.bin'))
            for field in nodal_field_paths:
                nodal_fields.append(field.stem)  # just append the file name

        if self.source_type == 'external':
            import backend.global_settings as gloset
            ext_index = gloset.scene_manager.ext_src_dataset_index(
                update=False, dataset=self.dataset_name)
            timestep_dict = ext_index[self.dataset_name][
                self._selected_timestep]

            try:
                elemental_fields += list(
                    timestep_dict["ta"]['elemental'].keys())  # thermal fields
            except KeyError as e:
                bl.debug_warning(
                    "KeyError in thermal field_dict (elemental_fields): {}".
                    format(e))
            try:
                elemental_fields += list(
                    timestep_dict["ma"]
                    ['elemental'].keys())  # mechanical fields
            except KeyError as e:
                bl.debug_warning(
                    "KeyError in mechanical field_dict (elemental_fields): {}".
                    format(e))

            try:
                nodal_fields += list(
                    timestep_dict["ta"]['nodal'].keys())  # thermal fields
            except KeyError as e:
                bl.debug_warning(
                    "KeyError in thermal field_dict (nodal_fields): {}".format(
                        e))
            try:
                nodal_fields += list(
                    timestep_dict["ma"]['nodal'].keys())  # mechanical fields
            except KeyError as e:
                bl.debug_warning(
                    "KeyError in mechanical field_dict (nodal_fields): {}".
                    format(e))

        # sorting from...
        # https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
        # neat.
        convert = lambda text: int(text) if text.isdigit() else text.lower()
        alphanum_key = lambda key: [
            convert(c) for c in re.split('([0-9]+)', key)
        ]
        return_dict = {
            'elemental': sorted(elemental_fields, key=alphanum_key),
            'nodal': sorted(nodal_fields, key=alphanum_key)
        }

        return return_dict
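
The natural-sort key deserves a quick demonstration: it splits the digit runs out of each name, so numbered fields sort in human order rather than lexicographically. A self-contained example:

    import re

    def alphanum_key(key):
        convert = lambda text: int(text) if text.isdigit() else text.lower()
        return [convert(c) for c in re.split('([0-9]+)', key)]

    fields = ['stress2', 'stress10', 'stress1']
    print(sorted(fields))                    # ['stress1', 'stress10', 'stress2']
    print(sorted(fields, key=alphanum_key))  # ['stress1', 'stress2', 'stress10']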
Code example #11
async def _subscription_crawler_coro(shutdown_event):
    """
    Iterate over all subscriptions and see if the required files are available.

    """
    global LI_LOCK
    global LOCAL_INDEX
    global SUBSCRIPTION_DICT
    global SD_LOCK

    # done here because of circular import stuff
    import backend.global_settings as gloset

    # Run until the shutdown event is set
    while not shutdown_event.is_set():

        await asyncio.sleep(1)  # wait one second

        with SD_LOCK:

            # iterate over a copy of the keys so we can delete from the dict
            for subscription in list(SUBSCRIPTION_DICT.keys()):
                if "delete" in SUBSCRIPTION_DICT[subscription]:
                    del SUBSCRIPTION_DICT[subscription]
                    bl.debug("Deleted {} from subscription dict".format(
                        subscription))

        with SD_LOCK:

            for subscription in SUBSCRIPTION_DICT.keys():

                bl.debug("Checking subscription {}".format(subscription))

                value = SUBSCRIPTION_DICT[subscription]

                try:
                    _ = value["delete"]
                    bl.debug("Skipping {}, delete flag detected".format(subscription))

                except KeyError:

                    bl.debug("Handling subscription {}".format(subscription))

                    namespace = value["namespace"]

                    scene_hash = value["scene_hash"]
                    dataset_hash = subscription

                    with LI_LOCK:
                        bl.debug("Obtaining available timesteps")
                        avail_timesteps = list(LOCAL_INDEX[namespace].keys())
                        bl.debug("... found {}".format(len(avail_timesteps)))

                    # sorting from...
                    # https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
                    # neat.
                    convert = lambda text: int(text) if text.isdigit() else text.lower()
                    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
                    sorted_timesteps = sorted(avail_timesteps, key=alphanum_key)

                    bl.debug("Timesteps sorted")

                    # if current timestep is in sorted_timesteps  ...
                    current_timestep = value["dataset_object"].timestep()

                    try:
                        index = sorted_timesteps.index(current_timestep)
                        bl.debug("Found {} in timestep list at position {}".format(current_timestep, index))

                    except ValueError:
                        # current timestep is not in list... weird
                        # go back to start of loop
                        bl.debug("Could not find {} in timestep list".format(current_timestep))
                        continue

                    # check the last and second to last timestep
                    last_timestep = sorted_timesteps[-1]
                    bl.debug("Last timestep is {}".format(last_timestep))

                    # ... and not the last position
                    if sorted_timesteps[index] == last_timestep:
                        # is last in timestep list, nothing to do
                        bl.debug("Index position {} is the last timestep, no update required".format(index))
                        continue

                    check_dicts = list()

                    # check if the files we need are in the most recent timestep
                    for object_dict in value["object_dicts"]:
                        target = {namespace: {last_timestep: object_dict}}
                        check_dicts.append(target)

                    data_avail = True

                    with LI_LOCK:
                        for check_dict in check_dicts:
                            bl.debug("Checking for {} in local index".format(check_dict))
                            avail = ndc.contains(LOCAL_INDEX, check_dict)
                            if not avail:
                                bl.debug_warning("Not found, can't update to most recent timestep")
                                data_avail = False

                    if data_avail:
                        # set the timestep
                        bl.debug("Found all necessary files for most recent timestep")
                        dataset_timesteps = gloset.scene_manager.dataset_timesteps(
                            scene_hash, dataset_hash, set_timestep=last_timestep)
                        continue

                    else:

                        bl.debug("Did not find all necessary files for most recent timestep, checking for files in second to last timestep")

                        try:
                            second_last_timestep = sorted_timesteps[-2]
                        except IndexError:
                            bl.debug_warning("Could not find second to last timestep")
                            continue

                        # ... and not the second to last position
                        if sorted_timesteps[index] == second_last_timestep:
                            # is second to last in timestep list, nothing to do
                            bl.debug("We are already at the second to last timestep, nothing to do")
                            continue

                        check_dicts = list()

                        # check if the files we need are in the second to last timestep
                        for object_dict in value["object_dicts"]:
                            check_dicts.append({namespace: {second_last_timestep: object_dict}})

                        second_data_avail = True

                        with LI_LOCK:
                            for check_dict in check_dicts:
                                bl.debug("Checking for {} in local index".format(check_dict))
                                avail = ndc.contains(LOCAL_INDEX, check_dict)
                                if not avail:
                                    bl.debug_warning("Not found, can't update to most recent timestep")
                                    second_data_avail = False

                        if second_data_avail:
                            bl.debug("Found all necessary files for second to last timestep")
                            dataset_timesteps = gloset.scene_manager.dataset_timesteps(
                                scene_hash, dataset_hash, set_timestep=second_last_timestep)
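
The availability check above relies on `ndc.contains`, which tests whether one nested dict structure is contained in another. A rough stand-alone sketch of that idea (the real `ndc` module may behave differently):

    def contains(haystack, needle):
        """Return True if every nested key path in needle exists in haystack."""
        if not isinstance(needle, dict):
            return True   # leaf reached: the whole path existed
        if not isinstance(haystack, dict):
            return False  # needle goes deeper than haystack
        return all(
            key in haystack and contains(haystack[key], value)
            for key, value in needle.items()
        )

    index = {'ns': {'ts1': {'nodes.bin': {}}}}
    print(contains(index, {'ns': {'ts1': {'nodes.bin': {}}}}))  # True
    print(contains(index, {'ns': {'ts2': {'nodes.bin': {}}}}))  # False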
Code example #12
    def scenes(
            self,
            scene_hash=None, dataset_hash=None,
            dataset_operation=None, mesh_operation=None
    ):
        """
        Contains the logic for manipulating scenes over the API.

        Distributes the call parameters to subfunctions.

        """
        # Parse the HTTP method
        http_method = cherrypy.request.method

        # Init the output
        output = None

        # server busy stuff
        ##################################################
        if scene_hash is not None:

            # if POSTing or PATCHing we have to check if we block or not
            if (http_method == 'POST' or http_method == 'PATCH'):

                try:
                    if self.is_scene_locked(scene_hash):
                        raise cherrypy.HTTPError(503, 'scene is locked, try again later')

                except KeyError as e:
                    bl.debug_warning("KeyError: {}".format(e))

                self.lock_scene_if_unlocked(scene_hash)


        ##################################################

        if (
                scene_hash is None and
                dataset_hash is None and
                dataset_operation is None and
                mesh_operation is None
        ):
            # GET
            if http_method == 'GET':
                # There is nothing to parse
                output = self.get_scenes()

            # POST
            if http_method == 'POST':

                # Parse datasetsToAdd from JSON
                try:
                    json_input = cherrypy.request.json
                    datasets = json_input['datasetsToAdd']
                    output = self.post_scenes(datasets)

                except (KeyError, TypeError) as e:
                    bl.debug_warning("KeyError/TypeError: {}".format(e))
                    output = None

        ##################################################

        if (
                scene_hash is not None and
                dataset_hash is None and
                dataset_operation is None and
                mesh_operation is None
        ):
            # GET
            if http_method == 'GET':
                output = self.get_scenes_scenehash(scene_hash)

            # POST
            if http_method == 'POST':
                # Parse datasetsToAdd from JSON
                try:
                    json_input = cherrypy.request.json
                    datasets = json_input['datasetsToAdd']
                    output = self.post_scenes_scenehash(scene_hash, datasets)

                except (KeyError, TypeError) as e:
                    bl.debug_warning("KeyError/TypeError: {}".format(e))
                    output = None

            # DELETE
            if http_method == 'DELETE':
                output = self.delete_scenes_scenehash(scene_hash)

        ##################################################

        if (
                scene_hash is not None and
                dataset_hash is not None and
                dataset_operation is None and
                mesh_operation is None
        ):

            if dataset_hash == 'colorbar':
                # GET
                if http_method == 'GET':
                    output = self.get_dataset_scenes_scenehash_colorbar(
                        scene_hash)

                # PATCH
                if http_method == 'PATCH':
                    try:
                        colorbar_information = cherrypy.request.json
                        output = self.patch_dataset_scenes_scenehash_colorbar(
                            scene_hash, colorbar_information)

                    except (KeyError, TypeError) as e:
                        bl.debug_warning("KeyError/TypeError: {}".format(e))
                        output = None

            else:
                # GET
                if http_method == 'GET':
                    output = self.get_dataset_scenes_scenehash_datasethash(
                        scene_hash, dataset_hash)

                # DELETE
                if http_method == 'DELETE':
                    output = self.delete_dataset_scenes_scenehash_datasethash(
                        scene_hash, dataset_hash)

        ##################################################

        if (
                scene_hash is not None and
                dataset_hash is not None and
                dataset_operation is not None and
                mesh_operation is None
        ):

            # GET
            if http_method == 'GET':

                if dataset_operation == 'orientation':
                    output = self.get_scenes_scenehash_datasethash_orientation(
                        scene_hash, dataset_hash)

                ##################################################

                if dataset_operation == 'timesteps':
                    output = self.get_scenes_scenehash_datasethash_timesteps(
                        scene_hash, dataset_hash)

                ##################################################

                if dataset_operation == 'fields':
                    output = self.get_scenes_scenehash_datasethash_fields(
                        scene_hash, dataset_hash)

                ##################################################

                if dataset_operation == 'elementsets':
                    output = self.get_scenes_scenehash_datasethash_elementsets(
                        scene_hash, dataset_hash)

                ##################################################

                if dataset_operation == 'mesh':
                    output = self.get_scenes_scenehash_datasethash_mesh(
                        scene_hash, dataset_hash)

                ##################################################

                if dataset_operation == 'tracking':
                    output = self.get_scenes_scenehash_datasethash_tracking(
                        scene_hash, dataset_hash)

            # PATCH
            if http_method == 'PATCH':

                if dataset_operation == 'orientation':

                    # Parse datasetOrientation from JSON
                    try:
                        json_input = cherrypy.request.json
                        orientation = json_input['datasetOrientation']
                        output = (
                            self.
                            patch_scenes_scenehash_datasethash_orientation(
                                scene_hash, dataset_hash,
                                new_orientation=orientation)
                        )

                    except (KeyError, TypeError) as e:
                        bl.debug_warning("KeyError/TypeError: {}".format(e))
                        output = None

                ##################################################

                if dataset_operation == 'timesteps':

                    # Parse datasetTimestepSelected from JSON
                    try:
                        json_input = cherrypy.request.json
                        timestep = json_input['datasetTimestepSelected']
                        output = (
                            self.patch_scenes_scenehash_datasethash_timesteps(
                                scene_hash, dataset_hash,
                                new_timestep=timestep)
                        )

                    except (KeyError, TypeError) as e:
                        bl.debug_warning("KeyError/TypeError: {}".format(e))
                        output = None

                ##################################################

                if dataset_operation == 'fields':

                    # Parse datasetFieldSelected from JSON
                    try:
                        json_input = cherrypy.request.json
                        field = json_input['datasetFieldSelected']
                        output = (
                            self.patch_scenes_scenehash_datasethash_fields(
                                scene_hash, dataset_hash,
                                new_field=field)
                        )

                    except (KeyError, TypeError) as e:
                        bl.debug_warning("KeyError/TypeError: {}".format(e))
                        output = None

                ##################################################

                if dataset_operation == 'elementsets':

                    # Parse datasetElementsetSelected from JSON
                    try:
                        json_input = cherrypy.request.json
                        elementset = json_input['datasetElementsetSelected']
                        output = (
                            self.patch_scenes_scenehash_datasethash_elementsets(
                                scene_hash, dataset_hash,
                                new_elementset=elementset)
                        )

                    except (KeyError, TypeError) as e:
                        bl.debug_warning("KeyError/TypeError: {}".format(e))
                        output = None

                ##################################################

                if dataset_operation == 'tracking':

                    output = (
                        self.patch_scenes_scenehash_datasethash_tracking(
                            scene_hash, dataset_hash)  # this is just a toggle
                    )


        ##################################################

        if (
                scene_hash is not None and
                dataset_hash is not None and
                dataset_operation is not None and
                mesh_operation is not None
        ):
            # GET
            if http_method == 'GET':

                if dataset_operation == 'mesh':

                    if mesh_operation == 'hash':
                        output = self.get_scenes_scenehash_datasethash_mesh_hash(
                            scene_hash, dataset_hash)

                    if mesh_operation == 'geometry':
                        output = self.get_scenes_scenehash_datasethash_mesh_geometry(
                            scene_hash, dataset_hash)

                    if mesh_operation == 'field':
                        output = self.get_scenes_scenehash_datasethash_mesh_field(
                            scene_hash, dataset_hash)


        ##################################################

        # server busy stuff
        ##################################################
        if scene_hash is not None:

            # maybe we need to unblock the server
            if (http_method == 'POST' or http_method == 'PATCH'):
                self.unlock_scene_if_locked(scene_hash)

        return output
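
The if-chains above dispatch on the HTTP method, which path segments are present, and the operation name. One possible refactor, sketched here with a stand-in handler (the PATCH branches would additionally need the parsed JSON payload), keeps the same logic in a lookup table:

    # Stand-in handler; the real code would reference the instance methods
    def get_orientation(scene_hash, dataset_hash):
        return {'operation': 'orientation', 'scene': scene_hash,
                'dataset': dataset_hash}

    # (HTTP method, dataset_operation) -> handler
    HANDLERS = {
        ('GET', 'orientation'): get_orientation,
        # further operations would follow the same pattern
    }

    handler = HANDLERS.get(('GET', 'orientation'))
    output = handler('scene1', 'ds1') if handler is not None else None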
Code example #13
def simulation_file(source_dict=None, namespace=None, object_key_list=None):
    """
    Obtain a simulation file from the ceph cluster.

    Note: this function blocks while it polls for the data, so it should be
    run in a separate thread if the action is to be performed in parallel.

    """
    # Avoid a mutable default argument
    if object_key_list is None:
        object_key_list = []

    bl.debug("Requesting {} in namespace {}".format(object_key_list,
                                                    namespace))

    expectation_list = list()
    for item in object_key_list:
        expectation_list.append("{}/{}".format(namespace, item))

    comm_dict = source_dict["external"]["comm_dict"]
    file_request_queue = comm_dict["file_request_queue"]
    file_request_answer_queue = comm_dict["file_contents_name_hash_queue"]

    before_qsize = file_request_answer_queue.qsize()
    if (before_qsize > 0):
        bl.debug_warning("Data return queue is not empty, contains {} "
                         "objects".format(before_qsize))

    # see if we have the data downloaded already, if not make the gateway client get it
    for obj in object_key_list:

        object_descriptor = "{}/{}".format(namespace, obj)

        with GW_LOCK:
            if object_descriptor in GATEWAY_DATA:
                bl.debug(
                    "Found {} in downloaded data, updating timestamp".format(
                        object_descriptor))
                GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            else:
                bl.debug("Downloading {}".format(object_descriptor))
                req = {"namespace": namespace, "key": obj}
                file_request_queue.put(req)

    # keep track how often we try to get data from the dictionary
    counter = 0

    # wait until we have everything downloaded
    while True:

        # wait a fraction of a second (rate throttling)
        time.sleep(.1)

        # do we have every file?
        all_present = True

        # get a list of keys in the GATEWAY_DATA
        with GW_LOCK:
            keys = list(GATEWAY_DATA.keys())

        for object_descriptor in expectation_list:

            if object_descriptor not in keys:
                all_present = False
            else:
                # update timestamp so the cache entry stays fresh
                with GW_LOCK:
                    GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()

        # break the loop
        if all_present:
            bl.debug("Data complete")
            break

        counter += 1
        if (counter > 1000):  # very large meshes take some time
            bl.warning("Too many iterations. Could not get data from gateway.")
            return

    # prepare output of function
    res_bin = [None] * len(object_key_list)

    for object_descriptor in expectation_list:
        with GW_LOCK:
            GATEWAY_DATA[object_descriptor]["timestamp"] = time.time()
            request_dict = GATEWAY_DATA[object_descriptor]["request_dict"]

        obj_namespace = request_dict["namespace"]
        obj_key = request_dict["object"]

        bl.debug("Loading {}/{}".format(obj_namespace, obj_key))

        index = object_key_list.index(obj_key)
        res_bin[index] = request_dict

    return res_bin
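
A usage sketch (the namespace and object keys are made up; `src` stands for a `source_dict` prepared with the gateway queues in the `comm_dict` layout used above):

    # Hypothetical call: fetch two binary objects for one timestep
    results = simulation_file(source_dict=src,
                              namespace='my_namespace',
                              object_key_list=['ts1/nodes.bin',
                                               'ts1/c3d8.bin'])
    if results is None:
        print('gateway did not deliver the data in time')
    else:
        nodes_request, elements_request = results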