def list_connections(self, nodes: Union[str, List[str]] = None,
                     limit: Optional[int] = None,
                     **filters) -> List[Dict[str, Any]]:
    """Return every active user connection, optionally filtered.

    Args:
        nodes: Node (server) names on which users will be disconnected.
            When `None`, every running node in the cluster is queried.
        limit: Maximum number of elements to return; `None` means no cap.
        **filters: Available filter parameters: ['id', 'parent_id',
            'username', 'user_full_name', 'project_index', 'project_id',
            'project_name', 'open_jobs_count', 'project_type',
            'date_connection_created', 'duration', 'session_id',
            'client', 'config_level']
    """
    # TODO: This fully initialises a Cluster object every time the function
    # is run. It would be better to somehow cache it for a given connection.
    known_nodes = Cluster(self.connection).list_nodes(to_dictionary=True)
    if nodes is None:
        target_nodes = [
            entry['name'] for entry in known_nodes
            if entry['status'] == 'running'
        ]
    elif isinstance(nodes, list):
        target_nodes = nodes
    else:
        target_nodes = [nodes]
    error_message = 'Error fetching chunk of active user connections.'
    gathered: List[Dict[str, Any]] = []
    for node_name in target_nodes:
        chunk = helper.fetch_objects_async(
            self.connection,
            monitors.get_user_connections,
            monitors.get_user_connections_async,
            dict_unpack_value="userConnections",
            limit=limit,
            chunk_size=1000,
            error_msg=error_message,
            node_name=node_name,
            filters=filters,
        )
        gathered.extend(chunk)
    return gathered
def get_contents(self, to_dictionary: bool = False, **filters) -> List:
    """Return this folder's contents (sub-folders and other objects).

    Args:
        to_dictionary (bool, optional): If True returns dicts, by default
            (False) returns objects.
        **filters: Available filter parameters: ['id', 'name',
            'description', 'date_created', 'date_modified', 'acg']

    Returns:
        Contents as Python objects (when `to_dictionary` is `False`,
        the default) or as dictionaries otherwise.
    """
    fetched = fetch_objects_async(
        self.connection,
        folders.get_folder_contents,
        folders.get_folder_contents_async,
        limit=None,
        chunk_size=1000,
        id=self.id,
        filters=filters,
    )
    if to_dictionary:
        return fetched
    # Imported lazily to avoid a circular import at module load time.
    from mstrio.utils.object_mapping import map_objects_list
    return map_objects_list(self.connection, fetched)
def _get_users(cls, connection: "Connection", name_begins: str = None,
               abbreviation_begins: str = None, to_dictionary: bool = False,
               limit: int = None,
               **filters) -> Union[List["User"], List[dict]]:
    """Fetch a set of users, as `User` objects or raw dictionaries.

    Args:
        connection: MicroStrategy connection object.
        name_begins: Restrict results to names starting with this prefix.
        abbreviation_begins: Restrict results to abbreviations starting
            with this prefix.
        to_dictionary: When True, return plain dicts instead of objects.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    raw_users = helper.fetch_objects_async(
        connection,
        users.get_users_info,
        users.get_users_info_async,
        limit=limit,
        chunk_size=1000,
        error_msg="Error getting information for a set of users.",
        name_begins=name_begins,
        abbreviation_begins=abbreviation_begins,
        filters=filters,
    )
    if to_dictionary:
        return raw_users
    return [cls.from_dict(source=raw, connection=connection) for raw in raw_users]
def _list_applications(cls, connection: "Connection", to_dictionary: bool = False,
                       limit: int = None, **filters) -> List["Application"]:
    """Fetch all Applications, warning about any that are not loaded.

    Args:
        connection: MicroStrategy connection object.
        to_dictionary: When True, return plain dicts instead of objects.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    raw_apps = helper.fetch_objects_async(
        connection,
        monitors.get_projects,
        monitors.get_projects_async,
        dict_unpack_value='projects',
        limit=limit,
        chunk_size=50,
        error_msg="Error getting information for a set of Applications.",
        filters=filters,
    )
    if to_dictionary:
        return raw_apps
    applications = cls._from_bulk_response(connection, raw_apps)
    loaded = Application._list_loaded_applications(connection, to_dictionary=True)
    loaded_ids = [entry['id'] for entry in loaded]
    not_loaded = [app for app in applications if app.id not in loaded_ids]
    if not_loaded:
        # Surface a warning, but still return every application found.
        warning = "Applications {} are either unloaded or idled. Change status using the 'load()' or 'resume()' method to use all functionality.".format(
            [app.name for app in not_loaded])
        helper.exception_handler(warning, exception_type=UserWarning)
    return applications
def _list_all(cls, connection: Connection, name: Optional[str] = None,
              to_dictionary: bool = False, to_dataframe: bool = False,
              limit: Optional[int] = None,
              **filters) -> Union[List["Dossier"], List[dict], DataFrame]:
    """Fetch dossiers as objects, dicts, or a pandas DataFrame.

    Args:
        connection: MicroStrategy connection object.
        name: Search term applied to dossier names.
        to_dictionary: When True, return plain dicts.
        to_dataframe: When True, return a `DataFrame`. Mutually exclusive
            with `to_dictionary`.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    msg = "Error retrieving documents from the environment."
    if to_dictionary and to_dataframe:
        helper.exception_handler(
            "Please select either to_dictionary=True or to_dataframe=True, but not both.",
            ValueError)
    dossiers = helper.fetch_objects_async(
        connection,
        api=documents.get_dossiers,
        async_api=documents.get_dossiers_async,
        dict_unpack_value='result',
        limit=limit,
        chunk_size=1000,
        error_msg=msg,
        filters=filters,
        search_term=name,
    )
    if to_dictionary:
        return dossiers
    if to_dataframe:
        return DataFrame(dossiers)
    return [cls.from_dict(source=item, connection=connection) for item in dossiers]
def _list_all(cls, connection: "Connection", name: str = None,
              to_dictionary: bool = False, to_dataframe: bool = False,
              limit: int = None,
              **filters) -> Union[List["Document"], List[dict]]:
    """Fetch documents as objects, dicts, or a pandas DataFrame.

    Args:
        connection: MicroStrategy connection object.
        name: Search term applied to document names.
        to_dictionary: When True, return plain dicts.
        to_dataframe: When True, return a `DataFrame`. Mutually exclusive
            with `to_dictionary`.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    msg = "Error retrieving documents from the environment."
    if to_dictionary and to_dataframe:
        helper.exception_handler(
            "Please select either `to_dictionary=True` or `to_dataframe=True`, but not both.",
            ValueError)
    docs = helper.fetch_objects_async(
        connection,
        api=documents.get_documents,
        async_api=documents.get_documents_async,
        dict_unpack_value='result',
        limit=limit,
        chunk_size=1000,
        error_msg=msg,
        filters=filters,
        search_term=name,
    )
    if to_dictionary:
        return docs
    if to_dataframe:
        return DataFrame(docs)
    return cls._from_bulk_response(connection, docs)
def list_subscriptions(connection: Connection, project_id: Optional[str] = None,
                       project_name: Optional[str] = None,
                       to_dictionary: bool = False,
                       limit: Optional[int] = None,
                       **filters) -> Union[List["Subscription"], List[dict]]:
    """List all subscriptions of a project.

    Returns `Subscription` objects or, with `to_dictionary=True`, plain
    dictionaries. Specify either `project_id` or `project_name`; when
    `project_id` is provided (not `None`), `project_name` is omitted.

    Args:
        connection(object): MicroStrategy connection object
        project_id: Project ID
        project_name: Project name
        to_dictionary: If True returns a list of subscription dicts,
            otherwise (default) returns a list of subscription objects
        limit: limit the number of elements returned. If `None` (default),
            all objects are returned.
        **filters: Available filter parameters: ['id', 'name', 'editable',
            'allowDeliveryChanges', 'allowPersonalizationChanges',
            'allowUnsubscribe', 'dateCreated', 'dateModified', 'owner',
            'schedules', 'contents', 'recipients', 'delivery']
    """
    project_id = Subscription._project_id_check(connection, project_id,
                                                project_name)
    # Older I-Servers cannot page subscriptions, so request one huge chunk.
    new_enough = version.parse(connection.iserver_version) >= version.parse('11.3.0300')
    chunk_size = 1000 if new_enough else 1000000
    subs = helper.fetch_objects_async(
        connection=connection,
        api=subscriptions_.list_subscriptions,
        async_api=subscriptions_.list_subscriptions_async,
        limit=limit,
        chunk_size=chunk_size,
        filters=filters,
        error_msg='Error getting subscription list.',
        dict_unpack_value="subscriptions",
        project_id=project_id,
    )
    if to_dictionary:
        return subs
    return [
        dispatch_from_dict(source=sub, connection=connection,
                           project_id=project_id) for sub in subs
    ]
def _get_user_groups(cls, connection: "Connection", name_begins: str = None,
                     to_dictionary: bool = False, limit: int = None,
                     **filters) -> List["UserGroup"]:
    """Fetch user groups as `UserGroup` objects or raw dictionaries.

    Args:
        connection: MicroStrategy connection object.
        name_begins: Restrict results to names starting with this prefix.
        to_dictionary: When True, return plain dicts instead of objects.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    raw_groups = helper.fetch_objects_async(
        connection,
        usergroups.get_info_all_user_groups,
        usergroups.get_info_all_user_groups_async,
        limit=limit,
        chunk_size=1000,
        error_msg="Error getting information for a set of User Groups.",
        name_begins=name_begins,
        filters=filters,
    )
    if to_dictionary:
        return raw_groups
    return [cls.from_dict(source=raw, connection=connection) for raw in raw_groups]
def get_predefined_folder_contents(connection: "Connection",
                                   folder_type: PredefinedFolders,
                                   project_id: Optional[str] = None,
                                   to_dictionary: bool = False,
                                   limit: Optional[int] = None,
                                   **filters) -> List:
    """Get contents of a pre-defined MicroStrategy folder in a project.

    Available values for `folder_type` are stored in enum
    `PredefinedFolders`.

    Note:
        When `project_id` is `None`, then its value is overwritten by
        `project_id` from `connection` object.

    Args:
        connection (object): MicroStrategy connection object returned by
            `connection.Connection()`
        folder_type (enum): pre-defined folder type. Available values are
            stored in enum `PredefinedFolders`.
        project_id (string, optional): project ID
        to_dictionary (bool, optional): If True returns dicts, by default
            (False) returns objects.
        limit (int): limit the number of elements returned. If `None`
            (default), all objects are returned.
        **filters: Available filter parameters: ['id', 'name',
            'description', 'date_created', 'date_modified', 'acg']

    Returns:
        list of objects or list of dictionaries
    """
    contents = fetch_objects_async(
        connection,
        folders.get_predefined_folder_contents,
        folders.get_predefined_folder_contents_async,
        limit=limit,
        chunk_size=1000,
        folder_type=folder_type.value,
        project_id=project_id,
        filters=filters,
    )
    if to_dictionary:
        return contents
    # Imported lazily to avoid a circular import at module load time.
    from mstrio.utils.object_mapping import map_objects_list
    return map_objects_list(connection, contents)
def list_folders(connection: "Connection", project_id: Optional[str] = None,
                 to_dictionary: bool = False, limit: Optional[int] = None,
                 **filters) -> Union[List["Folder"], List[dict]]:
    """List folders in one project, or configuration-level folders.

    With a `project_id` you get that project's folders; without one you
    get configuration-level folders (users, user groups, databases, etc.)
    which are not project-specific.

    Note:
        Id of project is not taken directly from `Connection` object, so
        you have to specify it explicitly.

    Args:
        connection (object): MicroStrategy connection object returned by
            `connection.Connection()`
        project_id (string): project ID
        to_dictionary (bool, optional): If True returns dicts, by default
            (False) returns objects.
        limit (int): limit the number of elements returned. If `None`
            (default), all objects are returned.
        **filters: Available filter parameters: ['id', 'name',
            'description', 'date_created', 'date_modified', 'acg']

    Returns:
        list of `Folder` objects or list of dictionaries
    """
    found = fetch_objects_async(
        connection,
        folders.list_folders,
        folders.list_folders_async,
        limit=limit,
        chunk_size=1000,
        project_id=project_id,
        filters=filters,
    )
    if to_dictionary:
        return found
    # Imported lazily to avoid a circular import at module load time.
    from mstrio.utils.object_mapping import map_objects_list
    return map_objects_list(connection, found)
def _get_search_result_list_format(connection: Connection, search_id: str,
                                   project_id: Optional[str] = None,
                                   limit: Optional[int] = None,
                                   offset: Optional[int] = None,
                                   to_dictionary: bool = True, **filters):
    """Collect the results of a previously-created search instance.

    Args:
        connection: MicroStrategy connection object.
        search_id: ID of the search instance whose results are fetched.
        project_id: Optional project scope for the search.
        limit: Maximum number of elements returned; `None` means all.
        offset: Starting offset within the result set.
        to_dictionary: When True (default), return raw dicts; otherwise
            map results to Python objects.
        **filters: Additional filtering keyword arguments.
    """
    # Imported lazily to avoid a circular import at module load time.
    from mstrio.utils.object_mapping import map_objects_list
    results = fetch_objects_async(
        connection=connection,
        api=browsing.get_search_results,
        async_api=browsing.get_search_results_async,
        search_id=search_id,
        project_id=project_id,
        limit=limit,
        offset=offset,
        chunk_size=1000,
        dict_unpack_value=None,
        filters=filters,
    )
    return results if to_dictionary else map_objects_list(connection, results)
def _list_all(cls, connection: Connection, name: str = None,
              to_dictionary: bool = False, to_dataframe: bool = False,
              limit: int = None,
              **filters) -> Union[List["Dataset"], List[dict], DataFrame]:
    """Search the environment for datasets by (exact) name.

    Args:
        connection: MicroStrategy connection object.
        name: Exact dataset name to match.
        to_dictionary: When True, return plain dicts.
        to_dataframe: When True, return a `DataFrame`.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    DATASET_SUBTYPES = [776, 779]
    DSS_XML_SEARCH_TYPE_EXACTLY = 2
    instance_response = objects.create_search_objects_instance(
        connection=connection,
        name=name,
        pattern=DSS_XML_SEARCH_TYPE_EXACTLY,
        object_type=cls._OBJECT_TYPE.value,
        error_msg="Error creating an instance for searching objects",
    )
    search_id = instance_response.json()['id']
    found = helper.fetch_objects_async(
        connection,
        api=objects.get_objects,
        async_api=objects.get_objects_async,
        dict_unpack_value=None,
        limit=limit,
        chunk_size=1000,
        error_msg="Error retrieving datasets from the environment.",
        filters=filters,
        search_id=search_id,
    )
    # The search returns all matching objects; keep only dataset subtypes.
    datasets = [item for item in found if item['subtype'] in DATASET_SUBTYPES]
    if to_dictionary:
        return datasets
    if to_dataframe:
        return DataFrame(datasets)
    return [cls.from_dict(source=item, connection=connection) for item in datasets]
def _get_usergroups(cls, connection: "Connection", name_begins: str = None,
                    to_dictionary: bool = False, limit: int = None,
                    **filters) -> List["UserGroup"]:
    """Fetch user groups via the bulk-response mapper.

    Args:
        connection: MicroStrategy connection object.
        name_begins: Restrict results to names starting with this prefix.
        to_dictionary: When True, return plain dicts instead of objects.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    raw_groups = helper.fetch_objects_async(
        connection,
        usergroups.get_info_all_user_groups,
        usergroups.get_info_all_user_groups_async,
        dict_unpack_value=None,
        limit=limit,
        chunk_size=1000,
        error_msg="Error getting information for a set of User Groups.",
        name_begins=name_begins,
        filters=filters,
    )
    if to_dictionary:
        return raw_groups
    return cls._from_bulk_response(connection, raw_groups)
def list_connections(self, nodes: Union[str, List[str]] = None, limit: int = None,
                     **filters) -> List[Dict[str, Any]]:
    """Get all active user connections.

    Optionally filter the connections by specifying the `filters`
    keyword arguments.

    Args:
        nodes: Node (server) names on which users will be disconnected.
        limit: limit the number of elements returned to a sample of
            connections. If `None`, all connections are returned.
        **filters: Available filter parameters: ['id', 'parent_id',
            'username', 'user_full_name', 'project_index', 'project_id',
            'project_name', 'open_jobs_count', 'application_type',
            'date_connection_created', 'duration', 'session_id',
            'client', 'config_level']
    """
    # `to_dictionary=True` is required here: the entries are subscripted
    # below (`node['name']`, `node['status']`), which assumes dicts rather
    # than Node objects. This also matches the sibling implementation of
    # this method elsewhere in the codebase.
    all_nodes = Cluster(self.connection).list_nodes(to_dictionary=True)
    all_connections = []
    if nodes is None:
        nodes = [
            node['name'] for node in all_nodes if node['status'] == 'running'
        ]
    else:
        nodes = nodes if isinstance(nodes, list) else [nodes]
    msg = "Error fetching chunk of active user connections."
    for node in nodes:
        all_connections.extend(
            helper.fetch_objects_async(self.connection,
                                       monitors.get_user_connections,
                                       monitors.get_user_connections_async,
                                       dict_unpack_value="userConnections",
                                       limit=limit, chunk_size=1000,
                                       error_msg=msg, node_name=node,
                                       filters=filters))
    return all_connections
def _list_projects(cls, connection: Connection, to_dictionary: bool = False,
                   limit: Optional[int] = None,
                   **filters) -> Union[List["Project"], List[dict]]:
    """Fetch all Projects, warning about any that are not loaded.

    Args:
        connection: MicroStrategy connection object.
        to_dictionary: When True, return plain dicts instead of objects.
        limit: Maximum number of elements returned; `None` means all.
        **filters: Additional filtering keyword arguments.
    """
    raw_projects = helper.fetch_objects_async(
        connection,
        monitors.get_projects,
        monitors.get_projects_async,
        dict_unpack_value='projects',
        limit=limit,
        chunk_size=50,
        error_msg="Error getting information for a set of Projects.",
        filters=filters,
    )
    if to_dictionary:
        return raw_projects
    result = [
        cls.from_dict(source=raw, connection=connection)
        for raw in raw_projects
    ]
    loaded = Project._list_loaded_projects(connection, to_dictionary=True)
    loaded_ids = [entry['id'] for entry in loaded]
    not_loaded = [proj for proj in result if proj.id not in loaded_ids]
    if not_loaded:
        # Surface a warning, but still return every project found.
        warning = (f"Projects {[project.name for project in not_loaded]} are either unloaded or "
                   "idled. Change status using the 'load()' or 'resume()' method to use all "
                   "functionality.")
        helper.exception_handler(warning, exception_type=UserWarning)
    return result
def _list_contact_groups(
        cls, connection: "Connection", to_dictionary: bool = False,
        limit: Optional[int] = None, offset: Optional[int] = None,
        **filters) -> Union[List["ContactGroup"], List[dict]]:
    """Fetch contact groups as `ContactGroup` objects or raw dictionaries.

    Args:
        connection: MicroStrategy connection object.
        to_dictionary: When True, return plain dicts instead of objects.
        limit: Maximum number of elements returned; `None` means all.
        offset: Starting offset within the result set.
        **filters: Additional filtering keyword arguments.
    """
    raw_groups = fetch_objects_async(
        connection=connection,
        api=contact_groups.get_contact_groups,
        async_api=contact_groups.get_contact_groups_async,
        limit=limit,
        offset=offset,
        chunk_size=1000,
        filters=filters,
        dict_unpack_value='contactGroups',
    )
    if to_dictionary:
        return raw_groups
    return [
        ContactGroup.from_dict(source=raw, connection=connection)
        for raw in raw_groups
    ]
def list_cube_caches(
        connection: "Connection", nodes: Optional[Union[List[str], str]] = None,
        cube_id: Optional[str] = None, loaded: Optional[bool] = False,
        db_connection_id: Optional[str] = None,
        project_ids: Optional[List[str]] = None,
        to_dictionary: Optional[bool] = False,
        limit: Optional[int] = None) -> Union[List["CubeCache"], List[dict]]:
    """List cube caches.

    You can filter them by cube (`cube_id`), database connection
    (`db_connection_id`) and projects (`project_ids`). You can also obtain
    only loaded caches (`loaded=True`).

    You can specify from which `nodes` caches will be retrieved. If `nodes`
    are `None` then all nodes are retrieved from the cluster.

    Args:
        connection: MicroStrategy connection object returned by
            `connection.Connection()`.
        nodes (list of strings or string, optional): names of nodes on which
            caches will be searched. By default it equals `None` and in that
            case all nodes' names are loaded from the cluster.
        cube_id (string, optional): When provided, only caches for the cube
            with given ID will be returned (if any).
        loaded (bool, optional): If True then only loaded caches will be
            retrieved. Otherwise all cubes will be returned.
        db_connection_id (string, optional): When provided, only caches for
            the database connection with given ID will be returned (if any).
        project_ids (list of string, optional): When provided only caches
            for projects with given IDs will be returned (if any).
        to_dictionary (bool, optional): If True returns dict, by default
            (False) returns CubeCache objects
        limit(integer, optional): Cut-off value for the number of objects
            returned. Default value is `None` which means no limit.

    Returns:
        List of CubeCache objects when parameter `to_dictionary` is set to
        False (default value) or list of dictionaries otherwise.
    """
    if project_ids is not None:
        project_ids = ','.join(project_ids)  # form accepted by request
    if nodes is None:
        cluster_ = Cluster(connection)
        nodes = cluster_.list_nodes(project=connection.project_id,
                                    to_dictionary=True)
        nodes = [node.get('name') for node in nodes]
    # `isinstance` instead of `type(...) == str` to also accept str subclasses
    nodes = [nodes] if isinstance(nodes, str) else nodes
    caches = []
    for node in nodes:
        caches += fetch_objects_async(connection=connection,
                                      api=monitors.get_cube_caches,
                                      async_api=monitors.get_cube_caches_async,
                                      dict_unpack_value='cubeCaches',
                                      node=node, limit=limit,
                                      project_ids=project_ids,
                                      chunk_size=1000, loaded=loaded,
                                      filters={})
    if cube_id:
        caches = [cache for cache in caches
                  if cache.get('source', {}).get('id', '') == cube_id]
    if db_connection_id:
        caches = [
            cache for cache in caches if db_connection_id in
            [db.get('id', '') for db in cache.get('databaseConnections', [])]
        ]
    if to_dictionary:
        return caches
    # BUGFIX: `from_dict` converts a single cache dict; the original call
    # `CubeCache.from_dict(connection, caches)` passed the connection as the
    # source and the whole list as the connection. Map each dict separately,
    # matching the documented "list of CubeCache objects" return and the
    # `from_dict(source=..., connection=...)` pattern used elsewhere.
    return [
        CubeCache.from_dict(source=cache, connection=connection)
        for cache in caches
    ]
def list_objects(connection: "Connection",
                 object_type: Union[ObjectTypes, ObjectSubTypes, int,
                                    List[Union[ObjectTypes, ObjectSubTypes, int]]],
                 application_id: str = None, name: str = None,
                 pattern: Union[SearchType, int] = None,
                 domain: Union[SearchDomain, int] = None, root: str = None,
                 uses_object: str = None, uses_recursive: bool = False,
                 used_by_object: str = None, used_by_recursive: bool = None,
                 limit: int = None, chunk_size: int = 1000,
                 err_msg_instance: str = None, err_msg_results: str = None,
                 **filters) -> List[dict]:
    """List objects of the provided type(s) via the Browsing API.

    A search instance is created first, then its results are fetched in
    chunks.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        object_type(enum class object or integer or list of enum class
            objects or integers, optional): Type(s) of object(s) to be
            searched, such as Folder, Attribute or User. Possible values
            available in ENUMs mstrio.utils.entity.ObjectTypes and
            mstrio.utils.entity.ObjectSubTypes.
        application_id(string, optional): Application (aka Project) ID in
            which search will be done.
        name(string, optional): Value the search pattern is set to, which
            will be applied to the names of object types being searched.
            For example, search for all report objects (type) whose name
            begins with (pattern) B (name).
        pattern(integer or enum class object, optional): Pattern to search
            for, such as Begin With or Exactly. Possible values are
            available in ENUM mstrio.browsing.SearchType. Default value is
            CONTAINS (4).
        domain(integer or enum class object, optional): Domain where the
            search will be performed, such as Local or Project. Possible
            values are available in ENUM mstrio.browsing.SearchDomain.
            Default value is DOMAIN_PROJECT (2).
        root(string, optional): Folder ID of the root folder where the
            search will be performed.
        uses_object(string, optional): Constrain the search to only return
            objects which use the given object. The value should be
            'objectId; object type', for example
            'E02FE6DC430378A8BBD315AA791FC580;3'. It is not allowed to use
            both 'usesObject' and 'usedByObject' in one request.
        uses_recursive(boolean, optional): Control the Intelligence server
            to also find objects that use the given objects indirectly.
            Default value is false.
        used_by_object(string, optional): Constrain the search to only
            return objects which are used by the given object. The value
            should be 'object Id; object type', for example:
            'E02FE6DC430378A8BBD315AA791FC580;3'. It is not allowed to use
            both 'usesObject' and 'usedByObject' in one request.
        used_by_recursive(boolean, optional): Control the Intelligence
            server to also find objects that are used by the given objects
            indirectly. Default value is false.
        limit(integer, optional): Cut-off value for the number of objects
            returned.
        chunk_size(integer, optional): Number of objects in each chunk.
            Default value is 1000.
        err_msg_instance(string, optional): Error message for request to
            create search instance.
        err_msg_results(string, optional): Error message for request to get
            results of searching.
        filters: optional keyword arguments to filter results

    Returns:
        list of dictionaries with basic information of objects returned
        from the metadata.
    """
    # Normalize every parameter that may arrive as an enum to plain ints.
    types = object_type if isinstance(object_type, list) else [object_type]
    types = [int(each) for each in types]
    pattern = int(pattern) if pattern is not None else None
    domain = int(domain) if domain is not None else None
    instance_response = browsing.store_search_instance(
        connection=connection,
        project_id=application_id,
        name=name,
        pattern=pattern,
        domain=domain,
        root=root,
        object_type=types,
        uses_object=uses_object,
        uses_recursive=uses_recursive,
        used_by_object=used_by_object,
        used_by_recursive=used_by_recursive,
        error_msg=err_msg_instance,
    )
    search_id = instance_response.json()['id']
    return helper.fetch_objects_async(
        connection=connection,
        api=browsing.get_search_results,
        async_api=browsing.get_search_results_async,
        limit=limit,
        chunk_size=chunk_size,
        error_msg=err_msg_results,
        search_id=search_id,
        project_id=application_id,
        dict_unpack_value=None,
        filters=filters,
    )