    def test_dataset_elements_info_by_pages(self):
        """
        Factory can retrieve multiple elements at once by pages.
        """

        editor = TokenDAO("normal user privileged with link", 1, 5, "user1",
                          privileges=Privileges.RO_WATCH_DATASET)

        dataset = DatasetDAO("user1/dataset1", "example_dataset", "dataset for testing purposes", "none",
                             tags=["example", "0"])

        self.session.flush()

        editor = editor.link_dataset(dataset)

        # Create five elements in the dataset and keep their titles for later checks.
        elements = [DatasetElementDAO("example{}".format(x), "none", None, dataset=dataset).title for x in range(5)]

        self.session.flush()

        dataset = dataset.update()

        page_size = global_config.get_page_size()
        for page in range(len(elements) // page_size + int(len(elements) % page_size > 0)):
            retrieved_elements = DatasetElementFactory(editor, dataset).get_elements_info(page)

            for x in retrieved_elements:
                self.assertIn(x.title, elements)
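The loop bound above is just a ceiling division expressed with integer arithmetic. A minimal standalone sketch of the same computation (plain Python, no project dependencies assumed):

import math

def num_pages(total_elements, page_size):
    # Same value as: total_elements // page_size + int(total_elements % page_size > 0)
    return math.ceil(total_elements / page_size)

assert num_pages(5, 2) == 3  # two full pages plus one partial page
assert num_pages(4, 2) == 2  # exact fit: no partial page
assert num_pages(0, 2) == 0  # empty collection: no pages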
Example #2
    def get(self):
        """
        Retrieves all the information of the server.
        :return:
        """
        required_privileges = [
            Privileges.RO_WATCH_DATASET,
        ]

        _, token = self.token_parser.parse_args(
            required_any_token_privileges=required_privileges)

        response = {
            'Server': 'mldatahub {}'.format(__version__),
            'Page-Size': global_config.get_page_size()
        }

        return response
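A client-side call against this resource might look as follows. This is only a sketch: the base URL, the route and the token header name are assumptions for illustration, not taken from the snippet.

import requests

# Hypothetical route and header name; adjust to the actual deployment.
response = requests.get(
    "http://localhost:5000/server",
    headers={"Token": "my-access-token"},
)
info = response.json()
print(info["Server"])     # e.g. "mldatahub <version>"
print(info["Page-Size"])  # the server's configured page size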
Example #3
    def test_dataset_elements_info_by_different_pages_size(self):
        """
        Factory can retrieve multiple elements with different page sizes.
        """
        initial_page_size = global_config.get_page_size()
        global_config.set_page_size(10)

        editor = TokenDAO("normal user privileged with link", 1, 5, "user1",
                          privileges=Privileges.RO_WATCH_DATASET)

        dataset = DatasetDAO("user1/dataset1", "example_dataset", "dataset for testing purposes", "none",
                             tags=["example", "0"])

        self.session.flush()

        editor = editor.link_dataset(dataset)

        elements = [DatasetElementDAO("example{}".format(x), "none", None, dataset=dataset).title for x in range(5)]

        self.session.flush()

        dataset = dataset.update()

        # Retrieve all the elements in a single page to establish a reference ordering.
        ordered_elements = list(DatasetElementFactory(editor, dataset).get_elements_info(page_size=len(elements)))

        pages_size = [1, 2, 3, 4, 5]

        for page_size in pages_size:
            num_pages = len(elements) // page_size + int(len(elements) % page_size > 0)

            for page in range(num_pages):
                retrieved_elements = list(DatasetElementFactory(editor, dataset).get_elements_info(page, page_size=page_size))
                for retrieved_element, ordered_element in zip(retrieved_elements, ordered_elements[page*page_size:(page+1)*page_size]):
                    self.assertEqual(retrieved_element._id, ordered_element._id)

        # Requesting more elements than the configured page size must be rejected.
        with self.assertRaises(Conflict):
            DatasetElementFactory(editor, dataset).get_elements_info(page_size=global_config.get_page_size() + 1)

        global_config.set_page_size(initial_page_size)
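The reference slice ordered_elements[page*page_size:(page+1)*page_size] used above is the usual way to emulate paging over an in-memory sequence; a small self-contained sketch:

def paginate(items, page, page_size):
    # Return the slice of items corresponding to the given zero-based page.
    return items[page * page_size:(page + 1) * page_size]

items = list(range(5))
assert paginate(items, 0, 2) == [0, 1]
assert paginate(items, 1, 2) == [2, 3]
assert paginate(items, 2, 2) == [4]  # last page may be partially filled
assert paginate(items, 3, 2) == []   # past the end: empty slice, no error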
Example #4
    def __init__(self):
        super().__init__()
        self.session = global_config.get_session()
        self.get_parser = reqparse.RequestParser()

        arguments = {
            "elements":
                {
                    "type": list,
                    "required": True,
                    "help": "List of element ids to retrieve content from (limited to {}).".format(global_config.get_page_size()),
                    "location": "json"
                },
        }

        for argument, kwargs in arguments.items():
            self.get_parser.add_argument(argument, **kwargs)
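The parser above expects a JSON body carrying an "elements" list. A hypothetical request built with the requests library (the endpoint URL, the element ids and the token header name are assumptions for illustration):

import requests

# Placeholder ids and a hypothetical endpoint/header, for illustration only.
# Per the help text, the list length is limited to the configured page size.
body = {"elements": ["element-id-1", "element-id-2"]}
response = requests.get(
    "http://localhost:5000/datasets/user1/dataset1/elements/bundle",
    headers={"Token": "my-access-token"},
    json=body,
)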
Example #5
    def __init__(self):
        super().__init__()
        self.get_parser = reqparse.RequestParser()
        self.get_parser.add_argument("page", type=int, required=False, help="Page number to retrieve.", default=0)
        self.get_parser.add_argument("page-size", type=int, required=False, help="Size of the page to retrieve.", default=global_config.get_page_size())
        self.get_parser.add_argument("elements", type=list, required=False, location="json", help="List of IDs to retrieve. Overrides the page attribute")
        self.get_parser.add_argument("options", type=dict, required=False, location="json", help="options string")

        self.post_parser = reqparse.RequestParser()
        self.session = global_config.get_session()

        arguments = {
            "title":
                {
                    "type": str,
                    "required": True,
                    "help": "Title for the dataset.",
                    "location": "json"
                },
            "description":
                {
                    "type": str,
                    "required": True,
                    "help": "Description for the dataset.",
                    "location": "json"
                },
            "http_ref":
                {
                    "type": str,
                    "required": False,
                    "help": "Reference data (perhaps a Bibtex in string format?)",
                    "location": "json"
                },
            "tags":
                {
                    "type": list,
                    "required": False,
                    "help": "Tags for the dataset (ease the searches for this dataset).",
                    "location": "json"
                },
        }

        for argument, kwargs in arguments.items():
            self.post_parser.add_argument(argument, **kwargs)
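A dataset-creation POST matching the post_parser above could carry a body like the following. The endpoint URL and token header name are illustrative assumptions; the JSON keys mirror the parser's arguments:

import requests

# "title" and "description" are required; "http_ref" and "tags" are optional.
body = {
    "title": "example_dataset",
    "description": "dataset for testing purposes",
    "http_ref": "http://example.com/reference",
    "tags": ["example", "0"],
}
# Hypothetical endpoint and header name, for illustration only.
response = requests.post(
    "http://localhost:5000/datasets",
    headers={"Token": "my-access-token"},
    json=body,
)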