Example #1
    def __next__(self):
        """Return the next row in the dataset iterator. Raises StopIteration if
        end of file is reached or file has been closed.

        Automatically closes any open file when end of iteration is reached for
        the first time.

        Returns
        -------
        vizier.datastore.base.DatasetRow
        """
        if self.is_open:
            # Catch exception to close any open file
            try:
                row = next(self.reader)
                if self.has_row_ids:
                    row = DatasetRow(int(row[0]), row[1:])
                else:
                    row = DatasetRow(self.line_count, row)
                self.line_count += 1
                return row
            except StopIteration as ex:
                self.close()
                raise ex
        raise StopIteration
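A minimal usage sketch (not from the source) for the reader above, assuming open() returns the reader itself and supports the context-manager protocol, as the tests below do:

def consume(reader):
    rows = []
    with reader.open() as r:    # open() returns the reader itself
        for row in r:           # each row is a vizier DatasetRow
            rows.append(row)
    return rows                 # the backing file is auto-closed by now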
Example #2
 def test_default_json_reader(self):
     """Test functionality of Json dataset reader."""
     reader = DefaultJsonDatasetReader(JSON_FILE)
     with self.assertRaises(StopIteration):
         next(reader)
     count = 0
     with reader.open() as r:
         for row in r:
             self.assertEqual(len(row.values), 3)
             self.assertEqual(row.identifier, count)
             count += 1
     self.assertEqual(count, 2)
     with self.assertRaises(StopIteration):
         next(reader)
     # Create a new dataset and read it
     tmp_file = tempfile.mkstemp()[1]
     reader = DefaultJsonDatasetReader(tmp_file)
     values = ['A', 'B', 1, 2]
     rows = [
         DatasetRow(0, values),
         DatasetRow(1, values),
         DatasetRow(2, values)
     ]
     reader.write(rows)
     count = 0
     with reader.open() as reader:
         for row in reader:
             self.assertEqual(len(row.values), 4)
             self.assertEqual(row.identifier, count)
             count += 1
     self.assertEqual(count, len(rows))
     os.remove(tmp_file)
Example #3
    def filter_columns(self, identifier: str, columns: List[int],
                       names: List[str],
                       datastore: Datastore) -> VizualApiResult:
        """Dataset projection operator. Returns a copy of the dataset with the
        given identifier that contains only those columns listed in columns.
        The list of names contains optional new names for the filtered columns.
        A value of None in names indicates that the name of the corresponding
        column is not changed.

        Raises ValueError if no dataset with given identifier exists or if any
        of the filter columns are unknown.

        Parameters
        ----------
        identifier: string
            Unique dataset identifier
        columns: list(int)
            List of column identifiers for columns in the result.
        names: list(string)
            Optional new names for the filtered columns.
        datastore : vizier.datastore.fs.base.FileSystemDatastore
            Datastore to retrieve and update datasets

        Returns
        -------
        vizier.engine.packages.vizual.api.VizualApiResult
        """
        # Get dataset. Raise exception if dataset is unknown
        dataset = datastore.get_dataset(identifier)
        if dataset is None:
            raise ValueError('unknown dataset \'' + identifier + '\'')
        # The schema of the new dataset only contains the columns in the given
        # list. Keep track of their index positions to filter values.
        schema = list()
        val_filter = list()
        for i in range(len(columns)):
            col_idx = dataset.get_index(columns[i])
            if col_idx is None:
                raise ValueError('unknown column identifier \'' +
                                 str(columns[i]) + '\'')
            col = dataset.columns[col_idx]
            if names[i] is not None:
                schema.append(
                    DatasetColumn(identifier=col.identifier,
                                  name=names[i],
                                  data_type=col.data_type))
            else:
                schema.append(col)
            val_filter.append(col_idx)
        # Create a list of projected rows
        rows = list()
        for row in dataset.fetch_rows():
            values = list()
            for v_idx in val_filter:
                values.append(row.values[v_idx])
            rows.append(DatasetRow(identifier=row.identifier, values=values))
        # Store updated dataset to get new identifier
        ds = datastore.create_dataset(columns=schema, rows=rows, properties={})
        return VizualApiResult(ds)
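A hedged usage sketch for filter_columns; `api`, `store`, and `ds` are hypothetical stand-ins for the API implementation above, a datastore, and an existing dataset handle, and the result is assumed to expose the new handle as `.dataset`:

result = api.filter_columns(
    identifier=ds.identifier,
    columns=[0, 2],           # column identifiers to keep
    names=[None, 'renamed'],  # None keeps the original column name
    datastore=store)
projected = result.dataset    # the projected copy gets a new identifier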
Example #4
    def load_dataset(
        self,
        f_handle: FileHandle,
        proposed_schema: List[Tuple[str,
                                    str]] = []) -> FileSystemDatasetHandle:
        """Create a new dataset from a given file.

        Raises ValueError if the given file could not be loaded as a dataset.

        Parameters
        ----------
        f_handle : vizier.filestore.base.FileHandle
            Handle for an uploaded file

        Returns
        -------
        vizier.datastore.fs.dataset.FileSystemDatasetHandle
        """
        # The file handle might be None in which case an exception is raised
        if f_handle is None:
            raise ValueError('unknown file')
        # Expects a file in a supported tabular data format.
        if not f_handle.is_tabular:
            raise ValueError('cannot create dataset from file \'' +
                             f_handle.name + '\'')
        # Open the file as a csv file. Expects that the first row contains the
        # column names. Read dataset schema and dataset rows into two separate
        # lists.
        columns: List[DatasetColumn] = []
        rows: List[DatasetRow] = []
        with f_handle.open() as csvfile:
            reader = csv.reader(csvfile, delimiter=f_handle.delimiter)
            for col_name in next(reader):
                columns.append(
                    DatasetColumn(identifier=len(columns),
                                  name=col_name.strip()))
            for row in reader:
                values = [cast(v.strip()) for v in row]
                rows.append(
                    DatasetRow(identifier=str(len(rows)), values=values))
        # Get unique identifier and create subfolder for the new dataset
        identifier = get_unique_identifier()
        dataset_dir = self.get_dataset_dir(identifier)
        os.makedirs(dataset_dir)
        # Write rows to data file
        data_file = os.path.join(dataset_dir, DATA_FILE)
        DefaultJsonDatasetReader(data_file).write(rows)
        # Create dataset and write descriptor to file
        dataset = FileSystemDatasetHandle(identifier=identifier,
                                          columns=columns,
                                          data_file=data_file,
                                          row_count=len(rows),
                                          max_row_id=len(rows) - 1)
        dataset.to_file(
            descriptor_file=os.path.join(dataset_dir, DESCRIPTOR_FILE))
        return dataset
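A usage sketch under stated assumptions: `filestore` and `datastore` are hypothetical instances, and upload_file is assumed to be the filestore call that turns a local CSV into a FileHandle:

f_handle = filestore.upload_file('people.csv')  # assumed upload entry point
dataset = datastore.load_dataset(f_handle)
print(dataset.row_count, [col.name for col in dataset.columns])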
Example #5
 def test_deduplicate_annotations(self):
     """Test removing duplicated annotations."""
     store = FileSystemDatastore(STORE_DIR)
     ds = store.create_dataset(
         columns=[
             DatasetColumn(identifier=0, name='A'),
             DatasetColumn(identifier=1, name='B')
         ],
         rows=[DatasetRow(identifier=0, values=['a', 'b'])],
         annotations=DatasetMetadata(
             cells=[
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=1),
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=2),
                 DatasetAnnotation(column_id=1, row_id=0, key='X', value=3),
                 DatasetAnnotation(column_id=1, row_id=1, key='X', value=3),
                 DatasetAnnotation(column_id=0, row_id=0, key='Y', value=1),
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=1),
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=2),
                 DatasetAnnotation(column_id=1, row_id=0, key='X', value=3),
                 DatasetAnnotation(column_id=1, row_id=1, key='X', value=3),
             ],
             columns=[
                 DatasetAnnotation(column_id=0, key='A', value='x'),
                 DatasetAnnotation(column_id=1, key='A', value='x'),
                 DatasetAnnotation(column_id=0, key='A', value='x'),
                 DatasetAnnotation(column_id=1, key='A', value='x'),
                 DatasetAnnotation(column_id=0, key='A', value='x'),
                 DatasetAnnotation(column_id=1, key='A', value='x'),
                 DatasetAnnotation(column_id=0, key='A', value='x'),
                 DatasetAnnotation(column_id=1, key='A', value='x')
                 ],
             rows=[
                 DatasetAnnotation(row_id=0, key='E', value=100),
                 DatasetAnnotation(row_id=0, key='E', value=100)
             ]
         )
     )
     ds = store.get_dataset(ds.identifier)
     self.assertEqual(len(ds.annotations.cells), 4)
     self.assertEqual(len(ds.annotations.columns), 2)
     self.assertEqual(len(ds.annotations.rows), 1)
     annos = ds.annotations.for_cell(column_id=0, row_id=0)
     self.assertEqual(len(annos), 3)
     self.assertTrue(1 in [a.value for a in annos])
     self.assertTrue(2 in [a.value for a in annos])
     self.assertFalse(3 in [a.value for a in annos])
     self.assertEqual(len(ds.annotations.find_all(values=annos, key='X')), 2)
     with self.assertRaises(ValueError):
         ds.annotations.find_one(values=annos, key='X')
     self.assertEqual(len(ds.annotations.for_column(column_id=0)), 1)
     self.assertEqual(len(ds.annotations.for_row(row_id=0)), 1)
     annotations = ds.annotations.filter(columns=[1])
     self.assertEqual(len(annotations.cells), 1)
     self.assertEqual(len(annotations.columns), 1)
     self.assertEqual(len(annotations.rows), 1)
Example #6
    def update_cell(self, identifier: str, column_id: int, row_id: str,
                    value: str, datastore: Datastore) -> VizualApiResult:
        """Update a cell in a given dataset.

        Raises ValueError if no dataset with given identifier exists or if the
        specified cell is outside of the current dataset ranges.

        Parameters
        ----------
        identifier : string
            Unique dataset identifier
        column_id: int
            Unique column identifier for updated cell
        row_id: string
            Unique row identifier
        value: string
            New cell value
        datastore : vizier.datastore.fs.base.FileSystemDatastore
            Datastore to retrieve and update datasets

        Returns
        -------
        vizier.engine.packages.vizual.api.VizualApiResult
        """
        # Get dataset. Raise exception if dataset is unknown
        dataset = datastore.get_dataset(identifier)
        if dataset is None:
            raise ValueError('unknown dataset \'' + identifier + '\'')
        # Get column index first in case it raises an exception
        col_idx = dataset.get_index(column_id)
        if col_idx is None:
            raise ValueError('unknown column identifier \'' + str(column_id) +
                             '\'')
        # Update the specified cell in the given data array
        rows = dataset.fetch_rows()
        row_index = -1
        for i in range(len(rows)):
            if int(rows[i].identifier) == int(row_id):
                row_index = i
                break
        # Make sure that row_id refers to a valid row in the dataset
        if row_index < 0:
            raise ValueError('invalid row identifier \'' + str(row_id) + '\'')
        r = rows[row_index]
        values = list(r.values)
        values[col_idx] = value
        rows[row_index] = DatasetRow(identifier=r.identifier, values=values)
        # Store updated dataset to get new identifier
        ds = datastore.create_dataset(columns=dataset.columns,
                                      rows=rows,
                                      properties={})
        return VizualApiResult(ds)
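A hedged usage sketch; `api`, `store`, and `ds` are hypothetical. Since the datastore creates a new dataset rather than mutating in place, the result carries a fresh identifier:

result = api.update_cell(
    identifier=ds.identifier,
    column_id=1,    # unique column identifier, not an index position
    row_id='0',     # matched against row identifiers as integers
    value='42',
    datastore=store)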
Example #7
 def test_query_annotations(self):
     """Test retrieving annotations via the datastore."""
     store = FileSystemDatastore(STORE_DIR)
     ds = store.create_dataset(
         columns=[
             DatasetColumn(identifier=0, name='A'),
             DatasetColumn(identifier=1, name='B')
         ],
         rows=[DatasetRow(identifier=0, values=['a', 'b'])],
         properties=EXAMPLE_PROPERTIES
     )
     properties = store.get_properties(ds.identifier)
     self.assertEqual(len(properties["columns"]), 2)
Example #8
def DATASET_ROW(obj):
    """Convert dictionary into a dataset row object.

    Parameters
    ----------
    obj: dict
        Default serialization of a dataset row

    Returns
    -------
    vizier.datastore.dataset.DatasetRow
    """
    return DatasetRow(identifier=obj[labels.ID], values=obj[labels.ROWVALUES])
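A round-trip sketch with hypothetical values, showing the dictionary shape DATASET_ROW expects (keys come from the labels module):

obj = {labels.ID: '7', labels.ROWVALUES: ['Alice', 32]}
row = DATASET_ROW(obj)  # DatasetRow with identifier '7' and two values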
Example #9
    def open(self):
        """Setup the reader by querying the database and creating an in-memory
        copy of the dataset rows.

        Returns
        -------
        vizier.datastore.reader.MimirDatasetReader
        """
        # Query the database to retrieve dataset rows if reader is not already
        # open
        if not self.is_open:
            # Query the database to get the list of rows. Sort rows according
            # to the order in row_ids and return an InMemReader.
            sql = base.get_select_query(self.table_name, columns=self.columns)
            if self.rowid is not None:
                sql += ' WHERE ROWID() = ' + str(self.rowid)
            if self.is_range_query:
                if self.limit > 0:
                    sql += ' LIMIT ' + str(self.limit)
                if self.offset > 0:
                    sql += ' OFFSET ' + str(self.offset)
            rs = mimir.vistrailsQueryMimirJson(sql + ';', True, False)
            # Initialize mapping of column rdb names to index positions in
            # dataset rows
            self.col_map = dict()
            for i in range(len(rs['schema'])):
                col = rs['schema'][i]
                self.col_map[base.sanitize_column_name(col['name'])] = i
            rs_rows = rs['data']
            row_ids = rs['prov']
            annotation_flags = rs['colTaint']
            self.rows = list()
            for row_index in range(len(rs_rows)):
                row = rs_rows[row_index]
                row_annotation_flags = annotation_flags[row_index]  
                row_id = str(row_ids[row_index])
                values = [None] * len(self.columns)
                annotation_flag_values = [None] * len(self.columns)
                for i in range(len(self.columns)):
                    col = self.columns[i]
                    col_index = self.col_map[col.name_in_rdb]
                    values[i] = base.mimir_value_to_python(row[col_index], col)
                    annotation_flag_values[i] = row_annotation_flags[col_index]
                self.rows.append(DatasetRow(row_id, values, annotation_flag_values))
            self.read_index = 0
            self.is_open = True
        return self
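A usage sketch (hypothetical reader instance, assuming the same context-manager protocol as the JSON reader): open() runs the Mimir query once and buffers every row in memory, so iteration reads only from that buffer:

with reader.open() as r:
    for row in r:
        print(row.identifier, row.values)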
Example #10
    def empty_dataset(
        self,
        datastore: Datastore,
        filestore: Filestore,
        initial_columns: List[Tuple[str, str]] = [("''", "unnamed_column")]
    ) -> VizualApiResult:
        """Create (or load) a new dataset from a given file or Uri. It is
        guaranteed that either the file identifier or the url are not None but
        one of them will be None. The user name and password may only be given
        if an url is given.

        The resources refer to any resoures (e.g., file identifier) that have
        been generated by a previous execution of the respective task. This
        allows to associate an identifier with a downloaded file to avoid future
        downloads (unless the reload flag is True).

        Parameters
        ----------
        datastore : vizier.datastore.fs.base.FileSystemDatastore
            Datastore to retrieve and update datasets
        filestore: vizier.filestore.Filestore
            Filestore to retrieve uploaded datasets

        Returns
        -------
        vizier.engine.packages.vizual.api.VizualApiResult
        """
        assert isinstance(datastore, MimirDatastore)
        ds = datastore.create_dataset(
            columns=[
                MimirDatasetColumn(identifier=id,
                                   name_in_dataset=col,
                                   data_type="varchar")
                for id, (default, col) in enumerate(initial_columns)
            ],
            rows=[
                DatasetRow(
                    identifier=str(id),
                    values=[default for default, col in initial_columns])
                for id in range(1, 2)
            ],
            human_readable_name="Empty Table",
        )

        return VizualApiResult(dataset=ds)
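A hedged usage sketch; `api`, `store`, and `files` are hypothetical, and `store` must be a MimirDatastore to satisfy the assertion above. With the default initial_columns this yields one varchar column named 'unnamed_column' and a single row holding that column's default value:

result = api.empty_dataset(datastore=store, filestore=files)
print(result.dataset.identifier)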
Example #11
 def test_query_annotations(self):
     """Test retrieving annotations via the datastore."""
     store = FileSystemDatastore(STORE_DIR)
     ds = store.create_dataset(
         columns=[
             DatasetColumn(identifier=0, name='A'),
             DatasetColumn(identifier=1, name='B')
         ],
         rows=[DatasetRow(identifier=0, values=['a', 'b'])],
         annotations=DatasetMetadata(
             cells=[
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=1),
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=2),
                 DatasetAnnotation(column_id=1, row_id=0, key='X', value=3),
                 DatasetAnnotation(column_id=0, row_id=0, key='Y', value=1)
             ],
             columns=[
                 DatasetAnnotation(column_id=0, key='A', value='x'),
                 DatasetAnnotation(column_id=1, key='A', value='x')
                 ],
             rows=[
                 DatasetAnnotation(row_id=0, key='E', value=100)
             ]
         )
     )
     annos = store.get_annotations(ds.identifier, column_id=1)
     self.assertEqual(len(annos.columns), 1)
     self.assertEqual(len(annos.rows), 0)
     self.assertEqual(len(annos.cells), 0)
     annos = store.get_annotations(ds.identifier, column_id=0)
     self.assertEqual(len(annos.columns), 1)
     self.assertEqual(len(annos.rows), 0)
     self.assertEqual(len(annos.cells), 0)
     annos = store.get_annotations(ds.identifier, row_id=0)
     self.assertEqual(len(annos.columns), 0)
     self.assertEqual(len(annos.rows), 1)
     self.assertEqual(len(annos.cells), 0)
     annos = store.get_annotations(ds.identifier, column_id=1, row_id=0)
     self.assertEqual(len(annos.columns), 0)
     self.assertEqual(len(annos.rows), 0)
     self.assertEqual(len(annos.cells), 1)
     annos = store.get_annotations(ds.identifier, column_id=0, row_id=0)
     self.assertEqual(len(annos.columns), 0)
     self.assertEqual(len(annos.rows), 0)
     self.assertEqual(len(annos.cells), 3)
Example #12
    def open(self) -> "MimirDatasetReader":
        """Setup the reader by querying the database and creating an in-memory
        copy of the dataset rows.

        Returns
        -------
        vizier.datastore.reader.MimirDatasetReader
        """
        # Query the database to retrieve dataset rows if reader is not already
        # open
        if not self.is_open:
            # Query the database to get the list of rows. Sort rows according to
            # order in row_ids and return a InMemReader
            rs = mimir.getTable(
                table=self.table_name,
                columns=[col.name_in_rdb for col in self.columns],
                offset_to_rowid=self.rowid,
                limit=self.limit if self.is_range_query else None,
                offset=self.offset if self.is_range_query else None,
                include_uncertainty=True)

            # Unpack the result set: cell values, row identifiers (prov), and
            # per-cell annotation flags (colTaint).
            rs_rows = rs['data']
            row_ids = rs['prov']
            annotation_flags = rs['colTaint']
            self.rows = list()
            for row_index in range(len(rs_rows)):
                row = rs_rows[row_index]
                row_annotation_flags = annotation_flags[row_index]
                row_id = str(row_ids[row_index])
                values = [None] * len(self.columns)
                annotation_flag_values: List[bool] = [False] * len(
                    self.columns)
                for i in range(len(self.columns)):
                    col = self.columns[i]
                    values[i] = base.mimir_value_to_python(row[i], col)
                    annotation_flag_values[i] = not row_annotation_flags[i]
                self.rows.append(
                    DatasetRow(row_id, values, annotation_flag_values))
            self.read_index = 0
            self.is_open = True
        return self
Example #13
    def __next__(self):
        """Return the next row in the dataset iterator. Raises StopIteration if
        end of file is reached or file has been closed.

        Automatically closes any open file when end of iteration is reached for
        the first time.

        Returns
        -------
        vizier.datastore.base.DatasetRow
        """
        if self.is_open:
            if self.read_index < len(self.rows):
                r_dict = self.rows[self.read_index]
                row = DatasetRow(identifier=r_dict[KEY_ROW_ID],
                                 values=r_dict[KEY_ROW_VALUES])
                self.read_index += 1
                return row
        raise StopIteration
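A sketch of the buffered rows this in-memory reader consumes; the key names are resolved through the module constants, and the values shown are hypothetical:

example_buffer = [
    {KEY_ROW_ID: '0', KEY_ROW_VALUES: ['a', 'b']},
    {KEY_ROW_ID: '1', KEY_ROW_VALUES: ['c', 'd']},
]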
Example #14
 def test_properties(self):
     """Test loading a dataset from file."""
     store = FileSystemDatastore(STORE_DIR)
     ds = store.create_dataset(
         columns=[
             DatasetColumn(identifier=0, name='A'),
             DatasetColumn(identifier=1, name='B')
         ],
         rows=[DatasetRow(identifier=0, values=[1, 2])],
         properties=EXAMPLE_PROPERTIES
     )
     ds = store.get_dataset(ds.identifier)
     column_props = ds.properties['columns']
     self.assertEqual(len(column_props), 2)
     self.assertTrue('A' in [prop['name'] for prop in column_props])
     # Reload datastore
     store = FileSystemDatastore(STORE_DIR)
     ds = store.get_dataset(ds.identifier)
     column_props = ds.properties['columns']
     self.assertEqual(len(column_props), 2)
Example #15
 def test_create_dataset(self):
     """Test loading a dataset from file."""
     store = FileSystemDatastore(STORE_DIR)
     ds = store.create_dataset(
         columns=[
             DatasetColumn(identifier=0, name='A'),
             DatasetColumn(identifier=1, name='B')
         ],
         rows=[DatasetRow(identifier=0, values=['a', 'b'])]
     )
     ds = store.get_dataset(ds.identifier)
     column_ids = [col.identifier for col in ds.columns]
     self.assertEqual(len(ds.columns), 2)
     for id in [0, 1]:
         self.assertTrue(id in column_ids)
     column_names = [col.name for col in ds.columns]
     for name in ['A', 'B']:
         self.assertTrue(name in column_names)
     rows = ds.fetch_rows()
     self.assertEqual(len(rows), 1)
     self.assertEqual(rows[0].values, ['a', 'b'])
     self.assertEqual(len(ds.annotations.cells), 0)
     self.assertEqual(len(ds.annotations.columns), 0)
     self.assertEqual(len(ds.annotations.rows), 0)
     # Reload the datastore
     store = FileSystemDatastore(STORE_DIR)
     ds = store.get_dataset(ds.identifier)
     column_ids = [col.identifier for col in ds.columns]
     self.assertEqual(len(ds.columns), 2)
     for id in [0, 1]:
         self.assertTrue(id in column_ids)
     column_names = [col.name for col in ds.columns]
     for name in ['A', 'B']:
         self.assertTrue(name in column_names)
     rows = ds.fetch_rows()
     self.assertEqual(len(rows), 1)
     self.assertEqual(rows[0].values, ['a', 'b'])
     self.assertEqual(len(ds.annotations.cells), 0)
     self.assertEqual(len(ds.annotations.columns), 0)
     self.assertEqual(len(ds.annotations.rows), 0)
Example #16
    def insert_row(self, identifier: str, position: int,
                   datastore: Datastore) -> VizualApiResult:
        """Insert row at given position in a dataset.

        Raises ValueError if no dataset with given identifier exists or if the
        specified row position is outside the dataset bounds.

        Parameters
        ----------
        identifier: string
            Unique dataset identifier
        position: int
            Index position at which the row will be inserted
        datastore : vizier.datastore.fs.base.FileSystemDatastore
            Datastore to retrieve and update datasets

        Returns
        -------
        vizier.engine.packages.vizual.api.VizualApiResult
        """
        # Get dataset. Raise exception if dataset is unknown
        dataset = datastore.get_dataset(identifier)
        if dataset is None:
            raise ValueError('unknown dataset \'' + identifier + '\'')
        assert isinstance(dataset, FileSystemDatasetHandle)
        # Make sure that position is a valid row index in the new dataset
        if position < 0 or position > dataset.row_count:
            raise ValueError('invalid row index \'' + str(position) + '\'')
        # Create empty set of values
        rows = dataset.fetch_rows()
        rows.insert(
            position,
            DatasetRow(identifier=str(dataset.max_row_id() + 1),
                       values=[None] * len(dataset.columns)))
        # Store updated dataset to get new identifier
        ds = datastore.create_dataset(columns=dataset.columns,
                                      rows=rows,
                                      properties={})
        return VizualApiResult(ds)
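A hedged usage sketch; `api`, `store`, and `ds` are hypothetical. Valid positions run from 0 through the current row count, so passing the row count appends at the end:

result = api.insert_row(
    identifier=ds.identifier,
    position=0,     # prepend a row of all-None values
    datastore=store)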
Example #17
 def test_validate_dataset(self):
     """Test the validate dataset function."""
     columns = []
     rows = []
     # Empty dataset
     max_col_id, max_row_id = validate_dataset(columns, rows)
     self.assertEqual(max_col_id, -1)
     self.assertEqual(max_row_id, -1)
     max_col_id, max_row_id = validate_dataset(
         columns=columns,
         rows=rows
     )
     self.assertEqual(max_col_id, -1)
     self.assertEqual(max_row_id, -1)
     # Valid set of columns and rows
     columns = [DatasetColumn(0, 'A'), DatasetColumn(10, 'B')]
     rows = [DatasetRow(0, [1, 2]), DatasetRow(4, [None, 2]), DatasetRow(2, [0, 0])]
     max_col_id, max_row_id = validate_dataset(columns, rows)
     self.assertEqual(max_col_id, 10)
     self.assertEqual(max_row_id, 4)
     max_col_id, max_row_id = validate_dataset(
         columns=columns,
         rows=rows
     )
     self.assertEqual(max_col_id, 10)
     self.assertEqual(max_row_id, 4)
     # Column errors
     with self.assertRaises(ValueError):
         validate_dataset(columns + [DatasetColumn()], [])
     with self.assertRaises(ValueError):
         validate_dataset(columns + [DatasetColumn(10, 'C')], [])
     # Row errors
     with self.assertRaises(ValueError):
         validate_dataset(columns, rows + [DatasetRow(1000, [0, 1, 3])])
     with self.assertRaises(ValueError):
         validate_dataset(columns, rows + [DatasetRow(-1, [1, 3])])
     with self.assertRaises(ValueError):
         validate_dataset(columns, rows + [DatasetRow(0, [1, 3])])
Example #18
    def create_dataset(
            self,
            columns: List[DatasetColumn],
            rows: List[DatasetRow],
            properties: Optional[Dict[str, Any]] = None,
            human_readable_name: str = "Untitled Dataset",
            backend_options: Optional[List[Tuple[str, str]]] = None,
            dependencies: Optional[List[str]] = None) -> DatasetDescriptor:
        """Create a new dataset in the datastore. Expects at least the list of
        columns and the rows for the dataset.

        Raises ValueError if (1) the column identifiers are not unique, (2) the
        row identifiers are not unique, (3) the number of columns and values in
        a row do not match, (4) any of the column or row identifiers have a
        negative value, or (5) the given column or row counters have a value
        lower than or equal to any of the column or row identifiers.

        Parameters
        ----------
        columns: list(vizier.datastore.dataset.DatasetColumn)
            List of columns. It is expected that each column has a unique
            identifier.
        rows: list(vizier.datastore.dataset.DatasetRow)
            List of dataset rows.
        properties: dict(string, ANY), optional
            Properties for dataset components

        Returns
        -------
        vizier.datastore.dataset.DatasetDescriptor
        """
        # Validate (i) that each column has a unique identifier, (ii) each row
        # has a unique identifier, and (iii) that every row has exactly one
        # value per column.
        properties = {} if properties is None else properties
        dependencies = [] if dependencies is None else dependencies
        identifiers = set(
            int(row.identifier) for row in rows
            if row.identifier is not None and int(row.identifier) >= 0)
        identifiers.add(0)
        max_row_id = max(identifiers)
        rows = [
            DatasetRow(identifier=row.identifier if row.identifier is not None
                       and int(row.identifier) >= 0 else str(idx + max_row_id),
                       values=row.values,
                       caveats=row.caveats) for idx, row in enumerate(rows)
        ]
        _, max_row_id = validate_dataset(columns=columns, rows=rows)
        # Get new identifier and create directory for new dataset
        identifier = get_unique_identifier()
        dataset_dir = self.get_dataset_dir(identifier)
        os.makedirs(dataset_dir)
        # Write rows to data file
        data_file = os.path.join(dataset_dir, DATA_FILE)
        DefaultJsonDatasetReader(data_file).write(rows)
        # Create dataset and write dataset file
        dataset = FileSystemDatasetHandle(identifier=identifier,
                                          columns=columns,
                                          data_file=data_file,
                                          row_count=len(rows),
                                          max_row_id=max_row_id,
                                          properties=properties)
        dataset.to_file(
            descriptor_file=os.path.join(dataset_dir, DESCRIPTOR_FILE))
        # Write the dataset properties to the metadata file
        if properties is not None:
            dataset.write_properties_to_file(
                self.get_properties_filename(identifier))
        # Return handle for new dataset
        return DatasetDescriptor(identifier=dataset.identifier,
                                 name=human_readable_name,
                                 columns=dataset.columns)
Example #19
api = VizierApiClient(URLS)
PROJECT_ID = api.create_project({"name": "Test Client Datastore"}).identifier

at_exit(api.delete_project, PROJECT_ID)

# We're just doing some unit testing on the fields specific to DatastoreClient, so
# ignore complaints about instantiating an abstract class
store = DatastoreClient(  # type: ignore[abstract]
    urls=DatastoreClientUrlFactory(urls=URLS, project_id=PROJECT_ID))

ds = store.create_dataset(
    columns=[
        DatasetColumn(identifier=0, name='Name'),
        DatasetColumn(identifier=1, name='Age', data_type="int")
    ],
    rows=[
        DatasetRow(identifier=0, values=['Alice', 32]),
        DatasetRow(identifier=1, values=['Bob', 23])
    ],
    properties={"example_property": "foo"})

# print(ds)
# print([col.identifier for col in ds.columns])
# print([col.name for col in ds.columns])

dh = store.get_dataset(ds.identifier)
assert dh is not None
for row in dh.fetch_rows():
    print([row.identifier] + row.values)

caveats = dh.get_caveats()
# print("\n".join(c.__repr__ for c in caveats))
Example #20
 def test_update_annotations(self):
     """Test updating annotations via the datastore."""
     store = FileSystemDatastore(STORE_DIR)
     ds = store.create_dataset(
         columns=[
             DatasetColumn(identifier=0, name='A'),
             DatasetColumn(identifier=1, name='B')
         ],
         rows=[DatasetRow(identifier=0, values=['a', 'b'])],
         annotations=DatasetMetadata(
             cells=[
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=1),
                 DatasetAnnotation(column_id=0, row_id=0, key='X', value=2),
                 DatasetAnnotation(column_id=1, row_id=0, key='X', value=3),
                 DatasetAnnotation(column_id=0, row_id=0, key='Y', value=1)
             ],
             columns=[
                 DatasetAnnotation(column_id=0, key='A', value='x'),
                 DatasetAnnotation(column_id=1, key='A', value='x')
                 ],
             rows=[
                 DatasetAnnotation(row_id=0, key='E', value=100)
             ]
         )
     )
     # INSERT row annotations
     store.update_annotation(
         ds.identifier,
         key='D',
         row_id=0,
         new_value=200
     )
     annos = store.get_annotations(ds.identifier, row_id=0)
     self.assertEqual(len(annos.rows), 2)
     for key in ['D', 'E']:
         self.assertTrue(key in [a.key for a in annos.rows])
     for val in [100, 200]:
         self.assertTrue(val in [a.value for a in annos.rows])
     # UPDATE column annotation
     store.update_annotation(
         ds.identifier,
         key='A',
         column_id=1,
         old_value='x',
         new_value='y'
     )
     annos = store.get_annotations(ds.identifier, column_id=1)
     self.assertEqual(annos.columns[0].key, 'A')
     self.assertEqual(annos.columns[0].value, 'y')
     # DELETE cell annotation
     store.update_annotation(
         ds.identifier,
         key='X',
         column_id=0,
         row_id=0,
         old_value=2,
     )
     annos = store.get_annotations(ds.identifier, column_id=0, row_id=0)
     self.assertEqual(len(annos.cells), 2)
     for a in annos.cells:
         self.assertNotEqual(a.value, 2)
     result = store.update_annotation(
         ds.identifier,
         key='X',
         column_id=1,
         row_id=0,
         old_value=3,
     )
     self.assertTrue(result)
     annos = store.get_annotations(ds.identifier, column_id=1, row_id=0)
     self.assertEqual(len(annos.cells), 0)