Code Example #1
def load_dataset(dataset, datasets_path=None, bucket=None):
    dataset_path = _get_dataset_path(dataset, datasets_path, bucket)
    metadata = Metadata(str(dataset_path / 'metadata.json'))
    tables = metadata.get_tables()
    if not hasattr(metadata, 'modality'):
        if len(tables) > 1:
            modality = 'multi-table'
        else:
            table = metadata.get_table_meta(tables[0])
            if any(table.get(field) for field in TIMESERIES_FIELDS):
                modality = 'timeseries'
            else:
                modality = 'single-table'

        metadata._metadata['modality'] = modality
        metadata.modality = modality

    if not hasattr(metadata, 'name'):
        metadata._metadata['name'] = dataset_path.name
        metadata.name = dataset_path.name

    return metadata
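This first version of load_dataset resolves the dataset location, reads its metadata.json, and patches the resulting Metadata object with a modality and a name when they are missing. The sketch below shows how it might be called; the dataset name and path are placeholders, and the surrounding module is assumed to provide _get_dataset_path, Metadata and TIMESERIES_FIELDS.

# Hypothetical usage sketch; 'adult' and the path are placeholders.
metadata = load_dataset('adult', datasets_path='/path/to/datasets')
print(metadata.name)          # folder name of the dataset, e.g. 'adult'
print(metadata.modality)      # 'single-table', 'multi-table' or 'timeseries'
print(metadata.get_tables())  # table names declared in metadata.json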
Code Example #2
def load_dataset(dataset,
                 datasets_path=None,
                 bucket=None,
                 aws_key=None,
                 aws_secret=None,
                 max_columns=None):
    dataset_path = _get_dataset_path(dataset, datasets_path, bucket, aws_key,
                                     aws_secret)
    with open(dataset_path / 'metadata.json') as metadata_file:
        metadata_content = json.load(metadata_file)

    if max_columns:
        if len(metadata_content['tables']) > 1:
            raise ValueError(
                'max_columns is not supported for multi-table datasets')

        _apply_max_columns_to_metadata(metadata_content, max_columns)

    metadata = Metadata(metadata_content, dataset_path)
    tables = metadata.get_tables()
    if not hasattr(metadata, 'modality'):
        if len(tables) > 1:
            modality = 'multi-table'
        else:
            table = metadata.get_table_meta(tables[0])
            if any(table.get(field) for field in TIMESERIES_FIELDS):
                modality = 'timeseries'
            else:
                modality = 'single-table'

        metadata._metadata['modality'] = modality
        metadata.modality = modality

    if not hasattr(metadata, 'name'):
        metadata._metadata['name'] = dataset_path.name
        metadata.name = dataset_path.name

    return metadata
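This extended version adds AWS credentials and a max_columns option, which is rejected for multi-table datasets. The helper _apply_max_columns_to_metadata is referenced but not shown above; the following is only a hypothetical sketch of what such a helper could do, assuming metadata_content['tables'] is a mapping of table name to a table dict with a 'fields' mapping and an optional 'primary_key'. It is an illustration, not the actual implementation.

def _apply_max_columns_to_metadata(metadata_content, max_columns):
    # Hypothetical sketch, not the real implementation: trim the single
    # table's fields to at most max_columns, keeping the primary key.
    table = next(iter(metadata_content['tables'].values()))
    fields = table['fields']
    primary_key = table.get('primary_key')

    kept = [primary_key] if primary_key in fields else []
    for name in fields:
        if name not in kept and len(kept) < max_columns:
            kept.append(name)

    table['fields'] = {name: fields[name] for name in kept}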
Code Example #3
class Dataset:
    """Dataset abstraction for benchmarking.

    This class loads a TimeSeries dataset from an sdv.Metadata
    in the format expected by DeepEcho models.

    It handles the extraction of the context columns by analyzing
    the data and identifying the columns that are constant for each
    entity_id.

    Args:
        dataset_path (str):
            Path to the dataset folder, where the metadata.json can be
            found.
        max_entities (int):
            Optionally restrict the number of entities to the indicated
            amount. If not given, use all the entities from the dataset.
        segment_size (int, pd.Timedelta or str):
            If specified, cut each training sequence into several segments of
            the indicated size. The size can either be passed as an integer,
            which will be interpreted as the number of data points to put in
            each segment, or as a pd.Timedelta (or an equivalent str
            representation), which will be interpreted as the segment length
            in time. Timedelta segment sizes can only be used with sequence
            indexes of type datetime.
    """

    VERSION = '0.1.1'

    def _load_table(self):
        columns = list(self.metadata.get_fields(self.table_name).keys())
        primary_key = self.metadata.get_primary_key(self.table_name)
        if primary_key:
            columns.remove(primary_key)

        self.data = self.metadata.load_table(self.table_name)[columns]

    @staticmethod
    def _is_constant(column):
        def wrapped(group):
            return len(group[column].unique()) == 1

        return wrapped

    def _get_context_columns(self):
        context_columns = []
        candidate_columns = set(self.data.columns) - set(self.entity_columns)
        if self.entity_columns:
            for column in candidate_columns:
                if self.data.groupby(self.entity_columns).apply(
                        self._is_constant(column)).all():
                    context_columns.append(column)

        else:
            for column in candidate_columns:
                # Without entity columns, a context column is one that holds a
                # single value across the whole table.
                if len(self.data[column].unique()) == 1:
                    context_columns.append(column)

        return context_columns

    def _download(self):
        os.makedirs(DATA_DIR, exist_ok=True)
        filename = '{}_v{}.zip'.format(self.name, self.VERSION)
        url = urljoin(DATA_URL, filename)
        LOGGER.info('Downloading dataset %s from %s', self.name, url)
        with urlopen(url) as remote:
            with ZipFile(BytesIO(remote.read())) as zipfile:
                zipfile.extractall(DATA_DIR)

    def _filter_entities(self, max_entities):
        entities = self.data[self.entity_columns].drop_duplicates()
        if max_entities < len(entities):
            entities = entities.sample(max_entities)

            data_frames = []
            for _, row in entities.iterrows():
                mask = pd.Series(True, index=self.data.index)
                for column in self.entity_columns:
                    mask &= self.data[column] == row[column]

                data_frames.append(self.data[mask])

            self.data = pd.concat(data_frames)

    def _get_evaluation_data(self, segment_size):
        sequences = assemble_sequences(self.data, self.entity_columns,
                                       self.context_columns, segment_size,
                                       self.sequence_index)
        sequence_dfs = []
        for idx, sequence in enumerate(sequences):
            sequence_df = pd.DataFrame(sequence['data'],
                                       index=self.model_columns).T
            for column, value in zip(self.context_columns,
                                     sequence['context']):
                sequence_df[column] = value

            for column in self.entity_columns:
                sequence_df[column] = idx

            sequence_dfs.append(sequence_df)

        return pd.concat(sequence_dfs).reindex(columns=self.data.columns)

    def _load_metadata(self):
        dataset_path = os.path.join(DATA_DIR, self.name)
        metadata_path = os.path.join(dataset_path, 'metadata.json')

        try:
            self.metadata = Metadata(metadata_path)
            version = self.metadata.get_table_meta(
                self.table_name)['deepecho_version']
            assert version == self.VERSION
        except Exception:
            self._download()
            self.metadata = Metadata(metadata_path)

    def __init__(self,
                 dataset,
                 table_name=None,
                 max_entities=None,
                 segment_size=None):
        if os.path.isdir(dataset):
            self.name = os.path.basename(dataset)
            self.table_name = table_name or self.name
            self.metadata = Metadata(os.path.join(dataset, 'metadata.json'))
        else:
            self.name = dataset
            self.table_name = table_name or self.name
            self._load_metadata()

        self._load_table()

        table_meta = self.metadata.get_table_meta(self.table_name)
        self.entity_columns = table_meta.get('entity_columns') or []
        self.sequence_index = table_meta.get('sequence_index')
        if 'context_columns' in table_meta:
            self.context_columns = table_meta['context_columns']
        else:
            self.context_columns = self._get_context_columns()

        self.model_columns = [
            column for column in self.data.columns
            if column not in self.entity_columns + self.context_columns +
            [self.sequence_index]
        ]

        if max_entities:
            self._filter_entities(max_entities)

        if not segment_size:
            self.evaluation_data = self.data
        else:
            self.evaluation_data = self._get_evaluation_data(segment_size)

    def describe(self):
        """Describe this datasets.

        The output is a ``pandas.Series`` containing:
            * ``entities``: Number of entities in the dataset.
            * ``entity_colums``: Number of entity columns.
            * ``context_colums``: Number of context columns.
            * ``data_columns``: Number of data columns.
            * ``max_sequence_len``: Maximum sequence length.
            * ``min_sequence_len``: Minimum sequence length.

        Returns:
            pandas.Series
        """
        groupby = self.data.groupby(self.entity_columns)
        sizes = groupby.size()
        return pd.Series({
            'entities': len(sizes),
            'entity_columns': len(self.entity_columns),
            'context_columns': len(self.context_columns),
            'model_columns': len(self.model_columns),
            'max_sequence_len': sizes.max(),
            'min_sequence_len': sizes.min(),
        })

    def __repr__(self):
        return "Dataset('{}')".format(self.name)