    def append(self, dataset_definition, data_dir=None):
        """ Add some documents to the dataset

        This is by no means an efficient operation; processing all the
        files at once is likely more suitable in most occasions.
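
        Examples
        --------
        A minimal usage sketch, where fe stands for an already ingested
        vectorizer instance (its construction is not shown here) and the
        paths and ids are purely illustrative::

            fe.append([{'file_path': '/data/extra/doc_0001.txt',
                        'document_id': 1001},
                       {'file_path': '/data/extra/doc_0002.txt',
                        'document_id': 1002}])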
        """
        from freediscovery.engine.lsi import _LSIWrapper
        dsid_dir = self.dsid_dir
        db_old = self.db_.data
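        # new documents continue the internal_id sequence after the current maximum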
        internal_id_offset = db_old.internal_id.max()
        db_extra = DocumentIndex.from_list(dataset_definition, data_dir,
                                           internal_id_offset + 1, dsid_dir)
        db_new = db_extra.data
        vect = self.vect_
        tfidf = self.tfidf_

        filenames_new = list(db_new.file_path.values)

        # write down the new features file
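        # (the new documents are transformed with the already fitted
        # vectorizer and tf-idf transformer; nothing is re-fitted here)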
        X_new_raw = vect.transform(filenames_new)
        X_new = tfidf.transform(X_new_raw)
        X_old = self._load_features()
        # stack the existing rows first so that the feature matrix stays
        # aligned with filenames_old + filenames_new and the concatenated db
        X = scipy.sparse.vstack((X_old, X_new))
        joblib.dump(X, str(dsid_dir / 'features'))

        # write down the new filenames file
        filenames_old = list(self.filenames_)
        filenames = filenames_old + filenames_new

        # recompute data_dir from the combined file list and store
        # paths relative to it
        data_dir = DocumentIndex._detect_data_dir(filenames)
        self._pars['data_dir'] = data_dir

        self._filenames = [os.path.relpath(el, data_dir)
                           for el in filenames]

        with (dsid_dir / 'filenames').open('wb') as fh:
            pickle.dump(self._filenames, fh)
        del db_new['file_path']

        # write down the new pars file
        self._pars = self.pars_
        self._pars['n_samples'] = len(filenames)
        with (dsid_dir / 'pars').open('wb') as fh:
            pickle.dump(self._pars, fh)

        # write down the new database file
        db = pd.concat((db_old, db_new))
        if 'file_path' in db.columns:
            del db['file_path']
        db.to_pickle(str(dsid_dir / 'db'))
        self._db = DocumentIndex(self.pars_['data_dir'], db)

        # find all existing LSI models and update them as well
        if (dsid_dir / 'lsi').exists():
            for lsi_id in os.listdir(str(dsid_dir / 'lsi')):
                lsi_obj = _LSIWrapper(cache_dir=self.cache_dir,
                                      mid=lsi_id)
                lsi_obj.append(X_new)

        # remove all trained models for this dataset
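        # (models fitted on the previous feature matrix would no longer be
        # consistent with the enlarged document set)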
        for model_type in ['categorizer', 'dupdet', 'cluster', 'threading']:
            if (dsid_dir / model_type).exists():
                for mid in os.listdir(str(dsid_dir / model_type)):
                    shutil.rmtree(str(dsid_dir / model_type / mid))

    def ingest(self, data_dir=None, file_pattern='.*', dir_pattern='.*',
               dataset_definition=None, vectorize=True,
               document_id_generator='indexed_file_path',
               ):
        """Perform data ingestion

        Parameters
        ----------
        data_dir : str
            path to the data directory (used only if dataset_definition
            is not provided), default: None
        file_pattern : str
            regular expression used to select files when ingesting from
            data_dir, default: '.*'
        dir_pattern : str
            regular expression used to select directories when ingesting
            from data_dir, default: '.*'
        dataset_definition : list of dicts
            a list of dictionaries with keys
            ['file_path', 'document_id', 'rendition_id']
            describing the data ingestion (this takes precedence over
            data_dir)
        vectorize : bool (default: True)
            whether to vectorize the ingested documents immediately
        document_id_generator : str
            strategy used to generate the document_id field when it is
            not provided, default: 'indexed_file_path'
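
        Examples
        --------
        A minimal usage sketch, where fe stands for a freshly created
        vectorizer instance (its construction is not shown here) and the
        paths and ids are purely illustrative::

            # ingest and vectorize every file under a directory
            fe.ingest(data_dir='/data/corpus')

            # or describe the documents explicitly
            fe.ingest(dataset_definition=[
                {'file_path': '/data/corpus/doc_0001.txt', 'document_id': 1},
                {'file_path': '/data/corpus/doc_0002.txt', 'document_id': 2},
            ])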
        """
        dsid_dir = self.cache_dir / self.dsid
        if (dsid_dir / 'db').exists():
            raise ValueError('Dataset {} already vectorized!'
                             .format(self.dsid))
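        # previously ingested batches are stored as 'db.<zero-padded id>';
        # the numeric suffix of the last batch file gives the last
        # internal_id used so far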
        db_list = list(sorted(dsid_dir.glob('db*')))
        if len(db_list) == 0:
            internal_id_offset = -1
        elif len(db_list) >= 1:
            internal_id_offset = int(db_list[-1].name[3:])

        pars = self.pars_

        # CSV input: each row of the provided file becomes a separate document
        if pars.get('column_ids', None) is not None:
            if dataset_definition is None:
                raise ValueError("CSV files can only be provided using "
                                 "the `dataset_definition` parameter")
            else:
                if len(dataset_definition) > 1:
                    raise ValueError(
                            "Only one CSV can be provided at a time"
                    )
                file_path = dataset_definition[0]['file_path']
                X = pd.read_csv(
                        file_path, sep=pars['column_separator'], header=None)
                dataset_definition = [
                        {'file_path': f"{file_path}:{idx}", 'document_id': idx}
                        for idx in range(len(X))]

                db = DocumentIndex.from_list(
                        dataset_definition, data_dir,
                        internal_id_offset + 1, dsid_dir,
                        document_id_generator=document_id_generator)
        elif dataset_definition is not None:
            db = DocumentIndex.from_list(
                    dataset_definition, data_dir,
                    internal_id_offset + 1, dsid_dir,
                    document_id_generator=document_id_generator)
        elif data_dir is not None:
            db = DocumentIndex.from_folder(
                    data_dir, file_pattern, dir_pattern,
                    internal_id_offset + 1,
                    document_id_generator=document_id_generator)
        else:
            db = None

        if db is not None:
            data_dir = db.data_dir

            # name the batch file after the last internal_id it contains,
            # zero-padded so that lexicographic sorting matches numeric order
            batch_suffix = '.{:09}'.format(db.data.internal_id.iloc[-1])

            self._filenames = db.data.file_path.values.tolist()
            del db.data['file_path']
            db.data.to_pickle(str(dsid_dir / ('db' + batch_suffix)))
            with (dsid_dir / ('filenames' + batch_suffix)).open('wb') as fh:
                pickle.dump(self._filenames, fh)
            self._db = db

        if vectorize:
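            # consolidate all ingested batches into single 'db' and
            # 'filenames' files before computing the features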
            db_list = list(sorted(dsid_dir.glob('db*')))
            filenames_list = list(sorted(dsid_dir.glob('filenames*')))
            if len(db_list) == 0:
                raise ValueError('No ingested files found!')

            if len(db_list) == 1:
                with filenames_list[0].open('rb') as fh:
                    filenames_concat = pickle.load(fh)
            elif len(db_list) >= 2:
                # accumulate the filename lists of all batches
                # into a single 'filenames' file
                filenames_concat = []
                for fname in filenames_list:
                    with fname.open('rb') as fh:
                        filenames_concat += pickle.load(fh)

            if self.pars_['data_dir'] is None:
                data_dir = DocumentIndex._detect_data_dir(filenames_concat)
                self._pars['data_dir'] = data_dir
            else:
                data_dir = self._pars['data_dir']

            self._filenames = [os.path.relpath(el, data_dir)
                               for el in filenames_concat]

            with (dsid_dir / 'filenames').open('wb') as fh:
                pickle.dump(self._filenames, fh)

            for fname in filenames_list:
                fname.unlink()

            # save databases
            if len(db_list) == 1:
                db_list[0].rename(dsid_dir / 'db')
                self.db_.filenames_ = self._filenames
                self.db_.data['file_path'] = self._filenames
            elif len(db_list) >= 2:

                db_concat = []
                for fname in db_list:
                    db_concat.append(pd.read_pickle(str(fname)))
                db_new = pd.concat(db_concat, axis=0)
                db_new.filenames_ = self._filenames
                db_new.set_index('internal_id', drop=False, inplace=True)
                self._db = DocumentIndex(data_dir, db_new)
                if 'file_path' in db_new.columns:
                    del db_new['file_path']
                db_new.to_pickle(str(dsid_dir / 'db'))

            # save parameters
            self._pars['n_samples'] = len(self._filenames)
            self._pars['data_dir'] = data_dir

            with (dsid_dir / 'pars').open('wb') as fh:
                pickle.dump(self._pars, fh)

            # vectorize the consolidated document set
            self.transform()

            if (dsid_dir / 'raw').exists():
                shutil.rmtree(str(dsid_dir / 'raw'))

        if db is None and not vectorize:
            raise ValueError('At least one of the data_dir, dataset_definition '
                             'or vectorize parameters must be provided!')