def test_delete(self):
    self._save_observations()

    records = Observation.find(self.dataset)
    self.assertNotEqual(records, [])

    Observation.delete_all(self.dataset)
    records = [x for x in Observation.find(self.dataset)]

    self.assertEqual(records, [])
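The test above drives `Observation.delete_all(dataset)`. For orientation, here is a minimal sketch of what such a call plausibly does over a pymongo-backed store; the collection handle and the `DATASET_ID` field name are assumptions for illustration, not bamboo's confirmed internals:

from pymongo import MongoClient

DATASET_ID = 'dataset_id'  # assumed field linking a row to its dataset

observations = MongoClient()['bamboo']['observations']  # assumed collection

def delete_all_sketch(dataset, query=None):
    # Scope any caller-supplied filter to this dataset's rows, then
    # remove every matching observation in one bulk operation.
    query = dict(query or {})
    query[DATASET_ID] = dataset.dataset_id
    observations.delete_many(query)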
def remove_parent_observations(self, parent_id):
    """Remove observations for this dataset with the passed `parent_id`.

    :param parent_id: Remove observations with this ID as their parent
        dataset ID.
    """
    Observation.delete_all(self, {PARENT_DATASET_ID: parent_id})
def delete_task(dataset, query=None):
    """Background task to delete dataset and its associated observations."""
    Observation.delete_all(dataset, query=query)

    # with no query this is a full delete: remove the dataset record
    # itself and its column encoding, not just its rows
    if query is None:
        super(dataset.__class__, dataset).delete(
            {DATASET_ID: dataset.dataset_id})
        Observation.delete_encoding(dataset)
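The `query` argument splits `delete_task` into two behaviours: with a filter, only the matching observations are removed and the dataset survives; with no filter, the observations, the dataset record, and its encoding all go. A usage sketch (the dataset handle and row filter are illustrative):

# partial delete: drop only the matching rows, keep the dataset itself
delete_task(dataset, query={'amount': 0})

# full delete: drop all rows, then the dataset record and its encoding
delete_task(dataset)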
def test_delete_all(self):
    self.__save_records()

    records = Observation.find(self.dataset)
    self.assertNotEqual(records, [])

    Observation.delete_all(self.dataset)
    records = Observation.find(self.dataset)

    self.assertEqual(records, [])
def remove_parent_observations(self, parent_id):
    """Remove observations for this dataset with the passed `parent_id`.

    :param parent_id: Remove observations with this ID as their parent
        dataset ID.
    """
    Observation.delete_all(self, {PARENT_DATASET_ID: parent_id})

    # clear the cached dframe
    self.__dframe = None
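`remove_parent_observations` matters for datasets built from other datasets, such as merges: when a parent is updated, its stale copies are filtered out by parent ID before the fresh rows come back in. A sketch under that assumption; `merged` and `parent` are illustrative handles, and `save_observations` is borrowed from `replace_observations` below:

# drop the merged dataset's stale copies of the updated parent's rows,
# then write the parent's current rows back in
merged.remove_parent_observations(parent.dataset_id)
merged.save_observations(parent.dframe())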
def replace_observations(self, dframe, overwrite=False,
                         set_num_columns=True):
    """Remove all rows for this dataset and save the rows in `dframe`.

    :param dframe: Replace rows in this dataset with this DataFrame's rows.
    :param overwrite: If true replace the schema, otherwise update it.
        Default False.
    :param set_num_columns: If true update the dataset stored number of
        columns. Default True.

    :returns: BambooFrame equivalent to the passed in `dframe`.
    """
    self.build_schema(dframe, overwrite=overwrite,
                      set_num_columns=set_num_columns)
    dframe = self.add_id_column_to_dframe(dframe)
    Observation.delete_all(self)

    return self.save_observations(dframe)
def replace_observations(self, dframe, overwrite=False,
                         set_num_columns=True):
    """Remove all rows for this dataset and save the rows in `dframe`.

    :param dframe: Replace rows in this dataset with this DataFrame's rows.
    :param overwrite: If true replace the schema, otherwise update it.
        Default False.
    :param set_num_columns: If true update the dataset stored number of
        columns. Default True.

    :returns: DataFrame equivalent to the passed in `dframe`.
    """
    self.build_schema(dframe, overwrite=overwrite,
                      set_num_columns=set_num_columns)
    Observation.delete_all(self)

    return self.save_observations(dframe)
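A usage sketch for `replace_observations`: swap a dataset's rows wholesale for a new DataFrame, letting `overwrite=True` rebuild the schema when the columns changed. The DataFrame contents are illustrative:

import pandas as pd

new_rows = pd.DataFrame({'name': ['a', 'b'], 'amount': [1, 2]})

# rebuild the schema from new_rows rather than merging into the old one
dframe = dataset.replace_observations(new_rows, overwrite=True)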
def __create_or_update(self, url=None, csv_file=None, json_file=None,
                       schema=None, na_values=[], perish=0,
                       dataset_id=None):
    result = None
    error = 'url, csv_file, json_file or schema required'

    try:
        if schema or url or csv_file or json_file:
            if dataset_id is None:
                dataset = Dataset()
                dataset.save()
            else:
                # updating an existing dataset: wipe its current rows
                # before importing the new data
                dataset = Dataset.find_one(dataset_id)
                Observation.delete_all(dataset)

            if schema:
                dataset.import_schema(schema)

            na_values = safe_json_loads(na_values)

            if url:
                dataset.import_from_url(url, na_values=na_values)
            elif csv_file:
                dataset.import_from_csv(csv_file, na_values=na_values)
            elif json_file:
                dataset.import_from_json(json_file)

            result = {Dataset.ID: dataset.dataset_id}

            # optionally schedule the dataset to delete itself after
            # `perish` seconds
            perish = parse_int(perish)

            if perish:
                dataset.delete(countdown=perish)
    except urllib2.URLError:
        error = 'could not load: %s' % url
    except IOError:
        error = 'could not get a filehandle for: %s' % csv_file
    except JSONError as e:
        error = str(e)

    self.set_response_params(result, success_status_code=201)

    return self._dump_or_error(result, error)
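`safe_json_loads` is referenced but not shown. A minimal sketch of the behaviour the handler above relies on — parse a client-supplied JSON string, surfacing parse failures as the `JSONError` the handler catches; this implementation is an assumption, not bamboo's actual helper:

import json

class JSONError(Exception):
    """Raised when a client-supplied JSON parameter fails to parse."""

def safe_json_loads_sketch(string):
    # hypothetical stand-in for safe_json_loads: parse if a value was
    # supplied, and convert parse failures into a reportable JSONError
    try:
        return json.loads(string) if string else string
    except ValueError as e:
        raise JSONError('could not parse JSON: %s' % e)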
def delete_task(dataset):
    """Background task to delete dataset and its associated observations."""
    Observation.delete_all(dataset)
    super(dataset.__class__, dataset).delete(
        {DATASET_ID: dataset.dataset_id})
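`delete_task` is documented as a background task, and the `countdown=perish` argument seen earlier matches Celery's deferred-execution option. A sketch of plausible wiring, assuming a Celery app; the broker URL, the task registration, and the ID-based lookup are assumptions, not confirmed bamboo code:

from celery import Celery

app = Celery('bamboo', broker='redis://localhost:6379')  # assumed broker

@app.task
def delete_dataset(dataset_id):
    """Hypothetical task wrapper: look the dataset up, then delete it."""
    dataset = Dataset.find_one(dataset_id)
    delete_task(dataset)

# fire-and-forget, or deferred by `countdown` seconds as with `perish`
delete_dataset.delay('some-dataset-id')
delete_dataset.apply_async(args=['some-dataset-id'], countdown=60)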