def test_bamboo_service(self):
    # comment out when we can test or mock it differently
    raise SkipTest

    service_url = 'http://bamboo.io/'
    service_name = 'bamboo'

    xml_submission1 = os.path.join(self.this_directory,
                                   u'fixtures',
                                   u'dhisform_submission1.xml')
    xml_submission2 = os.path.join(self.this_directory,
                                   u'fixtures',
                                   u'dhisform_submission2.xml')
    xml_submission3 = os.path.join(self.this_directory,
                                   u'fixtures',
                                   u'dhisform_submission3.xml')

    # make sure the xform doesn't have a bamboo dataset
    self.xform.bamboo_dataset = ''
    self.xform.save()

    # make a first submission without the service
    self._make_submission(xml_submission1)
    self.assertEqual(self.response.status_code, 201)

    # add rest service AFTER 1st submission
    self._add_rest_service(service_url, service_name)

    # submit another one.
    self._make_submission(xml_submission2)
    self.assertEqual(self.response.status_code, 201)
    self.wait(5)

    # it should have created the whole dataset
    xform = XForm.objects.get(id=self.xform.id)
    self.assertTrue(xform.bamboo_dataset != ''
                    and xform.bamboo_dataset is not None)

    dataset = Dataset(connection=Connection(service_url),
                      dataset_id=xform.bamboo_dataset)
    self.assertEqual(dataset.get_info()['num_rows'], 2)

    # submit a third one. check that we have 3 records
    self._make_submission(xml_submission3)
    self.assertEqual(self.response.status_code, 201)
    self.wait(5)
    self.assertEqual(dataset.get_info()['num_rows'], 3)

    # test regeneration
    dsi = dataset.get_info()
    regen_url = reverse(link_to_bamboo, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    response = self.client.post(regen_url, {})
    # deleting the dataset redirects to the profile page
    self.assertEqual(response.status_code, 302)
    self.wait(5)

    xform = XForm.objects.get(id=self.xform.id)
    self.assertTrue(xform.bamboo_dataset)

    dataset = Dataset(connection=Connection(service_url),
                      dataset_id=xform.bamboo_dataset)
    new_dsi = dataset.get_info()
    self.assertEqual(new_dsi['num_rows'], dsi['num_rows'])
    self.assertNotEqual(new_dsi['id'], dsi['id'])
def setUp(self):
    self.bamboo_url = self.TEST_BAMBOO_URL
    self.connection = Connection(self.bamboo_url)
    self.default_connection = Connection(DEFAULT_BAMBOO_URL)
    # these two datasets (if created) will automatically
    # get deleted by the test harness
    # NOTE: do not reuse these names for tests, they
    # should only be created through the helper functions
    self.dataset = None
    self.aux_dataset = None
    # any additional datasets should be added
    # to this list and they will be deleted as well
    self.datasets_to_delete = []
@classmethod
def join(cls, left_dataset, right_dataset, on, connection=None):
    """
    Create a new dataset that is the result of a join, where
    *left_dataset* is the lefthand side, *right_dataset* is the
    righthand side, and *on* is the column on which to join.
    The column that is joined on must be unique in the righthand
    side and must exist in both datasets.
    """
    if connection is None:
        connection = Connection()
    if not isinstance(left_dataset, Dataset) or \
            not isinstance(right_dataset, Dataset):
        raise PyBambooException(
            'datasets must be instances of Dataset.')
    data = {
        'dataset_id': left_dataset.id,
        'other_dataset_id': right_dataset.id,
        'on': on,
    }
    result = connection.make_api_request(
        'POST', '/datasets/join', data=data)
    if 'id' in result:
        return Dataset(result['id'], connection=connection)
    return False
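# Illustrative usage sketch (not part of the source): joining two
# existing datasets. The dataset ids and the 'district_id' join
# column are hypothetical; assumes a reachable bamboo instance.
from pybamboo.connection import Connection
from pybamboo.dataset import Dataset

conn = Connection()
households = Dataset('household_dataset_id', connection=conn)
districts = Dataset('district_dataset_id', connection=conn)
joined = Dataset.join(households, districts, 'district_id',
                      connection=conn)
if joined:
    print joined.get_info()['num_columns']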
@classmethod
def merge(cls, datasets, connection=None):
    """
    Create a new dataset that is a row-wise merge of those
    in *datasets*.  Returns the new merged dataset.
    """
    if connection is None:
        connection = Connection()
    # TODO: allow list of dataset_ids?
    checked_datasets = []
    for dataset in datasets:
        if not isinstance(dataset, Dataset):
            raise PyBambooException(
                'Datasets need to be instances of Dataset.')
        checked_datasets.append(dataset.id)
    data = {'dataset_ids': safe_json_dumps(
        checked_datasets,
        PyBambooException('datasets is not JSON-serializable.'))}
    result = connection.make_api_request(
        'POST', '/datasets/merge', data=data)
    if 'id' in result:
        return Dataset(result['id'], connection=connection)
    # this is never reached...
    # see TestDataset.test_merge_fail()
    return False
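# Illustrative usage sketch (not part of the source): a row-wise
# merge of two hypothetical datasets; merge returns a new Dataset
# on success.
from pybamboo.connection import Connection
from pybamboo.dataset import Dataset

conn = Connection()
january = Dataset('january_dataset_id', connection=conn)
february = Dataset('february_dataset_id', connection=conn)
merged = Dataset.merge([january, february], connection=conn)
if merged:
    print merged.get_info()['num_rows']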
def send(self, url, parsed_instance):
    xform = parsed_instance.instance.xform
    rows = [parsed_instance.to_dict_for_mongo()]

    # prefix meta column names for bamboo
    prefix = (u'%(id_string)s_%(id)s' % {
        'id_string': xform.id_string,
        'id': xform.id
    })
    for row in rows:
        # in Python 2, row.items() returns a list, so deleting
        # keys while iterating is safe here
        for col, value in row.items():
            if col.startswith('_') or col.startswith('meta_') \
                    or col.startswith('meta/'):
                new_col = (u'%(prefix)s%(col)s' % {
                    'prefix': prefix,
                    'col': col
                })
                row.update({new_col: value})
                del row[col]

    # create the dataset on bamboo first (including the current
    # submission)
    if not xform.bamboo_dataset:
        dataset_id = get_new_bamboo_dataset(xform, force_last=True)
        xform.bamboo_dataset = dataset_id
        xform.save()
    else:
        dataset = Dataset(
            connection=Connection(url=get_bamboo_url(xform)),
            dataset_id=xform.bamboo_dataset)
        dataset.update_data(rows=rows)
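# Illustrative sketch (not part of the source) of the column
# prefixing above: given a hypothetical id_string 'survey' and xform
# id 12, meta columns are renamed so they cannot collide across
# forms once the rows land in bamboo. Note the prefix is joined to
# the column name with no separator, matching the code above.
row = {'_id': 42, 'meta/instanceID': 'uuid:abc', 'age': 30}
prefix = u'%(id_string)s_%(id)s' % {'id_string': 'survey', 'id': 12}
for col, value in row.items():  # list in Python 2, safe to mutate
    if col.startswith('_') or col.startswith('meta_') \
            or col.startswith('meta/'):
        row[prefix + col] = value
        del row[col]
# row is now {'survey_12_id': 42,
#             'survey_12meta/instanceID': 'uuid:abc', 'age': 30}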
def run_test_suite(dataset_file_path_list):
    print "running test suite for %s" % " ".join(dataset_file_path_list)
    alldata = []
    for dataset_name in dataset_file_path_list:
        d = {}
        d['hostname'] = os.uname()[1]
        d['bamboo_url'] = URL
        d['unix_time'] = time.time()
        conn = Connection(url=URL)
        d['commit'] = conn.version['commit']
        d['branch'] = conn.version['branch']
        dataset = Dataset(connection=conn, path=dataset_name)
        d['import_time'] = time_till_import_is_finished(dataset)
        info = dataset.get_info()
        d['row'] = info['num_rows']
        d['col'] = info['num_columns']
        d['add_1_calculations_time'] = time_to_add_1_calculations(dataset)
        d['add_5_calculations_1by1_time'] = time_to_add_5_calculations_1by1(
            dataset)
        d['add_5_calculations_batch_time'] = time_to_add_5_calculations_batch(
            dataset)
        d['update_1_time'] = time_to_add_1_update(dataset)
        d['update_5_1by1_time'] = time_to_add_5_update_1by1(dataset)
        d['update_5_batch_time'] = time_to_add_5_update_batch(dataset)
        dataset.delete()
        alldata.append(d)
    return alldata
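# Illustrative invocation (not part of the source): the CSV paths
# are hypothetical; the suite returns one dict of timings per
# dataset, each uploaded, benchmarked, and then deleted.
if __name__ == '__main__':
    results = run_test_suite(['fixtures/small.csv',
                              'fixtures/large.csv'])
    print results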
def update_sources(site):
    sources_dir = os.path.join(os.path.dirname(__file__), 'data')
    if isinstance(site, basestring):
        sources = os.path.join(os.path.dirname(__file__), 'data',
                               site.lower(), 'sources.json')
        sources_dir = os.path.join(sources_dir, site.lower())
    else:
        sources = os.path.join(os.path.dirname(__file__), 'sources.json')
    if not os.path.exists(sources):
        raise Exception(u"Please define a sources.json.")

    with open(sources) as f:
        sources_dict = json.loads(f.read())

    assert 'bamboo_server' in sources_dict
    assert 'sources' in sources_dict

    connection = Connection(sources_dict['bamboo_server'])
    for k, v in sources_dict['sources'].iteritems():
        if v == "":
            path = os.path.join(sources_dir, k)
            if not os.path.exists(path):
                raise Exception(u"%s does not exist." % path)
            try:
                dataset = Dataset(path=path,
                                  connection=connection,
                                  na_values=["---", "None"],
                                  data_format='csv')
            except Exception, e:
                print u"Exception: Publishing %s failed!\n\t%s" % (k, e)
            else:
                sources_dict['sources'][k] = dataset.id
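# For reference (hypothetical values): update_sources() expects a
# sources.json shaped roughly like this. An empty-string value marks
# a file that still needs publishing; it is replaced with the new
# bamboo dataset id on success.
#
# {
#     "bamboo_server": "http://bamboo.io",
#     "sources": {
#         "households.csv": "",
#         "schools.csv": "already-published-dataset-id"
#     }
# }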
def delete_bamboo_dataset(xform):
    if not xform.bamboo_dataset:
        return False
    try:
        dataset = Dataset(
            connection=Connection(url=get_bamboo_url(xform)),
            dataset_id=xform.bamboo_dataset)
        return dataset.delete()
    except ErrorParsingBambooData:
        return False
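# Illustrative sketch (not part of the source): deleting a form's
# bamboo dataset and clearing the reference on success. 'xform' is
# a hypothetical XForm instance with bamboo_dataset set.
if delete_bamboo_dataset(xform):
    xform.bamboo_dataset = u''
    xform.save()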
def _set_sources(self, site=None):
    path = os.path.join(os.path.dirname(__file__), 'sources.json')
    if isinstance(site, basestring):
        path = os.path.join(
            os.path.dirname(__file__),
            'data',
            site.lower(),
            'sources.json'
        )
    with open(path) as f:
        self._sources_dict = json.loads(f.read())
    self._sources = self._sources_dict['sources']
    self.connection = Connection(self._sources_dict['bamboo_server'])
def get_new_bamboo_dataset(xform, force_last=False):
    dataset_id = u''
    try:
        content_data = get_csv_data(xform, force_last=force_last)
        dataset = Dataset(
            connection=Connection(url=get_bamboo_url(xform)),
            content=content_data,
            na_values=['n/a'])
    except (ErrorParsingBambooData, NoRecordsFoundError):
        return dataset_id
    if dataset.id:
        return dataset.id
    return dataset_id
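# Illustrative sketch (not part of the source): creating a fresh
# dataset for a form; 'xform' is a hypothetical XForm instance. An
# empty string means creation failed (no records, or bamboo could
# not parse the data).
dataset_id = get_new_bamboo_dataset(xform)
if dataset_id:
    xform.bamboo_dataset = dataset_id
    xform.save()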
def __init__(self, dataset_id=None, url=None,
             path=None, content=None, data_format='csv',
             schema_path=None, schema_content=None,
             na_values=None, connection=None, reset=False):
    """
    Create a new pybamboo.Dataset from one of the following:

        * dataset_id - the id of an existing bamboo.Dataset
        * url - url to a .csv file
        * path - path to a local .csv or .json file
        * content - a CSV or JSON string
        * data_format - whether path or content is csv | json
        * schema_path - path to a JSON SDF schema
        * schema_content - a JSON SDF string

    One can also pass in a pybamboo.Connection object.  If this
    is not supplied one will be created automatically with the
    default options.
    """
    if dataset_id is None and url is None \
            and path is None and content is None \
            and schema_path is None and schema_content is None:
        raise PyBambooException(
            'Must supply dataset_id, url, content, schema or file path.')
    if data_format not in self.DATA_FORMATS:
        raise PyBambooException('Illegal data_format: %s. data_format'
                                ' must be one of %s'
                                % (data_format, self.DATA_FORMATS))
    req_data = {}

    if reset:
        # reset appears to assume __init__ is being re-invoked on an
        # existing instance, so self._id is already set; calling the
        # constructor directly with reset=True would raise
        # AttributeError here
        req_data.update({'dataset_id': self._id})

    if na_values is not None:
        if not isinstance(na_values, (list, tuple, set)):
            raise PyBambooException('N/A values must be a list.')
        self.NA_VALUES = na_values
        req_data.update({'na_values': safe_json_dumps(
            na_values,
            PyBambooException('na_values are not JSON-serializable'))})

    if connection is None:
        self._connection = Connection()
    else:
        self._connection = connection

    if dataset_id is not None:
        # TODO: check if this dataset exists?
        self._id = dataset_id
        return

    if url is not None:
        # TODO: check valid url?
        req_data.update({'url': url})
        self._id = self._connection.make_api_request(
            'POST', '/datasets', req_data).get('id')
        return

    # files might be overloaded by schema or path/content
    files = {}
    if schema_path is not None or schema_content is not None:
        # TODO: check for bad file stuff?
        schema_data = schema_content if schema_content is not None \
            else open(schema_path)
        files.update({'schema': ('data.schema.json', schema_data)})
    if path is not None or content is not None:
        # TODO: check for bad file stuff?
        data = content if content is not None else open(path)
        files.update({'%s_file' % data_format:
                      ('data.%s' % data_format, data)})

    self._id = self._connection.make_api_request(
        'POST', '/datasets', files=files, data=req_data).get('id')
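# Illustrative sketches (not part of the source) of the constructor
# variants documented above; the urls, paths, ids, and server
# address are hypothetical.
from pybamboo.connection import Connection
from pybamboo.dataset import Dataset

conn = Connection('http://bamboo.io')
by_id = Dataset('existing_dataset_id', connection=conn)
by_url = Dataset(url='http://example.com/data.csv', connection=conn)
by_path = Dataset(path='/tmp/data.csv', connection=conn)
by_content = Dataset(content='a,b\n1,2\n', data_format='csv',
                     connection=conn)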
import json
import sys
import time

from pybamboo.connection import Connection
from pybamboo.dataset import Dataset

PROD_BAMBOO_ID_FILE = 'ids/ids.prod.json'
DEV_BAMBOO_ID_FILE = 'ids/ids.dev.json'
DEV_BAMBOO = True

# set bamboo instance
if DEV_BAMBOO:
    connection = Connection('http://localhost:8080')
    bamboo_id_file = DEV_BAMBOO_ID_FILE
else:
    connection = Connection()
    bamboo_id_file = PROD_BAMBOO_ID_FILE

# get state of current datasets
with open(bamboo_id_file) as f:
    bamboo_ids = json.loads(f.read())

if not bamboo_ids:
    print '"%s" not found: exiting' % bamboo_id_file
    sys.exit(0)

print 'current dataset status:'
print json.dumps(bamboo_ids, indent=4, sort_keys=True)

# upload originals
for sector in bamboo_ids.keys():
    for name, id in bamboo_ids[sector]['originals'].iteritems():
def __init__(self, site=None):
    self.connection = Connection(BAMBOO_URL)
    self._set_sources(site)
    self._db = MongoClient().bamboo_dev