Example No. 1
    def test_bamboo_service(self):
        # remove this skip once we can test or mock the service differently
        raise SkipTest
        service_url = 'http://bamboo.io/'
        service_name = 'bamboo'

        xml_submission1 = os.path.join(self.this_directory, u'fixtures',
                                       u'dhisform_submission1.xml')
        xml_submission2 = os.path.join(self.this_directory, u'fixtures',
                                       u'dhisform_submission2.xml')
        xml_submission3 = os.path.join(self.this_directory, u'fixtures',
                                       u'dhisform_submission3.xml')

        # make sure the xform doesn't have a bamboo dataset yet
        self.xform.bamboo_dataset = ''
        self.xform.save()

        # make a first submission without the service
        self._make_submission(xml_submission1)
        self.assertEqual(self.response.status_code, 201)

        # add rest service AFTER 1st submission
        self._add_rest_service(service_url, service_name)

        # submit another one.
        self._make_submission(xml_submission2)
        self.assertEqual(self.response.status_code, 201)
        self.wait(5)
        # the service should have created a dataset containing both submissions
        xform = XForm.objects.get(id=self.xform.id)
        self.assertTrue(xform.bamboo_dataset != ''
                        and xform.bamboo_dataset is not None)
        dataset = Dataset(connection=Connection(service_url),
                          dataset_id=xform.bamboo_dataset)
        self.assertEqual(dataset.get_info()['num_rows'], 2)

        # submit a third one and check that we now have 3 records
        self._make_submission(xml_submission3)
        self.assertEqual(self.response.status_code, 201)
        self.wait(5)
        self.assertEqual(dataset.get_info()['num_rows'], 3)

        # test regeneration
        dsi = dataset.get_info()
        regen_url = reverse(link_to_bamboo,
                            kwargs={
                                'username': self.user.username,
                                'id_string': self.xform.id_string
                            })
        response = self.client.post(regen_url, {})
        # deleting the dataset redirects to the profile page
        self.assertEqual(response.status_code, 302)
        self.wait(5)
        xform = XForm.objects.get(id=self.xform.id)
        self.assertTrue(xform.bamboo_dataset)
        dataset = Dataset(connection=Connection(service_url),
                          dataset_id=xform.bamboo_dataset)
        new_dsi = dataset.get_info()
        self.assertEqual(new_dsi['num_rows'], dsi['num_rows'])
        self.assertNotEqual(new_dsi['id'], dsi['id'])
Example No. 2
 def test_merge_default_connection(self):
     dataset = Dataset(path=self.CSV_FILE,
                       connection=self.default_connection)
     other_dataset = Dataset(path=self.CSV_FILE,
                             connection=self.default_connection)
     result = Dataset.merge([dataset, other_dataset])
     self.assertTrue(isinstance(result, Dataset))
     self._cleanup(dataset)
     self._cleanup(other_dataset)
     self._cleanup(result)
Example No. 3
 def test_join_default_connection(self):
     dataset = Dataset(path=self.CSV_FILE,
                       connection=self.default_connection)
     aux_dataset = Dataset(path=self.AUX_CSV_FILE,
                           connection=self.default_connection)
     self.wait()
     result = Dataset.join(dataset, aux_dataset, 'food_type')
     self.wait()
     self.assertTrue(isinstance(result, Dataset))
     self._cleanup(dataset)
     self._cleanup(aux_dataset)
     self._cleanup(result)
Example No. 4
    def test_create_dataset_from_schema(self):
        dataset = Dataset(schema_path=self.SCHEMA_FILE,
                          connection=self.connection)
        self.assertTrue(dataset.id is not None)
        self._cleanup(dataset)

        # schema string
        with open(self.SCHEMA_FILE) as f:
            schema_str = f.read()
        dataset = Dataset(schema_content=schema_str,
                          connection=self.connection)
        self.assertTrue(dataset.id is not None)
        self._cleanup(dataset)
Example No. 5
def run_test_suite(dataset_file_path_list):
    print "running test suite for %s" % " ".join(dataset_file_path_list)
    alldata = []
    for dataset_name in dataset_file_path_list:
        d = {}
        d['hostname'] = os.uname()[1]
        d['bamboo_url'] = URL
        d['unix_time'] = time.time()
        conn = Connection(url=URL)
        d['commit'] = conn.version['commit']
        d['branch'] = conn.version['branch']
        dataset = Dataset(connection=conn, path=dataset_name)
        d['import_time'] = time_till_import_is_finished(dataset)
        info = dataset.get_info()
        d['row'] = info['num_rows']
        d['col'] = info['num_columns']
        d['add_1_calculations_time'] = time_to_add_1_calculations(dataset)
        d['add_5_calculations_1by1_time'] = time_to_add_5_calculations_1by1(
            dataset)
        d['add_5_calculations_batch_time'] = time_to_add_5_calculations_batch(
            dataset)
        d['update_1_time'] = time_to_add_1_update(dataset)
        d['update_5_1by1_time'] = time_to_add_5_update_1by1(dataset)
        d['update_5_batch_time'] = time_to_add_5_update_batch(dataset)
        dataset.delete()
        alldata.append(d)
    return alldata
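
The timing helpers used above are not shown in this example. A minimal
sketch of what time_till_import_is_finished could look like, reusing the
state-polling pattern from Example No. 20 (the helper name comes from the
example above; the one-second polling interval is my assumption):

def time_till_import_is_finished(dataset):
    # poll bamboo until it reports the dataset as 'ready', then
    # return the elapsed wall-clock time in seconds
    start = time.time()
    while dataset.get_info()['state'] != 'ready':
        time.sleep(1)
    return time.time() - start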
Example No. 6
def update_sources(site):
    sources = 'sources.json'
    sources_dir = os.path.join(os.path.dirname(__file__), 'data')
    if isinstance(site, basestring):
        sources = os.path.join(os.path.dirname(__file__), 'data', site.lower(),
                               'sources.json')
        sources_dir = os.path.join(sources_dir, site.lower())
    else:
        sources = os.path.join(os.path.dirname(__file__), 'sources.json')
    if not os.path.exists(sources):
        raise Exception(u"Please define a sources.json.")
    with open(sources) as f:
        sources_dict = json.loads(f.read())

    assert 'bamboo_server' in sources_dict
    assert 'sources' in sources_dict

    connection = Connection(sources_dict['bamboo_server'])
    for k, v in sources_dict['sources'].iteritems():
        if v == "":
            path = os.path.join(sources_dir, k)
            if not os.path.exists(path):
                raise Exception(u"%s does not exist." % path)
            try:
                dataset = Dataset(path=path,
                                  connection=connection,
                                  na_values=["---", "None"],
                                  data_format='csv')
            except Exception, e:
                print u"Exception: Publishing %s failed!\n\t%s" % (k, e)
            else:
                sources_dict['sources'][k] = dataset.id
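
Note that, at least in this excerpt, update_sources only refreshes
sources_dict in memory. A hedged sketch of a final step that would persist
the new dataset ids (my assumption, not part of the original function):

    # hypothetical final step: write the refreshed ids back to sources.json
    with open(sources, 'w') as f:
        f.write(json.dumps(sources_dict, indent=4))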
Example No. 7
    def test_create_dataset_from_schema_with_data(self):
        # schema + JSON data
        dataset = Dataset(path=self.JSON_FILE,
                          data_format='json',
                          schema_path=self.SCHEMA_FILE,
                          connection=self.connection)
        self.assertTrue(dataset.id is not None)
        self._cleanup(dataset)

        # schema + CSV data
        dataset = Dataset(path=self.CSV_FILE,
                          data_format='csv',
                          schema_path=self.SCHEMA_FILE,
                          connection=self.connection)
        self.assertTrue(dataset.id is not None)
        self._cleanup(dataset)
Example No. 8
    def send(self, url, parsed_instance):

        xform = parsed_instance.instance.xform
        rows = [parsed_instance.to_dict_for_mongo()]

        # prefix meta column names for bamboo
        prefix = (u'%(id_string)s_%(id)s' % {
            'id_string': xform.id_string,
            'id': xform.id
        })

        for row in rows:
            for col, value in row.items():
                if col.startswith('_') or col.startswith('meta_') \
                    or col.startswith('meta/'):
                    new_col = (u'%(prefix)s%(col)s' % {
                        'prefix': prefix,
                        'col': col
                    })
                    row.update({new_col: value})
                    del row[col]

        # create dataset on bamboo first (including current submission)
        if not xform.bamboo_dataset:
            dataset_id = get_new_bamboo_dataset(xform, force_last=True)
            xform.bamboo_dataset = dataset_id
            xform.save()
        else:
            dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
                              dataset_id=xform.bamboo_dataset)
            dataset.update_data(rows=rows)
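
For illustration only (the id_string, id and row values here are
hypothetical): with prefix = u'good_eats_12', the loop above renames only
the meta columns, concatenating the prefix directly onto the original name:

row = {u'_id': 1, u'meta/instanceID': u'uuid:x', u'food_type': u'snack'}
# after the loop, row is:
# {u'good_eats_12_id': 1,
#  u'good_eats_12meta/instanceID': u'uuid:x',
#  u'food_type': u'snack'}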
Example No. 9
 def test_merge(self):
     # already have one dataset in self.dataset
     dataset = Dataset(path=self.CSV_FILE, connection=self.connection)
     result = Dataset.merge([self.dataset, dataset],
                            connection=self.connection)
     self.assertTrue(isinstance(result, Dataset))
     self._cleanup(dataset)
     self._cleanup(result)
Example No. 10
def delete_bamboo_dataset(xform):
    if not xform.bamboo_dataset:
        return False
    try:
        dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
                          dataset_id=xform.bamboo_dataset)
        return dataset.delete()
    except ErrorParsingBambooData:
        return False
Example No. 11
 def test_na_values(self):
     dataset = Dataset(path=self.CSV_FILE,
                       connection=self.connection,
                       na_values=['n/a'])
     self.wait()
     # the 'n/a' in the comments column should have been imported as null
     first_row = dataset.get_data(query={
         'food_type': 'street_meat',
         'amount': 2,
         'rating': 'delectible',
         'risk_factor': 'low_risk'
     }, limit=1)[-1]
     self.assertEqual(first_row.get('comments'), 'null')
     self._cleanup(dataset)
Example No. 12
def get_new_bamboo_dataset(xform, force_last=False):
    dataset_id = u''

    try:
        content_data = get_csv_data(xform, force_last=force_last)
        dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
                          content=content_data,
                          na_values=['n/a'])
    except (ErrorParsingBambooData, NoRecordsFoundError):
        return dataset_id

    if dataset.id:
        return dataset.id

    return dataset_id
Example No. 13
    def _get_sum(self, key, value, period):
        sum_value = 0
        for v in value[key]:
            if 'aggregations' in v:
                sum_value += self._get_aggregate('aggregations', v, period)
                continue
            dataset_id = v['dataset_id']
            # the dataset_id from sources.json is the most recent
            if dataset_id != self._sources[v['source']]\
                    and self._sources[v['source']] != "":
                dataset_id = self._sources[v['source']]
            dataset = Dataset(
                dataset_id=dataset_id, connection=self.connection)

            params = {}
            if 'calculation' in v:
                # check or create calculations
                if isinstance(v['calculation'], list):
                    for calculation in v['calculation']:
                        self._add_calculation(calculation, dataset, period)
                if isinstance(v['calculation'], dict):
                    self._add_calculation(v['calculation'], dataset, period)
            if 'query' in v:
                query_string = json.dumps(v['query'])
                template = env.from_string(query_string)
                query_string = template.render(period=period)
                v['query'] = json.loads(query_string)
                params['query'] = v['query']
            if 'count' in v and 'query' in v:
                params['count'] = v['count']
            if 'distinct' in v:
                params['distinct'] = v['distinct']
            val = dataset.get_data(**params)
            if isinstance(val, dict):
                raise Exception("Bamboo Error: %s" % val)
            sum_value += val
        return sum_value
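
The shape of value[key] is not shown in these examples. Judging from the
keys _get_sum reads, one entry could look like this (all values
hypothetical):

value = {'enrolled': [{
    'source': 'enrollment.csv',       # key into self._sources
    'dataset_id': 'abc123',           # fallback bamboo dataset id
    'calculation': {'name': 'total', 'formula': 'boys + girls'},
    'query': {'period': '{{ period }}'},  # rendered with the period above
    'count': True,                    # with 'query', request a count
    'distinct': 'school_id',
}]}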
Example No. 14
 def test_create_dataset_from_url(self):
     dataset = Dataset(
         url='http://formhub.org/mberg/forms/good_eats/data.csv',
         connection=self.connection)
     self.assertTrue(dataset.id is not None)
     self._cleanup(dataset)
Example No. 15
 def test_create_dataset_bad_data_format(self):
     with self.assertRaises(PyBambooException):
         Dataset(path=self.CSV_FILE,
                 data_format='BAD',
                 connection=self.connection)
Example No. 16
 def test_create_dataset_no_info(self):
     with self.assertRaises(PyBambooException):
         Dataset()
Example No. 17
 def test_create_dataset_default_connection(self):
     dataset = Dataset(path=self.CSV_FILE,
                       connection=self.default_connection)
     self._cleanup(dataset)
Example No. 18
    def _get_aggregate(self, key, value, period):
        sum_value = 0
        for v in value[key]:
            dataset_id = v['dataset_id']
            # the dataset_id from sources.json is the most recent
            if dataset_id != self._sources[v['source']]\
                    and self._sources[v['source']] != "":
                dataset_id = self._sources[v['source']]
            dataset = Dataset(
                dataset_id=dataset_id, connection=self.connection)

            params = {}
            if 'calculation' in v:
                # check or create calculations
                if isinstance(v['calculation'], list):
                    for calculation in v['calculation']:
                        self._add_calculation(calculation, dataset, period)
                if isinstance(v['calculation'], dict):
                    self._add_calculation(v['calculation'], dataset, period)
            if 'query' in v:
                query_string = json.dumps(v['query'])
                template = env.from_string(query_string)
                query_string = template.render(period=period)
                v['query'] = json.loads(query_string)
                params['query'] = v['query']
            # if 'count' in v and 'query' in v:
            #     params['count'] = v['count']
            if 'distinct' in v:
                params['distinct'] = v['distinct']
            data = dataset.get_data(format='csv', **params)
            if data.strip() == '':
                # no data to create a dataset - skip
                continue
            # create an aggregate dataset
            aggr_dataset = Dataset(
                content=data,
                data_format='csv',
                connection=self.connection)
            if 'aggregate' in v:
                # check or create calculations
                if isinstance(v['aggregate'], list):
                    for calculation in v['aggregate']:
                        calc = aggr_dataset.add_calculation(
                            name=calculation['name'],
                            formula=calculation['formula']
                        )
                        if calc:
                            aggr_ds = aggr_dataset.get_aggregations()['']
                            k = aggr_ds.get_data()
                            val = k[0][calculation['name']]
                            if isinstance(val, basestring):
                                raise ValueError("Dataset %s returned %s"
                                                 % (aggr_ds.id, val))
                            sum_value += val
                            aggr_ds.delete()
                if isinstance(v['aggregate'], dict):
                    calculation = v['aggregate']
                    calc = aggr_dataset.add_calculation(
                        name=calculation['name'],
                        formula=calculation['formula']
                    )
                    if calc:
                        aggr_ds = aggr_dataset.get_aggregations()['']
                        k = aggr_ds.get_data()
                        val = k[0][calculation['name']]
                        if isinstance(val, basestring):
                            raise ValueError("Dataset %s returned %s"
                                             % (aggr_ds.id, val))
                        sum_value += val
                        aggr_ds.delete()
            aggr_dataset.delete()
        return sum_value
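
The list and dict branches of the 'aggregate' handling above run identical
logic. A hedged sketch of a helper that both branches could call (the
helper name is mine, not from the original code):

    def _sum_aggregate(self, aggr_dataset, calculation):
        # add the aggregation formula, read the single aggregated row,
        # return its value, and delete the temporary dataset
        calc = aggr_dataset.add_calculation(name=calculation['name'],
                                            formula=calculation['formula'])
        if not calc:
            return 0
        aggr_ds = aggr_dataset.get_aggregations()['']
        val = aggr_ds.get_data()[0][calculation['name']]
        if isinstance(val, basestring):
            raise ValueError("Dataset %s returned %s" % (aggr_ds.id, val))
        aggr_ds.delete()
        return val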
Example No. 19
def main():
    dataset_url = "%sdatasets/%s.csv" % (BAMBOO_DEV_URL, args.dataset)

    dataset = Dataset(url=dataset_url)
    print dataset.id
Example No. 20
# get state of current datasets
with open(bamboo_id_file) as f:
    bamboo_ids = json.loads(f.read())
if not bamboo_ids:
    print '"%s" not found: exiting' % bamboo_id_file
    sys.exit(0)
print 'current dataset status:'
print json.dumps(bamboo_ids, indent=4, sort_keys=True)

# upload originals
for sector in bamboo_ids.keys():
    for name, id in bamboo_ids[sector]['originals'].iteritems():
        if not id:
            print 'dataset: %s not uploaded, uploading %s.csv' % (name, name)
            dataset = Dataset(connection=connection,
                              path='csvs/originals/%s.csv' % name)
            state = dataset.get_info()['state']
            while state != 'ready':
                time.sleep(1)
                state = dataset.get_info()['state']
                print state
            bamboo_ids[sector]['originals'][name] = dataset.id
            with open(bamboo_id_file, 'wb') as f:
                f.write(json.dumps(bamboo_ids))

# merge originals
for sector in bamboo_ids.keys():
    if not bamboo_ids[sector]['merged']:
        print 'no merged dataset for sector: %s' % sector
        datasets = [
            Dataset(connection=connection, dataset_id=id)
            for id in bamboo_ids[sector]['originals'].values()]
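
A merge of the collected datasets presumably follows; a sketch of that
continuation (Dataset.merge is used as in Examples No. 2 and No. 9; the
bookkeeping is an assumption):

        # hypothetical continuation: merge and record the new dataset id
        merged = Dataset.merge(datasets, connection=connection)
        bamboo_ids[sector]['merged'] = merged.id
        with open(bamboo_id_file, 'wb') as f:
            f.write(json.dumps(bamboo_ids))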
Example No. 21
 def test_create_dataset_from_json(self):
     dataset = Dataset(path=self.JSON_FILE,
                       data_format='json',
                       connection=self.connection)
     self.assertTrue(dataset.id is not None)
     self._cleanup(dataset)
Example No. 22
 def _create_aux_dataset_from_file(self):
     self.aux_dataset = Dataset(path=self.AUX_CSV_FILE,
                                connection=self.connection)
     self.wait()
Example No. 23
    print 'Could not load %s' % BAMBOO_HASH_FILE
    sys.exit(1)

# update the datasets
hash_updates = dict()
for name, content in bamboo_hash.iteritems():
    filename = content['filename']
    bamboo_id = content['bamboo_id']
    sector = content.get('sector')
    file_path = 'data/' + filename
    print '%s -> %s' % (filename, bamboo_id)
    if bamboo_id:
        print '%s has bamboo id: %s. Updating bamboo dataset.' %\
            (name, bamboo_id)
        try:
            dataset = Dataset(dataset_id=bamboo_id)
            dataset.remove_calculation('sector')
            dataset.reset(path=file_path)
            if sector:
                formula = '"%s"' % sector
                print 'Adding column for sector: %s, formula: %s' %\
                    (sector, formula)
                result = dataset.add_calculation('sector', formula)
                if result:
                    print 'Calculation added successfully!'
                else:
                    print 'Problem adding calculation!'
        except PyBambooException:
            print 'Error creating dataset for file: %s' % filename
    else:
        print '%s has no bamboo id. Adding file to bamboo.' % name
Example No. 24
 def test_merge_fail(self):
     other_dataset = Dataset('12345', connection=self.connection)
     result = Dataset.merge([self.dataset, other_dataset],
                            connection=self.connection)
     self.assertFalse(result)