Example #1
    def backup_table(self, table_name):
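        """Back up table_name to Cloud Storage, unless a backup already
        exists for the table's most recent month of data."""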
        client = Client("hscic")
        sql = "SELECT max(month) FROM {hscic}.%s" % table_name
        latest_date = client.query(sql).rows[0][0]
        latest_year_and_month = latest_date.strftime("%Y_%m")
        table = client.get_table(table_name)

        storage_client = StorageClient()
        bucket = storage_client.bucket()
        year_and_months = set()

        prefix_base = "backups/{}/".format(table_name)

        for blob in bucket.list_blobs(prefix=prefix_base):
            match = re.search(r"/(\d{4}_\d{2})/", blob.name)
            year_and_months.add(match.group(1))

        if latest_year_and_month in year_and_months:
            print("{} table already backed up for {}".format(
                table_name, latest_year_and_month))
            return

        storage_prefix = "{}/{}/{}-".format(prefix_base, latest_year_and_month,
                                            table_name)
        exporter = TableExporter(table, storage_prefix)
        exporter.export_to_storage()
Example #2
def query_and_export(table_name, sql, substitutions):
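    '''Populate table_name from sql, then replace any previous export
    for that table in Cloud Storage with the fresh data.'''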
    try:
        client = Client('hscic')
        table = client.get_table(table_name)

        storage_prefix = 'hscic/views/{}-'.format(table_name)
        logger.info("Generating view %s and saving to %s" %
                    (table_name, storage_prefix))

        logger.info("Running SQL for %s: %s" % (table_name, sql))
        table.insert_rows_from_query(sql, substitutions=substitutions)

        exporter = TableExporter(table, storage_prefix)

        logger.info('Deleting existing data in storage at %s' % storage_prefix)
        exporter.delete_from_storage()

        logger.info('Exporting data to storage at %s' % storage_prefix)
        exporter.export_to_storage()

        logger.info("View generation complete for %s" % table_name)
    except Exception:
        # Log the formatted error, because the multiprocessing pool
        # this is called from only shows the error message (with no
        # traceback)
        logger.error(traceback.format_exc())
        raise
Example #3
    def download_and_import(self, table):
        '''Download table from storage and import into local database.

        We sort the downloaded file with `sort` rather than in BigQuery,
        because we hit resource limits when we try to do so.  See #698 and #711
        for discussion.
        '''
        table_id = table.table_id
        storage_prefix = 'hscic/views/{}-'.format(table_id)
        exporter = TableExporter(table, storage_prefix)

        raw_file = tempfile.NamedTemporaryFile()
        raw_path = raw_file.name
        sorted_file = tempfile.NamedTemporaryFile()
        sorted_path = sorted_file.name

        self.log('Downloading {} to {}'.format(table_id, raw_path))
        exporter.download_from_storage_and_unzip(raw_file)

        self.log('Sorting {} to {}'.format(table_id, sorted_path))
        cmd = 'head -1 {} > {}'.format(raw_path, sorted_path)
        subprocess.check_call(cmd, shell=True)

        field_names = sorted_file.readline().strip().split(',')

        cmd = generate_sort_cmd(table_id, field_names, raw_path, sorted_path)
        subprocess.check_call(cmd, shell=True)

        copy_sql = "COPY {}({}) FROM STDIN WITH (FORMAT CSV)".format(
            table_id, ','.join(field_names))

        with connection.cursor() as cursor:
            with utils.constraint_and_index_reconstructor(table_id):
                self.log("Deleting from table %s..." % table_id)
                cursor.execute("DELETE FROM %s" % table_id)
                self.log("Copying CSV to %s..." % table_id)
                cursor.copy_expert(copy_sql, sorted_file)

        raw_file.close()
        sorted_file.close()
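
The example above relies on a generate_sort_cmd helper that is not shown. A
minimal sketch of one possible implementation, assuming the helper returns a
shell command that sorts the body of the raw CSV (the header line has already
been copied across with head -1) and appends it below that header; the real
helper presumably uses table_id and field_names to pick per-table sort keys:

def generate_sort_cmd(table_id, field_names, raw_path, sorted_path):
    # Hypothetical reconstruction: sort the CSV body on its first column
    # and append it after the header line already written to sorted_path.
    return "tail -n +2 {} | LC_ALL=C sort -t, -k1,1 >> {}".format(
        raw_path, sorted_path)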
Example #4
    def reimport_all(self):
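        '''Re-import the last five years of prescribing data, one month at
        a time: extract each month from BigQuery via Cloud Storage, then
        rebuild the corresponding local partition inside a transaction.'''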
        last_imported = ImportLog.objects.latest_in_category(
            'prescribing').current_at
        self.date = last_imported - relativedelta(years=5)
        client = Client('tmp_eu')
        while self.date <= last_imported:
            date_str = self.date.strftime('%Y-%m-%d')
            sql = ('SELECT pct AS pct_id, practice AS practice_id, '
                   'bnf_code AS presentation_code, items AS total_items, '
                   'net_cost, actual_cost, quantity, '
                   'FORMAT_TIMESTAMP("%%Y_%%m_%%d", month) AS processing_date '
                   'FROM {hscic}.normalised_prescribing_standard '
                   "WHERE month = '%s'" % date_str)
            table_name = "prescribing_%s" % date_str.replace('-', '_')
            table = client.get_or_create_table(table_name)
            table.insert_rows_from_query(sql)
            exporter = TableExporter(table, 'tmp/{}-*'.format(table_name))
            exporter.export_to_storage()

            with tempfile.NamedTemporaryFile(mode='wb') as tmpfile:
                logger.info("Importing data for %s" % self.date)
                exporter.download_from_storage_and_unzip(tmpfile)
                with transaction.atomic():
                    self.drop_partition()
                    self.create_partition()
                    self.import_prescriptions(tmpfile.name)
                    self.create_partition_indexes()
                    self.add_parent_trigger()
            self.date += relativedelta(months=1)
Example #5
def get_csv_of_empty_classes_for_level(level):
    """Using BigQuery, make a CSV of BNF codes at the given level
    (e.g. `section`, `paragraph`) that have never had any prescribing.

    Returns a path to the CSV

    """
    temp_table = write_zero_prescribing_codes_table(level)
    storage_prefix = 'tmp/{}'.format(temp_table.table_id)
    exporter = TableExporter(temp_table, storage_prefix)

    logger.info("Copying %s to %s" % (temp_table.table_id, storage_prefix))
    exporter.export_to_storage()

    path = "/%s/%s.csv" % (tempfile.gettempdir(), temp_table.table_id)
    logger.info("Downloading %s to %s" % (storage_prefix, path))
    with open(path, 'w') as f:
        exporter.download_from_storage_and_unzip(f)
    return path
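
A minimal usage sketch, assuming 'paragraph' is a valid BNF level:

csv_path = get_csv_of_empty_classes_for_level('paragraph')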
Example #6
    def handle(self, *args, **options):
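        """Import one month of prescribing data from an uploaded CSV: expose
        the file as a storage-backed BigQuery table, append aggregated rows
        to the main prescribing table, then export a formatted copy and
        download it, sorted, for loading into Postgres."""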
        path = options["filename"]
        head, filename = os.path.split(path)
        converted_path = "{}_formatted.CSV".format(os.path.splitext(path)[0])
        _, year_and_month = os.path.split(head)

        logger.info("path: %s", path)
        logger.info("converted_path: %s", converted_path)
        logger.info("year_and_month: %s", year_and_month)

        date = year_and_month + "_01"
        try:
            datetime.datetime.strptime(date, "%Y_%m_%d")
        except ValueError:
            message = ("The file path must have a YYYY_MM "
                       "date component in the containing directory: ")
            message += path
            raise CommandError(message)

        hscic_dataset_client = Client("hscic")
        tmp_dataset_client = Client("tmp_eu")

        # Check that we haven't already processed data for this month
        sql = """SELECT COUNT(*)
        FROM {dataset}.prescribing
        WHERE month = TIMESTAMP('{date}')""".format(
            dataset=hscic_dataset_client.dataset_id,
            date=date.replace("_", "-"))
        try:
            results = hscic_dataset_client.query(sql)
            assert results.rows[0][0] == 0
        except NotFound:
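            # The prescribing table may not exist yet (e.g. on a first
            # run); in that case there is nothing to check.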
            pass

        # Create BQ table backed by uploaded source CSV file
        raw_data_table_name = "raw_prescribing_data_{}".format(year_and_month)
        gcs_path = "hscic/prescribing/{}/{}".format(year_and_month, filename)

        logger.info("raw_data_table_name: %s", raw_data_table_name)
        logger.info("gcs_path: %s", gcs_path)

        raw_data_table = tmp_dataset_client.create_storage_backed_table(
            raw_data_table_name, RAW_PRESCRIBING_SCHEMA, gcs_path)

        # Append aggregated data to prescribing table
        sql = """
         SELECT
          Area_Team_Code AS sha,
          LEFT(PCO_Code, 3) AS pct,
          Practice_Code AS practice,
          BNF_Code AS bnf_code,
          BNF_Description AS bnf_name,
          SUM(Items) AS items,
          SUM(NIC) AS net_cost,
          SUM(Actual_Cost) AS actual_cost,
          SUM(Quantity * Items) AS quantity,
          TIMESTAMP('%s') AS month,
         FROM %s
         WHERE Practice_Code NOT LIKE '%%998'  -- see issue #349
         GROUP BY
           bnf_code, bnf_name, pct,
           practice, sha
        """ % (
            date.replace("_", "-"),
            raw_data_table.qualified_name,
        )

        logger.info("sql: %s", sql)

        prescribing_table = hscic_dataset_client.get_table("prescribing")
        prescribing_table.insert_rows_from_query(
            sql, legacy=True, write_disposition="WRITE_APPEND")

        # Write aggregated data to new table, for download
        sql = """
         SELECT
          LEFT(PCO_Code, 3) AS pct_id,
          Practice_Code AS practice_code,
          BNF_Code AS presentation_code,
          SUM(Items) AS total_items,
          SUM(NIC) AS net_cost,
          SUM(Actual_Cost) AS actual_cost,
          SUM(Quantity * Items) AS quantity,
          '%s' AS processing_date,
         FROM %s
         WHERE Practice_Code NOT LIKE '%%998'  -- see issue #349
         GROUP BY
           presentation_code, pct_id, practice_code
        """ % (
            date,
            raw_data_table.qualified_name,
        )

        fmtd_data_table_name = "formatted_prescribing_%s" % year_and_month

        logger.info("sql: %s", sql)
        logger.info("fmtd_data_table_name: %s", fmtd_data_table_name)

        fmtd_data_table = tmp_dataset_client.get_table(fmtd_data_table_name)
        fmtd_data_table.insert_rows_from_query(sql, legacy=True)

        # Export new table to storage, and download
        exporter = TableExporter(fmtd_data_table, gcs_path + "_formatted-")
        exporter.export_to_storage(print_header=False)

        with tempfile.NamedTemporaryFile(dir=head) as f:
            exporter.download_from_storage_and_unzip(f)

            # Sort the output.
            #
            # Why? Because this is equivalent to CLUSTERing the table on
            # loading, but less resource-intensive than doing it in
            # Postgres. And the table is too big to sort within BigQuery.
            subprocess.call(
                "ionice -c 2 nice -n 10 sort -k3,3 -k1,1 -k2,2 -t, %s > %s" %
                (f.name, converted_path),
                shell=True,
            )
Example #7
    def handle(self, *args, **options):
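        '''Near-identical variant of the loader above; the raw-data schema
        is defined inline here instead of using RAW_PRESCRIBING_SCHEMA.'''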
        path = options['filename']
        head, filename = os.path.split(path)
        converted_path = '{}_formatted.CSV'.format(os.path.splitext(path)[0])
        _, year_and_month = os.path.split(head)

        logger.info('path: %s', path)
        logger.info('converted_path: %s', converted_path)
        logger.info('year_and_month: %s', year_and_month)

        date = year_and_month + '_01'
        try:
            datetime.datetime.strptime(date, '%Y_%m_%d')
        except ValueError:
            message = ('The file path must have a YYYY_MM '
                       'date component in the containing directory: ')
            message += path
            raise CommandError(message)

        hscic_dataset_client = Client('hscic')
        tmp_dataset_client = Client('tmp_eu')

        # Check that we haven't already processed data for this month
        sql = '''SELECT COUNT(*)
        FROM {dataset}.prescribing
        WHERE month = TIMESTAMP('{date}')'''.format(
            dataset=hscic_dataset_client.dataset_id,
            date=date.replace('_', '-'),
        )
        try:
            results = hscic_dataset_client.query(sql)
            assert results.rows[0][0] == 0
        except NotFound:
            pass

        # Create BQ table backed by uploaded source CSV file
        raw_data_table_name = 'raw_prescribing_data_{}'.format(year_and_month)
        gcs_path = 'hscic/prescribing/{}/{}'.format(year_and_month, filename)

        logger.info('raw_data_table_name: %s', raw_data_table_name)
        logger.info('gcs_path: %s', gcs_path)

        schema = [
            {'name': 'Regional_Office_Name', 'type': 'string'},
            {'name': 'Regional_Office_Code', 'type': 'string'},
            {'name': 'Area_Team_Name', 'type': 'string'},
            {'name': 'Area_Team_Code', 'type': 'string', 'mode': 'required'},
            {'name': 'PCO_Name', 'type': 'string'},
            {'name': 'PCO_Code', 'type': 'string'},
            {'name': 'Practice_Name', 'type': 'string'},
            {'name': 'Practice_Code', 'type': 'string', 'mode': 'required'},
            {'name': 'BNF_Code', 'type': 'string', 'mode': 'required'},
            {'name': 'BNF_Description', 'type': 'string', 'mode': 'required'},
            {'name': 'Items', 'type': 'integer', 'mode': 'required'},
            {'name': 'Quantity', 'type': 'integer', 'mode': 'required'},
            {'name': 'ADQ_Usage', 'type': 'float'},
            {'name': 'NIC', 'type': 'float', 'mode': 'required'},
            {'name': 'Actual_Cost', 'type': 'float', 'mode': 'required'},
        ]
        raw_data_table = tmp_dataset_client.create_storage_backed_table(
            raw_data_table_name,
            schema,
            gcs_path
        )

        # Append aggregated data to prescribing table
        sql = '''
         SELECT
          Area_Team_Code AS sha,
          LEFT(PCO_Code, 3) AS pct,
          Practice_Code AS practice,
          BNF_Code AS bnf_code,
          BNF_Description AS bnf_name,
          SUM(Items) AS items,
          SUM(NIC) AS net_cost,
          SUM(Actual_Cost) AS actual_cost,
          SUM(Quantity * Items) AS quantity,
          TIMESTAMP('%s') AS month,
         FROM %s
         WHERE Practice_Code NOT LIKE '%%998'  -- see issue #349
         GROUP BY
           bnf_code, bnf_name, pct,
           practice, sha
        ''' % (date.replace('_', '-'), raw_data_table.qualified_name)

        logger.info('sql: %s', sql)

        prescribing_table = hscic_dataset_client.get_table('prescribing')
        prescribing_table.insert_rows_from_query(
            sql,
            legacy=True,
            write_disposition='WRITE_APPEND'
        )

        # Write aggregated data to new table, for download
        sql = '''
         SELECT
          LEFT(PCO_Code, 3) AS pct_id,
          Practice_Code AS practice_code,
          BNF_Code AS presentation_code,
          SUM(Items) AS total_items,
          SUM(NIC) AS net_cost,
          SUM(Actual_Cost) AS actual_cost,
          SUM(Quantity * Items) AS quantity,
          '%s' AS processing_date,
         FROM %s
         WHERE Practice_Code NOT LIKE '%%998'  -- see issue #349
         GROUP BY
           presentation_code, pct_id, practice_code
        ''' % (date, raw_data_table.qualified_name)

        fmtd_data_table_name = 'formatted_prescribing_%s' % year_and_month

        logger.info('sql: %s', sql)
        logger.info('fmtd_data_table_name: %s', fmtd_data_table_name)

        fmtd_data_table = tmp_dataset_client.get_table(fmtd_data_table_name)
        fmtd_data_table.insert_rows_from_query(sql, legacy=True)

        # Export new table to storage, and download
        exporter = TableExporter(fmtd_data_table, gcs_path + '_formatted-')
        exporter.export_to_storage(print_header=False)

        with tempfile.NamedTemporaryFile(dir=head) as f:
            exporter.download_from_storage_and_unzip(f)

            # Sort the output.
            #
            # Why? Because this is equivalent to CLUSTERing the table on
            # loading, but less resource-intensive than doing it in
            # Postgres. And the table is too big to sort within BigQuery.
            subprocess.call(
                "ionice -c 2 nice -n 10 sort -k3,3 -k1,1 -k2,2 -t, %s > %s" % (
                    f.name, converted_path),
                shell=True)
Example #8
    def test_the_lot(self):
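        '''End-to-end test of the gcutils BigQuery wrappers: CSV and query
        inserts, querying (including into a dataframe), storage export and
        download, storage-backed tables, views, loading rows from Postgres,
        and deleting all rows.'''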
        client = Client('test')

        schema = build_schema(
            ('a', 'INTEGER'),
            ('b', 'STRING'),
        )

        headers = ['a', 'b']
        rows = [
            (1, 'apple'),
            (2, 'banana'),
            (3, 'coconut'),
        ]

        t1 = client.get_or_create_table('t1', schema)
        t1_qname = t1.qualified_name

        # Test Table.insert_rows_from_csv
        t1.insert_rows_from_csv('gcutils/tests/test_table.csv')

        self.assertEqual(sorted(t1.get_rows()), rows)

        # Test Table.insert_rows_from_query
        t2 = client.get_table('t2')

        sql = 'SELECT * FROM {} WHERE a > 1'.format(t1_qname)
        t2.insert_rows_from_query(sql)

        self.assertEqual(sorted(t2.get_rows()), rows[1:])

        # Test Client.query
        sql = 'SELECT * FROM {} WHERE a > 2'.format(t1_qname)
        results = client.query(sql)

        self.assertEqual(sorted(results.rows), rows[2:])

        # Test Client.query_into_dataframe
        sql = 'SELECT * FROM {} WHERE a > 2'.format(t1_qname)
        df = client.query_into_dataframe(sql)

        self.assertEqual(df.values.tolist(), [list(rows[2])])

        # Test TableExporter.export_to_storage and
        # TableExporter.download_from_storage_and_unzip
        t1_exporter = TableExporter(t1, self.storage_prefix + 'test_table-')
        t1_exporter.export_to_storage()

        with tempfile.NamedTemporaryFile(mode='r+') as f:
            t1_exporter.download_from_storage_and_unzip(f)
            f.seek(0)
            reader = csv.reader(f)
            data = [next(reader)] + sorted(reader)

        self.assertEqual(data,
                         [list(map(str, row)) for row in [headers] + rows])

        # Test Table.insert_rows_from_storage
        storage_path = self.storage_prefix + 'test_table.csv'
        self.upload_to_storage('gcutils/tests/test_table.csv', storage_path)

        t2.insert_rows_from_storage(storage_path)

        self.assertEqual(sorted(t2.get_rows()), rows)

        # Test Client.create_storage_backed_table
        storage_path = self.storage_prefix + 'test_table_headers.csv'
        self.upload_to_storage(
            'gcutils/tests/test_table_headers.csv',
            storage_path
        )

        schema = [
            {'name': 'a', 'type': 'integer'},
            {'name': 'b', 'type': 'string'},
        ]

        t3 = client.create_storage_backed_table(
            't3',
            schema,
            storage_path
        )

        results = client.query('SELECT * FROM {}'.format(t3.qualified_name))

        self.assertEqual(sorted(results.rows), rows)

        self.upload_to_storage(
            'gcutils/tests/test_table_headers_2.csv',
            storage_path
        )

        results = client.query('SELECT * FROM {}'.format(t3.qualified_name))

        self.assertEqual(sorted(results.rows), rows + [(4, u'damson')])

        # Test Client.create_table_with_view
        sql = 'SELECT * FROM {{project}}.{} WHERE a > 1'.format(t1_qname)

        t4 = client.create_table_with_view('t4', sql, False)

        results = client.query('SELECT * FROM {}'.format(t4.qualified_name))

        self.assertEqual(sorted(results.rows), rows[1:])

        # Test Client.insert_rows_from_pg
        PCT.objects.create(code='ABC', name='CCG 1')
        PCT.objects.create(code='XYZ', name='CCG 2')

        def transformer(row):
            return [ord(row[0][0]), row[1]]
        t1.insert_rows_from_pg(PCT, ['code', 'name'], transformer)

        self.assertEqual(sorted(t1.get_rows()), [(65, 'CCG 1'), (88, 'CCG 2')])

        # Test Table.delete_all_rows
        t1.delete_all_rows()

        self.assertEqual(list(t1.get_rows()), [])
Example #9
    def test_the_lot(self):
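        """Variant of the end-to-end test above; additionally exercises
        Table.copy_to_new_dataset and Table.move_to_new_dataset, and passes
        explicit schemas when inserting rows."""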
        client = Client("test")
        archive_client = Client("archive")

        orig_schema = build_schema(("a", "STRING"), ("b", "INTEGER"))

        schema = build_schema(("a", "INTEGER"), ("b", "STRING"))

        headers = ["a", "b"]
        rows = [(1, "apple"), (2, "banana"), (3, "coconut")]

        t1 = client.get_or_create_table("t1", orig_schema)
        t1_qname = t1.qualified_name

        # Test Table.insert_rows_from_csv
        t1.insert_rows_from_csv("gcutils/tests/test_table.csv", schema)

        self.assertEqual(sorted(t1.get_rows()), rows)

        # Test Table.insert_rows_from_query
        t2 = client.get_table("t2")

        sql = "SELECT * FROM {} WHERE a > 1".format(t1_qname)
        t2.insert_rows_from_query(sql)

        self.assertEqual(sorted(t2.get_rows()), rows[1:])

        # Test Client.query
        sql = "SELECT * FROM {} WHERE a > 2".format(t1_qname)
        results = client.query(sql)

        self.assertEqual(sorted(results.rows), rows[2:])

        # Test Client.query_into_dataframe
        sql = "SELECT * FROM {} WHERE a > 2".format(t1_qname)
        df = client.query_into_dataframe(sql)

        self.assertEqual(df.values.tolist(), [list(rows[2])])

        # Test TableExporter.export_to_storage and
        # TableExporter.download_from_storage_and_unzip
        t1_exporter = TableExporter(t1, self.storage_prefix + "test_table-")
        t1_exporter.export_to_storage()

        with tempfile.NamedTemporaryFile(mode="r+") as f:
            t1_exporter.download_from_storage_and_unzip(f)
            f.seek(0)
            reader = csv.reader(f)
            data = [next(reader)] + sorted(reader)

        self.assertEqual(data,
                         [list(map(str, row)) for row in [headers] + rows])

        # Test Table.insert_rows_from_storage
        storage_path = self.storage_prefix + "test_table.csv"
        self.upload_to_storage("gcutils/tests/test_table.csv", storage_path)

        t2.insert_rows_from_storage(storage_path)

        self.assertEqual(sorted(t2.get_rows()), rows)

        # Test Client.create_storage_backed_table
        storage_path = self.storage_prefix + "test_table_headers.csv"
        self.upload_to_storage("gcutils/tests/test_table_headers.csv",
                               storage_path)

        schema = build_schema(("a", "INTEGER"), ("b", "STRING"))

        t3 = client.create_storage_backed_table("t3", schema, storage_path)

        results = client.query("SELECT * FROM {}".format(t3.qualified_name))

        self.assertEqual(sorted(results.rows), rows)

        self.upload_to_storage("gcutils/tests/test_table_headers_2.csv",
                               storage_path)

        results = client.query("SELECT * FROM {}".format(t3.qualified_name))

        self.assertEqual(sorted(results.rows), rows + [(4, "damson")])

        # Test Client.create_table_with_view
        sql = "SELECT * FROM {{project}}.{} WHERE a > 1".format(t1_qname)

        t4 = client.create_table_with_view("t4", sql, False)

        results = client.query("SELECT * FROM {}".format(t4.qualified_name))

        self.assertEqual(sorted(results.rows), rows[1:])

        # Test Table.copy_to_new_dataset
        t1.copy_to_new_dataset("archive")
        t1_archived = archive_client.get_table("t1")
        self.assertEqual(sorted(t1_archived.get_rows()), rows)
        self.assertEqual(sorted(t1.get_rows()), rows)

        # Test Table.move_to_new_dataset
        t2.move_to_new_dataset("archive")
        t2_archived = archive_client.get_table("t2")
        self.assertEqual(sorted(t2_archived.get_rows()), rows)
        with self.assertRaises(NotFound):
            list(t2.get_rows())

        # Test Client.insert_rows_from_pg
        PCT.objects.create(code="ABC", name="CCG 1")
        PCT.objects.create(code="XYZ", name="CCG 2")

        def transformer(row):
            return [ord(row[0][0]), row[1]]

        t1.insert_rows_from_pg(
            PCT,
            build_schema(("code", "INTEGER"), ("name", "STRING")),
            transformer=transformer,
        )

        self.assertEqual(sorted(t1.get_rows()), [(65, "CCG 1"), (88, "CCG 2")])

        # Test Table.delete_all_rows
        t1.delete_all_rows()

        self.assertEqual(list(t1.get_rows()), [])