Example #1
    def patientItemCollectionLinkFromSourceItem(self, sourceItem,
                                                collectionItem, patientItem,
                                                conn):
        hash_key = hash('{}{}'.format(
            patientItem["patient_item_id"],
            collectionItem["item_collection_item_id"]))
        if hash_key in self.patient_item_collection_links:
            return

        # Produce a patient_item_collection_link record model for the given sourceItem
        patientItemCollectionLink = RowItemModel({
            "patient_item_id": patientItem["patient_item_id"],
            "item_collection_item_id": collectionItem["item_collection_item_id"],
        })
        insertQuery = DBUtil.buildInsertQuery(
            "patient_item_collection_link",
            list(patientItemCollectionLink.keys()))
        insertParams = list(patientItemCollectionLink.values())
        try:
            # Optimistic insert of a new unique item
            DBUtil.execute(insertQuery, insertParams, conn=conn)
            self.patient_item_collection_links.add(hash_key)
        except conn.IntegrityError as err:
            # If it turns out to be a duplicate, that's fine; log it and continue inserting whatever else is possible
            log.warn(err)
            self.patient_item_collection_links.add(hash_key)
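
The pattern above is an optimistic insert guarded by an in-process dedup cache. A minimal sketch of it in isolation, assuming a DB-API connection that exposes IntegrityError as an attribute (as psycopg2 connections do) and a hypothetical insert_link helper standing in for the DBUtil.buildInsertQuery / DBUtil.execute pair:

seen_links = set()

def link_once(conn, patient_item_id, item_collection_item_id):
    # Composite key for the in-process cache; the tuple form avoids the
    # concatenation ambiguity discussed below.
    key = (patient_item_id, item_collection_item_id)
    if key in seen_links:
        return  # already handled in this run; skip the DB round trip
    try:
        insert_link(conn, patient_item_id, item_collection_item_id)  # hypothetical helper
    except conn.IntegrityError:
        pass  # row already exists in the database; treat as success
    seen_links.add(key)

One caveat with the original key: hashing the string concatenation '{}{}'.format(a, b) can collide for distinct pairs, e.g. (1, 23) and (12, 3) both produce '123'. Keying on the tuple (a, b), or putting a separator in the format string, removes that ambiguity.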
Example #2
    def patientItemModelFromSourceItem(self, sourceItem, clinicalItem, conn):
        # Produce a patient_item record model for the given sourceItem
        patient_item = RowItemModel({
            "external_id": None,
            "patient_id": int(sourceItem["rit_uid"][2:], 16),
            "encounter_id": None,
            "clinical_item_id": clinicalItem["clinical_item_id"],
            # without str(), the time is being converted in postgres
            "item_date": str(sourceItem["itemDate"]),
            "item_date_utc": None,  # it's a date, so no need for a duplicate here
        })
        insert_query = DBUtil.buildInsertQuery("patient_item",
                                               list(patient_item.keys()))
        insert_params = list(patient_item.values())
        try:
            # Optimistic insert of a new unique item
            DBUtil.execute(insert_query, insert_params, conn=conn)
        except conn.IntegrityError as err:
            # If it turns out to be a duplicate, that's fine; log it and
            # continue inserting whatever else is possible
            log.warn(err)
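
The item_date comments in these examples note that without str() the driver converts the timestamp on its way into Postgres. If that normalization should live in one place rather than in inline str() calls, a small helper along these lines would do (normalize_item_date is hypothetical, not part of DBUtil):

from datetime import date, datetime

def normalize_item_date(value):
    # Hand the driver a plain ISO-8601 string so Postgres, not the
    # client library, decides how to parse the value; same intent as
    # the inline str() calls above.
    if isinstance(value, datetime):
        return value.isoformat(sep=' ')
    if isinstance(value, date):
        return value.isoformat()
    return str(value)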
Example #3
    def patientItemFromSourceItem(self, sourceItem, clinicalItem, conn):
        # Produce a patient_item record model for the given sourceItem
        patientItem = RowItemModel({
            "external_id": sourceItem["order_proc_id_coded"],
            "patient_id": int(sourceItem["jc_uid"][2:], 16),
            "encounter_id": sourceItem["pat_enc_csn_id_coded"],
            "clinical_item_id": clinicalItem["clinical_item_id"],
            "item_date": sourceItem["order_time_jittered"],
            # without str(), the time is being converted in postgres
            "item_date_utc": str(sourceItem["order_time_jittered_utc"]),
        })

        key_hash = hash('{}{}{}'.format(patientItem["patient_id"],
                                        patientItem["clinical_item_id"],
                                        patientItem["item_date"]))
        if key_hash in self.patient_items:
            return self.patient_items[key_hash]

        insertQuery = DBUtil.buildInsertQuery("patient_item",
                                              list(patientItem.keys()))
        insertParams = list(patientItem.values())
        try:
            # Optimistic insert of a new unique item
            DBUtil.execute(insertQuery, insertParams, conn=conn)
            patientItem["patient_item_id"] = DBUtil.execute(
                DBUtil.identityQuery("patient_item"), conn=conn)[0][0]
            self.patient_items[key_hash] = patientItem
        except conn.IntegrityError as err:
            # If it turns out to be a duplicate, that's fine; pull out the
            # existing ID and continue inserting whatever else is possible
            log.warn(err)
            # Look up just by the composite key components to avoid
            # attempting the duplicate insertion again
            searchPatientItem = {
                "patient_id": patientItem["patient_id"],
                "clinical_item_id": patientItem["clinical_item_id"],
                "item_date": patientItem["item_date"],
            }
            (patientItem["patient_item_id"],
             isNew) = DBUtil.findOrInsertItem("patient_item",
                                              searchPatientItem,
                                              conn=conn)
            self.patient_items[key_hash] = patientItem

        return patientItem
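
Example #3 issues two statements per new row: the insert itself, then DBUtil.identityQuery to read back the generated patient_item_id. On Postgres the same effect is available in a single round trip via RETURNING; a minimal psycopg2-style sketch (same table and columns as above, duplicate handling omitted):

def insert_patient_item(conn, row):
    # Insert one patient_item row and return its generated primary key
    # in a single statement via Postgres's RETURNING clause.
    cols = ("external_id", "patient_id", "encounter_id",
            "clinical_item_id", "item_date", "item_date_utc")
    sql = ("INSERT INTO patient_item ({}) VALUES ({}) "
           "RETURNING patient_item_id").format(
               ", ".join(cols), ", ".join(["%s"] * len(cols)))
    with conn.cursor() as cur:
        cur.execute(sql, [row[c] for c in cols])
        return cur.fetchone()[0]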
Example #4
    def setUp(self):
        """Prepare state for test cases"""
        DBTestCase.setUp(self)

        log.info("Sourcing from BigQuery DB")
        ClinicalItemDataLoader.build_clinical_item_psql_schemata()

        self.converter = STARRTreatmentTeamConversion.STARRTreatmentTeamConversion()  # Instance to test on
        self.bqConn = self.converter.bqConn
        self.starrUtil = STARRUtil.StarrCommonUtils(self.converter.bqClient)

        # point the converter to dummy source table
        STARRTreatmentTeamConversion.SOURCE_TABLE = TEST_SOURCE_TABLE

        log.warn("Removing test table, if exists: {}".format(TEST_SOURCE_TABLE))
        bq_cursor = self.bqConn.cursor()
        bq_cursor.execute('DROP TABLE IF EXISTS {};'.format(TEST_SOURCE_TABLE))
Example #5
    def setUp(self):
        """Prepare state for test cases"""
        log.setLevel(logging.INFO)  # without this no logs are printed

        DBTestCase.setUp(self)
        ClinicalItemDataLoader.build_clinical_item_psql_schemata()

        # Instance to test on; also supplies the BigQuery connection and
        # helper used below (mirrors the sibling setUp methods, since this
        # excerpt never assigned self.bqConn or self.starrUtil itself)
        self.converter = STARRDemographicsConversion.STARRDemographicsConversion()
        self.bqConn = self.converter.bqConn
        self.starrUtil = STARRUtil.StarrCommonUtils(self.converter.bqClient)

        # point the converter to dummy source table
        STARRDemographicsConversion.SOURCE_TABLE = TEST_SOURCE_TABLE

        log.warn(
            "Removing test table if it exists: {}".format(TEST_SOURCE_TABLE))
        bq_cursor = self.bqConn.cursor()
        bq_cursor.execute('DROP TABLE IF EXISTS {};'.format(TEST_SOURCE_TABLE))

        log.info("Generating test source data")
        self.generate_test_and_expected_data(self.TEST_DATA_SIZE)
        self.starrUtil.dump_test_data_to_csv(self.header, self.test_data,
                                             self.test_data_csv)
        self.starrUtil.upload_csv_to_bigquery('starr_datalake2018',
                                              'demographic', 'test_dataset',
                                              'starr_demographic',
                                              self.test_data_csv, self.header)
        self.dump_patient_ids_to_test_to_csv(self.pat_id_csv)
Example #6
    def setUp(self):
        """Prepare state for test cases"""
        log.setLevel(logging.INFO)  # without this no logs are printed
        DBTestCase.setUp(self)

        log.info("Sourcing from BigQuery DB")
        ClinicalItemDataLoader.build_clinical_item_psql_schemata()

        self.converter = STARROrderProcConversion.STARROrderProcConversion()  # Instance to test on
        self.bqConn = self.converter.bqConn
        self.starrUtil = STARRUtil.StarrCommonUtils(self.converter.bqClient)

        # point the converter to dummy source table
        STARROrderProcConversion.SOURCE_TABLE = TEST_SOURCE_TABLE
        STARROrderProcConversion.ORDERSET_TABLE = TEST_ORDERSET_TABLE
        STARROrderProcConversion.TARGET_DATASET_ID = TEST_DEST_DATASET

        log.warn("Removing test tables, if they exist: {} and {}".format(
            TEST_SOURCE_TABLE, TEST_ORDERSET_TABLE))
        bq_cursor = self.bqConn.cursor()
        bq_cursor.execute('DROP TABLE IF EXISTS {};'.format(TEST_SOURCE_TABLE))
        bq_cursor.execute(
            'DROP TABLE IF EXISTS {};'.format(TEST_ORDERSET_TABLE))
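
Each of these setUp methods drops leftover test tables first, so a failed earlier run cannot poison the next one. The natural counterpart, not shown in these excerpts, is a tearDown that removes the tables again after each test; a sketch in the same style:

    def tearDown(self):
        """Drop the dummy source tables so later runs start clean."""
        bq_cursor = self.bqConn.cursor()
        for table in (TEST_SOURCE_TABLE, TEST_ORDERSET_TABLE):
            bq_cursor.execute('DROP TABLE IF EXISTS {};'.format(table))
        DBTestCase.tearDown(self)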