Example #1
0
    def _unload_npi_data(self):
        """Stage the in-memory NPI ids in a Redshift temp table, then unload
        the ids that have no match in npilookupvalues back to S3 and the
        local temp folder.
        """
        local_file = '{0}/npi_ids.txt'.format(self.temp_folder)
        remote_file = '{}/npi_ids.txt'.format(self.temp_s3_folder)

        # One NPI id per row, using the job's configured delimiter.
        with CsvWriter(local_file, self.delimiter) as writer:
            for current_id in self.npi_ids:
                writer.writerow([current_id])

        s3_provider.upload_file(local_file, remote_file)

        # Recreate the staging table distributed on npi_id before loading.
        create_table_sql = (
            'DROP TABLE IF EXISTS {0}; CREATE TABLE {0}(npi_id VARCHAR(500)) DISTKEY(npi_id);'
            .format(self.temp_table_name))
        redshift_provider.copy_data_from_s3({
            'data_preparation_query': create_table_sql,
            'table_name': self.temp_table_name,
            'columns': 'npi_id',
            's3_location': remote_file,
            'delimiter': self.delimiter,
        })

        # Ids absent from the lookup table are the ones still to be fetched.
        unload_query = "SELECT DISTINCT npi_id FROM {0} WHERE  npi_id NOT IN (SELECT  COALESCE(npi,'''') FROM npilookupvalues);".format(
            self.temp_table_name)
        redshift_provider.unload_data({
            'unload_query': unload_query,
            's3_location': '{}/NpiValues'.format(self.temp_s3_folder),
            'destination_folder_location': '{}/NpiValues'.format(self.temp_folder),
        })

        # Drop the staging table now that the unload finished.
        redshift_provider.execute_query('DROP TABLE IF EXISTS {};'.format(
            self.temp_table_name))
Example #2
0
    def _unload_npi_data(self):
        """Stage NPI ids in a Redshift temp table and unload the matching
        npilookupvalues rows (taxonomy columns, plus the 'other provider
        identifier' columns when NCPDP mapping fields are configured).
        """
        local_file = '{0}/npi_ids.txt'.format(self.temp_folder)
        remote_file = '{}/npi_ids.txt'.format(self.temp_s3_folder)

        # One NPI id per row, using the job's configured delimiter.
        with CsvWriter(local_file, self.delimiter) as writer:
            for current_id in self.npi_ids:
                writer.writerow([current_id])

        s3_provider.upload_file(local_file, remote_file)

        create_table_sql = (
            'DROP TABLE IF EXISTS {0}; CREATE TABLE {0}(npi_id VARCHAR(500)) DISTKEY(npi_id);'
            .format(self.temp_table_name))
        redshift_provider.copy_data_from_s3({
            'data_preparation_query': create_table_sql,
            'table_name': self.temp_table_name,
            'columns': 'npi_id',
            's3_location': remote_file,
            'delimiter': self.delimiter,
        })

        # Taxonomy switch/code column pairs 1..15 are always selected.
        selected_columns = ['npi']
        for slot in range(1, 16):
            selected_columns += [
                '"healthcare provider primary taxonomy switch_{0}"'.format(slot),
                '"healthcare provider taxonomy code_{0}"'.format(slot),
            ]
        # The 50 'other provider identifier' triples are only needed when
        # NCPDP mapping fields were configured for this run.
        if len(self.npi_ncpdp_map_fields) > 0:
            for slot in range(1, 51):
                selected_columns += [
                    '"other provider identifier state_{0}"'.format(slot),
                    '"other provider identifier issuer_{0}"'.format(slot),
                    '"other provider identifier_{0}"'.format(slot),
                ]

        # Record each selected column's position in the unloaded file.
        for position, column in enumerate(selected_columns):
            self.npi_header_columns[column] = position

        unload_query = "SELECT DISTINCT {0} FROM npilookupvalues INNER JOIN {1} ON  npilookupvalues.npi = {1}.npi_id;".format(
            ','.join(selected_columns), self.temp_table_name)
        redshift_provider.unload_data({
            'unload_query': unload_query,
            's3_location': '{}/NpiValues'.format(self.temp_s3_folder),
            'destination_folder_location': '{}/NpiValues'.format(self.temp_folder),
        })

        # Drop the staging table now that the unload finished.
        redshift_provider.execute_query('DROP TABLE IF EXISTS {};'.format(
            self.temp_table_name))
Example #3
0
 def _update_npi_database(self):
     """Append the downloaded Export.csv to the npilookupvalues table,
     copying via S3 and matching columns by the CSV header row.
     """
     export_path = '{0}/Export.csv'.format(self.temp_folder)

     # Read only the header to learn the export's column order.
     with CsvReader(export_path, delimiter=',') as reader:
         columns = reader.header_columns

     s3_provider.upload_file(export_path,
                             '{}/Export.csv'.format(self.temp_s3_folder))
     redshift_provider.copy_data_from_s3({
         'table_name': 'npilookupvalues',
         'ignore_header': True,
         # Double-quote every column name for the COPY column list.
         'columns': '"{}"'.format('","'.join(columns)),
         's3_location': '{}/Export.csv'.format(self.temp_s3_folder),
         'delimiter': ',',
         'csv_quote': '"',
     })
    def _unload_linked_claim_replacement_data(self):
        """Stage previously-seen claim ids in a Redshift temp table, then
        unload the (previous claim id, replacing claim id) pairs found in
        medical_claims_header to S3 and the local temp folder.
        """
        local_file = '{}/linked_claim_previous_claim_ids.txt'.format(self.temp_folder)
        remote_file = '{}/linked_claim_previous_claim_ids.txt'.format(self.temp_s3_folder)

        # One previous claim id per row, using the configured delimiter.
        with CsvWriter(local_file, self.delimiter) as writer:
            for old_claim_id in self.previous_claim_ids:
                writer.writerow([old_claim_id])

        s3_provider.upload_file(local_file, remote_file)

        create_table_sql = (
            'DROP TABLE IF EXISTS {0}; CREATE TABLE {0}(previous_claim_id VARCHAR(500)) DISTKEY(previous_claim_id);'
            .format(self.temp_table_name))
        redshift_provider.copy_data_from_s3({
            'data_preparation_query': create_table_sql,
            'table_name': self.temp_table_name,
            'columns': 'previous_claim_id',
            's3_location': remote_file,
            'delimiter': self.delimiter,
        })

        # Join staged previous-claim ids back to the claims header to find
        # which claim replaced each one.
        unload_query = '''SELECT DISTINCT m.previous_claim_number_medical_claims_header, m.claim_id_medical_claim_header
         FROM medical_claims_header m INNER JOIN {} t ON m.previous_claim_number_medical_claims_header = t.previous_claim_id;'''.format(self.temp_table_name)
        redshift_provider.unload_data({
            'unload_query': unload_query,
            's3_location': '{}/LinkedClaimIds'.format(self.temp_s3_folder),
            'destination_folder_location': '{}/LinkedClaimIds'.format(self.temp_folder),
        })

        # Drop the staging table now that the unload finished.
        redshift_provider.execute_query('DROP TABLE IF EXISTS {};'.format(self.temp_table_name))
    def _unload_npi_data(self):
        """Stage NPI ids in a Redshift temp table and unload the mapped
        lookup fields for every id present in npilookupvalues.
        """
        local_file = '{0}/npi_ids.txt'.format(self.temp_folder)
        remote_file = '{}/npi_ids.txt'.format(self.temp_s3_folder)

        # One NPI id per row, using the job's configured delimiter.
        with CsvWriter(local_file, self.delimiter) as writer:
            for current_id in self.npi_ids:
                writer.writerow([current_id])

        s3_provider.upload_file(local_file, remote_file)

        create_table_sql = (
            'DROP TABLE IF EXISTS {0}; CREATE TABLE {0}(npi_id VARCHAR(500)) DISTKEY(npi_id);'
            .format(self.temp_table_name))
        redshift_provider.copy_data_from_s3({
            'data_preparation_query': create_table_sql,
            'table_name': self.temp_table_name,
            'columns': 'npi_id',
            's3_location': remote_file,
            'delimiter': self.delimiter,
        })

        # Double-quote every mapped field name for the SELECT list.
        quoted_fields = ['"{}"'.format(mapping['field']) for mapping in self.npi_field_maps]
        unload_query = 'SELECT {} FROM npilookupvalues n INNER JOIN {} t ON n.npi = t.npi_id'.format(
            ', '.join(quoted_fields), self.temp_table_name)
        redshift_provider.unload_data({
            'unload_query': unload_query,
            's3_location': '{}/NpiValues'.format(self.temp_s3_folder),
            'destination_folder_location': '{}/NpiValues'.format(self.temp_folder),
        })

        # Drop the staging table now that the unload finished.
        redshift_provider.execute_query('DROP TABLE IF EXISTS {};'.format(
            self.temp_table_name))
    def _unload_data(self):
        """Stage (member_id, contract_id) pairs in a Redshift temp table and
        unload the configured member columns for matching members to S3 and
        the local temp folder.
        """
        local_file = '{0}/member_contract_list.txt'.format(self.temp_folder)
        remote_file = '{}/member_contract_list.txt'.format(self.temp_s3_folder)

        # Each set entry is already a delimited "member<delim>contract"
        # string; split it back into the two columns for the writer.
        with CsvWriter(local_file, self.delimiter) as writer:
            for pair in self.member_contract_set:
                writer.writerow(pair.split(self.delimiter))

        s3_provider.upload_file(local_file, remote_file)

        create_table_sql = (
            'DROP TABLE IF EXISTS {0}; CREATE TABLE {0}(member_id VARCHAR(500), contract_id VARCHAR(500));'
            .format(self.temp_table_name))
        redshift_provider.copy_data_from_s3({
            'data_preparation_query': create_table_sql,
            'table_name': self.temp_table_name,
            'columns': 'member_id,contract_id',
            's3_location': remote_file,
            'delimiter': self.delimiter,
        })

        # Only members matching both id and contract are unloaded.
        unload_query = "select DISTINCT {} from member m INNER JOIN {} t ON m.member_id_member = t.member_id AND m.contract_id_product_reference = t.contract_id".format(
            ','.join(self.unload_columns), self.temp_table_name)
        redshift_provider.unload_data({
            'unload_query': unload_query,
            's3_location': '{}/MemberData'.format(self.temp_s3_folder),
            'destination_folder_location': '{}/MemberData'.format(self.temp_folder),
        })

        # Drop the staging table now that the unload finished.
        redshift_provider.execute_query('DROP TABLE IF EXISTS {};'.format(
            self.temp_table_name))