Example #1
    def test_table_properties(self):
        """Test that the accessor's table properties return the expected models."""
        self.assertEqual(self.accessor.line_item_daily_summary_table,
                         GCPCostEntryLineItemDailySummary)
        self.assertEqual(self.accessor.line_item_daily_table,
                         get_model("GCPCostEntryLineItemDaily"))
        self.assertEqual(self.accessor.line_item_table,
                         get_model("GCPCostEntryLineItem"))
    def test_insert_on_conflict_do_update_with_conflict(self):
        """Test that an INSERT succeeds ignoring the conflicting row."""
        table_name = AWS_CUR_TABLE_MAP["reservation"]
        table = get_model(table_name)
        data = self.creator.create_columns_for_table_with_bakery(table)
        query = self.accessor._get_db_obj_query(table)
        with schema_context(self.schema):
            initial_res_count = 1
            initial_count = query.count()
            data["number_of_reservations"] = initial_res_count
            row_id = self.accessor.insert_on_conflict_do_update(
                table, data, conflict_columns=["reservation_arn"], set_columns=list(data.keys())
            )
            insert_count = query.count()
            row = query.order_by("-id").all()[0]
            self.assertEqual(insert_count, initial_count + 1)
            self.assertEqual(row.number_of_reservations, initial_res_count)

            data["number_of_reservations"] = initial_res_count + 1
            row_id_2 = self.accessor.insert_on_conflict_do_update(
                table, data, conflict_columns=["reservation_arn"], set_columns=list(data.keys())
            )
            row = query.filter(id=row_id_2).first()

            self.assertEqual(insert_count, query.count())
            self.assertEqual(row_id, row_id_2)
            self.assertEqual(row.number_of_reservations, initial_res_count + 1)
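
    # For context: a hedged sketch of the PostgreSQL statement that an
    # insert_on_conflict_do_update helper like the one exercised above could
    # issue. This is an illustrative assumption, not the accessor's actual
    # implementation; the RETURNING clause yields the id of the inserted or
    # updated row, matching the row_id comparisons in the test.
    @staticmethod
    def _sketch_insert_on_conflict_do_update(table, data, conflict_columns, set_columns):
        from django.db import connection

        columns = list(data.keys())
        col_sql = ", ".join(columns)
        placeholders = ", ".join(["%s"] * len(columns))
        # EXCLUDED refers to the row proposed for insertion
        set_sql = ", ".join(f"{col} = EXCLUDED.{col}" for col in set_columns)
        sql = (
            f"INSERT INTO {table._meta.db_table} ({col_sql}) "
            f"VALUES ({placeholders}) "
            f"ON CONFLICT ({', '.join(conflict_columns)}) DO UPDATE SET {set_sql} "
            f"RETURNING id"
        )
        with connection.cursor() as cursor:
            cursor.execute(sql, list(data.values()))
            return cursor.fetchone()[0]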
    def purge_expired_report_data_by_date(self, expired_date, simulate=False):
        """Purge Azure report data with billing periods before expired_date."""
        partition_from = str(date(expired_date.year, expired_date.month, 1))
        with AzureReportDBAccessor(self._schema) as accessor:
            all_bill_objects = accessor.get_bill_query_before_date(expired_date).all()
            table_names = [
                accessor._table_map["ocp_on_azure_daily_summary"],
                accessor._table_map["ocp_on_azure_project_daily_summary"],
                accessor.line_item_daily_summary_table._meta.db_table,
                accessor.ocpall_line_item_daily_summary_table._meta.db_table,
                accessor.ocpall_line_item_project_daily_summary_table._meta.db_table,
            ]
            table_names.extend(UI_SUMMARY_TABLES)
            table_models = [get_model(tn) for tn in table_names]

        with schema_context(self._schema):
            removed_items = []
            all_providers = set()
            all_period_starts = set()

            # Record the provider and billing period for each expired bill;
            # the underlying rows can involve much larger amounts of data
            for bill in all_bill_objects:
                removed_items.append({
                    "provider_uuid": bill.provider_id,
                    "billing_period_start": str(bill.billing_period_start),
                })
                all_providers.add(bill.provider_id)
                all_period_starts.add(str(bill.billing_period_start))

            LOG.info(
                f"Deleting data for providers {all_providers} and periods {all_period_starts}"
            )

            if not simulate:
                # Will call trigger to detach, truncate, and drop partitions
                LOG.info(
                    "Deleting table partitions for the following tables: "
                    + f"{table_names} with partitions <= {partition_from}")
                del_count = execute_delete_sql(
                    PartitionedTable.objects.filter(
                        schema_name=self._schema,
                        partition_of_table_name__in=table_names,
                        partition_parameters__default=False,
                        partition_parameters__from__lte=partition_from,
                    ))
                LOG.info(f"Deleted {del_count} table partitions")

                # Using skip_relations here as the partitions were already dropped above
                cascade_delete(all_bill_objects.query.model,
                               all_bill_objects,
                               skip_relations=table_models)

        return removed_items
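
    # For context: the partition_parameters filters above are Django JSON-field
    # lookups. A hedged sketch of the matching criteria in raw SQL terms (the
    # exact SQL Django emits may differ); execute_delete_sql then fires the
    # trigger that detaches, truncates, and drops each matching partition.
    _PARTITION_DELETE_WHERE_SKETCH = """
        WHERE schema_name = %s
          AND partition_of_table_name = ANY(%s)
          AND (partition_parameters ->> 'default') = 'false'
          AND (partition_parameters ->> 'from') <= %s
    """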
Example #4
    def purge_expired_report_data_by_date(self, expired_date, simulate=False):
        """Purge AWS report data with billing periods before expired_date."""
        partition_from = str(date(expired_date.year, expired_date.month, 1))
        removed_items = []
        all_account_ids = set()
        all_period_start = set()

        with AWSReportDBAccessor(self._schema) as accessor:
            all_bill_objects = accessor.get_bill_query_before_date(expired_date).all()
            for bill in all_bill_objects:
                removed_items.append({
                    "account_payer_id": bill.payer_account_id,
                    "billing_period_start": str(bill.billing_period_start),
                })
                all_account_ids.add(bill.payer_account_id)
                all_period_start.add(str(bill.billing_period_start))

            table_names = [
                accessor._table_map["ocp_on_aws_daily_summary"],
                accessor._table_map["ocp_on_aws_project_daily_summary"],
                accessor.line_item_daily_summary_table._meta.db_table,
                accessor.ocpall_line_item_daily_summary_table._meta.db_table,
                accessor.ocpall_line_item_project_daily_summary_table._meta.db_table,
            ]
            table_names.extend(UI_SUMMARY_TABLES)
            table_models = [get_model(tn) for tn in table_names]

        with schema_context(self._schema):
            if not simulate:
                # Will call trigger to detach, truncate, and drop partitions
                LOG.info(
                    "Deleting table partitions for the following tables: "
                    + f"{table_names} with partitions <= {partition_from}")
                del_count = execute_delete_sql(
                    PartitionedTable.objects.filter(
                        schema_name=self._schema,
                        partition_of_table_name__in=table_names,
                        partition_parameters__default=False,
                        partition_parameters__from__lte=partition_from,
                    ))
                LOG.info(f"Deleted {del_count} table partitions")

                # Using skip_relations here as we have already dropped partitions above
                cascade_delete(all_bill_objects.query.model,
                               all_bill_objects,
                               skip_relations=table_models)

            LOG.info(
                f"Deleting data related to billing account ids {all_account_ids} "
                f"for billing periods starting {all_period_start}")

        return removed_items
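
    # For context: a hedged sketch of what a cascade_delete helper with
    # skip_relations could do -- delete dependent rows over reverse foreign-key
    # relations before the bills themselves, skipping models whose partitions
    # were already dropped above. This is an assumption, not the helper's
    # actual implementation.
    @staticmethod
    def _sketch_cascade_delete(model, queryset, skip_relations=()):
        for relation in model._meta.related_objects:
            child_model = relation.related_model
            if child_model in skip_relations:
                continue  # data already removed via partition drops
            child_filter = {f"{relation.field.name}__in": queryset}
            child_model.objects.filter(**child_filter).delete()
        queryset.delete()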
    def test_get_primary_key_attribute_error(self):
        """Test that an AttributeError is raised on bad primary key lookup."""
        table_name = AWS_CUR_TABLE_MAP["product"]
        table = get_model(table_name)
        with schema_context(self.schema):
            data = self.creator.create_columns_for_table_with_bakery(table)
            obj = self.accessor.create_db_object(table_name, data)
            obj.save()

            data["sku"] = "".join(random.choice(string.digits) for _ in range(5))
            with self.assertRaises(AttributeError):
                self.accessor._get_primary_key(table_name, data)
    def test_get_primary_key(self):
        """Test that a primary key is returned."""
        table_name = random.choice(self.foreign_key_tables)
        table = get_model(table_name)
        with schema_context(self.schema):
            data = self.creator.create_columns_for_table_with_bakery(table)
            if table_name == AWS_CUR_TABLE_MAP["bill"]:
                data["provider_id"] = self.aws_provider_uuid
            obj = self.accessor.create_db_object(table_name, data)
            obj.save()

            p_key = self.accessor._get_primary_key(table_name, data)

            self.assertIsNotNone(p_key)
    def test_clean_data(self):
        """Test that data cleaning produces proper data types."""
        table_name = random.choice(self.all_tables)
        table = get_model(table_name)
        column_types = self.report_schema.column_types[table_name]

        data = self.creator.create_columns_for_table_with_bakery(table)
        cleaned_data = self.accessor.clean_data(data, table_name)

        for key, value in cleaned_data.items():
            if key not in column_types:
                continue
            column_type = column_types[key]
            expected_type = map_django_field_type_to_python_type(column_type)
            self.assertIsInstance(value, expected_type)
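
    # For context: a hedged sketch of what map_django_field_type_to_python_type
    # could look like; the exact mapping used by the accessor may differ.
    @staticmethod
    def _sketch_map_django_field_type_to_python_type(column_type):
        import datetime
        from decimal import Decimal

        field_type_map = {
            "IntegerField": int,
            "BigIntegerField": int,
            "CharField": str,
            "TextField": str,
            "FloatField": float,
            "DecimalField": Decimal,
            "DateField": datetime.date,
            "DateTimeField": datetime.datetime,
            "JSONField": dict,
        }
        return field_type_map.get(column_type, str)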
Example #8
    def test_remove_all_aws_providers(self):
        """Remove all AWS providers."""
        provider_query = Provider.objects.all().filter(type="AWS-local")

        customer = None
        for provider in provider_query:
            customer = provider.customer
            with tenant_context(provider.customer):
                manager = ProviderManager(provider.uuid)
                manager.remove(
                    self._create_delete_request(self.user, {"Sources-Client": "False"})
                )
        for view in AWS_UI_SUMMARY_TABLES:
            with tenant_context(customer):
                model = get_model(view)
                self.assertFalse(model.objects.count())
    def test_partition_handler_str_table(self):
        """Test partition handling when the target table is given by name."""
        new_table_sql = f"""
create table {self.schema}._eek_pt0 (usage_start date not null, id int) partition by range (usage_start);
"""
        with schema_context(self.schema):
            with connection.cursor() as cur:
                cur.execute(new_table_sql)

            partable = get_model("PartitionedTable")
            default_part = partable(
                schema_name=self.schema,
                table_name="_eek_pt0_default",
                partition_of_table_name="_eek_pt0",
                partition_type=partable.RANGE,
                partition_col="usage_start",
                partition_parameters={"default": True},
                active=True,
            )
            default_part.save()

            ocrsu = OCPCloudReportSummaryUpdater(
                self.schema, self.ocp_on_aws_ocp_provider, None
            )
            num_eek = partable.objects.filter(
                schema_name=self.schema,
                partition_of_table_name="_eek_pt0").count()
            self.assertEqual(num_eek, 1)

            ocrsu._handle_partitions(self.schema, "_eek_pt0",
                                     datetime.date(1970, 10, 1),
                                     datetime.date(1970, 12, 1))
            eek_p = partable.objects.filter(
                schema_name=self.schema,
                partition_of_table_name="_eek_pt0",
                partition_parameters__default=False).all()
            self.assertEqual(len(eek_p), 3)

            eek_p.delete()
            default_part.delete()

            with connection.cursor() as cur:
                cur.execute(f"drop table {self.schema}._eek_pt0 ;")
    def test_insert_on_conflict_do_nothing_with_conflict(self):
        """Test that an INSERT succeeds ignoring the conflicting row."""
        table_name = AWS_CUR_TABLE_MAP["product"]
        table = get_model(table_name)
        with schema_context(self.schema):
            data = self.creator.create_columns_for_table_with_bakery(table)
            query = self.accessor._get_db_obj_query(table_name)

            initial_count = query.count()

            row_id = self.accessor.insert_on_conflict_do_nothing(table, data)

            insert_count = query.count()

            self.assertEqual(insert_count, initial_count + 1)

            row_id_2 = self.accessor.insert_on_conflict_do_nothing(table, data)

            self.assertEqual(insert_count, query.count())
            self.assertEqual(row_id, row_id_2)
    def test_insert_on_conflict_do_nothing_without_conflict(self):
        """Test that an INSERT succeeds inserting all non-conflicting rows."""
        table_name = AWS_CUR_TABLE_MAP["product"]
        table = get_model(table_name)

        data = [
            self.creator.create_columns_for_table_with_bakery(table),
            self.creator.create_columns_for_table_with_bakery(table),
        ]
        query = self.accessor._get_db_obj_query(table_name)
        with schema_context(self.schema):
            previous_count = query.count()
            previous_row_id = None
            for entry in data:
                row_id = self.accessor.insert_on_conflict_do_nothing(table, entry)
                count = query.count()

                self.assertEqual(count, previous_count + 1)
                self.assertNotEqual(row_id, previous_row_id)

                previous_count = count
                previous_row_id = row_id
    def test_insert_on_conflict_do_update_without_conflict(self):
        """Test that an INSERT succeeds inserting all non-conflicting rows."""
        table_name = AWS_CUR_TABLE_MAP["reservation"]
        table = get_model(table_name)

        data = [
            self.creator.create_columns_for_table_with_bakery(table),
            self.creator.create_columns_for_table_with_bakery(table),
        ]
        query = self.accessor._get_db_obj_query(table_name)
        with schema_context(self.schema):
            previous_count = query.count()
            previous_row_id = None
            for entry in data:
                row_id = self.accessor.insert_on_conflict_do_update(
                    table, entry, conflict_columns=["reservation_arn"], set_columns=list(entry.keys())
                )
                count = query.count()

                self.assertEqual(count, previous_count + 1)
                self.assertNotEqual(row_id, previous_row_id)

                previous_count = count
                previous_row_id = row_id
Example #13
    def purge_expired_report_data_by_date(self, expired_date, simulate=False):
        """Purge OCP report data with report periods before expired_date."""
        LOG.info("Executing purge_expired_report_data_by_date")
        partition_from = str(date(expired_date.year, expired_date.month, 1))
        removed_items = []
        all_report_periods = []
        all_cluster_ids = set()
        all_period_starts = set()

        with OCPReportDBAccessor(self._schema) as accessor:
            all_usage_periods = accessor._get_db_obj_query(
                accessor._table_map["report_period"]
            ).filter(report_period_start__lte=expired_date)

            table_names = [accessor._table_map["line_item_daily_summary"]]
            table_names.extend(UI_SUMMARY_TABLES)
            table_models = [get_model(tn) for tn in table_names]

        with schema_context(self._schema):
            # Record identifying info for each expired usage period; the
            # underlying rows can involve much larger amounts of data
            for usage_period in all_usage_periods:
                removed_items.append({
                    "usage_period_id": usage_period.id,
                    "interval_start": str(usage_period.report_period_start),
                })
                all_report_periods.append(usage_period.id)
                all_cluster_ids.add(usage_period.cluster_id)
                all_period_starts.add(str(usage_period.report_period_start))

            all_report_periods.sort()
            LOG.info(
                f"Removing all data related to "
                f"cluster_ids: {all_cluster_ids}; starting periods {all_period_starts}"
            )

            if not simulate:
                # Will call trigger to detach, truncate, and drop partitions
                LOG.info(
                    "Deleting table partitions for the following tables: "
                    + f"{table_names} with partitions <= {partition_from}")
                del_count = execute_delete_sql(
                    PartitionedTable.objects.filter(
                        schema_name=self._schema,
                        partition_of_table_name__in=table_names,
                        partition_parameters__default=False,
                        partition_parameters__from__lte=partition_from,
                    ))
                LOG.info(f"Deleted {del_count} table partitions")

                # Using skip_relations here as the partitions were already dropped above
                cascade_delete(all_usage_periods.query.model,
                               all_usage_periods,
                               skip_relations=table_models)

        return removed_items
Example #14
    @property
    def ocpall_line_item_project_daily_summary_table(self):
        """Return the OCP-on-All project daily summary model."""
        return get_model("OCPAllCostLineItemProjectDailySummaryP")