Example #1
def parse_block(message):
    """
    Process a block. 
    Create and parse if block didn't previously exist.
    Fetch and validate if block did previously exist.
    
    :param message: asgi valid message containing
        block_hash (required) string - hash of the block to process
        parse_next (optional) boolean - set True to move to the next 
                                        block after passing validation
    """
    with schema_context(message.get('chain')):
        block_hash = message.get('block_hash')

        if not block_hash:
            logger.error('no block hash in message')
            return

        block, created = Block.objects.get_or_create(
            hash=block_hash
        )

        if not created:
            logger.info('existing block {} found'.format(block))
        # saving the block triggers validation
        block.save()
Example #2
    def setUpClass(cls):
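        """Configure shared/tenant apps, then create the public tenant, a test tenant, two users and tenant models pointing at them."""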
        super(SharedAuthTest, cls).setUpClass()
        settings.SHARED_APPS = ('tenant_schemas',
                                'django.contrib.auth',
                                'django.contrib.contenttypes', )
        settings.TENANT_APPS = ('dts_test_app', )
        settings.INSTALLED_APPS = settings.SHARED_APPS + settings.TENANT_APPS
        cls.sync_shared()
        Tenant(domain_url='test.com', schema_name=get_public_schema_name()).save()

        # Create a tenant
        cls.tenant = Tenant(domain_url='tenant.test.com', schema_name='tenant')
        cls.tenant.save()

        # Create some users
        with schema_context(get_public_schema_name()):  # this could actually also be executed inside a tenant
            cls.user1 = User(username='******', email="*****@*****.**")
            cls.user1.save()
            cls.user2 = User(username='******', email="*****@*****.**")
            cls.user2.save()

        # Create instances on the tenant that point to the users on public
        with tenant_context(cls.tenant):
            cls.d1 = ModelWithFkToPublicUser(user=cls.user1)
            cls.d1.save()
            cls.d2 = ModelWithFkToPublicUser(user=cls.user2)
            cls.d2.save()
Example #3
def check_block_hash(message):
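    """
    Check that the block hash supplied in the message matches the hash
    reported for the block at that height; log an error if it doesn't.
    """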
    schema = message.get('chain')
    with schema_context(schema):
        block_hash = message.get('block_hash')
        block_height = message.get('block_height')

        check_hash = get_block_hash(block_height, schema)

        if check_hash != block_hash:
            logger.error('block at height {} has incorrect hash'.format(block_height))
Example #4
    def test_switching_tenant_without_previous_tenant(self):
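        """Test that tenant_context and schema_context work when connection.tenant is None."""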
        tenant = Tenant(domain_url='something.test.com', schema_name='test')
        tenant.save()

        connection.tenant = None
        with tenant_context(tenant):
            DummyModel(name="No exception please").save()

        connection.tenant = None
        with schema_context(tenant.schema_name):
            DummyModel(name="Survived it!").save()
Example #5
def display_info(message):
    """
    get the latest info objects and send them for display on the front end
    :param message: 
    :return: 
    """
    schema = str(message.get('chain'))
    chain = Chain.objects.get(schema_name=schema)
    with schema_context(schema):
        max_height = 0
        connections = 0

        for coin in chain.coins.all():
            info = Info.objects.filter(
                unit=coin.unit_code
            ).order_by(
                '-max_height'
            ).first()
            if not info:
                continue
            update_info(
                '{}-supply'.format(coin.code),
                '{:,}'.format(info.money_supply if info.money_supply else 0),
                schema
            )
            update_info(
                '{}-parked'.format(coin.code),
                '{:,}'.format(info.total_parked if info.total_parked else 0),
                schema
            )
            update_info(
                '{}-fee'.format(coin.code),
                '{:,}'.format(info.pay_tx_fee if info.pay_tx_fee else 0),
                schema
            )

            max_height = str(info.max_height)
            connections = str(info.connections)

        update_info('connections', connections, schema)
        update_info('height', max_height, schema)
Example #6
def parse_address(message):
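    """
    Link the address named in the message to the given transaction output,
    creating the Address object if it doesn't already exist.
    """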
    with schema_context(message.get('chain')):
        addr = message.get('address')

        if not addr:
            logger.error('no address passed in message')
            return

        try:
            tx_output = TxOutput.objects.get(pk=message.get('tx_output'))
        except TxOutput.DoesNotExist:
            logger.error('tx_output not found: {}'.format(message.get('tx_output')))
            return

        address, created = Address.objects.get_or_create(
           address=addr,
        )

        if created:
            address.save()

        tx_output.address = address
        tx_output.save()
Example #7
def repair_block(message):
    """
    Repair an existing block
    
    :param message: asgi valid message containing:
        block_hash (required) string - hash of block to repair
    """
    with schema_context(message.get('chain')):
        block_hash = message.get('block_hash')

        if not block_hash:
            logger.error('no block hash in message')
            return

        try:
            block = Block.objects.get(hash=block_hash)
        except Block.DoesNotExist:
            logger.error('no block found for hash {}'.format(block_hash[:7]))
            return

        valid, error_message = block.validate()
        if valid:
            logger.info('block {} is valid'.format(block))
            return

        logger.info('repairing block {}: {}'.format(block, error_message))

        # merkle root error means missing, extra or duplicate transactions
        if error_message == 'merkle root incorrect':
            fix_merkle_root(block, message.get('chain'))
            return

        if error_message == 'incorrect tx indexing':
            fix_merkle_root(block, message.get('chain'))
            return

        if error_message in ['missing attribute: self.previous_block',
                             'no previous block hash',
                             'incorrect previous height']:
            fix_previous_block(block, message.get('chain'))
            return

        if error_message in ['incorrect next height',
                             'next block does not lead on from this block',
                             'missing next block']:
            fix_next_block(block, message.get('chain'))
            return

        if error_message in ['custodian votes do not match',
                             'park rate votes do not match',
                             'motion votes do not match',
                             'fee votes do not match']:
            fix_block_votes(block, message.get('chain'))
            return

        if error_message == 'active park rates do not match':
            fix_block_park_rates(block, message.get('chain'))
            return

        # all other errors with the block can be solved by re-parsing it
        logger.info('re-parsing {}'.format(block))
        rpc, msg = send_rpc(
            {
                'method': 'getblock',
                'params': [block_hash, True, True]
            },
            schema_name=message.get('chain')
        )
        if not rpc:
            return False
        # parse the block to save it
        block.parse_rpc_block(rpc)
Example #8
    def _save_aws_org_method(self, ou, unit_path, level, account=None):
        """
        Recursively crawls the org units and accounts.

        Args:
            ou (dict): The aws organizational unit dictionary
            unit_path (str): The tree path to the org unit
            level (int): The level of the node in the org data tree
            account (dict): The AWS account. None if this is an internal node.
        Returns:
            (AWSOrganizationalUnit): That was created or looked up
        """
        unit_name = ou.get("Name", ou.get("Id"))
        unit_id = ou.get("Id")
        account_alias = None
        account_id = None

        with schema_context(self.schema):
            # This is a leaf node
            if account:
                # Look for an existing alias
                account_id = account.get("Id")
                account_name = account.get("Name")
                account_alias = self._account_alias_map.get(account_id)
                if not account_alias:
                    # Create a new account alias (not cached)
                    account_alias, created = AWSAccountAlias.objects.get_or_create(
                        account_id=account_id)
                    self._account_alias_map[account_id] = account_alias
                    LOG.info(
                        f"Saving account alias {account_alias} (created={created})"
                    )

                if account_name and account_alias.account_alias != account_name:
                    # The alias was not yet set, or the name changed since the last scan.
                    LOG.info(
                        "Updating account alias for account_id=%s, old_account_alias=%s, new_account_alias=%s"
                        % (account_id, account_alias.account_alias,
                           account_name))
                    account_alias.account_alias = account_name
                    account_alias.save()

            # If we add provider here right now it will duplicate the entries
            org_unit, created = AWSOrganizationalUnit.objects.get_or_create(
                org_unit_name=unit_name,
                org_unit_id=unit_id,
                org_unit_path=unit_path,
                account_alias=account_alias,
                level=level,
            )

            # Remove key since we have seen it
            lookup_key = self._create_lookup_key(unit_id, account_id)
            self._structure_yesterday.pop(lookup_key, None)
            if created:
                # only log it was saved if was created to reduce logging on everyday calls
                LOG.info(
                    "Saving account or org unit: unit_name={}, unit_id={}, "
                    "unit_path={}, account_alias={}, provider_uuid={}, account_id={}, level={}"
                    .format(
                        unit_name,
                        unit_id,
                        unit_path,
                        account_alias,
                        self.account.get("provider_uuid"),
                        self.account_id,
                        level,
                    ))
            elif org_unit.deleted_timestamp is not None:
                LOG.warning(
                    "Org unit {} was found with a deleted_timestamp for account"
                    " with provider_uuid={} and account_id={}. Setting deleted_timestamp to null!"
                    .format(org_unit.org_unit_id,
                            self.account.get("provider_uuid"),
                            self.account_id))
                org_unit.deleted_timestamp = None
                org_unit.save()
            # Since we didn't add the provider foreign key initially
            # we need to add a bit of self healing here to repair the
            # nodes that are currently in customer's databases.
            if not org_unit.provider and self.provider:
                org_unit.provider = self.provider
                org_unit.save()
            return org_unit
Example #9
 def get_cost_entry_bills_query_by_provider(self, provider_uuid):
     """Return all cost entry bills for the specified provider."""
     table_name = AzureCostEntryBill
     with schema_context(self.schema):
         return self._get_db_obj_query(table_name).filter(
             provider_id=provider_uuid)
Example #10
    def test_delete_line_items_use_data_cutoff_date(self, mock_should_process):
        """Test that only three days of data are deleted."""
        mock_should_process.return_value = True

        today = self.date_accessor.today_with_timezone("UTC").replace(
            hour=0, minute=0, second=0, microsecond=0)
        first_of_month = today.replace(day=1)
        first_of_next_month = first_of_month + relativedelta(months=1)
        days_in_month = [
            today - relativedelta(days=i) for i in range(today.day)
        ]

        self.manifest.billing_period_start_datetime = first_of_month
        self.manifest.save()

        data = []

        with open(self.test_report, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                data.append(row)

        for row in data:
            row["lineItem/UsageStartDate"] = random.choice(days_in_month)
            row["bill/BillingPeriodStartDate"] = first_of_month
            row["bill/BillingPeriodEndDate"] = first_of_next_month

        tmp_file = "/tmp/test_delete_data_cutoff.csv"
        field_names = data[0].keys()

        with open(tmp_file, "w") as f:
            writer = csv.DictWriter(f, fieldnames=field_names)
            writer.writeheader()
            writer.writerows(data)

        processor = AWSReportProcessor(
            schema_name=self.schema,
            report_path=tmp_file,
            compression=UNCOMPRESSED,
            provider_uuid=self.aws_provider_uuid,
            manifest_id=self.manifest.id,
        )
        processor.process()

        # Get latest data date.
        with schema_context(self.schema):
            bills = self.accessor.get_cost_entry_bills()
            for bill_id in bills.values():
                line_item_query = self.accessor.get_lineitem_query_for_billid(
                    bill_id)
                undeleted_max_date = line_item_query.aggregate(
                    max_date=Max("usage_start"))

        mock_should_process.return_value = False
        processor._delete_line_items(AWSReportDBAccessor,
                                     self.column_map,
                                     is_finalized=False)

        with schema_context(self.schema):
            bills = self.accessor.get_cost_entry_bills()
            for bill_id in bills.values():
                line_item_query = self.accessor.get_lineitem_query_for_billid(
                    bill_id)
                if today.day <= 3:
                    self.assertEqual(line_item_query.count(), 0)
                else:
                    max_date = line_item_query.aggregate(
                        max_date=Max("usage_start"))
                    self.assertLess(
                        max_date.get("max_date").date(),
                        processor.data_cutoff_date)
                    self.assertLess(
                        max_date.get("max_date").date(),
                        undeleted_max_date.get("max_date").date())
                    self.assertNotEqual(line_item_query.count(), 0)
Example #11
    def test_process_finalized_rows_small_batch_size(self):
        """Test that a finalized bill is processed properly on batch size."""
        data = []
        table_name = AWS_CUR_TABLE_MAP["line_item"]

        with open(self.test_report, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                data.append(row)

        for row in data:
            row["bill/InvoiceId"] = "12345"

        tmp_file = "/tmp/test_process_finalized_rows.csv"
        field_names = data[0].keys()

        with open(tmp_file, "w") as f:
            writer = csv.DictWriter(f, fieldnames=field_names)
            writer.writeheader()
            writer.writerows(data)

        processor = AWSReportProcessor(
            schema_name=self.schema,
            report_path=self.test_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.aws_provider_uuid,
        )

        # Process for the first time
        processor.process()
        report_db = self.accessor
        report_schema = report_db.report_schema

        bill_table_name = AWS_CUR_TABLE_MAP["bill"]
        bill_table = getattr(report_schema, bill_table_name)
        with schema_context(self.schema):
            bill = bill_table.objects.first()
            self.assertIsNone(bill.finalized_datetime)

        table = getattr(report_schema, table_name)
        with schema_context(self.schema):
            orig_count = table.objects.count()

        # Wipe stale data
        with schema_context(self.schema):
            self.accessor._get_db_obj_query(table_name).delete()

        processor = AWSReportProcessor(
            schema_name=self.schema,
            report_path=tmp_file,
            compression=UNCOMPRESSED,
            provider_uuid=self.aws_provider_uuid,
        )
        processor._batch_size = 2
        # Process for the second time
        processor.process()

        with schema_context(self.schema):
            count = table.objects.count()
            self.assertTrue(count == orig_count)
            count = table.objects.filter(invoice_id__isnull=False).count()
            self.assertTrue(count == orig_count)

        with schema_context(self.schema):
            bill = bill_table.objects.first()
            self.assertIsNotNone(bill.finalized_datetime)
Example #12
 def test_crawl_org_for_acts(self, mock_session):
     "Test that if an exception is raised the crawl continues"
     mock_session.client = MagicMock()
     paginator_dict = {
         "r-0": {
             "OrganizationalUnits": [
                 {
                     "Id": "ou-0",
                     "Arn": "arn-0",
                     "Name": "Big_Org_0"
                 },
                 {
                     "Id": "ou-1",
                     "Arn": "arn-1",
                     "Name": "Big_Org_1"
                 },
                 {
                     "Id": "ou-2",
                     "Arn": "arn-2",
                     "Name": "Big_Org_2"
                 },
             ]
         },
         "ou-0": {
             "OrganizationalUnits": [{
                 "Id": "sou-0",
                 "Arn": "arn-0",
                 "Name": "Sub_Org_0"
             }]
         },
         "ou-1": {
             "OrganizationalUnits": []
         },
         "ou-2": Exception("Error"),
         "sou-0": {
             "OrganizationalUnits": []
         },
     }
     account_side_effect = []
     paginator_side_effect = []
     ou_ids = ["r-0", "ou-0", "ou-1", "ou-2", "sou-0"]
     for ou_id in ou_ids:
         parent_acts = _generate_act_for_parent_side_effect(
             self.schema, ou_id)
         account_side_effect.extend(parent_acts)
         paginator = MagicMock()
         paginator.paginate(
             ParentId=ou_id
         ).build_full_result.return_value = paginator_dict[ou_id]
         paginator_side_effect.append(paginator)
     unit_crawler = AWSOrgUnitCrawler(self.account)
     unit_crawler._init_session()
     unit_crawler._client.list_roots.return_value = {
         "Roots": [{
             "Id": "r-0",
             "Arn": "arn-0",
             "Name": "root_0"
         }]
     }
     unit_crawler._client.list_accounts_for_parent.side_effect = account_side_effect
     unit_crawler._client.get_paginator.side_effect = paginator_side_effect
     unit_crawler.crawl_account_hierarchy()
     with schema_context(self.schema):
         cur_count = AWSOrganizationalUnit.objects.count()
         total_entries = (len(ou_ids) * GEN_NUM_ACT_DEFAULT) + len(ou_ids)
         self.assertEqual(cur_count, total_entries)
Example #13
    def test_update_summary_tables_aws_end_date(self, mock_charge_info):
        """Test that the summary table task respects a date range."""
        provider = 'AWS'
        provider_aws_uuid = self.aws_test_provider_uuid
        ce_table_name = AWS_CUR_TABLE_MAP['cost_entry']
        daily_table_name = AWS_CUR_TABLE_MAP['line_item_daily']
        summary_table_name = AWS_CUR_TABLE_MAP['line_item_daily_summary']

        start_date = self.start_date.replace(
            day=1, hour=0, minute=0, second=0,
            microsecond=0) + relativedelta.relativedelta(months=-1)

        end_date = start_date + timedelta(days=10)
        end_date = end_date.replace(hour=23, minute=59, second=59)

        daily_table = getattr(self.aws_accessor.report_schema,
                              daily_table_name)
        summary_table = getattr(self.aws_accessor.report_schema,
                                summary_table_name)
        ce_table = getattr(self.aws_accessor.report_schema, ce_table_name)

        with schema_context(self.schema):
            ce_start_date = ce_table.objects\
                .filter(interval_start__gte=start_date)\
                .aggregate(Min('interval_start'))['interval_start__min']
            ce_end_date = ce_table.objects\
                .filter(interval_start__lte=end_date)\
                .aggregate(Max('interval_start'))['interval_start__max']

        # The summary tables will only include dates where there is data
        expected_start_date = max(start_date, ce_start_date)
        expected_start_date = expected_start_date.replace(hour=0,
                                                          minute=0,
                                                          second=0,
                                                          microsecond=0)
        expected_end_date = min(end_date, ce_end_date)
        expected_end_date = expected_end_date.replace(hour=0,
                                                      minute=0,
                                                      second=0,
                                                      microsecond=0)

        update_summary_tables(self.schema, provider, provider_aws_uuid,
                              start_date, end_date)

        with schema_context(self.schema):
            daily_entry = daily_table.objects.all().aggregate(
                Min('usage_start'), Max('usage_end'))
            result_start_date = daily_entry['usage_start__min']
            result_end_date = daily_entry['usage_end__max']

        self.assertEqual(result_start_date, expected_start_date)
        self.assertEqual(result_end_date, expected_end_date)

        with schema_context(self.schema):
            summary_entry = summary_table.objects.all().aggregate(
                Min('usage_start'), Max('usage_end'))
            result_start_date = summary_entry['usage_start__min']
            result_end_date = summary_entry['usage_end__max']

        self.assertEqual(result_start_date, expected_start_date)
        self.assertEqual(result_end_date, expected_end_date)
Example #14
 def get_group_count(group_id, failures=False, cached=Conf.CACHED):
     # Wrapper method to get count of groups with awareness of schema
     schema_name = connection.schema_name
     with schema_context(schema_name):
         return count_group(group_id, failures, cached)
Example #15
 def fetch_task(task_id, wait=0, cached=Conf.CACHED):
     # Wrapper method to fetch a single task with awareness of schema
     schema_name = connection.schema_name
     with schema_context(schema_name):
         return fetch(task_id, wait, cached)
Example #16
 def get_result(task_id, wait=0, cached=Conf.CACHED):
     # Wrapper method to get result of a task with awareness of schema
     schema_name = connection.schema_name
     with schema_context(schema_name):
         return result(task_id, wait, cached)
Example #17
    def _generate_ocp_on_azure_data(self, cluster_id=None):
        """Generate OpenShift and Azure data sufficient for matching."""
        if not cluster_id:
            cluster_id = self.ocp_provider_resource_name
        creator = ReportObjectCreator(self.schema, self.column_map)
        bill_table_name = AZURE_REPORT_TABLE_MAP['bill']
        with AzureReportDBAccessor(self.schema, self.column_map) as accessor:
            accessor._get_db_obj_query(bill_table_name).all().delete()
        bill_ids = []
        today = DateAccessor().today_with_timezone('UTC')
        last_month = today - relativedelta(months=1)

        instance_id = '/subscriptions/99999999-9999-9999-9999-999999999999'\
                      + '/resourceGroups/koku-99hqd-rg/providers/Microsoft.Compute/'\
                      + 'virtualMachines/koku-99hqd-worker-eastus1-jngbr'
        node = instance_id.split('/')[8]

        with schema_context(self.schema):
            for cost_entry_date in (today, last_month):
                bill = creator.create_azure_cost_entry_bill(
                    provider_uuid=self.azure_provider.uuid,
                    bill_date=cost_entry_date)
                bill_ids.append(str(bill.id))
                product = creator.create_azure_cost_entry_product(
                    provider_uuid=self.azure_provider.uuid,
                    instance_id=instance_id)
                meter = creator.create_azure_meter(
                    provider_uuid=self.azure_provider.uuid)
                creator.create_azure_cost_entry_line_item(
                    bill, product, meter, usage_date_time=cost_entry_date)
        with OCPReportDBAccessor(self.schema, self.column_map) as ocp_accessor:
            for cost_entry_date in (today, last_month):
                period = creator.create_ocp_report_period(
                    self.ocp_test_provider_uuid,
                    period_date=cost_entry_date,
                    cluster_id=cluster_id)
                report = creator.create_ocp_report(period, cost_entry_date)
                creator.create_ocp_usage_line_item(period, report, node=node)
            ocp_report_table_name = OCP_REPORT_TABLE_MAP['report']
            with schema_context(self.schema):
                report_table = getattr(ocp_accessor.report_schema,
                                       ocp_report_table_name)

                report_entry = report_table.objects.all().aggregate(
                    Min('interval_start'), Max('interval_start'))
                start_date = report_entry['interval_start__min']
                end_date = report_entry['interval_start__max']

                start_date = start_date.replace(hour=0,
                                                minute=0,
                                                second=0,
                                                microsecond=0)
                end_date = end_date.replace(hour=0,
                                            minute=0,
                                            second=0,
                                            microsecond=0)

            ocp_accessor.populate_line_item_daily_table(
                start_date, end_date, cluster_id)
            ocp_accessor.populate_line_item_daily_summary_table(
                start_date, end_date, cluster_id)

        return bill_ids
Example #18
    def test_process_usage_and_storage_with_invalid_data(self):
        """Test that processing succeeds when rows are missing data."""
        pod_report = f"{self.temp_dir}/e6b3701e-1e91-433b-b238-a31e49937558_February-2019-my-ocp-cluster-1-invalid.csv"
        storage_report = f"{self.temp_dir}/e6b3701e-1e91-433b-b238-a31e49937558_storage-invalid.csv"

        pod_data = []
        storage_data = []
        with open(self.test_report_path) as f:
            reader = csv.DictReader(f)
            for row in reader:
                row["node"] = None
                pod_data.append(row)

        header = pod_data[0].keys()
        with open(pod_report, "w") as f:
            writer = csv.DictWriter(f, fieldnames=header)
            writer.writeheader()
            writer.writerows(pod_data)

        with open(self.storage_report_path) as f:
            reader = csv.DictReader(f)
            for row in reader:
                row["persistentvolume"] = None
                storage_data.append(row)

        header = storage_data[0].keys()
        with open(storage_report, "w") as f:
            writer = csv.DictWriter(f, fieldnames=header)
            writer.writeheader()
            writer.writerows(storage_data)

        storage_processor = OCPReportProcessor(
            schema_name="acct10001",
            report_path=storage_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.ocp_provider_uuid,
        )

        report_db = self.accessor
        table_name = OCP_REPORT_TABLE_MAP["storage_line_item"]
        report_schema = report_db.report_schema
        table = getattr(report_schema, table_name)
        with schema_context(self.schema):
            storage_before_count = table.objects.count()

        storage_processor.process()

        with schema_context(self.schema):
            storage_after_count = table.objects.count()
        self.assertEqual(storage_after_count, storage_before_count)

        processor = OCPReportProcessor(
            schema_name="acct10001",
            report_path=pod_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.ocp_provider_uuid,
        )

        report_db = self.accessor
        table_name = OCP_REPORT_TABLE_MAP["line_item"]
        report_schema = report_db.report_schema
        table = getattr(report_schema, table_name)
        with schema_context(self.schema):
            before_count = table.objects.count()

        processor.process()

        with schema_context(self.schema):
            after_count = table.objects.count()
        self.assertEqual(after_count, before_count)
Example #19
    def test_process_usage_and_storage_default(self):
        """Test the processing of an uncompressed storage and usage files."""
        storage_processor = OCPReportProcessor(
            schema_name="acct10001",
            report_path=self.storage_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.ocp_provider_uuid,
        )

        report_db = self.accessor
        table_name = OCP_REPORT_TABLE_MAP["storage_line_item"]
        report_schema = report_db.report_schema
        table = getattr(report_schema, table_name)
        with schema_context(self.schema):
            storage_before_count = table.objects.count()

        storage_processor.process()

        with schema_context(self.schema):
            storage_after_count = table.objects.count()
        self.assertGreater(storage_after_count, storage_before_count)

        processor = OCPReportProcessor(
            schema_name="acct10001",
            report_path=self.test_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.ocp_provider_uuid,
        )

        report_db = self.accessor
        table_name = OCP_REPORT_TABLE_MAP["line_item"]
        report_schema = report_db.report_schema
        table = getattr(report_schema, table_name)
        with schema_context(self.schema):
            before_count = table.objects.count()

        processor.process()

        with schema_context(self.schema):
            after_count = table.objects.count()
        self.assertGreater(after_count, before_count)

        node_label_processor = OCPReportProcessor(
            schema_name="acct10001",
            report_path=self.node_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.ocp_provider_uuid,
        )

        report_db = self.accessor
        table_name = OCP_REPORT_TABLE_MAP["node_label_line_item"]
        report_schema = report_db.report_schema
        table = getattr(report_schema, table_name)
        with schema_context(self.schema):
            node_label_before_count = table.objects.count()

        node_label_processor.process()

        with schema_context(self.schema):
            node_label_after_count = table.objects.count()
        self.assertGreater(node_label_after_count, node_label_before_count)
Example #20
    def test_populate_line_item_daily_summary_table(self):
        """Test that the daily summary table is populated."""
        summary_table_name = AZURE_REPORT_TABLE_MAP["line_item_daily_summary"]
        summary_table = getattr(self.accessor.report_schema, summary_table_name)

        bills = self.accessor.get_cost_entry_bills_query_by_provider(self.azure_provider_uuid)
        with schema_context(self.schema):
            bill_ids = [str(bill.id) for bill in bills.all()]

        table_name = AZURE_REPORT_TABLE_MAP["line_item"]
        line_item_table = getattr(self.accessor.report_schema, table_name)
        tag_query = self.accessor._get_db_obj_query(table_name)
        possible_keys = []
        possible_values = []
        with schema_context(self.schema):
            for item in tag_query:
                possible_keys += list(item.tags.keys())
                possible_values += list(item.tags.values())

            li_entry = line_item_table.objects.all().aggregate(Min("usage_date"), Max("usage_date"))
            start_date = li_entry["usage_date__min"]
            end_date = li_entry["usage_date__max"]

        start_date = start_date.date() if isinstance(start_date, datetime.datetime) else start_date
        end_date = end_date.date() if isinstance(end_date, datetime.datetime) else end_date

        query = self.accessor._get_db_obj_query(summary_table_name)
        with schema_context(self.schema):
            query.delete()
            initial_count = query.count()

        self.accessor.populate_line_item_daily_summary_table(start_date, end_date, bill_ids)
        with schema_context(self.schema):
            self.assertNotEqual(query.count(), initial_count)

            summary_entry = summary_table.objects.all().aggregate(Min("usage_start"), Max("usage_start"))
            result_start_date = summary_entry["usage_start__min"]
            result_end_date = summary_entry["usage_start__max"]

            self.assertEqual(result_start_date, start_date)
            self.assertEqual(result_end_date, end_date)

            entry = query.order_by("-id")

            summary_columns = [
                "usage_start",
                "usage_quantity",
                "pretax_cost",
                "cost_entry_bill_id",
                "meter_id",
                "tags",
            ]

            for column in summary_columns:
                self.assertIsNotNone(getattr(entry.first(), column))

            found_keys = []
            found_values = []
            for item in query.all():
                found_keys += list(item.tags.keys())
                found_values += list(item.tags.values())

            self.assertEqual(set(sorted(possible_keys)), set(sorted(found_keys)))
            self.assertEqual(set(sorted(possible_values)), set(sorted(found_values)))
Example #21
 def test_bills_for_provider_uuid(self):
     """Test that bills_for_provider_uuid returns the right bills."""
     bills = self.accessor.bills_for_provider_uuid(self.azure_provider_uuid, start_date=self.dh.this_month_start)
     with schema_context(self.schema):
         self.assertEquals(len(bills), 1)
Example #22
    def load_aws_data(self,
                      customer,
                      static_data_file,
                      account_id=None,
                      provider_resource_name=None):
        """Load AWS data into the database."""
        provider_type = Provider.PROVIDER_AWS_LOCAL
        if account_id is None:
            account_id = "9999999999999"
        if provider_resource_name is None:
            provider_resource_name = "arn:aws:iam::999999999999:role/CostManagement"
        nise_provider_type = provider_type.replace("-local", "")
        report_name = "Test"
        with patch.object(settings, "AUTO_DATA_INGEST", False):
            provider = baker.make(
                "Provider",
                type=provider_type,
                authentication__provider_resource_name=provider_resource_name,
                customer=customer,
                billing_source__bucket="test-bucket",
            )
        template, static_data_path = self.prepare_template(
            provider_type, static_data_file)
        options = {
            "static_report_file": static_data_path,
            "aws_report_name": report_name,
            "aws_bucket_name": self.nise_data_path,
        }
        base_path = f"{self.nise_data_path}/{report_name}"

        with schema_context(self.schema):
            baker.make("AWSAccountAlias",
                       account_id=account_id,
                       account_alias="Test Account")

        for start_date, end_date, bill_date in self.dates:
            manifest = baker.make(
                "CostUsageReportManifest",
                _fill_optional=True,
                provider=provider,
                billing_period_start_datetime=bill_date,
            )
            with open(static_data_path, "w") as f:
                f.write(
                    template.render(start_date=start_date,
                                    end_date=end_date,
                                    account_id=account_id))

            run(nise_provider_type.lower(), options)

            report_path = self.build_report_path(provider_type, bill_date,
                                                 base_path)
            for report in os.scandir(report_path):
                if os.path.isdir(report):
                    for report in [
                            f.path
                            for f in os.scandir(f"{report_path}/{report.name}")
                    ]:
                        if os.path.isdir(report):
                            continue
                        elif "manifest" in report.lower():
                            continue
                        self.process_report(report, "GZIP", provider_type,
                                            provider, manifest)
            with patch("masu.processor.tasks.chain"), patch.object(
                    settings, "AUTO_DATA_INGEST", False):
                update_summary_tables(self.schema,
                                      provider_type,
                                      provider.uuid,
                                      start_date,
                                      end_date,
                                      manifest_id=manifest.id)
        update_cost_model_costs(self.schema, provider.uuid,
                                self.dh.last_month_start, self.dh.today)
        refresh_materialized_views(self.schema, provider_type)
        shutil.rmtree(base_path, ignore_errors=True)
Example #23
 def delete_task_group(group_id, tasks=False, cached=Conf.CACHED):
     # Wrapper method to delete task group with awareness of schema
     schema_name = connection.schema_name
     with schema_context(schema_name):
         return delete_group(group_id, tasks, cached)
Example #24
    def test_update_summary_tables_ocp(self, mock_markup, mock_rate_map,
                                       mock_charge_info, mock_cost_summary):
        """Test that the summary table task runs."""
        markup = {}
        mem_rate = {'tiered_rates': [{'value': '1.5', 'unit': 'USD'}]}
        cpu_rate = {'tiered_rates': [{'value': '2.5', 'unit': 'USD'}]}
        rate_metric_map = {
            'cpu_core_usage_per_hour': cpu_rate,
            'memory_gb_usage_per_hour': mem_rate
        }

        mock_markup.return_value = markup
        mock_rate_map.return_value = rate_metric_map

        provider = 'OCP'
        provider_ocp_uuid = self.ocp_test_provider_uuid

        daily_table_name = OCP_REPORT_TABLE_MAP['line_item_daily']
        start_date = self.start_date.replace(
            day=1) + relativedelta.relativedelta(months=-1)

        with schema_context(self.schema):
            daily_query = self.ocp_accessor._get_db_obj_query(daily_table_name)

            initial_daily_count = daily_query.count()

        self.assertEqual(initial_daily_count, 0)
        update_summary_tables(self.schema, provider, provider_ocp_uuid,
                              start_date)

        with schema_context(self.schema):
            self.assertNotEqual(daily_query.count(), initial_daily_count)

        update_charge_info(schema_name=self.schema,
                           provider_uuid=provider_ocp_uuid)

        table_name = OCP_REPORT_TABLE_MAP['line_item_daily_summary']
        with ProviderDBAccessor(provider_ocp_uuid) as provider_accessor:
            provider_obj = provider_accessor.get_provider()

        usage_period_qry = self.ocp_accessor.get_usage_period_query_by_provider(
            provider_obj.id)
        with schema_context(self.schema):
            cluster_id = usage_period_qry.first().cluster_id

            items = self.ocp_accessor._get_db_obj_query(table_name).filter(
                cluster_id=cluster_id)
            for item in items:
                self.assertIsNotNone(item.pod_charge_memory_gigabyte_hours)
                self.assertIsNotNone(item.pod_charge_cpu_core_hours)

            storage_daily_name = OCP_REPORT_TABLE_MAP[
                'storage_line_item_daily']

            items = self.ocp_accessor._get_db_obj_query(
                storage_daily_name).filter(cluster_id=cluster_id)
            for item in items:
                self.assertIsNotNone(item.volume_request_storage_byte_seconds)
                self.assertIsNotNone(
                    item.persistentvolumeclaim_usage_byte_seconds)

            storage_summary_name = OCP_REPORT_TABLE_MAP[
                'storage_line_item_daily_summary']
            items = self.ocp_accessor._get_db_obj_query(
                storage_summary_name).filter(cluster_id=cluster_id)
            for item in items:
                self.assertIsNotNone(
                    item.volume_request_storage_gigabyte_months)
                self.assertIsNotNone(
                    item.persistentvolumeclaim_usage_gigabyte_months)

        mock_charge_info.apply_async.assert_called()
        mock_cost_summary.si.assert_called()
Example #25
    def test_populate_line_item_daily_summary_table(self):
        """Test that the line item daily summary table populates."""
        self.tearDown()
        self.reporting_period = self.creator.create_ocp_report_period(
            provider_uuid=self.ocp_provider_uuid, cluster_id=self.cluster_id
        )
        self.report = self.creator.create_ocp_report(self.reporting_period)
        report_table_name = OCP_REPORT_TABLE_MAP['report']
        summary_table_name = OCP_REPORT_TABLE_MAP['line_item_daily_summary']

        report_table = getattr(self.accessor.report_schema, report_table_name)
        summary_table = getattr(self.accessor.report_schema, summary_table_name)

        for _ in range(25):
            self.creator.create_ocp_usage_line_item(self.reporting_period, self.report)
        with schema_context(self.schema):
            report_entry = report_table.objects.all().aggregate(
                Min('interval_start'), Max('interval_start')
            )
            start_date = report_entry['interval_start__min']
            end_date = report_entry['interval_start__max']

            start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
            end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

            query = self.accessor._get_db_obj_query(summary_table_name)
            initial_count = query.count()

            self.accessor.populate_line_item_daily_table(start_date, end_date, self.cluster_id)
            self.accessor.populate_line_item_daily_summary_table(
                start_date, end_date, self.cluster_id
            )

            self.assertNotEqual(query.count(), initial_count)

            summary_entry = summary_table.objects.all().aggregate(
                Min('usage_start'), Max('usage_start')
            )
            result_start_date = summary_entry['usage_start__min']
            result_end_date = summary_entry['usage_start__max']

            self.assertEqual(result_start_date, start_date)
            self.assertEqual(result_end_date, end_date)

            entry = query.first()

        summary_columns = [
            'cluster_id',
            'namespace',
            'node',
            'node_capacity_cpu_core_hours',
            'node_capacity_cpu_cores',
            'node_capacity_memory_gigabyte_hours',
            'node_capacity_memory_gigabytes',
            'pod',
            'pod_labels',
            'pod_limit_cpu_core_hours',
            'pod_limit_memory_gigabyte_hours',
            'pod_request_cpu_core_hours',
            'pod_request_memory_gigabyte_hours',
            'pod_usage_cpu_core_hours',
            'pod_usage_memory_gigabyte_hours',
            'usage_end',
            'usage_start',
        ]

        for column in summary_columns:
            self.assertIsNotNone(getattr(entry, column))
Example #26
    def test_compute_org_structure_interval(self):
        """Test function that computes org structure for an interval."""
        unit_crawler = AWSOrgUnitCrawler(self.account)
        unit_crawler._build_accout_alias_map()
        with schema_context(self.schema):
            cur_count = AWSOrganizationalUnit.objects.count()
            self.assertEqual(cur_count, 0)
        unit_crawler._structure_yesterday = {}
        # Add root node with 1 account
        created_nodes = []
        root = {"Id": "R_001", "Name": "root"}
        root_account = {"Id": "A_001", "Name": "Root Account"}
        created_nodes.append(
            unit_crawler._save_aws_org_method(root, "R_001", 0, None))
        created_nodes.append(
            unit_crawler._save_aws_org_method(root, "R_001", 0, root_account))

        # Add sub_org_unit_1 with 2 accounts
        sub_org_unit_1 = {"Id": "OU_1000", "Name": "sub_org_unit_1"}
        created_nodes.append(
            unit_crawler._save_aws_org_method(sub_org_unit_1, "R_001&OU_1000",
                                              1, None))
        created_nodes.append(
            unit_crawler._save_aws_org_method(sub_org_unit_1, "R_001&OU_1000",
                                              1, {
                                                  "Id": "A_002",
                                                  "Name": "Sub Org Account 2"
                                              }))
        created_nodes.append(
            unit_crawler._save_aws_org_method(sub_org_unit_1, "R_001&OU_1000",
                                              1, {
                                                  "Id": "A_003",
                                                  "Name": "Sub Org Account 3"
                                              }))

        # Change created date to two_days_ago
        with schema_context(self.schema):
            two_days_ago = (unit_crawler._date_accessor.today() -
                            timedelta(2)).strftime("%Y-%m-%d")
            for node in created_nodes:
                node.created_timestamp = two_days_ago
                node.save()
            curr_count = AWSOrganizationalUnit.objects.filter(
                created_timestamp__lte=two_days_ago).count()
            self.assertEqual(curr_count, 5)
            expected_count_2_days_ago = curr_count

        # Add sub_org_unit_2 and move sub_org_unit_1's 2 accounts here
        created_nodes = []
        sub_org_unit_2 = {"Id": "OU_2000", "Name": "sub_org_unit_2"}
        created_nodes.append(
            unit_crawler._save_aws_org_method(sub_org_unit_2, "R_001&OU_2000",
                                              1, None))
        created_nodes.append(
            unit_crawler._save_aws_org_method(sub_org_unit_2, "R_001&OU_2000",
                                              1, {
                                                  "Id": "A_002",
                                                  "Name": "Sub Org Account 2"
                                              }))
        created_nodes.append(
            unit_crawler._save_aws_org_method(sub_org_unit_2, "R_001&OU_2000",
                                              1, {
                                                  "Id": "A_003",
                                                  "Name": "Sub Org Account 3"
                                              }))
        deleted_nodes = unit_crawler._delete_aws_org_unit("OU_1000")

        # Test fake node delete
        unit_crawler._delete_aws_org_unit("sub_org_unit_1_Fake")

        with schema_context(self.schema):
            yesterday = (unit_crawler._date_accessor.today() -
                         timedelta(1)).strftime("%Y-%m-%d")
            for node in created_nodes:
                node.created_timestamp = yesterday
                node.save()
            for node in deleted_nodes:
                node.deleted_timestamp = yesterday
                node.save()
            curr_count = AWSOrganizationalUnit.objects.filter(
                created_timestamp__lte=yesterday).count()
            deleted_count = AWSOrganizationalUnit.objects.filter(
                deleted_timestamp__lte=yesterday).count()
            self.assertEqual(curr_count, 8)
            self.assertEqual(deleted_count, 3)
            expected_yesterday_count = curr_count - deleted_count

        unit_crawler._delete_aws_account("A_002")
        sub_org_unit_2 = {"Id": "OU_3000", "Name": "sub_org_unit_3"}
        unit_crawler._save_aws_org_method(sub_org_unit_2, "R_001&OU_3000", 1,
                                          None)

        with schema_context(self.schema):
            today = unit_crawler._date_accessor.today().strftime("%Y-%m-%d")
            curr_count = AWSOrganizationalUnit.objects.filter(
                created_timestamp__lte=today).count()
            deleted_count = AWSOrganizationalUnit.objects.filter(
                deleted_timestamp__lte=today).count()
            self.assertEqual(curr_count, 9)
            expected_today_count = curr_count - deleted_count

        # 2 days ago count matches
        structure_2_days_ago = unit_crawler._compute_org_structure_interval(
            two_days_ago)
        self.assertEqual(expected_count_2_days_ago, len(structure_2_days_ago))
        # Yesterday count matches
        unit_crawler._compute_org_structure_yesterday()
        self.assertEqual(expected_yesterday_count,
                         len(unit_crawler._structure_yesterday))
        # today
        structure_today = unit_crawler._compute_org_structure_interval(today)
        self.assertEqual(len(structure_today), expected_today_count)
Example #27
 def get_usage_period_query_by_provider(self, provider_uuid):
     """Return all report periods for the specified provider."""
     table_name = OCP_REPORT_TABLE_MAP['report_period']
     with schema_context(self.schema):
         return self._get_db_obj_query(table_name)\
             .filter(provider_id=provider_uuid)
Example #28
    def test_do_not_overwrite_finalized_bill_timestamp(self):
        """Test that a finalized bill timestamp does not get overwritten."""
        data = []
        with open(self.test_report, "r") as f:
            reader = csv.DictReader(f)
            for row in reader:
                data.append(row)

        for row in data:
            row["bill/InvoiceId"] = "12345"

        tmp_file = "/tmp/test_process_finalized_rows.csv"
        field_names = data[0].keys()

        with open(tmp_file, "w") as f:
            writer = csv.DictWriter(f, fieldnames=field_names)
            writer.writeheader()
            writer.writerows(data)

        processor = AWSReportProcessor(
            schema_name=self.schema,
            report_path=self.test_report,
            compression=UNCOMPRESSED,
            provider_uuid=self.aws_provider_uuid,
        )

        # Process for the first time
        processor.process()
        report_db = self.accessor
        report_schema = report_db.report_schema

        bill_table_name = AWS_CUR_TABLE_MAP["bill"]
        bill_table = getattr(report_schema, bill_table_name)
        with schema_context(self.schema):
            bill = bill_table.objects.first()

        with open(tmp_file, "w") as f:
            writer = csv.DictWriter(f, fieldnames=field_names)
            writer.writeheader()
            writer.writerows(data)

        processor = AWSReportProcessor(
            schema_name=self.schema,
            report_path=tmp_file,
            compression=UNCOMPRESSED,
            provider_uuid=self.aws_provider_uuid,
        )
        # Process for the second time
        processor.process()

        finalized_datetime = bill.finalized_datetime

        with open(tmp_file, "w") as f:
            writer = csv.DictWriter(f, fieldnames=field_names)
            writer.writeheader()
            writer.writerows(data)

        processor = AWSReportProcessor(
            schema_name=self.schema,
            report_path=tmp_file,
            compression=UNCOMPRESSED,
            provider_uuid=self.aws_provider_uuid,
        )
        # Process for the third time to make sure the timestamp is the same
        processor.process()
        self.assertEqual(bill.finalized_datetime, finalized_datetime)
Example #29
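# Grab a frame from a parking camera in the 'devorg' tenant, run the detection
# model on it, and store the detected parking spot boxes on the camera record.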
import os
import sys
import cv2
import django

from raedamdjango.cerebro import load_model, clean_boxes

sys.path.append("./raedamdjango/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
django.setup()

from tenant_schemas.utils import schema_context
from parking.models import ParkingCamera

CAM_SHORT_ID = '787a8c5f'

with schema_context('devorg'):
    cam = ParkingCamera.objects.get(id__startswith=CAM_SHORT_ID)
    cap = cv2.VideoCapture(cam.url)
    success, frame = cap.read()
    if not success:
        raise ValueError(f"[!] Cannot read from source '{cam.url}'")

    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    model = load_model()
    r = model.detect([rgb_frame], verbose=0)[0]
    cleaned_boxes = clean_boxes(r, ['car', 'bus', 'truck'], 0.8)
    cam.spots = cleaned_boxes.tolist()
    cam.save()

    for box in cleaned_boxes:
        # assumed continuation: draw each detected box (Mask R-CNN order [y1, x1, y2, x2])
        y1, x1, y2, x2 = (int(v) for v in box)
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
Example #30
 def get_cost_entry_bills_by_date(self, start_date):
     """Return a cost entry bill for the specified start date."""
     table_name = AzureCostEntryBill
     with schema_context(self.schema):
         return self._get_db_obj_query(table_name).filter(
             billing_period_start=start_date)
Example #31
    def test_repartition_all_tables(self):
        """
        Repartition using driver function
        """
        with schema_context(self.schema_name):
            aws_lids = AWSCostEntryLineItemDailySummary.objects.order_by(
                "-usage_start")[0]
            aws_lids.usage_start = aws_lids.usage_start.replace(
                year=(aws_lids.usage_start.year + 11))
            aws_lids.save()
            ocp_lids = OCPUsageLineItemDailySummary.objects.order_by(
                "-usage_start")[0]
            ocp_lids.usage_start = ocp_lids.usage_start.replace(
                year=(aws_lids.usage_start.year + 11))
            ocp_lids.save()
            with conn.cursor() as cur:
                cur.execute(f"""
select (
    select count(*) as a_num_recs
      from {AWSCostEntryLineItemDailySummary._meta.db_table}_default
) as "num_aws_lids_default",
(
    select count(*) as o_num_recs
      from {OCPUsageLineItemDailySummary._meta.db_table}_default
) as "num_ocp_lids_default";
""")
                res = cur.fetchone()
            self.assertTrue(res[0] > 0)
            self.assertTrue(res[1] > 0)

            ppart.repartition_default_data(schema_name=self.schema_name)

            with conn.cursor() as cur:
                cur.execute(f"""
select (
    select count(*) as a_num_recs
      from {AWSCostEntryLineItemDailySummary._meta.db_table}_default
) as "num_aws_lids_default",
(
    select count(*) as o_num_recs
      from {OCPUsageLineItemDailySummary._meta.db_table}_default
) as "num_ocp_lids_default";
""")
                res = cur.fetchone()
            self.assertEqual(res, (0, 0))

            a_newpart = f"{AWSCostEntryLineItemDailySummary._meta.db_table}_{aws_lids.usage_start.strftime('%Y_%m')}"
            o_newpart = f"{OCPUsageLineItemDailySummary._meta.db_table}_{ocp_lids.usage_start.strftime('%Y_%m')}"
            with conn.cursor() as cur:
                cur.execute(f"""
select (
    select count(*) as a_num_recs
      from {a_newpart}
) as "num_aws_lids_default",
(
    select count(*) as o_num_recs
      from {o_newpart}
) as "num_ocp_lids_default";
""")
                res = cur.fetchone()
            self.assertEqual(res, (1, 1))

            # test that insert with new partition bounds will work successfully
            new_ocp_lids = OCPUsageLineItemDailySummary(uuid=uuid.uuid4())
            for col in (x for x in new_ocp_lids._meta.fields
                        if x.name != "uuid"):
                setattr(new_ocp_lids, col.name,
                        getattr(ocp_lids, col.name, None))
            new_day = (new_ocp_lids.usage_start.day +
                       1 if new_ocp_lids.usage_start.day < 28 else
                       new_ocp_lids.usage_start.day - 1)
            new_ocp_lids.usage_start = ocp_lids.usage_start.replace(
                day=new_day)
            new_ocp_lids.save()

            with conn.cursor() as cur:
                cur.execute(f"""
select (
    select count(*) as a_num_recs
      from {a_newpart}
) as "num_aws_lids_default",
(
    select count(*) as o_num_recs
      from {o_newpart}
) as "num_ocp_lids_default";
""")
                res = cur.fetchone()
            self.assertEqual(res, (1, 2))
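The same two-count query is issued three times in the test above; a small helper, sketched here and not part of the original test, would keep the SQL in one place (it reuses the same conn object the test already uses):

def _partition_counts(table_a, table_o):
    """Return (aws_count, ocp_count) row counts for the two given tables."""
    with conn.cursor() as cur:
        cur.execute(
            f"select "
            f"(select count(*) from {table_a}) as num_aws_lids, "
            f"(select count(*) from {table_o}) as num_ocp_lids;"
        )
        return cur.fetchone()

# e.g. the first assertion block above could then read:
# res = _partition_counts(
#     f"{AWSCostEntryLineItemDailySummary._meta.db_table}_default",
#     f"{OCPUsageLineItemDailySummary._meta.db_table}_default",
# )
# self.assertTrue(res[0] > 0 and res[1] > 0)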
Example #32
0
def repair_transaction(message):
    """
    Repair the given transaction.
    :param message: dict containing 'chain' (tenant schema name) and
        'tx_id' (id of the transaction to repair)
    :return:
    """
    with schema_context(message.get('chain')):
        tx_id = message.get('tx_id')
        if not tx_id:
            logger.error('no tx_id passed')
            return

        # get the raw transaction
        rpc_tx, msg = send_rpc(
            {
                'method': 'getrawtransaction',
                'params': [tx_id, 1]
            },
            schema_name=message.get('chain')
        )
        if not rpc_tx:
            return

        block_hash = rpc_tx.get('blockhash')
        if not block_hash:
            logger.error('no block hash found in rpc for tx {}'.format(tx_id[:8]))
            # indicates that block is orphaned?
            # get the transaction to get the block it is attached to
            try:
                tx = Transaction.objects.get(tx_id=tx_id)
            except Transaction.DoesNotExist:
                logger.warning('no existing tx with id {}'.format(tx_id[:8]))
                return

            if not tx.block:
                logger.warning('tx {} has no block'.format(tx_id[:8]))
                return

            # get the current height of this block
            block_height = tx.block.height

            # then delete the block
            tx.block.delete()

            # get the block hash of the actual block at this height
            block_hash = get_block_hash(block_height, message.get('chain'))

        block, block_created = Block.objects.get_or_create(hash=block_hash)
        if block_created:
            # save has triggered validation which will parse the full block with tx
            logger.warning('block {} is new when parsing tx {}'.format(block, tx_id))
            return

        # get the block too for the index
        rpc_block, msg = send_rpc(
            {
                'method': 'getblock',
                'params': [block_hash]
            },
            schema_name=message.get('chain')
        )

        if not rpc_block:
            return

        tx_list = rpc_block.get('tx', [])
        if not tx_list:
            logger.error('problem getting tx_list from block {}'.format(block))
            return

        tx_index = tx_list.index(tx_id)

        try:
            tx = Transaction.objects.get(tx_id=tx_id)
        except Transaction.DoesNotExist:
            logger.warning('tx {} is new.'.format(tx_id[:8]))
            tx = Transaction(tx_id=tx_id, block=block, index=tx_index)
            tx.save(validate=False)

        logger.info('repairing tx {}'.format(tx))

        valid, error_message = tx.validate()

        if valid:
            logger.info('tx {} is valid'.format(tx))
            return

        logger.error('tx {} invalid: {}'.format(tx, error_message))

        if error_message == 'incorrect index':
            tx.index = tx_index
            tx.save()
            logger.info('updated index of {}'.format(tx))
            return

        if error_message == 'no block':
            tx.block = block
            tx.save()
            logger.info('updated block on {}'.format(tx))
            return

        if error_message == 'output has no address':
            for tout in rpc_tx.get('vout', []):
                try:
                    tx_out = tx.outputs.get(index=tout.get('n'))
                except TxOutput.DoesNotExist:
                    logger.warning('output not found: {}'.format(tout.get('n')))
                    tx.save()
                    continue

                script = tout.get('scriptPubKey')
                if not script:
                    logger.warning(
                        'no script found in rpc for output {}'.format(tx_out)
                    )
                    continue

                if script.get('type') == 'park':
                    logger.info('park output')
                    park_data = script.get('park', {})
                    tx_out.park_duration = park_data.get('duration')
                    address = park_data.get('unparkaddress')
                else:
                    addresses = script.get('addresses', [])
                    if not addresses:
                        logger.warning(
                            'no addresses found in rpc for output {}'.format(tx_out)
                        )
                        continue
                    address = addresses[0]

                address_object, _ = Address.objects.get_or_create(address=address)
                if tx_out.address == address_object:
                    logger.info(
                        'output {} already has address {}'.format(tx_out, address)
                    )
                    continue
                tx_out.address = address_object
                # update the value too
                tx_out.value = convert_to_satoshis(tout.get('value', 0.0))
                tx_out.save()
                logger.info('added {} to {}'.format(address, tx_out))
            return

        if error_message == 'address missing from previous output' \
                or error_message == 'previous output value is 0':
            scanned_transactions = []
            for tx_in in tx.inputs.all():
                if tx_in.previous_output:
                    if not tx_in.previous_output.address:
                        previous_tx_id = tx_in.previous_output.transaction.tx_id

                        if previous_tx_id in scanned_transactions:
                            continue

                        rpc_prev_tx, msg = send_rpc(
                            {
                                'method': 'getrawtransaction',
                                'params': [previous_tx_id, 1]
                            },
                            schema_name=message.get('chain')
                        )

                        for tout in rpc_prev_tx.get('vout', []):
                            if tout.get('n') != tx_in.previous_output.index:
                                continue
                            script = tout.get('scriptPubKey')

                            if not script:
                                logger.warning(
                                    'no script found in rpc for output {}'.format(
                                        tx_in.previous_output
                                    )
                                )
                                continue

                            if script.get('type') == 'park':
                                logger.info('park output')
                                park_data = script.get('park', {})
                                tx_in.previous_output.park_duration = park_data.get('duration')  # noqa
                                address = park_data.get('unparkaddress')
                            else:
                                addresses = script.get('addresses', [])
                                if not addresses:
                                    logger.warning(
                                        'no addresses found in rpc for output {}'.format(
                                            tx_in.previous_output
                                        )
                                    )
                                    continue
                                address = addresses[0]

                            address_object, _ = Address.objects.get_or_create(
                                address=address
                            )

                            if tx_in.previous_output.address == address_object:
                                logger.info(
                                    'output {} already has address {}'.format(
                                        tx_in.previous_output,
                                        address
                                    )
                                )
                                continue
                            tx_in.previous_output.address = address_object
                            # update the value too
                            tx_in.previous_output.value = convert_to_satoshis(
                                tout.get('value', 0.0)
                            )
                            tx_in.previous_output.save()
                            logger.info(
                                'added {} to {}'.format(address, tx_in.previous_output)
                            )
                            # re-validate transaction too
                            tx_in.previous_output.transaction.save()

                        scanned_transactions.append(previous_tx_id)
            return

        if error_message == 'park output has no duration':
            for tout in rpc_tx.get('vout', []):
                try:
                    tx_out = tx.outputs.get(index=tout.get('n'))
                except TxOutput.DoesNotExist:
                    logger.warning('output not found: {}'.format(tout.get('n')))
                    tx.save()
                    continue

                script = tout.get('scriptPubKey')
                if not script:
                    logger.warning(
                        'no script found in rpc for output {}'.format(tx_out)
                    )
                    continue

                if script.get('type') != 'park':
                    continue

                park_data = script.get('park', {})
                tx_out.park_duration = park_data.get('duration')
                address = park_data.get('unparkaddress')
                address_object, _ = Address.objects.get_or_create(address=address)
                tx_out.address = address_object
                tx_out.save()
                logger.info('added park data to {}'.format(tx_out))

        tx.parse_rpc_tx(rpc_tx)
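repair_transaction reads only two keys from the message it is given: 'chain' (the tenant schema to work in) and 'tx_id' (the transaction to repair). A minimal sketch of calling it directly with placeholder values; in the project itself the message presumably arrives via a channels consumer rather than a direct call:

# Placeholder values for illustration only.
message = {
    'chain': 'some_chain_schema',   # tenant schema name (placeholder)
    'tx_id': 'aabbccdd',            # id of the transaction to repair (placeholder)
}
repair_transaction(message)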
Example #33
0
    def purge_expired_report_data(self,
                                  expired_date=None,
                                  provider_uuid=None,
                                  simulate=False):
        """Remove report data with a billing start period before specified date.

        Args:
            expired_date (datetime.datetime): The cutoff date for removing data.
            provider_uuid (uuid): The DB id of the provider to purge data for.
            simulate (bool): Whether to simulate the removal.

        Returns:
            ([{}]) List of dictionaries containing 'account_payer_id' and 'billing_period_start'

        """
        with ReportingCommonDBAccessor() as reporting_common:
            column_map = reporting_common.column_map

        with AWSReportDBAccessor(self._schema, column_map) as accessor:
            if (expired_date is None
                    and provider_uuid is None) or (  # noqa: W504
                        expired_date is not None
                        and provider_uuid is not None):
                err = "This method must be called with either expired_date or provider_uuid"
                raise AWSReportDBCleanerError(err)
            removed_items = []

            if expired_date is not None:
                bill_objects = accessor.get_bill_query_before_date(
                    expired_date)
            else:
                bill_objects = accessor.get_cost_entry_bills_query_by_provider(
                    provider_uuid)
            with schema_context(self._schema):
                for bill in bill_objects.all():
                    bill_id = bill.id
                    removed_payer_account_id = bill.payer_account_id
                    removed_billing_period_start = bill.billing_period_start

                    if not simulate:
                        del_count = accessor.get_ocp_aws_summary_query_for_billid(
                            bill_id).delete()
                        LOG.info(
                            "Removing %s OCP-on-AWS summary items for bill id %s",
                            del_count, bill_id)

                        del_count = accessor.get_ocp_aws_project_summary_query_for_billid(
                            bill_id).delete()
                        LOG.info(
                            "Removing %s OCP-on-AWS project summary items for bill id %s",
                            del_count, bill_id)

                        del_count = accessor.get_lineitem_query_for_billid(
                            bill_id).delete()
                        LOG.info(
                            "Removing %s cost entry line items for bill id %s",
                            del_count, bill_id)

                        del_count = accessor.get_daily_query_for_billid(
                            bill_id).delete()
                        LOG.info(
                            "Removing %s cost entry daily items for bill id %s",
                            del_count, bill_id)

                        del_count = accessor.get_summary_query_for_billid(
                            bill_id).delete()
                        LOG.info(
                            "Removing %s cost entry summary items for bill id %s",
                            del_count, bill_id)

                        del_count = accessor.get_cost_entry_query_for_billid(
                            bill_id).delete()
                        LOG.info("Removing %s cost entry items for bill id %s",
                                 del_count, bill_id)

                    LOG.info(
                        "Report data removed for Account Payer ID: %s with billing period: %s",
                        removed_payer_account_id,
                        removed_billing_period_start,
                    )
                    removed_items.append({
                        "account_payer_id":
                        removed_payer_account_id,
                        "billing_period_start":
                        str(removed_billing_period_start),
                    })

                if not simulate:
                    bill_objects.delete()

        return removed_items
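purge_expired_report_data above must be called with exactly one of expired_date or provider_uuid; passing both or neither raises AWSReportDBCleanerError. A usage sketch, where 'cleaner' stands in for a hypothetical instance of the class that defines the method:

import datetime

# 'cleaner' is a hypothetical instance of the cleaner class shown above.
cutoff = datetime.datetime(2023, 12, 1)

# simulate=True reports what would be removed without deleting anything
removed = cleaner.purge_expired_report_data(expired_date=cutoff, simulate=True)
for item in removed:
    print(item['account_payer_id'], item['billing_period_start'])

# alternatively, purge all report data for a single provider:
# removed = cleaner.purge_expired_report_data(provider_uuid=some_provider_uuid)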
Example #34
0
 def get_usage_periods_by_date(self, start_date):
     """Return all report period entries for the specified start date."""
     table_name = OCP_REPORT_TABLE_MAP["report_period"]
     with schema_context(self.schema):
         return self._get_db_obj_query(table_name).filter(
             report_period_start=start_date).all()
Example #35
0
 def get_manifest_by_id(self, manifest_id):
     """Get the manifest by id."""
     with schema_context(self._schema):
         query = self._get_db_obj_query()
         return query.filter(id=manifest_id).first()
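Note the contrast between the two accessor methods above: .first() runs its query immediately inside schema_context and hands back a concrete model instance (or None), while the filter(...).all() queryset is still lazy when it is returned. A sketch, not from the source, of an eager variant of get_usage_periods_by_date:

def get_usage_periods_by_date_eager(self, start_date):
    """Variant of the method above that evaluates inside the schema context."""
    table_name = OCP_REPORT_TABLE_MAP["report_period"]
    with schema_context(self.schema):
        # list() runs the SQL while the tenant schema is active, so the
        # result can be used safely after the context has exited.
        return list(self._get_db_obj_query(table_name).filter(
            report_period_start=start_date))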
Example #36
0
    def purge_expired_report_data(self,
                                  expired_date=None,
                                  provider_uuid=None,
                                  simulate=False):
        """Remove usage data with a report period before specified date.

        Args:
            expired_date (datetime.datetime): The cutoff date for removing data.
            provider_uuid (uuid): The DB id of the provider to purge data for.
            simulate (bool): Whether to simulate the removal.

        Returns:
            ([{}]) List of dictionaries containing 'usage_period_id' and 'interval_start'

        """
        LOG.info("Calling purge_expired_report_data for ocp")

        with OCPReportDBAccessor(self._schema) as accessor:
            if (expired_date is not None
                    and provider_uuid is not None) or (  # noqa: W504
                        expired_date is None and provider_uuid is None):
                err = "This method must be called with expired_date or provider_uuid"
                raise OCPReportDBCleanerError(err)
            removed_items = []

            if expired_date is not None:
                usage_period_objs = accessor.get_usage_period_on_or_before_date(
                    expired_date)
            else:
                usage_period_objs = accessor.get_usage_period_query_by_provider(
                    provider_uuid)
            with schema_context(self._schema):
                for usage_period in usage_period_objs.all():
                    report_period_id = usage_period.id
                    cluster_id = usage_period.cluster_id
                    removed_usage_start_period = usage_period.report_period_start

                    if not simulate:
                        qty = accessor.get_item_query_report_period_id(
                            report_period_id).delete()
                        LOG.info(
                            "Removing %s usage period line items for usage period id %s",
                            qty, report_period_id)

                        qty = accessor.get_daily_usage_query_for_clusterid(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s usage daily items for cluster id %s",
                            qty, cluster_id)

                        qty = accessor.get_summary_usage_query_for_clusterid(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s usage summary items for cluster id %s",
                            qty, cluster_id)

                        qty = accessor.get_cost_summary_for_clusterid(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s cost summary items for cluster id %s",
                            qty, cluster_id)

                        qty = accessor.get_storage_item_query_report_period_id(
                            report_period_id).delete()
                        LOG.info(
                            "Removing %s storage line items for usage period id %s",
                            qty, report_period_id)

                        qty = accessor.get_node_label_item_query_report_period_id(
                            report_period_id).delete()
                        LOG.info(
                            "Removing %s node label line items for usage period id %s",
                            qty, report_period_id)

                        qty = accessor.get_daily_storage_item_query_cluster_id(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s storage dailyitems for cluster id %s",
                            qty, cluster_id)

                        qty = accessor.get_storage_summary_query_cluster_id(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s storage summary for cluster id %s",
                            qty, cluster_id)

                        qty = accessor.get_report_query_report_period_id(
                            report_period_id).delete()
                        LOG.info(
                            "Removing %s usage period items for usage period id %s",
                            qty, report_period_id)

                        qty = accessor.get_ocp_aws_summary_query_for_cluster_id(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s OCP-on-AWS summary items for cluster id %s",
                            qty, cluster_id)

                        qty = accessor.get_ocp_aws_project_summary_query_for_cluster_id(
                            cluster_id).delete()
                        LOG.info(
                            "Removing %s OCP-on-AWS project summary items for cluster id %s",
                            qty, cluster_id)

                    LOG.info(
                        "Report data removed for usage period ID: %s with interval start: %s",
                        report_period_id,
                        removed_usage_start_period,
                    )
                    removed_items.append({
                        "usage_period_id":
                        report_period_id,
                        "interval_start":
                        str(removed_usage_start_period)
                    })

                if not simulate:
                    usage_period_objs.delete()
        return removed_items
Example #37
0
 def tearDown(self):
     """Teardown test case."""
     db_access = AccountAliasAccessor(self.account_id, 'acct10001')
     with schema_context(self.schema):
         db_access._get_db_obj_query().delete()
Example #38
0
def repair_transaction(message):
    """
    Repair the given transaction.
    :param message: dict containing 'chain' (tenant schema name) and
        'tx_id' (id of the transaction to repair)
    :return:
    """
    with schema_context(message.get('chain')):
        tx_id = message.get('tx_id')
        if not tx_id:
            logger.error('no tx_id passed')
            return

        # get the raw transaction
        rpc_tx = send_rpc({
            'method': 'getrawtransaction',
            'params': [tx_id, 1]
        },
                          schema_name=message.get('chain'))
        if not rpc_tx:
            return

        block_hash = rpc_tx.get('blockhash')
        if not block_hash:
            logger.error('no block hash found in rpc_tx: {}'.format(tx_id[:8]))
            # indicates that block is orphaned?
            return

        block, block_created = Block.objects.get_or_create(hash=block_hash)
        if block_created:
            # save has triggered validation which will parse the full block with tx
            logger.warning('block {} is new when parsing tx {}'.format(
                block, tx_id))
            return

        # get the block too for the index
        rpc_block = send_rpc({
            'method': 'getblock',
            'params': [block_hash]
        },
                             schema_name=message.get('chain'))

        if not rpc_block:
            return

        tx_list = rpc_block.get('tx', [])
        if not tx_list:
            logger.error('problem getting tx_list from block {}'.format(block))
            return

        tx_index = tx_list.index(tx_id)

        try:
            tx = Transaction.objects.get(tx_id=tx_id)
        except Transaction.DoesNotExist:
            logger.warning('tx {} is new.'.format(tx_id[:8]))
            tx = Transaction(tx_id=tx_id, block=block, index=tx_index)
            tx.save(validate=False)

        logger.info('repairing tx {}'.format(tx))

        valid, error_message = tx.validate()

        if valid:
            logger.info('tx {} is valid'.format(tx))
            return

        logger.error('tx {} invalid: {}'.format(tx, error_message))

        if error_message == 'incorrect index':
            tx.index = tx_index
            tx.save()
            logger.info('updated index of {}'.format(tx))
            return

        if error_message == 'no block':
            tx.block = block
            tx.save()
            logger.info('updated block on {}'.format(tx))
            return

        if error_message == 'output has no address':
            for tout in rpc_tx.get('vout', []):
                try:
                    tx_out = tx.outputs.get(index=tout.get('n'))
                except TxOutput.DoesNotExist:
                    logger.warning('output not found: {}'.format(
                        tout.get('n')))
                    tx.save()
                    continue

                script = tout.get('scriptPubKey')
                if not script:
                    logger.warning(
                        'no script found in rpc for output {}'.format(tx_out))
                    continue

                if script.get('type') == 'park':
                    logger.info('park output')
                    park_data = script.get('park', {})
                    tx_out.park_duration = park_data.get('duration')
                    address = park_data.get('unparkaddress')
                else:
                    addresses = script.get('addresses', [])
                    if not addresses:
                        logger.warning(
                            'no addresses found in rpc for output {}'.format(
                                tx_out))
                        continue
                    address = addresses[0]

                address_object, _ = Address.objects.get_or_create(
                    address=address)
                if tx_out.address == address_object:
                    logger.info('output {} already has address {}'.format(
                        tx_out, address))
                    continue
                tx_out.address = address_object
                # update the value too
                tx_out.value = convert_to_satoshis(tout.get('value', 0.0))
                tx_out.save()
                logger.info('added {} to {}'.format(address, tx_out))
            return

        if error_message == 'address missing from previous output' \
                or error_message == 'previous output value is 0':
            scanned_transactions = []
            for tx_in in tx.inputs.all():
                if tx_in.previous_output:
                    if not tx_in.previous_output.address:
                        previous_tx_id = tx_in.previous_output.transaction.tx_id

                        if previous_tx_id in scanned_transactions:
                            continue

                        rpc_prev_tx = send_rpc(
                            {
                                'method': 'getrawtransaction',
                                'params': [previous_tx_id, 1]
                            },
                            schema_name=message.get('chain'))

                        for tout in rpc_prev_tx.get('vout', []):
                            if tout.get('n') != tx_in.previous_output.index:
                                continue
                            script = tout.get('scriptPubKey')

                            if not script:
                                logger.warning(
                                    'no script found in rpc for output {}'.
                                    format(tx_in.previous_output))
                                continue

                            if script.get('type') == 'park':
                                logger.info('park output')
                                park_data = script.get('park', {})
                                tx_in.previous_output.park_duration = park_data.get(
                                    'duration')  # noqa
                                address = park_data.get('unparkaddress')
                            else:
                                addresses = script.get('addresses', [])
                                if not addresses:
                                    logger.warning(
                                        'no addresses found in rpc for output {}'
                                        .format(tx_in.previous_output))
                                    continue
                                address = addresses[0]

                            address_object, _ = Address.objects.get_or_create(
                                address=address)

                            if tx_in.previous_output.address == address_object:
                                logger.info(
                                    'output {} already has address {}'.format(
                                        tx_in.previous_output, address))
                                continue
                            tx_in.previous_output.address = address_object
                            # update the value too
                            tx_in.previous_output.value = convert_to_satoshis(
                                tout.get('value', 0.0))
                            tx_in.previous_output.save()
                            logger.info('added {} to {}'.format(
                                address, tx_in.previous_output))
                            # re-validate transaction too
                            tx_in.previous_output.transaction.save()

                        scanned_transactions.append(previous_tx_id)
            return

        if error_message == 'park output has no duration':
            for tout in rpc_tx.get('vout', []):
                try:
                    tx_out = tx.outputs.get(index=tout.get('n'))
                except TxOutput.DoesNotExist:
                    logger.warning('output not found: {}'.format(
                        tout.get('n')))
                    tx.save()
                    continue

                script = tout.get('scriptPubKey')
                if not script:
                    logger.warning(
                        'no script found in rpc for output {}'.format(tx_out))
                    continue

                if script.get('type') != 'park':
                    continue

                park_data = script.get('park', {})
                tx_out.park_duration = park_data.get('duration')
                address = park_data.get('unparkaddress')
                address_object, _ = Address.objects.get_or_create(
                    address=address)
                tx_out.address = address_object
                tx_out.save()
                logger.info('added park data to {}'.format(tx_out))

        tx.parse_rpc_tx(rpc_tx)
Example #39
0
def get_address_balance(message):
    with schema_context(message.get('chain')):
        addr = message.get('address')