Example #1
0
    def test_normalize_instance_on_off(self):
        """Test normalize_runs for one plain instance with one on/off cycle."""
        # A single power-on/power-off pair: on Jan 9, off Jan 10.
        powered_times = ((
            util_helper.utc_dt(2019, 1, 9, 0, 0, 0),
            util_helper.utc_dt(2019, 1, 10, 0, 0, 0),
        ), )
        events = api_helper.generate_instance_events(self.instance_plain,
                                                     powered_times)
        runs = util.normalize_runs(events)
        # Use assertEqual; assertEquals is a deprecated alias removed in
        # Python 3.12.
        self.assertEqual(len(runs), 1)
        run = runs[0]
        self.assertEqual(run.start_time, powered_times[0][0])
        self.assertEqual(run.end_time, powered_times[0][1])
        self.assertEqual(run.instance_id, self.instance_plain.id)
        self.assertEqual(run.image_id, self.image_plain.id)

        # special flags
        self.assertFalse(run.is_cloud_access)
        self.assertFalse(run.is_encrypted)
        self.assertFalse(run.is_marketplace)

        # rhel detection
        self.assertFalse(run.rhel)
        self.assertFalse(run.rhel_detected)

        # openshift detection
        self.assertFalse(run.openshift)
        self.assertFalse(run.openshift_detected)
Example #2
0
    def test_normalize_one_instance_on_on_off_off(self):
        """
        Test normalize_runs one instance with "overlapping" ons and offs.

        In this special case, the mock data simulates receiving information
        that would indicate two potentially overlapping runs. However, since
        we discard subsequent on and off events, we only generate one run.
        """
        # Second pair starts before the first pair ends (overlapping).
        powered_times = (
            (
                util_helper.utc_dt(2019, 1, 9, 0, 0, 0),
                util_helper.utc_dt(2019, 1, 15, 0, 0, 0),
            ),
            (
                util_helper.utc_dt(2019, 1, 10, 0, 0, 0),
                util_helper.utc_dt(2019, 1, 19, 0, 0, 0),
            ),
        )
        events = api_helper.generate_instance_events(self.instance_plain,
                                                     powered_times)
        # Shuffle to verify normalization does not depend on event order.
        random.shuffle(events)

        runs = util.normalize_runs(events)
        # Use assertEqual; assertEquals is a deprecated alias removed in
        # Python 3.12.
        self.assertEqual(len(runs), 1)
        run = runs[0]
        self.assertEqual(run.start_time, powered_times[0][0])
        self.assertEqual(run.end_time, powered_times[0][1])
        self.assertEqual(run.instance_id, self.instance_plain.id)
        self.assertEqual(run.image_id, self.image_plain.id)
Example #3
0
    def test_normalize_one_instance_on_off_off_off(self):
        """
        Test normalize_runs one instance with one on and multiple offs.

        In this special case, only one run should be created. The first off
        event is the only one relevant to that run; the later off events
        (with no matching on) are discarded.
        """
        powered_times = (
            (
                util_helper.utc_dt(2019, 1, 9, 0, 0, 0),
                util_helper.utc_dt(2019, 1, 10, 0, 0, 0),
            ),
            # "None" start times mean only an off event is generated.
            (None, util_helper.utc_dt(2019, 1, 12, 0, 0, 0)),
            (None, util_helper.utc_dt(2019, 1, 14, 0, 0, 0)),
        )
        events = api_helper.generate_instance_events(self.instance_plain,
                                                     powered_times)
        # Shuffle to verify normalization does not depend on event order.
        random.shuffle(events)

        runs = util.normalize_runs(events)
        # Use assertEqual; assertEquals is a deprecated alias removed in
        # Python 3.12.
        self.assertEqual(len(runs), 1)
        run = runs[0]
        self.assertEqual(run.start_time, powered_times[0][0])
        self.assertEqual(run.end_time, powered_times[0][1])
        self.assertEqual(run.instance_id, self.instance_plain.id)
        self.assertEqual(run.image_id, self.image_plain.id)
Example #4
0
    def test_normalize_one_instance_multiple_on_off(self):
        """Test normalize_runs one instance having multiple on-off cycles."""
        powered_times = (
            (
                util_helper.utc_dt(2019, 1, 9, 0, 0, 0),
                util_helper.utc_dt(2019, 1, 10, 0, 0, 0),
            ),
            (
                util_helper.utc_dt(2019, 1, 11, 0, 0, 0),
                util_helper.utc_dt(2019, 1, 12, 0, 0, 0),
            ),
            # "None" end time means the instance powers on but never off.
            (util_helper.utc_dt(2019, 1, 13, 0, 0, 0), None),
        )
        events = api_helper.generate_instance_events(self.instance_plain,
                                                     powered_times)
        # Shuffle to verify normalization does not depend on event order.
        random.shuffle(events)

        runs = util.normalize_runs(events)
        # Use assertEqual; assertEquals is a deprecated alias removed in
        # Python 3.12.
        self.assertEqual(len(runs), len(powered_times))
        # Runs are not guaranteed ordered; sort to match powered_times.
        sorted_runs = sorted(runs, key=lambda r: r.start_time)
        for index, run in enumerate(sorted_runs):
            self.assertEqual(run.start_time, powered_times[index][0])
            self.assertEqual(run.end_time, powered_times[index][1])
            self.assertEqual(run.instance_id, self.instance_plain.id)
            self.assertEqual(run.image_id, self.image_plain.id)
Example #5
0
 def test_normalize_instance_on_never_off(self):
     """Test normalize_runs for an instance that starts but never stops."""
     # "None" end time produces a single power-on event with no power-off.
     powered_times = ((util_helper.utc_dt(2019, 1, 9, 0, 0, 0), None), )
     events = api_helper.generate_instance_events(self.instance_plain,
                                                  powered_times)
     runs = util.normalize_runs(events)
     # Use assertEqual; assertEquals is a deprecated alias removed in
     # Python 3.12.
     self.assertEqual(len(runs), 1)
     run = runs[0]
     self.assertEqual(run.start_time, powered_times[0][0])
     self.assertEqual(run.end_time, powered_times[0][1])
     self.assertEqual(run.instance_id, self.instance_plain.id)
     self.assertEqual(run.image_id, self.image_plain.id)
Example #6
0
    def handle(self, *args, **options):
        """
        Handle the command execution.

        Deletes all existing Run and ConcurrentUsage objects (after optional
        confirmation), recreates Runs from all InstanceEvents, and queues the
        concurrent-usage recalculation tasks.
        """
        all_runs = Run.objects.all()
        runs_count = all_runs.count()
        all_concurrent_usage = ConcurrentUsage.objects.all()
        concurrent_usage_count = all_concurrent_usage.count()
        if runs_count > 0 or concurrent_usage_count > 0:
            self.stdout.write(
                "Found {} existing Runs and {} Concurrent Usage "
                "objects.".format(runs_count, concurrent_usage_count)
            )
            # Skip the interactive prompt if "--confirm" was given.
            if not options.get("confirm") and not self.confirm(
                runs_count, concurrent_usage_count
            ):
                return False
            all_runs.delete()
            logger.info("Deleted all Run objects.")
            all_concurrent_usage.delete()
            logger.info("Deleted all ConcurrentUsage objects.")

        runs = []
        for instance in tqdm(Instance.objects.all(), desc="Runs for instances"):
            events = InstanceEvent.objects.filter(instance=instance)

            normalized_runs = normalize_runs(events)

            for normalized_run in normalized_runs:
                run = Run(
                    start_time=normalized_run.start_time,
                    end_time=normalized_run.end_time,
                    machineimage_id=normalized_run.image_id,
                    instance_id=normalized_run.instance_id,
                    instance_type=normalized_run.instance_type,
                    memory=normalized_run.instance_memory,
                    vcpu=normalized_run.instance_vcpu,
                )
                # Save individually (not bulk_create) so model signals fire.
                run.save()
                runs.append(run)

        # Lazy %-style args defer string formatting until the record is
        # actually emitted (standard logging best practice).
        logger.info("Created %s runs.", len(runs))
        logger.info("Generating concurrent usage calculation tasks.")
        calculate_max_concurrent_usage_from_runs(runs)
        logger.info("Finished generating concurrent usage calculation tasks.")
        logger.info("Reminder: run the Celery worker to calculate concurrent usages!")
Example #7
0
def process_instance_event(event):
    """
    Process instance events that have been saved during log analysis.

    Args:
        event (InstanceEvent): the newly saved event to process.

    Note:
        When processing power_on type events, this triggers a recalculation of
        ConcurrentUsage objects. If the event is at some point in the
        not-too-recent past, this may take a while as every day since the event
        will get recalculated and saved. We do not anticipate this being a real
        problem in practice, but this has the potential to slow down unit test
        execution over time since their occurred_at values are often static and
        will recede farther into the past from "today", resulting in more days
        needing to recalculate. This effect could be mitigated in tests by
        patching parts of the datetime module that are used to find "today".
    """
    # Any run that starts after, or spans, the event's occurred_at time is
    # affected by this new event and must be recalculated.
    after_run = Q(start_time__gt=event.occurred_at)
    during_run = Q(start_time__lte=event.occurred_at,
                   end_time__gt=event.occurred_at)
    during_run_no_end = Q(start_time__lte=event.occurred_at, end_time=None)

    filters = after_run | during_run | during_run_no_end
    instance = Instance.objects.get(id=event.instance_id)

    if Run.objects.filter(filters, instance=instance).exists():
        recalculate_runs(event)
    elif event.event_type == InstanceEvent.TYPE.power_on:
        # No affected runs exist; a power_on event may begin a brand-new run.
        normalized_runs = normalize_runs([event])
        runs = []
        total = len(normalized_runs)  # hoisted out of the loop
        for index, normalized_run in enumerate(normalized_runs, start=1):
            # Lazy %-style args defer formatting until the record is emitted.
            logger.info("Processing run %s of %s", index, total)
            run = Run(
                start_time=normalized_run.start_time,
                end_time=normalized_run.end_time,
                machineimage_id=normalized_run.image_id,
                instance_id=normalized_run.instance_id,
                instance_type=normalized_run.instance_type,
                memory=normalized_run.instance_memory,
                vcpu=normalized_run.instance_vcpu,
            )
            run.save()
            runs.append(run)
        calculate_max_concurrent_usage_from_runs(runs)
Example #8
0
    def test_normalize_multiple_instances_on_off(self):
        """Test normalize_runs for events for multiple instances and images."""
        rhel_powered_times = ((
            util_helper.utc_dt(2019, 1, 9, 0, 0, 0),
            util_helper.utc_dt(2019, 1, 10, 0, 0, 0),
        ), )
        ocp_powered_times = ((
            util_helper.utc_dt(2019, 1, 8, 0, 0, 0),
            util_helper.utc_dt(2019, 1, 11, 0, 0, 0),
        ), )
        rhel_events = api_helper.generate_instance_events(
            self.instance_rhel, rhel_powered_times)
        ocp_events = api_helper.generate_instance_events(
            self.instance_ocp, ocp_powered_times)

        # force some slight out-of-order shuffling of incoming events.
        events = rhel_events[:-1] + ocp_events[::-1] + rhel_events[-1:]
        random.shuffle(events)

        runs = util.normalize_runs(events)
        # Use assertEqual; assertEquals is a deprecated alias removed in
        # Python 3.12.
        self.assertEqual(len(runs), 2)

        # The two runs come back in no guaranteed order; pick each by flag.
        rhel_run = runs[0] if runs[0].rhel else runs[1]
        ocp_run = runs[0] if runs[0].openshift else runs[1]

        self.assertTrue(rhel_run.rhel)
        self.assertFalse(rhel_run.openshift)
        self.assertEqual(rhel_run.start_time, rhel_powered_times[0][0])
        self.assertEqual(rhel_run.end_time, rhel_powered_times[0][1])
        self.assertEqual(rhel_run.instance_id, self.instance_rhel.id)
        self.assertEqual(rhel_run.image_id, self.image_rhel.id)

        self.assertFalse(ocp_run.rhel)
        self.assertTrue(ocp_run.openshift)
        self.assertEqual(ocp_run.start_time, ocp_powered_times[0][0])
        self.assertEqual(ocp_run.end_time, ocp_powered_times[0][1])
        self.assertEqual(ocp_run.instance_id, self.instance_ocp.id)
        self.assertEqual(ocp_run.image_id, self.image_ocp.id)
Example #9
0
    def __init__(self):
        """Initialize all the data for the examples."""
        api_helper.generate_instance_type_definitions(cloud_type="aws")
        api_helper.generate_instance_type_definitions(cloud_type="azure")

        # Create the customer user and authenticated API clients.
        self.customer_account_number = "100001"
        self.customer_user = util_helper.get_test_user(
            self.customer_account_number, is_superuser=False)
        self.customer_user.date_joined = util_helper.utc_dt(
            2019, 1, 1, 0, 0, 0)
        self.customer_user.save()

        self.customer_client = api_helper.SandboxedRestClient()
        self.customer_client._force_authenticate(self.customer_user)
        self.internal_client = api_helper.SandboxedRestClient(
            api_root="/internal/api/cloudigrade/v1")
        self.internal_client._force_authenticate(self.customer_user)

        self.customer_arn = util_helper.generate_dummy_arn()

        # Times to use for various account and event activity.
        self.now = get_now()
        self.this_morning = self.now.replace(hour=0,
                                             minute=0,
                                             second=0,
                                             microsecond=0)
        self.yesterday = self.this_morning - timedelta(days=1)
        self.last_month = self.this_morning - timedelta(days=31)
        self.last_week = self.this_morning - timedelta(days=7)
        self.three_days_ago = self.this_morning - timedelta(days=3)
        self.two_days_ago = self.this_morning - timedelta(days=2)
        self.two_weeks_ago = self.this_morning - timedelta(weeks=2)
        self.tomorrow = self.this_morning + timedelta(days=1)
        self.next_week = self.this_morning + timedelta(weeks=1)

        ######################################
        # Generate AWS data for the customer user.
        self.aws_customer_account = api_helper.generate_cloud_account(
            arn=util_helper.generate_dummy_arn(),
            user=self.customer_user,
            name="greatest account ever",
            created_at=self.two_weeks_ago,
        )
        self.azure_customer_account = api_helper.generate_cloud_account(
            user=self.customer_user,
            name="meh account",
            created_at=self.two_weeks_ago,
            cloud_type="azure",
            azure_subscription_id=str(seeded_uuid4()),
            azure_tenant_id=str(seeded_uuid4()),
        )
        # Three AWS instances followed by three Azure instances.
        self.customer_instances = [
            api_helper.generate_instance(self.aws_customer_account),
            api_helper.generate_instance(self.aws_customer_account),
            api_helper.generate_instance(self.aws_customer_account),
            api_helper.generate_instance(self.azure_customer_account,
                                         cloud_type="azure"),
            api_helper.generate_instance(self.azure_customer_account,
                                         cloud_type="azure"),
            api_helper.generate_instance(self.azure_customer_account,
                                         cloud_type="azure"),
        ]

        # Generate events so we can see customer activity in the responses.
        # These events represent all customer instances starting one week ago,
        # stopping three days ago, and starting again yesterday.
        self.events = []
        for instance in self.customer_instances[:2]:
            self.events.extend(
                api_helper.generate_instance_events(
                    instance,
                    [
                        (self.last_week, self.three_days_ago),
                        (self.yesterday, None),
                    ],
                ))
        for instance in self.customer_instances[3:6]:
            self.events.extend(
                api_helper.generate_instance_events(
                    instance,
                    [
                        (self.last_week, self.three_days_ago),
                        (self.yesterday, None),
                    ],
                    cloud_type="azure",
                ))

        # Build the runs for the created events.
        # Note: this crude and *direct* implementation of Run-saving should be
        # replaced as we continue porting pilot functionality and (eventually)
        # better general-purpose Run-handling functions materialize.
        normalized_runs = normalize_runs(models.InstanceEvent.objects.all())
        for normalized_run in normalized_runs:
            run = models.Run(
                start_time=normalized_run.start_time,
                end_time=normalized_run.end_time,
                machineimage_id=normalized_run.image_id,
                instance_id=normalized_run.instance_id,
                instance_type=normalized_run.instance_type,
                memory=normalized_run.instance_memory,
                vcpu=normalized_run.instance_vcpu,
            )
            run.save()

        # Force all images to have RHEL detected ("7.7").
        # Set comprehension deduplicates images shared between instances.
        self.images = list({
            instance.machine_image for instance in self.customer_instances
            if instance.machine_image is not None
        })
        for image in self.images:
            image.inspection_json = json.dumps({
                "rhel_enabled_repos_found": True,
                "rhel_version": "7.7",
                "syspurpose": {
                    "role": "Red Hat Enterprise Linux Server",
                    "service_level_agreement": "Premium",
                    "usage": "Development/Test",
                },
            })
            image.status = image.INSPECTED
            image.region = "us-east-1"
            image.save()

        # Pre-calculate concurrent usage data for upcoming requests.
        # Calculate each day since "last week" (oldest date we use in example
        # requests) through next week.
        the_date = self.last_week.date()
        one_day_delta = timedelta(days=1)
        while the_date <= self.next_week.date():
            task_id = f"calculate-concurrent-usage-{seeded_uuid4()}"
            models.ConcurrentUsageCalculationTask.objects.create(
                user_id=self.customer_user.id,
                date=the_date.isoformat(),
                task_id=task_id,
                status=models.ConcurrentUsageCalculationTask.COMPLETE,
            )
            calculate_max_concurrent_usage(the_date, self.customer_user.id)
            the_date = the_date + one_day_delta