Ejemplo n.º 1
0
 def get_response(self):
     """GET the quota timeline stats for this project over a +/-1 minute window."""
     query = {
         'aggregate': 'project',
         'uuid': self.project.uuid.hex,
         'item': 'vcpu',
         'from': core_utils.datetime_to_timestamp(
             timezone.now() - timedelta(minutes=1)),
         'to': core_utils.datetime_to_timestamp(
             timezone.now() + timedelta(minutes=1)),
     }
     return self.client.get(reverse('stats_quota_timeline'), data=query)
Ejemplo n.º 2
0
    def test_alert_list_can_be_filtered_by_created_date(self):
        """Only alerts created inside [created_from, created_to] are listed."""
        project = structure_factories.ProjectFactory(customer=self.customer)
        recent_alert = factories.AlertFactory(
            scope=project, created=timezone.now() - timedelta(days=1))
        stale_alert = factories.AlertFactory(
            scope=project, created=timezone.now() - timedelta(days=3))

        self.client.force_authenticate(self.owner)
        response = self.client.get(
            factories.AlertFactory.get_list_url(),
            data={
                'created_from': core_utils.datetime_to_timestamp(
                    timezone.now() - timedelta(days=2)),
                'created_to': core_utils.datetime_to_timestamp(timezone.now()),
            })

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        returned_uuids = [alert['uuid'] for alert in response.data]
        self.assertIn(recent_alert.uuid.hex, returned_uuids)
        self.assertNotIn(stale_alert.uuid.hex, returned_uuids)
Ejemplo n.º 3
0
    def history(self, request, uuid=None):
        """Return historical snapshots of the quota at requested time points.

        Accepted query parameters: either one or more explicit ``point``
        timestamps, or a ``start``/``end``/``points_count`` triple that is
        expanded into evenly spaced points by HistorySerializer.
        A point with no stored version gets no 'object' key in its entry.
        """
        mapped = {
            'start': request.query_params.get('start'),
            'end': request.query_params.get('end'),
            'points_count': request.query_params.get('points_count'),
            'point_list': request.query_params.getlist('point'),
        }
        # Drop empty parameters so the serializer's defaults can apply.
        # (Renamed from `serializer` -- the old name was clobbered by the
        # model serializer created inside the loop below.)
        history_serializer = HistorySerializer(
            data={k: v
                  for k, v in mapped.items() if v})
        history_serializer.is_valid(raise_exception=True)

        quota = self.get_object()
        serialized_versions = []
        for point_date in history_serializer.get_filter_data():
            serialized = {'point': datetime_to_timestamp(point_date)}
            try:
                version = reversion.get_for_date(quota, point_date)
            except Version.DoesNotExist:
                # No snapshot exists for this point: omit the 'object' key.
                pass
            else:
                version_serializer = self.get_serializer()
                version_serializer.instance = version.object_version.object
                serialized['object'] = version_serializer.data
            serialized_versions.append(serialized)

        return response.Response(serialized_versions,
                                 status=status.HTTP_200_OK)
Ejemplo n.º 4
0
    def get_stats(self, instances, is_paas=False):
        """Fetch segmented item statistics for the given instances from Zabbix.

        :param instances: instances whose monitoring data is queried
        :param is_paas: when True, memory/cpu utilisation is read via the
            zabbix agent items instead of the hypervisor-level ones
        :returns: list of segment dicts from ZabbixDBClient.get_item_stats,
            with storage values inverted and fresh-instance gaps zero-filled
        """
        # NOTE(review): keeps a reference to the raw serializer data --
        # purpose unclear from this block; verify before removing.
        self.attrs = self.data
        item = self.data['item']
        zabbix_db_client = ZabbixDBClient()
        # PaaS deployments expose utilisation through zabbix agent items.
        if is_paas and item == 'memory_util':
            item = 'memory_util_agent'
        if is_paas and item == 'cpu_util':
            item = 'cpu_util_agent'
        item_stats = zabbix_db_client.get_item_stats(
            instances, item, self.data['start_timestamp'], self.data['end_timestamp'], self.data['segments_count'])
        # XXX: Quick and dirty fix: zabbix presents percentage of free space(not utilized) for storage
        if self.data['item'] in ('storage_root_util', 'storage_data_util'):
            for stat in item_stats:
                if 'value' in stat:
                    stat['value'] = 100 - stat['value']

        # XXX: temporary hack: show zero as value if one of the instances was created less then 30 minutes ago and
        # actual value is not available
        def is_instance_newly_created(instance):
            return instance.created > timezone.now() - timedelta(minutes=30)

        if any([is_instance_newly_created(i) for i in instances]) and item_stats:
            # assumes item_stats is ordered newest-first, so index 0 is the
            # most recent segment -- TODO confirm against ZabbixDBClient
            last_segment = item_stats[0]
            if (last_segment['from'] > datetime_to_timestamp(timezone.now() - timedelta(minutes=30)) and
                    'value' not in last_segment):
                last_segment['value'] = 0

        return item_stats
Ejemplo n.º 5
0
    def test_alerts_stats_can_be_filtered_by_time_interval(self):
        """Only alerts created inside the [from, to] interval are counted."""
        # These alerts are too recent for the queried interval.
        for _ in range(3):
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.WARNING,
                scope=self.membership1,
                closed=timezone.now(),
                created=timezone.now() - timedelta(minutes=10))
        # These alerts fall inside the queried interval.
        old_alerts = [
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.WARNING,
                scope=self.membership2,
                closed=timezone.now() - timedelta(minutes=20),
                created=timezone.now() - timedelta(minutes=30))
            for _ in range(2)
        ]

        self.client.force_authenticate(self.customer_owner)
        query = {
            'from': core_utils.datetime_to_timestamp(
                timezone.now() - timedelta(minutes=35)),
            'to': core_utils.datetime_to_timestamp(
                timezone.now() - timedelta(minutes=15)),
        }
        response = self.client.get(self.url, data=query)

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        severity_names = dict(logging_models.Alert.SeverityChoices.CHOICES)
        choices = logging_models.Alert.SeverityChoices
        expected = {
            severity_names[choices.ERROR].lower(): 0,
            severity_names[choices.WARNING].lower(): len(old_alerts),
            severity_names[choices.INFO].lower(): 0,
            severity_names[choices.DEBUG].lower(): 0,
        }
        self.assertItemsEqual(response.data, expected)
Ejemplo n.º 6
0
    def test_endpoint_does_not_return_object_if_date(self):
        """A history point with no stored data yields no 'object' key."""
        two_hours_ago = timezone.now() - timedelta(hours=2)
        point = core_utils.datetime_to_timestamp(two_hours_ago)

        self.client.force_authenticate(self.owner)
        response = self.client.get(self.url, data={'point': point})

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNotIn('object', response.data[0])
Ejemplo n.º 7
0
    def get_item_stats(self, hostid, item, points):
        """Return one value per segment between consecutive ``points``.

        Recent segments are filled from the zabbix history table; segments
        older than the item's history retention come from the trends table.
        A segment with no acceptably fresh record yields ``None``.

        :param hostid: zabbix host id
        :param item: item model (value_type, history, delay, name attrs)
        :param points: segment-boundary timestamps in ascending order
        :raises ZabbixBackendError: if the item value type is non-numerical
        """
        if item.value_type == models.Item.ValueTypes.FLOAT:
            history_table = 'history'
            trend_table = 'trends'
        elif item.value_type == models.Item.ValueTypes.INTEGER:
            # Integer value
            history_table = 'history_uint'
            trend_table = 'trends_uint'
        else:
            raise ZabbixBackendError(
                'Cannot get statistics for non-numerical item %s' % item.name)

        history_retention_days = item.history
        history_delay_seconds = item.delay or self.HISTORY_DELAY_SECONDS
        trend_delay_seconds = self.TREND_DELAY_SECONDS

        # Records older than this timestamp live in the trends table.
        trends_start_date = datetime_to_timestamp(timezone.now() - timedelta(
            days=history_retention_days))

        # Work newest-first; the result is reversed back at the end.
        points = points[::-1]
        history_cursor = self._get_history(item.name, hostid, history_table,
                                           points[-1] - history_delay_seconds,
                                           points[0])
        trends_cursor = self._get_history(item.name, hostid, trend_table,
                                          points[-1] - trend_delay_seconds,
                                          points[0])

        values = []
        if points[0] > trends_start_date:
            next_value = history_cursor.fetchone()
        else:
            next_value = trends_cursor.fetchone()

        for end, start in zip(points[:-1], points[1:]):
            if start > trends_start_date:
                interval = history_delay_seconds
            else:
                interval = trend_delay_seconds

            value = None
            while next_value is not None:
                time, raw_value = next_value

                if time > end:
                    # Record is too new for this segment: advance the cursor.
                    if start > trends_start_date:
                        next_value = history_cursor.fetchone()
                    else:
                        next_value = trends_cursor.fetchone()
                    continue

                # Record belongs to this segment or an older one. Accept it
                # only when it is fresh enough for this segment; otherwise
                # leave value as None and keep the record for a later
                # (older) segment. BUGFIX: the previous version looped
                # forever here when the record was too stale, because
                # neither branch broke out nor advanced the cursor.
                if end - time < interval or time > start:
                    value = self.b2mb(raw_value) if item.is_byte() else raw_value
                break

            values.append(value)
        return values[::-1]
Ejemplo n.º 8
0
    def setUp(self):
        """Create one SLA 'up' and one SLA 'down' transition for today."""
        super(EventsTest, self).setUp()

        today = datetime.date.today()
        shared_kwargs = {
            'period': format_period(today),
            'timestamp': datetime_to_timestamp(today),
        }
        # vm1 transitioned up, vm2 transitioned down, in the same period.
        ResourceSlaStateTransition.objects.create(
            scope=self.vm1, state=True, **shared_kwargs)
        ResourceSlaStateTransition.objects.create(
            scope=self.vm2, state=False, **shared_kwargs)

        self.url = reverse('resource-sla-state-transition-list')
Ejemplo n.º 9
0
    def test_old_version_of_quota_is_available(self):
        """A history point before the change must expose the previous usage."""
        old_usage = self.quota.usage
        self.quota.usage += 1
        self.quota.save()
        half_hour_ago = timezone.now() - timedelta(minutes=30)
        history_timestamp = core_utils.datetime_to_timestamp(half_hour_ago)

        self.client.force_authenticate(self.owner)
        response = self.client.get(self.url, data={'point': history_timestamp})

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data[0]['point'], history_timestamp)
        self.assertEqual(response.data[0]['object']['usage'], old_usage)
Ejemplo n.º 10
0
    def balance_history(self, request, uuid=None):
        """List the customer's balance history records in a time interval.

        Interval defaults to the last 30 days up to now; override with
        'from'/'to' timestamp query parameters.
        """
        month_ago = timezone.now() - timedelta(days=30)
        interval_serializer = core_serializers.TimestampIntervalSerializer(
            data={
                'start': request.query_params.get(
                    'from', datetime_to_timestamp(month_ago)),
                'end': request.query_params.get(
                    'to', datetime_to_timestamp(timezone.now())),
            })
        interval_serializer.is_valid(raise_exception=True)
        interval = interval_serializer.get_filter_data()

        customer = self.get_object()
        history = (models.BalanceHistory.objects
                   .filter(customer=customer,
                           created__gte=interval['start'],
                           created__lte=interval['end'])
                   .order_by('created'))

        serializer = serializers.BalanceHistorySerializer(history, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
Ejemplo n.º 11
0
 def test_staff_receive_project_groups_stats_only_for_given_time_interval(
         self):
     """Two datapoints over 8 days: the new group appears only in the second."""
     # when
     query = {
         'from': core_utils.datetime_to_timestamp(
             timezone.now() - timedelta(days=8)),
         'to': core_utils.datetime_to_timestamp(timezone.now()),
         'datapoints': 2,
         'type': 'project_group',
     }
     response = self.execute_request_with_data(self.staff, query)
     # then
     self.assertEqual(response.status_code, status.HTTP_200_OK)
     self.assertEqual(len(response.data), 2,
                      'Response has to contain 2 datapoints')
     self.assertEqual(response.data[0]['value'], 0,
                      'First datapoint has to contain 0 project_groups')
     self.assertEqual(
         response.data[1]['value'], 1,
         'Second datapoint has to contain 1 project_group(new)')
Ejemplo n.º 12
0
    def test_alerts_can_be_filtered_by_project(self):
        """Stats aggregated by project must count only that project's alerts."""
        project1_alerts = [
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.WARNING,
                scope=self.membership1,
                closed=None,
                created=timezone.now() - timedelta(minutes=1))
            for _ in range(3)
        ]
        # Alerts belonging to project 2 must not be counted.
        for _ in range(2):
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.ERROR,
                scope=self.membership2,
                closed=None,
                created=timezone.now() - timedelta(minutes=1))

        self.client.force_authenticate(self.customer_owner)
        query = {
            'from': core_utils.datetime_to_timestamp(
                timezone.now() - timedelta(minutes=10)),
            'aggregate': 'project',
            'uuid': self.project1.uuid.hex,
        }
        response = self.client.get(self.url, data=query)

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        severity_names = dict(logging_models.Alert.SeverityChoices.CHOICES)
        choices = logging_models.Alert.SeverityChoices
        expected = {
            severity_names[choices.ERROR].lower(): 0,
            severity_names[choices.WARNING].lower(): len(project1_alerts),
            severity_names[choices.INFO].lower(): 0,
            severity_names[choices.DEBUG].lower(): 0,
        }
        self.assertItemsEqual(response.data, expected)
Ejemplo n.º 13
0
    def test_customer_owner_can_see_stats_for_all_alerts_that_are_related_to_his_customer(
            self):
        """Owner stats include alerts from every project of his customer."""
        warning_alerts = [
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.WARNING,
                scope=self.membership1,
                closed=None,
                created=timezone.now() - timedelta(minutes=1))
            for _ in range(3)
        ]
        error_alerts = [
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.ERROR,
                scope=self.membership2,
                closed=None,
                created=timezone.now() - timedelta(minutes=1))
            for _ in range(2)
        ]

        self.client.force_authenticate(self.customer_owner)
        query = {
            'from': core_utils.datetime_to_timestamp(
                timezone.now() - timedelta(minutes=10)),
        }
        response = self.client.get(self.url, data=query)

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        severity_names = dict(logging_models.Alert.SeverityChoices.CHOICES)
        choices = logging_models.Alert.SeverityChoices
        expected = {
            severity_names[choices.ERROR].lower(): len(error_alerts),
            severity_names[choices.WARNING].lower(): len(warning_alerts),
            severity_names[choices.INFO].lower(): 0,
            severity_names[choices.DEBUG].lower(): 0,
        }
        self.assertItemsEqual(response.data, expected)
Ejemplo n.º 14
0
    def get_application_installation_state(self, instance):
        """Return the application installation state of ``instance``.

        :returns: 'OK' when the application reported status 1 within the
            last hour, 'NOT OK' for any other reported value, 'NO DATA'
            when the zabbix host or a recent status record is missing.
        """
        # a shortcut for the IaaS instances -- all done
        if instance.type == Instance.Services.IAAS:
            return 'OK'

        zabbix_api_client = api_client.ZabbixApiClient()
        name = zabbix_api_client.get_host_name(instance)
        api = zabbix_api_client.get_zabbix_api()

        if api.host.exists(host=name):
            hostid = api.host.get(filter={'host': name})[0]['hostid']
            # get installation state from DB: latest status within one hour
            query = r"""
                SELECT
                  hi.value
                FROM zabbix.items it
                  JOIN zabbix.history_uint hi ON hi.itemid = it.itemid
                WHERE
                  it.key_ = %(key_)s
                AND
                  it.hostid = %(hostid)s
                AND
                  hi.clock > %(time)s
                ORDER BY hi.clock DESC
                LIMIT 1
            """
            parameters = {
                'key_': zabbix_api_client._settings.get(
                    'application-status-item', 'application.status'),
                'hostid': hostid,
                'time': datetime_to_timestamp(
                    timezone.now() - timedelta(hours=1)),
            }
            try:
                value = self.execute_query(query, parameters)[0][0]
            except IndexError:
                # Query returned no rows: nothing reported in the last hour.
                return 'NO DATA'
            return 'OK' if value == 1 else 'NOT OK'
        else:
            # BUGFIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning(
                'Cannot retrieve installation state of instance %s. Host does not exist.',
                instance)
            return 'NO DATA'
Ejemplo n.º 15
0
    def get_stats(self, user):
        """Count visible objects of the requested model per creation datetime
        and fold the counts into the requested number of time segments."""
        start = core_utils.timestamp_to_datetime(self.data['start_timestamp'])
        end = core_utils.timestamp_to_datetime(self.data['end_timestamp'])

        model = self.MODEL_CLASSES[self.data['model_name']]
        visible = filters.filter_queryset_for_user(model.objects.all(), user)
        counts_per_datetime = (
            visible
            .filter(created__gte=start, created__lte=end)
            .values('created')
            .annotate(count=django_models.Count('id', distinct=True)))

        time_and_value_list = [
            (core_utils.datetime_to_timestamp(row['created']), row['count'])
            for row in counts_per_datetime
        ]

        return core_utils.format_time_and_value_to_segment_list(
            time_and_value_list, self.data['segments_count'],
            self.data['start_timestamp'], self.data['end_timestamp'])
Ejemplo n.º 16
0
    def setUp(self):
        """Create old (10 days) and new (1 day) customers with matching
        project groups and projects, plus users in assorted roles."""
        # Customers.
        self.old_customer = factories.CustomerFactory(
            created=timezone.now() - timedelta(days=10))
        self.new_customer = factories.CustomerFactory(
            created=timezone.now() - timedelta(days=1))
        # Project groups.
        self.old_project_group = factories.ProjectGroupFactory(
            customer=self.old_customer,
            created=timezone.now() - timedelta(days=10))
        self.new_project_group = factories.ProjectGroupFactory(
            customer=self.new_customer,
            created=timezone.now() - timedelta(days=1))
        # Projects.
        self.old_projects = factories.ProjectFactory.create_batch(
            3,
            created=timezone.now() - timedelta(days=10),
            customer=self.old_customer)
        self.new_projects = factories.ProjectFactory.create_batch(
            3,
            created=timezone.now() - timedelta(days=1),
            customer=self.new_customer)
        # Users and their roles.
        self.staff = factories.UserFactory(is_staff=True)
        self.old_customer_owner = factories.UserFactory()
        self.old_customer.add_user(
            self.old_customer_owner, models.CustomerRole.OWNER)
        self.new_project_group_manager = factories.UserFactory()
        self.new_project_group.add_user(
            self.new_project_group_manager, models.ProjectGroupRole.MANAGER)
        self.all_projects_admin = factories.UserFactory()
        for project in self.old_projects + self.new_projects:
            project.add_user(
                self.all_projects_admin, models.ProjectRole.ADMINISTRATOR)

        self.url = reverse('stats_creation_time')
        self.default_data = {
            'from': core_utils.datetime_to_timestamp(
                timezone.now() - timedelta(days=12)),
            'datapoints': 2,
        }
Ejemplo n.º 17
0
    def test_instances_alerts_are_counted_in_project_alerts(self):
        """Alerts raised on an instance roll up into its project's stats."""
        instance = factories.InstanceFactory(
            cloud_project_membership=self.membership1)
        instances_alerts = [
            logging_factories.AlertFactory(
                severity=logging_models.Alert.SeverityChoices.WARNING,
                scope=instance,
                closed=None,
                created=timezone.now() - timedelta(minutes=1))
            for _ in range(3)
        ]

        self.client.force_authenticate(self.project1_admin)
        query = {
            'from': core_utils.datetime_to_timestamp(
                timezone.now() - timedelta(minutes=10)),
        }
        response = self.client.get(self.url, data=query)

        self.assertEqual(response.status_code, status.HTTP_200_OK)

        severity_names = dict(logging_models.Alert.SeverityChoices.CHOICES)
        choices = logging_models.Alert.SeverityChoices
        expected = {
            severity_names[choices.ERROR].lower(): 0,
            severity_names[choices.WARNING].lower(): len(instances_alerts),
            severity_names[choices.INFO].lower(): 0,
            severity_names[choices.DEBUG].lower(): 0,
        }
        self.assertItemsEqual(response.data, expected)
Ejemplo n.º 18
0
    def get_stats(self, instance, start, end):
        """Collect per-item max/aggregate values for an instance from Zabbix,
        inverting storage percentages and zero-filling fresh instances."""
        # PaaS instances report memory through the zabbix agent item.
        is_paas = instance.type == models.Instance.Services.PAAS
        items = [
            'memory_util_agent' if (is_paas and item == 'memory_util') else item
            for item in self.validated_data['items']
        ]
        method = self.validated_data['method']
        host = ZabbixApiClient().get_host_name(instance)

        records = ZabbixDBClient().get_host_max_values(
            host, items, start, end, method=method)

        results = []
        for timestamp, item, value in records:
            # XXX: Quick and dirty fix: zabbix presents percentage of free space(not utilized) for storage
            if item in ('storage_root_util', 'storage_data_util'):
                value = 100 - value
            results.append({
                'item': item,
                'timestamp': timestamp,
                'value': value,
            })

        # XXX: temporary hack: show zero as value if instance was created less then 30 minutes ago and actual value
        # is not available
        items_with_values = {record['item'] for record in results}
        missing_items = set(items) - items_with_values
        timestamp = datetime_to_timestamp(timezone.now())
        if missing_items and instance.created > timezone.now() - timedelta(minutes=30):
            results.extend(
                {'item': item, 'timestamp': timestamp, 'value': 0}
                for item in missing_items)
        return results
Ejemplo n.º 19
0
 def to_representation(self, value):
     """Serialize a datetime *value* as a unix timestamp."""
     return utils.datetime_to_timestamp(value)
Ejemplo n.º 20
0
    def test_zabbix_is_called_with_right_parameters(self):
        """The endpoint must ask zabbix for memory_util_agent (not memory_util)
        and invert storage percentages (zabbix reports free space)."""
        self.client.force_authenticate(self.staff)
        moment = 1415910025
        usage = [
            (moment, 'cpu_util', 10),
            (moment, 'memory_util', 22),
            (moment, 'memory_util_agent', 21),
            (moment, 'storage_root_util', 23),
            (moment, 'storage_data_util', 33),
        ]
        # Storage values come back as (100 - reported free percentage).
        expected = [
            {'item': 'cpu_util', 'value': 10, 'timestamp': moment},
            {'item': 'memory_util', 'value': 22, 'timestamp': moment},
            {'item': 'storage_root_util', 'value': 77, 'timestamp': moment},
            {'item': 'storage_data_util', 'value': 67, 'timestamp': moment},
            {'item': 'memory_util_agent', 'value': 21, 'timestamp': moment},
        ]

        with patch(
                'nodeconductor.monitoring.zabbix.db_client.ZabbixDBClient.get_host_max_values'
        ) as client:
            client.return_value = usage
            query_params = {
                'from': core_utils.datetime_to_timestamp(
                    timezone.now() - timedelta(days=10)),
                'to': core_utils.datetime_to_timestamp(
                    timezone.now() - timedelta(days=5)),
            }
            response = self.client.get(self.url, data=query_params)

            self.assertEqual(status.HTTP_200_OK, response.status_code)
            self.assertItemsEqual(expected, response.data)

            client.assert_called_once_with(
                self.instance.backend_id,
                ['cpu_util', 'memory_util_agent', 'storage_root_util',
                 'storage_data_util'],
                query_params['from'],
                query_params['to'],
                method='MAX',
            )
Ejemplo n.º 21
0
 def setUp(self):
     # Fixture: a moment one day in the past, plus its timestamp form.
     self.datetime = utils.timeshift(days=-1)
     self.timestamp = utils.datetime_to_timestamp(self.datetime)
Ejemplo n.º 22
0
    def history(self, request, uuid=None):
        """
        Historical data endpoints could be available for any objects (currently
        implemented for quotas and events count). The data is available at *<object_endpoint>/history/*,
        for example: */api/quotas/<uuid>/history/*.

        There are two ways to define datetime points for historical data.

        1. Send one or more *?point=<timestamp>* parameters. Response will contain historical data for
            each given point in the same order.
        2. Send *?start=<timestamp>*, *?end=<timestamp>*, *?points_count=<integer>* parameters.
           Result will contain <points_count> points from <start> to <end>.

        Response format:

        .. code-block:: javascript

            [
                {
                    "point": <timestamp>,
                    "object": {<object_representation>}
                },
                {
                    "point": <timestamp>,
                    "object": {<object_representation>}
                },
            ...
            ]

        NB! There will not be any "object" for corresponding point in response if there
        is no data about object for a given timestamp.
        """
        # Drop empty query parameters so serializer defaults can apply.
        mapped = {
            'start': request.query_params.get('start'),
            'end': request.query_params.get('end'),
            'points_count': request.query_params.get('points_count'),
            'point_list': request.query_params.getlist('point'),
        }
        history_serializer = HistorySerializer(
            data={k: v
                  for k, v in mapped.items() if v})
        history_serializer.is_valid(raise_exception=True)

        quota = self.get_object()
        serializer = self.get_serializer(quota)
        serialized_versions = []
        for point_date in history_serializer.get_filter_data():
            serialized = {'point': datetime_to_timestamp(point_date)}
            # Versions stored at or before the requested point.
            # NOTE(review): .first() relies on the queryset's default
            # ordering to yield the relevant version -- confirm.
            version = Version.objects.get_for_object(quota).filter(
                revision__date_created__lte=point_date)
            if version.exists():
                # make copy of serialized data and update field that are stored in version
                version_object = version.first()._object_version.object
                serialized['object'] = serializer.data.copy()
                serialized['object'].update({
                    f: getattr(version_object, f)
                    for f in quota.get_version_fields()
                })
            serialized_versions.append(serialized)
        return response.Response(serialized_versions,
                                 status=status.HTTP_200_OK)
Ejemplo n.º 23
0
 def datetime_to_elasticsearch_timestamp(self, dt):
     """ Convert a datetime to an Elasticsearch timestamp.

     Elasticsearch calculates timestamps in milliseconds, hence the x1000.
     """
     return datetime_to_timestamp(dt) * 1000
Ejemplo n.º 24
0
    def get_item_stats(self, instances, item, start_timestamp, end_timestamp,
                       segments_count):
        """Split [start_timestamp, end_timestamp] into ``segments_count``
        segments and attach a zabbix value for ``item`` to each segment.

        Recent data comes from the history table; data older than the
        configured range comes from the coarser trends table.

        :returns: list of dicts {'from': ts, 'to': ts[, 'value': v]};
            empty list when no zabbix hosts exist for the instances.
        """
        # FIXME: Quick and dirty hack to handle storage in a separate flow
        # XXX: "Storage" item is deprecated it will be removed soon (need to confirm Portal usage)
        if item == 'storage':
            return self.get_storage_stats(instances, start_timestamp,
                                          end_timestamp, segments_count)

        host_ids = []
        try:
            host_ids = self.zabbix_api_client.get_host_ids(instances)
        except errors.ZabbixError:
            logger.warning('Failed to get a Zabbix host for instances %s',
                           instances)

        # return an empty list if no hosts were found
        if not host_ids:
            return []

        zabbix_settings = getattr(settings, 'NODECONDUCTOR',
                                  {}).get('MONITORING', {}).get('ZABBIX', {})
        # Settings are given in minutes; convert to seconds.
        HISTORY_RECORDS_INTERVAL = zabbix_settings.get(
            'HISTORY_RECORDS_INTERVAL', 15) * 60
        TRENDS_RECORDS_INTERVAL = zabbix_settings.get(
            'TRENDS_RECORDS_INTERVAL', 60) * 60
        # NOTE(review): variable is named HISTORY_DATE_RANGE but reads the
        # 'TRENDS_DATE_RANGE' settings key -- confirm which is intended.
        HISTORY_DATE_RANGE = timedelta(
            hours=zabbix_settings.get('TRENDS_DATE_RANGE', 48))

        item_key = self.items[item]['key']
        item_history_table = self.items[item]['table']
        item_trends_table = 'trends' if item_history_table == 'history' else 'trends_uint'
        convert_to_mb = self.items[item]['convert_to_mb']
        # Records older than this timestamp are looked up in trends.
        trends_start_date = datetime_to_timestamp(timezone.now() -
                                                  HISTORY_DATE_RANGE)
        try:
            history_cursor = self.get_cursor(
                host_ids, [item_key],
                item_history_table,
                start_timestamp,
                end_timestamp,
                convert_to_mb,
                min_interval=HISTORY_RECORDS_INTERVAL)
            trends_cursor = self.get_cursor(
                host_ids, [item_key],
                item_trends_table,
                start_timestamp,
                end_timestamp,
                convert_to_mb,
                min_interval=TRENDS_RECORDS_INTERVAL)

            interval = ((end_timestamp - start_timestamp) / segments_count)
            # Segment boundaries, newest point first.
            points = [
                start_timestamp + interval * i
                for i in range(segments_count + 1)
            ][::-1]

            segment_list = []
            # NOTE(review): chooses the initial cursor from points[1] while
            # the analogous per-host code uses the newest point (index 0) --
            # verify which is intended.
            if points[1] > trends_start_date:
                next_value = history_cursor.fetchone()
            else:
                next_value = trends_cursor.fetchone()

            for end, start in zip(points[:-1], points[1:]):
                segment = {'from': start, 'to': end}
                interval = HISTORY_RECORDS_INTERVAL if start > trends_start_date else TRENDS_RECORDS_INTERVAL

                while True:
                    if next_value is None:
                        break
                    time, value = next_value

                    if time <= end:
                        # Accept only a record close enough to this segment;
                        # otherwise keep it for an older segment.
                        if end - time < interval or time > start:
                            segment['value'] = value
                        break
                    else:
                        # Record too new for this segment: advance the cursor.
                        if start > trends_start_date:
                            next_value = history_cursor.fetchone()
                        else:
                            next_value = trends_cursor.fetchone()

                segment_list.append(segment)

            return segment_list
        except DatabaseError as e:
            logger.exception('Can not execute query the Zabbix DB.')
            six.reraise(errors.ZabbixError, e, sys.exc_info()[2])