Example #1
class ColumnListProfileViewSet(OrgValidateMixin,
                               SEEDOrgNoPatchOrOrgCreateModelViewSet):
    """
    API endpoint for returning Column List Profiles

    create:
        Create a new list profile. The list of columns is an array of column primary keys. If using
        Swagger, this will be entered as a list with returns between each primary key.

        JSON POST Example:

            {
                "name": "some new name 3",
                "profile_location": "List View Profile",
                "inventory_type": "Tax Lot",
                "columns": [
                    {"id": 1, "pinned": false, "order": 10},
                    {"id": 5, "pinned": true, "order": 14},
                    {"id": 7, "pinned": true, "order": 14}
                ]
            }

    """
    serializer_class = ColumnListProfileSerializer
    model = ColumnListProfile
    filter_backends = (ColumnListProfileFilterBackend, )
    pagination_class = None

    # force_parent = True  # Ideally the column list profiles would inherit from the parent,
    # but not yet.

    # Overridden to augment with protected ComStock list profile if enabled
    def retrieve(self, request, *args, **kwargs):
        org_id = self.get_organization(self.request)

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'organization with id %s does not exist' % org_id
                },
                status=status.HTTP_404_NOT_FOUND)

        if not org.comstock_enabled or kwargs['pk'] != 'null':
            return super(ColumnListProfileViewSet,
                         self).retrieve(request, *args, **kwargs)

        result = {
            'status': 'success',
            'data': {
                'id': None,
                'name': 'ComStock',
                'profile_location': VIEW_LOCATION_TYPES[VIEW_LIST][1],
                'inventory_type':
                VIEW_LIST_INVENTORY_TYPE[VIEW_LIST_PROPERTY][1],
                'columns': self.list_comstock_columns(org_id)
            }
        }

        return JsonResponse(result, status=status.HTTP_200_OK)

    # Overridden to augment with protected ComStock list profile if enabled
    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the user's (default) current org id"
        ),
        AutoSchemaHelper.query_string_field(
            name='inventory_type',
            required=True,
            description="'Property' or 'Tax Lot' for filtering."),
        AutoSchemaHelper.query_string_field(
            name='profile_location',
            required=True,
            description=
            "'List View Profile' or 'Detail View Profile' for filtering."),
    ])
    def list(self, request, *args, **kwargs):
        org_id = self.get_organization(self.request)

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'organization with id %s does not exist' % org_id
                },
                status=status.HTTP_404_NOT_FOUND)

        inventory_type = request.query_params.get('inventory_type')
        profile_location = request.query_params.get('profile_location')
        if not org.comstock_enabled or inventory_type == 'Tax Lot' or profile_location == 'Detail View Profile':
            return super(ColumnListProfileViewSet,
                         self).list(request, *args, **kwargs)

        queryset = self.filter_queryset(self.get_queryset())

        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)

        results = list(queryset)
        base_profiles = self.get_serializer(results, many=True).data

        # Add ComStock columns
        base_profiles.append({
            "id": None,
            "name": "ComStock",
            "profile_location": profile_location,
            "inventory_type": inventory_type,
            "columns": self.list_comstock_columns(org_id)
        })

        return Response(base_profiles)

    @staticmethod
    def list_comstock_columns(org_id):
        comstock_columns = Column.objects.filter(organization_id=org_id, comstock_mapping__isnull=False) \
            .order_by('comstock_mapping')

        results = []
        for index, column in enumerate(comstock_columns):
            results.append({
                "id": column.id,
                "pinned": False,
                "order": index + 1,
                "column_name": column.column_name,
                "table_name": column.table_name,
                "comstock_mapping": column.comstock_mapping
            })

        return results
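
A minimal client-side sketch of the `create` call documented in the docstring above. The route prefix, organization id, and auth scheme are assumptions; adjust them for the actual deployment.

    import requests

    # Hypothetical endpoint and credentials
    url = 'https://seed.example.com/api/v3/column_list_profiles/'
    payload = {
        'name': 'some new name 3',
        'profile_location': 'List View Profile',
        'inventory_type': 'Tax Lot',
        'columns': [
            {'id': 1, 'pinned': False, 'order': 10},
            {'id': 5, 'pinned': True, 'order': 14},
            {'id': 7, 'pinned': True, 'order': 14},
        ],
    }
    response = requests.post(
        url,
        params={'organization_id': 1},  # assumed org id query parameter
        json=payload,
        auth=('user@example.com', 'api-key'),  # assumed auth scheme
    )
    response.raise_for_status()
    print(response.json())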
Example #2
class OrganizationViewSet(viewsets.ViewSet):
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['DELETE'])
    def columns(self, request, pk=None):
        """
        Delete all columns for an organization. This is typically not recommended if there is
        data in the inventory, as it invalidates all extra_data fields. It also removes all
        existing column mappings.

        ---
        parameters:
            - name: pk
              description: The organization_id
              required: true
              paramType: path
        type:
            status:
                description: success or error
                type: string
                required: true
            column_mappings_deleted_count:
                description: Number of column_mappings that were deleted
                type: integer
                required: true
            columns_deleted_count:
                description: Number of columns that were deleted
                type: integer
                required: true
        """
        try:
            org = Organization.objects.get(pk=pk)
            c_count, cm_count = Column.delete_all(org)
            return JsonResponse({
                'status': 'success',
                'column_mappings_deleted_count': cm_count,
                'columns_deleted_count': c_count,
            })
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'organization with id {} does not exist'.format(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_integer_field('import_file_id',
                                                 required=True,
                                                 description='Import file id'),
            openapi.Parameter('id',
                              openapi.IN_PATH,
                              type=openapi.TYPE_INTEGER,
                              description='Organization id'),
        ],
        request_body=SaveColumnMappingsRequestPayloadSerializer,
        responses={200: 'success response'})
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def column_mappings(self, request, pk=None):
        """
        Saves the mappings between the raw headers of an ImportFile and the
        destination fields in the `to_table_name` model which should be either
        PropertyState or TaxLotState

        Valid source_type values are found in ``seed.models.SEED_DATA_SOURCES``
        """
        import_file_id = request.query_params.get('import_file_id')
        if import_file_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Query param `import_file_id` is required'
                },
                status=status.HTTP_400_BAD_REQUEST)
        try:
            _ = ImportFile.objects.get(pk=import_file_id)
            organization = Organization.objects.get(pk=pk)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'No import file found'
                },
                status=status.HTTP_404_NOT_FOUND)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'No organization found'
                },
                status=status.HTTP_404_NOT_FOUND)

        result = Column.create_mappings(request.data.get('mappings',
                                                         []), organization,
                                        request.user, import_file_id)

        if result:
            return JsonResponse({'status': 'success'})
        else:
            return JsonResponse({'status': 'error'})
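
    # A plausible `mappings` payload for column_mappings above (the canonical
    # shape is defined by SaveColumnMappingsRequestPayloadSerializer; this is
    # only an illustrative sketch):
    #
    #     {
    #         "mappings": [
    #             {
    #                 "from_field": "Address 1",
    #                 "to_field": "address_line_1",
    #                 "to_table_name": "PropertyState"
    #             }
    #         ]
    #     }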

    def _start_whole_org_match_merge_link(self,
                                          org_id,
                                          state_class_name,
                                          proposed_columns=None):
        # avoid the mutable-default-argument pitfall
        if proposed_columns is None:
            proposed_columns = []
        identifier = randint(100, 100000)
        result_key = _get_match_merge_link_key(identifier)
        set_cache_raw(result_key, {})

        progress_data = ProgressData(func_name='org_match_merge_link',
                                     unique_id=identifier)
        progress_data.delete()

        whole_org_match_merge_link.apply_async(
            args=(org_id, state_class_name, proposed_columns),
            link=cache_match_merge_link_result.s(identifier,
                                                 progress_data.key))

        return progress_data.key

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_boolean_field(
            'brief',
            required=False,
            description='If true, only return high-level organization details')
    ])
    @api_endpoint_class
    @ajax_request_class
    def list(self, request):
        """
        Retrieves all orgs the user has access to.
        """

        # if brief==true only return high-level organization details
        brief = json.loads(request.query_params.get('brief', 'false'))

        if brief:
            if request.user.is_superuser:
                qs = Organization.objects.only('id', 'name', 'parent_org_id')
            else:
                qs = request.user.orgs.only('id', 'name', 'parent_org_id')

            orgs = _dict_org_brief(request, qs)
            if len(orgs) == 0:
                return JsonResponse(
                    {
                        'status':
                        'error',
                        'message':
                        'Your SEED account is not associated with any organizations. '
                        'Please contact a SEED administrator.'
                    },
                    status=status.HTTP_401_UNAUTHORIZED)
            else:
                return JsonResponse({'organizations': orgs})
        else:
            if request.user.is_superuser:
                qs = Organization.objects.all()
            else:
                qs = request.user.orgs.all()

            orgs = _dict_org(request, qs)
            if len(orgs) == 0:
                return JsonResponse(
                    {
                        'status':
                        'error',
                        'message':
                        'Your SEED account is not associated with any organizations. '
                        'Please contact a SEED administrator.'
                    },
                    status=status.HTTP_401_UNAUTHORIZED)
            else:
                return JsonResponse({'organizations': orgs})

    @method_decorator(permission_required('seed.can_access_admin'))
    @api_endpoint_class
    @ajax_request_class
    def destroy(self, request, pk=None):
        """
        Starts a background task to delete an organization and all related data.
        """

        return JsonResponse(tasks.delete_organization(pk))

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def retrieve(self, request, pk=None):
        """
        Retrieves a single organization by id.
        """
        org_id = pk

        if org_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'no organization_id sent'
                },
                status=status.HTTP_400_BAD_REQUEST)

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization does not exist'
                },
                status=status.HTTP_404_NOT_FOUND)
        if (not request.user.is_superuser
                and not OrganizationUser.objects.filter(
                    user=request.user,
                    organization=org,
                    role_level__in=[ROLE_OWNER, ROLE_MEMBER, ROLE_VIEWER
                                    ]).exists()):
            # TODO: better permission and return 401 or 403
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'user is not the owner of the org'
                },
                status=status.HTTP_403_FORBIDDEN)

        return JsonResponse({
            'status': 'success',
            'organization': _dict_org(request, [org])[0],
        })

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'organization_name': 'string',
            'user_id': 'integer',
        },
        required=['organization_name', 'user_id'],
        description='Properties:\n'
        '- organization_name: The new organization name\n'
        '- user_id: The user ID (primary key) to be used as the owner of the new organization'
    ))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_parent_org_owner')
    def create(self, request):
        """
        Creates a new organization.
        """
        body = request.data
        user = User.objects.get(pk=body['user_id'])
        org_name = body['organization_name']

        if Organization.objects.filter(name=org_name).exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization name already exists'
                },
                status=status.HTTP_409_CONFLICT)

        org, _, _ = create_organization(user, org_name, org_name)
        return JsonResponse({
            'status': 'success',
            'message': 'organization created',
            'organization': _dict_org(request, [org])[0]
        })

    @api_endpoint_class
    @ajax_request_class
    @method_decorator(permission_required('seed.can_access_admin'))
    @action(detail=True, methods=['DELETE'])
    def inventory(self, request, pk=None):
        """
        Starts a background task to delete all properties & taxlots
        in an org.
        """
        return JsonResponse(tasks.delete_organization_inventory(pk))

    @swagger_auto_schema(
        request_body=SaveSettingsSerializer, )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_owner')
    @action(detail=True, methods=['PUT'])
    def save_settings(self, request, pk=None):
        """
        Saves an organization's settings: name, query threshold, shared fields, etc
        """
        body = request.data
        org = Organization.objects.get(pk=pk)
        posted_org = body.get('organization', None)
        if posted_org is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'malformed request'
                },
                status=status.HTTP_400_BAD_REQUEST)

        desired_threshold = posted_org.get('query_threshold', None)
        if desired_threshold is not None:
            org.query_threshold = desired_threshold

        desired_name = posted_org.get('name', None)
        if desired_name is not None:
            org.name = desired_name

        def is_valid_choice(choice_tuples, s):
            """choice_tuples is std model ((value, label), ...)"""
            return (s is not None) and (s in [
                choice[0] for choice in choice_tuples
            ])
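
        # e.g. with hypothetical choice tuples:
        #   is_valid_choice((('kBtu/ft**2/year', 'kBtu/sq. ft./year'),), 'kBtu/ft**2/year') -> True
        #   is_valid_choice((('kBtu/ft**2/year', 'kBtu/sq. ft./year'),), 'kWh/m**2/year') -> False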

        def warn_bad_pint_spec(kind, unit_string):
            if unit_string is not None:
                _log.warning("got bad {0} unit string {1} for org {2}".format(
                    kind, unit_string, org.name))

        desired_display_units_eui = posted_org.get('display_units_eui')
        if is_valid_choice(Organization.MEASUREMENT_CHOICES_EUI,
                           desired_display_units_eui):
            org.display_units_eui = desired_display_units_eui
        else:
            warn_bad_pint_spec('eui', desired_display_units_eui)

        desired_display_units_area = posted_org.get('display_units_area')
        if is_valid_choice(Organization.MEASUREMENT_CHOICES_AREA,
                           desired_display_units_area):
            org.display_units_area = desired_display_units_area
        else:
            warn_bad_pint_spec('area', desired_display_units_area)

        desired_display_significant_figures = posted_org.get(
            'display_significant_figures')
        if isinstance(
                desired_display_significant_figures,
                int) and desired_display_significant_figures >= 0:  # noqa
            org.display_significant_figures = desired_display_significant_figures
        elif desired_display_significant_figures is not None:
            _log.warn("got bad sig figs {0} for org {1}".format(
                desired_display_significant_figures, org.name))

        desired_display_meter_units = posted_org.get('display_meter_units')
        if desired_display_meter_units:
            org.display_meter_units = desired_display_meter_units

        desired_thermal_conversion_assumption = posted_org.get(
            'thermal_conversion_assumption')
        if is_valid_choice(Organization.THERMAL_CONVERSION_ASSUMPTION_CHOICES,
                           desired_thermal_conversion_assumption):
            org.thermal_conversion_assumption = desired_thermal_conversion_assumption

        # Update MapQuest API Key if it's been changed
        mapquest_api_key = posted_org.get('mapquest_api_key', '')
        if mapquest_api_key != org.mapquest_api_key:
            org.mapquest_api_key = mapquest_api_key

        comstock_enabled = posted_org.get('comstock_enabled', False)
        if comstock_enabled != org.comstock_enabled:
            org.comstock_enabled = comstock_enabled

        org.save()

        # Update the selected exportable fields.
        new_public_column_names = posted_org.get('public_fields', None)
        if new_public_column_names is not None:
            old_public_columns = Column.objects.filter(
                organization=org, shared_field_type=Column.SHARED_PUBLIC)
            # turn off sharing in the old_pub_fields
            for col in old_public_columns:
                col.shared_field_type = Column.SHARED_NONE
                col.save()

            # for now just iterate over this to grab the new columns.
            for col in new_public_column_names:
                new_col = Column.objects.filter(organization=org, id=col['id'])
                if len(new_col) == 1:
                    new_col = new_col.first()
                    new_col.shared_field_type = Column.SHARED_PUBLIC
                    new_col.save()

        return JsonResponse({'status': 'success'})

    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def query_threshold(self, request, pk=None):
        """
        Returns the "query_threshold" for an org.  Searches from
        members of sibling orgs must return at least this many buildings
        from orgs they do not belong to, or else buildings from orgs they
        don't belong to will be removed from the results.
        """
        org = Organization.objects.get(pk=pk)
        return JsonResponse({
            'status': 'success',
            'query_threshold': org.query_threshold
        })

    @swagger_auto_schema(responses={200: SharedFieldsReturnSerializer})
    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def shared_fields(self, request, pk=None):
        """
        Retrieves all fields marked as shared for the organization. Will only return used fields.
        """
        result = {'status': 'success', 'public_fields': []}

        columns = Column.retrieve_all(pk, 'property', True)
        for c in columns:
            if c['sharedFieldType'] == 'Public':
                new_column = {
                    'table_name': c['table_name'],
                    'name': c['name'],
                    'column_name': c['column_name'],
                    # this is the field name in the db; the other name can have a tax_ prefix
                    'display_name': c['display_name']
                }
                result['public_fields'].append(new_column)

        return JsonResponse(result)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'sub_org_name': 'string',
            'sub_org_owner_email': 'string',
        },
        required=['sub_org_name', 'sub_org_owner_email'],
        description='Properties:\n'
        '- sub_org_name: Name of the new sub organization\n'
        '- sub_org_owner_email: Email of the owner of the sub organization, which must already exist',
    ))
    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['POST'])
    def sub_org(self, request, pk=None):
        """
        Creates a child org of a parent org.
        """
        body = request.data
        org = Organization.objects.get(pk=pk)
        email = body['sub_org_owner_email'].lower()
        try:
            user = User.objects.get(username=email)
        except User.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'User with email address (%s) does not exist' % email
                },
                status=status.HTTP_400_BAD_REQUEST)

        created, mess_or_org, _ = create_suborganization(
            user, org, body['sub_org_name'], ROLE_OWNER)
        if created:
            return JsonResponse({
                'status': 'success',
                'organization_id': mess_or_org.pk
            })
        else:
            return JsonResponse({
                'status': 'error',
                'message': mess_or_org
            },
                                status=status.HTTP_409_CONFLICT)

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def matching_criteria_columns(self, request, pk=None):
        """
        Retrieve all matching criteria columns for an org.
        """
        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        matching_criteria_column_names = dict(
            org.column_set.filter(
                is_matching_criteria=True).values('table_name').annotate(
                    column_names=ArrayAgg('column_name')).values_list(
                        'table_name', 'column_names'))

        return JsonResponse(matching_criteria_column_names)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {'inventory_type': 'string'},
        required=['inventory_type'],
        description='Properties:\n'
        '- inventory_type: either "properties" or "taxlots"'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def match_merge_link(self, request, pk=None):
        """
        Run match_merge_link for an org.
        """
        inventory_type = request.data.get('inventory_type', None)
        if inventory_type not in ['properties', 'taxlots']:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Provided inventory type should either be "properties" or "taxlots".'
                },
                status=status.HTTP_404_NOT_FOUND)

        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        state_class_name = 'PropertyState' if inventory_type == 'properties' else 'TaxLotState'

        progress_key = self._start_whole_org_match_merge_link(
            org.id, state_class_name)

        return JsonResponse({'progress_key': progress_key})

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'inventory_type': 'string',
            'add': ['string'],
            'remove': ['string'],
        },
        required=['inventory_type'],
        description='Properties:\n'
        '- inventory_type: either "properties" or "taxlots"\n'
        '- add: list of column names\n'
        '- remove: list of column names'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def match_merge_link_preview(self, request, pk=None):
        """
        Run match_merge_link preview for an org and record type.
        """
        inventory_type = request.data.get('inventory_type', None)
        if inventory_type not in ['properties', 'taxlots']:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Provided inventory type should either be "properties" or "taxlots".'
                },
                status=status.HTTP_404_NOT_FOUND)

        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        state_class_name = 'PropertyState' if inventory_type == 'properties' else 'TaxLotState'

        current_columns = matching_criteria_column_names(
            org.id, state_class_name)

        add = set(request.data.get('add', []))
        remove = set(request.data.get('remove', []))

        provided_columns = Column.objects.filter(
            column_name__in=add.union(remove),
            organization_id=org.id,
            table_name=state_class_name)
        if provided_columns.count() != (len(add) + len(remove)):
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Invalid column names provided.'
                },
                status=status.HTTP_404_NOT_FOUND)

        proposed_columns = current_columns.union(add).difference(remove)

        progress_key = self._start_whole_org_match_merge_link(
            org.id, state_class_name, list(proposed_columns))

        return JsonResponse({'progress_key': progress_key})

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_integer_field(
            'match_merge_link_id',
            required=True,
            description='ID of match merge link')
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def match_merge_link_result(self, request, pk=None):
        try:
            Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        identifier = request.query_params['match_merge_link_id']
        result_key = _get_match_merge_link_key(identifier)

        # using unsafe serialization b/c the result might not be a dict
        return JsonResponse(get_cache_raw(result_key), safe=False)

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def geocoding_columns(self, request, pk=None):
        """
        Retrieve all geocoding columns for an org.
        """
        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        geocoding_columns_qs = org.column_set.\
            filter(geocoding_order__gt=0).\
            order_by('geocoding_order').\
            values('table_name', 'column_name')

        geocoding_columns = {
            'PropertyState': [],
            'TaxLotState': [],
        }

        for col in geocoding_columns_qs:
            geocoding_columns[col['table_name']].append(col['column_name'])

        return JsonResponse(geocoding_columns)

    def get_cycles(self, start, end, organization_id):
        if not isinstance(start, type(end)):
            raise TypeError('start and end must be the same type')
        # if start/end are ints (or convertible to ints), assume they are cycle ids
        try:
            start = int(start)
            end = int(end)
        except ValueError:
            # assume the string is a JS-style date
            if isinstance(start, str):
                start_datetime = dateutil.parser.parse(start)
                end_datetime = dateutil.parser.parse(end)
            else:
                raise Exception('Date is not a string')
        # get date times from cycles
        if isinstance(start, int):
            cycle = Cycle.objects.get(pk=start,
                                      organization_id=organization_id)
            start_datetime = cycle.start
            if start == end:
                end_datetime = cycle.end
            else:
                end_datetime = Cycle.objects.get(
                    pk=end, organization_id=organization_id).end
        return Cycle.objects.filter(
            start__gte=start_datetime,
            end__lte=end_datetime,
            organization_id=organization_id).order_by('start')
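
    # get_cycles accepts either cycle primary keys or date strings, e.g.
    # (hypothetical values):
    #     self.get_cycles(1, 3, org_id)  # explicit cycle id range
    #     self.get_cycles('2018-01-01T00:00:00-08:00',
    #                     '2018-12-31T23:53:00-08:00', org_id)  # parsed via dateutil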

    def get_data(self, property_view, x_var, y_var):
        result = None
        state = property_view.state
        if getattr(state, x_var, None) and getattr(state, y_var, None):
            result = {
                "id": property_view.property_id,
                "x": getattr(state, x_var),
                "y": getattr(state, y_var),
            }
        return result

    def get_raw_report_data(self, organization_id, cycles, x_var, y_var,
                            campus_only):
        all_property_views = PropertyView.objects.select_related(
            'property',
            'state').filter(property__organization_id=organization_id,
                            cycle_id__in=cycles)
        organization = Organization.objects.get(pk=organization_id)
        results = []
        for cycle in cycles:
            property_views = all_property_views.filter(cycle_id=cycle)
            count_total = []
            count_with_data = []
            data = []
            for property_view in property_views:
                property_pk = property_view.property_id
                if property_view.property.campus and campus_only:
                    count_total.append(property_pk)
                    result = self.get_data(property_view, x_var, y_var)
                    if result:
                        result['yr_e'] = cycle.end.strftime('%Y')
                        data.append(result)
                        count_with_data.append(property_pk)
                elif not property_view.property.campus:
                    count_total.append(property_pk)
                    result = self.get_data(property_view, x_var, y_var)
                    if result:
                        result['yr_e'] = cycle.end.strftime('%Y')
                        de_unitted_result = apply_display_unit_preferences(
                            organization, result)
                        data.append(de_unitted_result)
                        count_with_data.append(property_pk)
            result = {
                "cycle_id": cycle.pk,
                "chart_data": data,
                "property_counts": {
                    "yr_e": cycle.end.strftime('%Y'),
                    "num_properties": len(count_total),
                    "num_properties_w-data": len(count_with_data),
                },
            }
            results.append(result)
        return results

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var', required=True, description='Raw column name for y axis'),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
        AutoSchemaHelper.query_string_field(
            'campus_only',
            required=False,
            description='If true, includes campuses'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report(self, request, pk=None):
        """
        Retrieve a summary report for charting x vs y
        """
        campus_only = json.loads(
            request.query_params.get('campus_only', 'false'))
        params = {}
        missing_params = []
        error = ''
        for param in ['x_var', 'y_var', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
        else:
            cycles = self.get_cycles(params['start'], params['end'], pk)
            data = self.get_raw_report_data(pk, cycles, params['x_var'],
                                            params['y_var'], campus_only)
            property_counts = []
            chart_data = []
            for datum in data:
                property_counts.append(datum['property_counts'])
                chart_data.extend(datum['chart_data'])
            data = {
                'property_counts': property_counts,
                'chart_data': chart_data,
            }
            result = {'status': 'success', 'data': data}
            status_code = status.HTTP_200_OK
        return Response(result, status=status_code)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var',
            required=True,
            description=
            'Raw column name for y axis, must be one of: "gross_floor_area", "use_description", "year_built"'
        ),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
        AutoSchemaHelper.query_string_field(
            'campus_only',
            required=False,
            description='If true, includes campuses'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report_aggregated(self, request, pk=None):
        """
        Retrieve a summary report for charting x vs y aggregated by y_var
        """
        campus_only = json.loads(
            request.query_params.get('campus_only', 'false'))
        valid_y_values = ['gross_floor_area', 'use_description', 'year_built']
        params = {}
        missing_params = []
        empty = True
        error = ''
        for param in ['x_var', 'y_var', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            elif param == 'y_var' and val not in valid_y_values:
                error = "{} {} is not a valid value for {}.".format(
                    error, val, param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
        else:
            cycles = self.get_cycles(params['start'], params['end'], pk)
            x_var = params['x_var']
            y_var = params['y_var']
            data = self.get_raw_report_data(pk, cycles, x_var, y_var,
                                            campus_only)
            for datum in data:
                if datum['property_counts']['num_properties_w-data'] != 0:
                    empty = False
                    break
            if empty:
                result = {'status': 'error', 'message': 'No data found'}
                status_code = status.HTTP_404_NOT_FOUND
        # only assemble chart output when data was found and no error occurred
        if not empty and not error:
            chart_data = []
            property_counts = []
            for datum in data:
                buildings = datum['chart_data']
                yr_e = datum['property_counts']['yr_e']
                chart_data.extend(self.aggregate_data(yr_e, y_var, buildings))
                property_counts.append(datum['property_counts'])
            # Send back to client
            aggregated_data = {
                'chart_data': chart_data,
                'property_counts': property_counts
            }
            result = {
                'status': 'success',
                'aggregated_data': aggregated_data,
            }
            status_code = status.HTTP_200_OK
        return Response(result, status=status_code)

    def aggregate_data(self, yr_e, y_var, buildings):
        aggregation_method = {
            'use_description': self.aggregate_use_description,
            'year_built': self.aggregate_year_built,
            'gross_floor_area': self.aggregate_gross_floor_area,
        }
        return aggregation_method[y_var](yr_e, buildings)

    def aggregate_use_description(self, yr_e, buildings):
        # Group buildings in this year_ending group into uses
        chart_data = []
        grouped_uses = defaultdict(list)
        for b in buildings:
            grouped_uses[str(b['y']).lower()].append(b)

        # Now iterate over use groups to make each chart item
        for use, buildings_in_uses in grouped_uses.items():
            chart_data.append({
                'x': median([b['x'] for b in buildings_in_uses]),
                'y': use.capitalize(),
                'yr_e': yr_e
            })
        return chart_data
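
    # e.g. buildings [{'x': 10, 'y': 'Office'}, {'x': 20, 'y': 'office'}] collapse
    # into one point {'x': 15, 'y': 'Office', 'yr_e': yr_e} (median of x per use).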

    def aggregate_year_built(self, yr_e, buildings):
        # Group buildings in this year_ending group into decades
        chart_data = []
        grouped_decades = defaultdict(list)
        for b in buildings:
            grouped_decades['%s0' % str(b['y'])[:-1]].append(b)

        # Now iterate over decade groups to make each chart item
        for decade, buildings_in_decade in grouped_decades.items():
            chart_data.append({
                'x':
                median([b['x'] for b in buildings_in_decade]),
                'y':
                '%s-%s' % (decade, '%s9' % str(decade)[:-1]),  # 1990-1999
                'yr_e':
                yr_e
            })
        return chart_data
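
    # e.g. a building with y=1987 lands in grouped_decades['1980'] and is charted
    # against the decade label '1980-1989'.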

    def aggregate_gross_floor_area(self, yr_e, buildings):
        chart_data = []
        y_display_map = {
            0: '0-99k',
            100000: '100-199k',
            200000: '200k-299k',
            300000: '300k-399k',
            400000: '400-499k',
            500000: '500-599k',
            600000: '600-699k',
            700000: '700-799k',
            800000: '800-899k',
            900000: '900-999k',
            1000000: 'over 1,000k',
        }
        max_bin = max(y_display_map)

        # Group buildings in this year_ending group into ranges
        grouped_ranges = defaultdict(list)
        for b in buildings:
            area = b['y']
            # make sure anything greater than the biggest bin gets put in
            # the biggest bin
            range_bin = min(max_bin, round_down_hundred_thousand(area))
            grouped_ranges[range_bin].append(b)

        # Now iterate over range groups to make each chart item
        for range_floor, buildings_in_range in grouped_ranges.items():
            chart_data.append({
                'x': median([b['x'] for b in buildings_in_range]),
                'y': y_display_map[range_floor],
                'yr_e': yr_e
            })
        return chart_data
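
    # e.g. an area of 250,000 rounds down to the 200,000 bin ('200k-299k'), while
    # 1,250,000 is clamped to the top bin and charted as 'over 1,000k'.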

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field('x_label',
                                            required=True,
                                            description='Label for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var', required=True, description='Raw column name for y axis'),
        AutoSchemaHelper.query_string_field('y_label',
                                            required=True,
                                            description='Label for y axis'),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report_export(self, request, pk=None):
        """
        Export a report as a spreadsheet
        """
        params = {}
        missing_params = []
        error = ''
        for param in ['x_var', 'x_label', 'y_var', 'y_label', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
            return Response(result, status=status_code)

        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename="report-data"'

        # Create WB
        output = BytesIO()
        wb = Workbook(output, {'remove_timezone': True})

        # Create sheets
        count_sheet = wb.add_worksheet('Counts')
        base_sheet = wb.add_worksheet('Raw')
        agg_sheet = wb.add_worksheet('Agg')

        # Enable bold format and establish starting cells
        bold = wb.add_format({'bold': True})
        data_row_start = 0
        data_col_start = 0

        # Write all headers across all sheets
        count_sheet.write(data_row_start, data_col_start, 'Year Ending', bold)
        count_sheet.write(data_row_start, data_col_start + 1,
                          'Properties with Data', bold)
        count_sheet.write(data_row_start, data_col_start + 2,
                          'Total Properties', bold)

        base_sheet.write(data_row_start, data_col_start, 'ID', bold)
        base_sheet.write(data_row_start, data_col_start + 1,
                         request.query_params.get('x_label'), bold)
        base_sheet.write(data_row_start, data_col_start + 2,
                         request.query_params.get('y_label'), bold)
        base_sheet.write(data_row_start, data_col_start + 3, 'Year Ending',
                         bold)

        agg_sheet.write(data_row_start, data_col_start,
                        request.query_params.get('x_label'), bold)
        agg_sheet.write(data_row_start, data_col_start + 1,
                        request.query_params.get('y_label'), bold)
        agg_sheet.write(data_row_start, data_col_start + 2, 'Year Ending',
                        bold)

        # Gather base data
        cycles = self.get_cycles(params['start'], params['end'], pk)
        data = self.get_raw_report_data(pk, cycles, params['x_var'],
                                        params['y_var'], False)

        base_row = data_row_start + 1
        agg_row = data_row_start + 1
        count_row = data_row_start + 1

        for cycle_results in data:
            total_count = cycle_results['property_counts']['num_properties']
            with_data_count = cycle_results['property_counts'][
                'num_properties_w-data']
            yr_e = cycle_results['property_counts']['yr_e']

            # Write Counts
            count_sheet.write(count_row, data_col_start, yr_e)
            count_sheet.write(count_row, data_col_start + 1, with_data_count)
            count_sheet.write(count_row, data_col_start + 2, total_count)

            count_row += 1

            # Write Base/Raw Data
            data_rows = cycle_results['chart_data']
            for datum in data_rows:
                base_sheet.write(base_row, data_col_start, datum.get('id'))
                base_sheet.write(base_row, data_col_start + 1, datum.get('x'))
                base_sheet.write(base_row, data_col_start + 2, datum.get('y'))
                base_sheet.write(base_row, data_col_start + 3,
                                 datum.get('yr_e'))

                base_row += 1

            # Gather and write Agg data
            for agg_datum in self.aggregate_data(yr_e, params['y_var'],
                                                 data_rows):
                agg_sheet.write(agg_row, data_col_start, agg_datum.get('x'))
                agg_sheet.write(agg_row, data_col_start + 1,
                                agg_datum.get('y'))
                agg_sheet.write(agg_row, data_col_start + 2,
                                agg_datum.get('yr_e'))

                agg_row += 1

        wb.close()

        xlsx_data = output.getvalue()

        response.write(xlsx_data)

        return response

    @has_perm_class('requires_member')
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def geocode_api_key_exists(self, request, pk=None):
        """
        Returns true if the organization has a mapquest api key
        """
        org = Organization.objects.get(id=pk)

        return bool(org.mapquest_api_key)
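
A hedged sketch of pulling the charting report from the `report` endpoint above. The route prefix, column names, and auth are deployment-specific assumptions.

    import requests

    ORG_ID = 1  # hypothetical organization id
    response = requests.get(
        f'https://seed.example.com/api/v3/organizations/{ORG_ID}/report/',
        params={
            'x_var': 'site_eui',  # raw PropertyState column names (assumed)
            'y_var': 'gross_floor_area',
            'start': '2018-01-01T00:00:00-08:00',
            'end': '2018-12-31T23:53:00-08:00',
        },
        auth=('user@example.com', 'api-key'),  # assumed auth scheme
    )
    response.raise_for_status()
    for point in response.json()['data']['chart_data']:
        print(point['id'], point['x'], point['y'])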
Example #3
class TaxLotPropertyViewSet(GenericViewSet):
    """
    The TaxLotProperty viewset returns the properties and tax lots from the join table.
    It presently only handles the CSV-style export, but should eventually be extended to
    be the viewset for any tax lot / property join API call.
    """
    renderer_classes = (JSONRenderer, )
    serializer_class = TaxLotPropertySerializer

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field("cycle_id", True, "Cycle ID"),
            AutoSchemaHelper.query_string_field(
                "inventory_type", False,
                "Either 'taxlots' or 'properties' and defaults to 'properties'."
            ),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'ids': ['integer'],
                'filename': 'string',
                'export_type': 'string',
                'profile_id': 'integer'
            },
            description='- ids: (View) IDs for records to be exported\n'
            '- filename: desired filename including extension (defaulting to \'ExportedData.{export_type}\')\n'
            '- export_type: \'csv\', \'geojson\', or \'xlsx\' (defaulting to \'csv\')\n'
            '- profile_id: Column List Profile ID to use for customizing fields included in export'
        ),
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['POST'])
    def export(self, request):
        """
        Download a collection of the TaxLot and Properties in multiple formats.
        """
        cycle_pk = request.query_params.get('cycle_id', None)
        if not cycle_pk:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Must pass in cycle_id as query parameter'
                },
                status=status.HTTP_400_BAD_REQUEST)
        org_id = request.query_params['organization_id']
        profile_id = request.data.get('profile_id')
        if profile_id in ('None', ''):
            profile_id = None

        # get the class to operate on and the relationships
        view_klass_str = request.query_params.get('inventory_type',
                                                  'properties')
        view_klass = INVENTORY_MODELS[view_klass_str]

        # Set the first column to be the ID
        column_name_mappings = OrderedDict([('id', 'ID')])
        column_ids, add_column_name_mappings, columns_from_database = ColumnListProfile.return_columns(
            org_id, profile_id, view_klass_str)
        column_name_mappings.update(add_column_name_mappings)
        select_related = ['state', 'cycle']
        ids = request.data.get('ids', [])
        filter_str = {'cycle': cycle_pk}
        if hasattr(view_klass, 'property'):
            select_related.append('property')
            prefetch_related = ['labels']
            filter_str = {'property__organization_id': org_id}
            if ids:
                filter_str['id__in'] = ids
            # always export the labels and notes
            column_name_mappings['property_notes'] = 'Property Notes'
            column_name_mappings['property_labels'] = 'Property Labels'

        elif hasattr(view_klass, 'taxlot'):
            select_related.append('taxlot')
            prefetch_related = ['labels']
            filter_str = {'taxlot__organization_id': org_id}
            if ids:
                filter_str['id__in'] = ids
            # always export the labels and notes
            column_name_mappings['taxlot_notes'] = 'Tax Lot Notes'
            column_name_mappings['taxlot_labels'] = 'Tax Lot Labels'

        model_views = view_klass.objects.select_related(
            *select_related).prefetch_related(*prefetch_related).filter(
                **filter_str).order_by('id')

        # get the data in a dict which includes the related data
        data = TaxLotProperty.get_related(model_views, column_ids,
                                          columns_from_database)

        # add labels and notes
        for i, record in enumerate(model_views):
            label_string = []
            note_string = []
            for label in list(record.labels.all().order_by('name')):
                label_string.append(label.name)
            for note in list(record.notes.all().order_by('created')):
                note_string.append(note.created.astimezone().strftime(
                    "%Y-%m-%d %I:%M:%S %p") + "\n" + note.text)

            if hasattr(record, 'property'):
                data[i]['property_labels'] = ','.join(label_string)
                data[i]['property_notes'] = '\n----------\n'.join(note_string)
            elif hasattr(record, 'taxlot'):
                data[i]['taxlot_labels'] = ','.join(label_string)
                data[i]['taxlot_notes'] = '\n----------\n'.join(note_string)

        # force the data into the same order as the IDs
        if ids:
            order_dict = {obj_id: index for index, obj_id in enumerate(ids)}
            if view_klass_str == 'properties':
                view_id_str = 'property_view_id'
            else:
                view_id_str = 'taxlot_view_id'
            data.sort(key=lambda inventory_obj: order_dict[inventory_obj[
                view_id_str]])

        export_type = request.data.get('export_type', 'csv')

        filename = request.data.get('filename', f"ExportedData.{export_type}")

        if export_type == "csv":
            return self._csv_response(filename, data, column_name_mappings)
        elif export_type == "geojson":
            return self._json_response(filename, data, column_name_mappings)
        elif export_type == "xlsx":
            return self._spreadsheet_response(filename, data,
                                              column_name_mappings)
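
    # A plausible export request (illustrative only; the route and ids are
    # hypothetical, and the body is validated against the schema above):
    #
    #     POST /api/v3/tax_lot_properties/export/?organization_id=1&cycle_id=2&inventory_type=properties
    #     {
    #         "ids": [3, 4, 5],
    #         "filename": "ExportedData.xlsx",
    #         "export_type": "xlsx",
    #         "profile_id": 7
    #     }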

    def _csv_response(self, filename, data, column_name_mappings):
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        writer = csv.writer(response)

        # Make sure the first header cell isn't 'ID' (it can be 'id' or 'iD'):
        # Excel misdetects a CSV whose first bytes are "ID" as a SYLK file.
        header = list(column_name_mappings.values())
        if header[0] == 'ID':
            header[0] = 'id'
        writer.writerow(header)

        # iterate over the results to preserve column order and write row.
        for datum in data:
            row = []
            for column in column_name_mappings:
                row_result = datum.get(column, None)

                # Try grabbing the value out of the related field if not found yet.
                if row_result is None and datum.get('related'):
                    row_result = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON Encoder, but that isn't here).
                if isinstance(row_result, ureg.Quantity):
                    row_result = row_result.magnitude
                elif isinstance(row_result, datetime.datetime):
                    row_result = row_result.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(row_result, datetime.date):
                    row_result = row_result.strftime("%Y-%m-%d")
                row.append(row_result)

            writer.writerow(row)

        return response
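
    # Example of the 'related' fallback above: given a property row such as
    #     {"id": 1, "related": [{"taxlot_city": "Denver"}]}
    # a tax lot column like 'taxlot_city' (a hypothetical key) is missing from
    # the top-level record, so its value is pulled from the first related
    # (tax lot) record instead.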

    def _spreadsheet_response(self, filename, data, column_name_mappings):
        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        scenario_keys = ('id', 'name', 'description',
                         'annual_site_energy_savings',
                         'annual_source_energy_savings', 'annual_cost_savings',
                         'analysis_state', 'analysis_state_message',
                         'annual_electricity_savings',
                         'annual_natural_gas_savings', 'annual_site_energy',
                         'annual_source_energy', 'annual_natural_gas_energy',
                         'annual_electricity_energy', 'annual_peak_demand',
                         'annual_site_energy_use_intensity',
                         'annual_source_energy_use_intensity')
        scenario_key_mappings = {
            'annual_site_energy_savings': 'annual_site_energy_savings_mmbtu',
            'annual_source_energy_savings':
            'annual_source_energy_savings_mmbtu',
            'annual_cost_savings': 'annual_cost_savings_dollars',
            'annual_site_energy': 'annual_site_energy_kbtu',
            'annual_site_energy_use_intensity':
            'annual_site_energy_use_intensity_kbtu_ft2',
            'annual_source_energy': 'annual_source_energy_kbtu',
            'annual_source_energy_use_intensity':
            'annual_source_energy_use_intensity_kbtu_ft2',
            'annual_natural_gas_energy': 'annual_natural_gas_energy_mmbtu',
            'annual_electricity_energy': 'annual_electricity_energy_mmbtu',
            'annual_peak_demand': 'annual_peak_demand_kw',
            'annual_electricity_savings': 'annual_electricity_savings_kbtu',
            'annual_natural_gas_savings': 'annual_natural_gas_savings_kbtu'
        }

        property_measure_keys = ('id', 'property_measure_name', 'measure_id',
                                 'cost_mv', 'cost_total_first',
                                 'cost_installation', 'cost_material',
                                 'cost_capital_replacement',
                                 'cost_residual_value')
        measure_keys = ('name', 'display_name', 'category',
                        'category_display_name')
        # find measures and scenarios
        for record in data:
            measures = PropertyMeasure.objects.filter(
                property_state_id=record['property_state_id'])
            record['measures'] = measures

            scenarios = Scenario.objects.filter(
                property_state_id=record['property_state_id'])
            record['scenarios'] = scenarios

        output = io.BytesIO()
        wb = xlsxwriter.Workbook(output, {'remove_timezone': True})

        # add tabs
        ws1 = wb.add_worksheet('Properties')
        ws2 = wb.add_worksheet('Measures')
        ws3 = wb.add_worksheet('Scenarios')
        ws4 = wb.add_worksheet('Scenario Measure Join Table')
        ws5 = wb.add_worksheet('Meter Readings')
        bold = wb.add_format({'bold': True})

        row = 0
        row2 = 0
        col2 = 0
        row3 = 0
        col3 = 0
        row4 = 0
        row5 = 0

        for index, val in enumerate(list(column_name_mappings.values())):
            # Do not write the first header cell as 'ID'; this causes issues
            # when the file is opened in Excel (same workaround as the CSV export).
            if index == 0 and val == 'ID':
                ws1.write(row, index, 'id', bold)
            else:
                ws1.write(row, index, val, bold)

        # write the static headers for the join table (ws4) and the meter
        # readings tab (ws5) once, rather than on every iteration
        ws4.write('A1', 'property_id', bold)
        ws4.write('B1', 'scenario_id', bold)
        ws4.write('C1', 'measure_id', bold)

        ws5.write('A1', 'scenario_id', bold)
        ws5.write('B1', 'meter_id', bold)
        ws5.write('C1', 'type', bold)
        ws5.write('D1', 'start_time', bold)
        ws5.write('E1', 'end_time', bold)
        ws5.write('F1', 'reading', bold)
        ws5.write('G1', 'units', bold)
        ws5.write('H1', 'is_virtual', bold)
        # datetime formatting for meter readings
        date_format = wb.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'})

        # iterate over the results to preserve column order and write each row;
        # measure/scenario headers are written lazily on the first record that
        # has any
        add_m_headers = True
        add_s_headers = True
        for datum in data:
            row += 1
            record_id = None
            for index, column in enumerate(column_name_mappings):
                if column == 'id':
                    record_id = datum.get(column, None)

                row_result = datum.get(column, None)

                # Try grabbing the value out of the related field if not found yet.
                if row_result is None and datum.get('related'):
                    row_result = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON Encoder, but that isn't here).
                if isinstance(row_result, ureg.Quantity):
                    row_result = row_result.magnitude
                elif isinstance(row_result, datetime.datetime):
                    row_result = row_result.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(row_result, datetime.date):
                    row_result = row_result.strftime("%Y-%m-%d")
                ws1.write(row, index, row_result)

            # measures
            for m in datum['measures']:
                if add_m_headers:
                    # grab headers
                    for key in property_measure_keys:
                        ws2.write(row2, col2, key, bold)
                        col2 += 1
                    for key in measure_keys:
                        ws2.write(row2, col2, 'measure ' + key, bold)
                        col2 += 1
                    add_m_headers = False

                row2 += 1
                col2 = 0
                for key in property_measure_keys:
                    ws2.write(row2, col2, getattr(m, key))
                    col2 += 1
                for key in measure_keys:
                    ws2.write(row2, col2, getattr(m.measure, key))
                    col2 += 1

            # scenarios (and join table rows)
            for s in datum['scenarios']:
                scenario_id = s.id
                if add_s_headers:
                    # grab headers
                    for key in scenario_keys:
                        # double check scenario_key_mappings in case a different header is desired
                        if key in scenario_key_mappings.keys():
                            key = scenario_key_mappings[key]
                        ws3.write(row3, col3, key, bold)
                        col3 += 1
                    add_s_headers = False
                row3 += 1
                col3 = 0
                for key in scenario_keys:
                    ws3.write(row3, col3, getattr(s, key))
                    col3 += 1

                for sm in s.measures.all():
                    row4 += 1
                    ws4.write(row4, 0, record_id)
                    ws4.write(row4, 1, scenario_id)
                    ws4.write(row4, 2, sm.id)

            # scenario meter readings
            for s in datum['scenarios']:
                scenario_id = s.id
                # retrieve meters
                meters = Meter.objects.filter(scenario_id=scenario_id)
                for m in meters:
                    # retrieve readings
                    readings = MeterReading.objects.filter(
                        meter_id=m.id).order_by('start_time')
                    for r in readings:
                        row5 += 1
                        ws5.write(row5, 0, scenario_id)
                        ws5.write(row5, 1, m.id)
                        # use the energy type enum to determine the reading type
                        the_type = next((item[1] for item in Meter.ENERGY_TYPES
                                         if item[0] == m.type), None)
                        ws5.write(row5, 2, the_type)
                        ws5.write_datetime(row5, 3, r.start_time, date_format)
                        ws5.write_datetime(row5, 4, r.end_time, date_format)
                        ws5.write(row5, 5,
                                  r.reading)  # this is now a float field
                        ws5.write(row5, 6, r.source_unit)
                        ws5.write(row5, 7, m.is_virtual)

        wb.close()

        # xlsx_data contains the Excel file
        xlsx_data = output.getvalue()

        response.write(xlsx_data)
        return response
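
    # Reading the exported workbook back (a sketch; assumes pandas plus an
    # xlsx engine such as openpyxl is installed):
    #
    #     import pandas as pd
    #     sheets = pd.read_excel('ExportedData.xlsx', sheet_name=None)
    #     # join measures to scenarios through the join-table tab
    #     merged = sheets['Scenario Measure Join Table'].merge(
    #         sheets['Scenarios'], left_on='scenario_id', right_on='id')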

    def _json_response(self, filename, data, column_name_mappings):
        polygon_fields = [
            "bounding_box", "centroid", "property_footprint",
            "taxlot_footprint", "long_lat"
        ]
        features = []

        # extract related records
        related_records = self._extract_related(data)

        # append related_records to data
        complete_data = data + related_records

        for datum in complete_data:
            feature = {"type": "Feature", "properties": {}}

            for key, value in datum.items():
                if value is None:
                    continue

                if isinstance(value, ureg.Quantity):
                    value = value.magnitude
                elif isinstance(value, datetime.datetime):
                    value = value.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(value, datetime.date):
                    value = value.strftime("%Y-%m-%d")

                if value and any(k in key for k in polygon_fields):
                    """
                    If object is a polygon and is populated, add the 'geometry'
                    key-value-pair in the appropriate GeoJSON format.
                    When the first geometry is added, the correct format is
                    established. When/If a second geometry is added, this is
                    appended alongside the previous geometry.
                    """
                    individual_geometry = {}

                    # long_lat
                    if key == 'long_lat':
                        coordinates = self._serialized_point(value)
                        # point
                        individual_geometry = {
                            "coordinates": coordinates,
                            "type": "Point"
                        }
                    else:
                        # polygons
                        coordinates = self._serialized_coordinates(value)
                        individual_geometry = {
                            "coordinates": [coordinates],
                            "type": "Polygon"
                        }

                    if feature.get("geometry", None) is None:
                        feature["geometry"] = {
                            "type": "GeometryCollection",
                            "geometries": [individual_geometry]
                        }
                    else:
                        feature["geometry"]["geometries"].append(
                            individual_geometry)
                else:
                    """
                    Non-polygon data
                    """
                    display_key = column_name_mappings.get(key, key)
                    feature["properties"][display_key] = value

                    # # store point geometry in case you need it
                    # if display_key == "Longitude":
                    #     point_geometry[0] = value
                    # if display_key == "Latitude":
                    #     point_geometry[1] = value
            """
            Before appending feature, ensure that if there is no geometry recorded.
            Note that the GeoJson will not render if no lat/lng
            """

            # add style information
            if feature["properties"].get("property_state_id") is not None:
                feature["properties"]["stroke"] = "#185189"  # buildings color
            elif feature["properties"].get("taxlot_state_id") is not None:
                feature["properties"]["stroke"] = "#10A0A0"  # buildings color
            feature["properties"]["marker-color"] = "#E74C3C"
            # feature["properties"]["stroke-width"] = 3
            feature["properties"]["fill-opacity"] = 0

            # append feature
            features.append(feature)

        # build the FeatureCollection once, after all features are collected
        response_dict = {
            "type": "FeatureCollection",
            "crs": {
                "type": "EPSG",
                "properties": {
                    "code": 4326
                }
            },
            "features": features
        }

        response = JsonResponse(response_dict)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        return response
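
    # A sketch of a single GeoJSON feature produced above (hypothetical values):
    #
    #     {
    #         "type": "Feature",
    #         "properties": {"Address Line 1": "100 Main St",
    #                        "stroke": "#185189",
    #                        "marker-color": "#E74C3C",
    #                        "fill-opacity": 0},
    #         "geometry": {"type": "GeometryCollection",
    #                      "geometries": [{"type": "Point",
    #                                      "coordinates": [-105.27, 40.01]}]}
    #     }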

    def _serialized_coordinates(self, polygon_wkt):
        string_coord_pairs = polygon_wkt.lstrip('POLYGON (').rstrip(')').split(
            ', ')

        coordinates = []
        for coord_pair in string_coord_pairs:
            float_coords = [float(coord) for coord in coord_pair.split(' ')]
            coordinates.append(float_coords)

        return coordinates

    def _serialized_point(self, point_wkt):
        string_coords = point_wkt.lstrip('POINT (').rstrip(')').split(', ')

        coordinates = []
        for coord in string_coords[0].split(' '):
            coordinates.append(float(coord))

        return coordinates
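
    # Both WKT helpers above rely on str.lstrip/rstrip, which strip *character
    # sets*, not prefixes; this happens to work because coordinate digits and
    # signs are not in the stripped sets. For example:
    #
    #     _serialized_coordinates('POLYGON ((30 10, 40 40, 20 40, 30 10))')
    #     # -> [[30.0, 10.0], [40.0, 40.0], [20.0, 40.0], [30.0, 10.0]]
    #     _serialized_point('POINT (-105.2705 40.015)')
    #     # -> [-105.2705, 40.015]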

    def _extract_related(self, data):
        # extract all related records into a separate, de-duplicated array
        related = []
        for datum in data:
            for record in datum.get("related") or []:
                related.append(record)

        # make the array unique: records that compare equal as dicts collapse
        # to a single entry (applies to both property and taxlot records)
        unique = [dict(p) for p in set(tuple(i.items()) for i in related)]

        return unique
Example #4
class ColumnViewSet(OrgValidateMixin, SEEDOrgNoPatchOrOrgCreateModelViewSet,
                    OrgCreateUpdateMixin):
    """
    create:
        Create a new Column within a specified org or user's currently activated org.
    update:
        Update a column and modify which dataset it belongs to.
    delete:
        Deletes a single column.
    """
    raise_exception = True
    serializer_class = ColumnSerializer
    renderer_classes = (JSONRenderer, )
    model = Column
    pagination_class = None
    parser_classes = (JSONParser, FormParser)

    def get_queryset(self):
        # check if the request is properties or taxlots
        org_id = self.get_organization(self.request)
        return Column.objects.filter(organization_id=org_id)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(required=False),
            AutoSchemaHelper.query_string_field(
                name='inventory_type',
                required=False,
                description=
                'Which inventory type is being matched (for related fields and naming)'
                '\nDefault: "property"'),
            AutoSchemaHelper.query_boolean_field(
                name='only_used',
                required=False,
                description=
                'Determine whether or not to show only the used fields '
                '(i.e. only columns that have been mapped)'
                '\nDefault: "false"'),
            AutoSchemaHelper.query_boolean_field(
                name='display_units',
                required=False,
                description='If true, any columns that have units will have them'
                ' added as a suffix to the display_name'
                '\nDefault: "false"'),
        ], )
    @api_endpoint_class
    @ajax_request_class
    def list(self, request):
        """
        Retrieves all columns for the user's organization including the raw database columns. Will
        return all the columns across both the Property and Tax Lot tables. The related field will
        be true if the column came from the other table that is not the 'inventory_type' (which
        defaults to Property)
        """
        organization_id = self.get_organization(self.request)
        inventory_type = request.query_params.get('inventory_type', 'property')
        only_used = json.loads(request.query_params.get('only_used', 'false'))
        columns = Column.retrieve_all(organization_id, inventory_type,
                                      only_used)
        organization = Organization.objects.get(pk=organization_id)
        if json.loads(request.query_params.get('display_units', 'false')):
            columns = [add_pint_unit_suffix(organization, x) for x in columns]
        return JsonResponse({
            'status': 'success',
            'columns': columns,
        })
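
    # Shape of a successful response (a sketch; the per-column fields come
    # from Column.retrieve_all):
    #
    #     {"status": "success",
    #      "columns": [{"id": 7, "column_name": "address_line_1",
    #                   "table_name": "PropertyState", "related": false, ...},
    #                  ...]}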

    @swagger_auto_schema_org_query_param
    @ajax_request_class
    def retrieve(self, request, pk=None):
        """
        This API endpoint retrieves a Column
        """
        organization_id = self.get_organization(self.request)
        # check if column exists for the organization
        try:
            c = Column.objects.get(pk=pk)
        except Column.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'column with id {} does not exist'.format(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        if c.organization.id != organization_id:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Organization ID mismatch between column and organization'
                },
                status=status.HTTP_400_BAD_REQUEST)

        return JsonResponse({
            'status': 'success',
            'column': ColumnSerializer(c).data
        })

    @ajax_request_class
    @has_perm_class('can_modify_data')
    def update(self, request, pk=None):
        organization_id = request.query_params.get('organization_id', None)

        # rename the camelCase key from the frontend to the model's snake_case
        if 'sharedFieldType' in request.data:
            request.data['shared_field_type'] = request.data.pop('sharedFieldType')

        # Ensure ComStock mapping uniqueness across properties and taxlots together
        if request.data.get('comstock_mapping') is not None:
            Column.objects.filter(organization_id=organization_id,
                                  comstock_mapping=request.data['comstock_mapping']) \
                .update(comstock_mapping=None)
        return super(ColumnViewSet, self).update(request, pk)

    @ajax_request_class
    @has_perm_class('can_modify_data')
    def destroy(self, request, pk=None):
        org_id = self.get_organization(request)
        try:
            column = Column.objects.get(id=pk, organization_id=org_id)
        except Column.DoesNotExist:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Cannot find column in org=%s with pk=%s' % (org_id, pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        if not column.is_extra_data:
            return JsonResponse(
                {
                    'success': False,
                    'message': 'Only extra_data columns can be deleted'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Delete key from jsonb data
        if column.table_name == 'PropertyState':
            states = PropertyState.objects.filter(
                organization_id=org_id,
                data_state=DATA_STATE_MATCHING,
                extra_data__has_key=column.column_name)
            state_count = states.count()
            # far faster than iterating states, popping the key, and saving, but doesn't update the hash
            # with connection.cursor() as cursor:
            #     cursor.execute("UPDATE seed_propertystate "
            #                    "SET extra_data = extra_data - %s "
            #                    "WHERE organization_id = %s AND data_state = %s",
            #                    [column.column_name, org_id, DATA_STATE_MATCHING])

        elif column.table_name == 'TaxLotState':
            states = TaxLotState.objects.filter(
                organization_id=org_id,
                data_state=DATA_STATE_MATCHING,
                extra_data__has_key=column.column_name)
            state_count = states.count()
            # far faster than iterating states, popping the key, and saving, but doesn't update the hash
            # with connection.cursor() as cursor:
            #     cursor.execute("UPDATE seed_taxlotstate "
            #                    "SET extra_data = extra_data - %s "
            #                    "WHERE organization_id = %s AND data_state = %s",
            #                    [column.column_name, org_id, DATA_STATE_MATCHING])

        else:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Unexpected table_name \'%s\' for column with pk=%s' %
                    (column.table_name, pk)
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Pop the key and update the hash
        for state in states:
            state.extra_data.pop(column.column_name)
            state.save()

        # Delete all mappings from raw column names to the mapped column, then delete the mapped column
        ColumnMapping.objects.filter(column_mapped=column).delete()
        column.delete()

        table_display_name = column.table_name if state_count == 1 else column.table_name + 's'
        return JsonResponse(
            {
                'success':
                True,
                'message':
                'Removed \'%s\' from %s %s' %
                (column.column_name, state_count, table_display_name)
            },
            status=status.HTTP_200_OK)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory({
        'new_column_name':
        'string',
        'overwrite':
        'boolean'
    }))
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def rename(self, request, pk=None):
        """
        This API endpoint renames a Column
        """
        org_id = self.get_organization(request)
        try:
            column = Column.objects.get(id=pk, organization_id=org_id)
        except Column.DoesNotExist:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Cannot find column in org=%s with pk=%s' % (org_id, pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        new_column_name = request.data.get('new_column_name', None)
        overwrite = request.data.get('overwrite', False)
        if not new_column_name:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'You must specify the name of the new column as "new_column_name"'
                },
                status=status.HTTP_400_BAD_REQUEST)

        result = column.rename_column(new_column_name, overwrite)
        if not result[0]:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Unable to rename column with message: "%s"' % result[1]
                },
                status=status.HTTP_400_BAD_REQUEST)
        else:
            return JsonResponse({'success': True, 'message': result[1]})
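
    # Example rename payload, matching the schema above (hypothetical values):
    #
    #     {"new_column_name": "gross_floor_area_2020", "overwrite": false}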

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_string_field(
            'inventory_type',
            required=True,
            description='Inventory Type, either "property" or "taxlot"')
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @action(detail=False, methods=['GET'])
    def mappable(self, request):
        """
        List only inventory columns that are mappable
        """
        organization_id = int(request.query_params.get('organization_id'))
        inventory_type = request.query_params.get('inventory_type')
        if inventory_type not in ['property', 'taxlot']:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Query param `inventory_type` must be "property" or "taxlot"'
                },
                status=status.HTTP_400_BAD_REQUEST)
        columns = Column.retrieve_mapping_columns(organization_id,
                                                  inventory_type)

        return JsonResponse({'status': 'success', 'columns': columns})
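
    # A minimal sketch of calling the 'mappable' action with DRF's test client
    # (the URL route shown is an assumption and may differ per deployment):
    #
    #     from rest_framework.test import APIClient
    #     client = APIClient()
    #     # authenticate first, then:
    #     resp = client.get('/api/v3/columns/mappable/',
    #                       {'organization_id': 1, 'inventory_type': 'property'})
    #     assert resp.json()['status'] == 'success'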