# Example 1
# 0
class AnalysisPropertyViewViewSet(viewsets.ViewSet):
    """Read-only endpoints for AnalysisPropertyViews, scoped to an organization."""
    serializer_class = AnalysisPropertyViewSerializer
    model = AnalysisPropertyView

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field(True)])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def list(self, request, analysis_pk):
        """
        List all property views belonging to an analysis in the caller's org.

        :param request: expects an 'organization_id' query parameter
        :param analysis_pk: primary key of the parent Analysis
        :return: JsonResponse with 'status' and serialized 'views'
        """
        organization_id = int(request.query_params.get('organization_id', 0))
        # NOTE: QuerySet.filter() never raises DoesNotExist, so the previous
        # try/except (and its 409 "analysis doesn't exist" response) was
        # unreachable dead code and has been removed. An unknown analysis or
        # wrong organization simply yields an empty 'views' list, exactly as
        # before.
        views_queryset = AnalysisPropertyView.objects.filter(
            analysis=analysis_pk,
            analysis__organization_id=organization_id)
        serialized_views = AnalysisPropertyViewSerializer(
            views_queryset, many=True).data
        return JsonResponse({'status': 'success', 'views': serialized_views})

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field(True)])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def retrieve(self, request, analysis_pk, pk):
        """
        Fetch a single property view by id, scoped to analysis and org.

        :param request: expects an 'organization_id' query parameter
        :param analysis_pk: primary key of the parent Analysis
        :param pk: primary key of the AnalysisPropertyView
        :return: JsonResponse with the serialized 'view', or a 409 error when
                 the view doesn't exist in this organization/analysis
        """
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            view = AnalysisPropertyView.objects.get(
                id=pk,
                analysis=analysis_pk,
                analysis__organization_id=organization_id)
        except AnalysisPropertyView.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    "Requested analysis property view doesn't exist in this organization and/or analysis."
                },
                status=HTTP_409_CONFLICT)

        return JsonResponse({
            'status': 'success',
            'view': AnalysisPropertyViewSerializer(view).data
        })
# Example 2
# 0
# File: measures.py  Project: riahtu/seed
class MeasureViewSet(viewsets.ReadOnlyModelViewSet, OrgMixin):
    """
    list:
        Return a list of all measures

    retrieve:
        Return a measure by a unique id

    """
    serializer_class = MeasureSerializer
    parser_classes = (
        JSONParser,
        FormParser,
    )
    renderer_classes = (JSONRenderer, )
    pagination_class = None

    def get_queryset(self):
        """Restrict measures to the requesting user's organization."""
        return Measure.objects.filter(
            organization_id=self.get_organization(self.request))

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=no_body)
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['POST'])
    def reset(self, request):
        """
        Reset all the measures back to the defaults (as provided by BuildingSync)
        """
        organization_id = request.query_params.get('organization_id', None)
        if not organization_id:
            # cannot reset without knowing which org's measures to touch
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization_id not provided'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # repopulate the defaults, then echo back the org's full measure list
        Measure.populate_measures(organization_id)
        measures = Measure.objects.filter(
            organization_id=organization_id).order_by('id').values()
        return JsonResponse({'measures': list(measures), 'status': 'success'})
# Example 3
# 0
# File: geocode.py  Project: riahtu/seed
class GeocodeViewSet(viewsets.ViewSet, OrgMixin):
    """Endpoints for geocoding inventory records and summarizing the results."""

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'property_view_ids': ['integer'],
                'taxlot_view_ids': ['integer'],
            },
            description='IDs by inventory type for records to be geocoded.'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['POST'])
    def geocode_by_ids(self, request):
        """
        Submit a request to geocode property and tax lot records.
        """
        body = dict(request.data)
        org_id = self.get_organization(request)
        property_view_ids = body.get('property_view_ids')
        taxlot_view_ids = body.get('taxlot_view_ids')

        if property_view_ids:
            property_views = PropertyView.objects.filter(
                id__in=property_view_ids,
                cycle__organization_id=org_id
            )
            # geocode the states backing the requested views
            properties = PropertyState.objects.filter(
                id__in=Subquery(property_views.values('state_id'))
            )
            geocode_buildings(properties)

        if taxlot_view_ids:
            taxlot_views = TaxLotView.objects.filter(
                id__in=taxlot_view_ids,
                cycle__organization_id=org_id
            )
            taxlots = TaxLotState.objects.filter(
                id__in=Subquery(taxlot_views.values('state_id'))
            )
            geocode_buildings(taxlots)

        return JsonResponse({'status': 'success'})

    @staticmethod
    def _confidence_counts(state_model, views):
        """
        Count geocoding_confidence categories for the states behind *views*.

        :param state_model: PropertyState or TaxLotState
        :param views: queryset of PropertyView/TaxLotView rows
        :return: dict of category name -> count (same keys/queries the two
                 inventory branches previously duplicated inline)
        """
        states = state_model.objects.filter(
            id__in=Subquery(views.values('state_id')))
        return {
            'not_geocoded': states.filter(
                geocoding_confidence__isnull=True).count(),
            'high_confidence': states.filter(
                geocoding_confidence__startswith='High').count(),
            'low_confidence': states.filter(
                geocoding_confidence__startswith='Low').count(),
            'manual': states.filter(
                geocoding_confidence='Manually geocoded (N/A)').count(),
            'missing_address_components': states.filter(
                geocoding_confidence='Missing address components (N/A)').count(),
        }

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'property_view_ids': ['integer'],
                'taxlot_view_ids': ['integer'],
            },
            description='IDs by inventory type for records to be used in building a geocoding summary.'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_view_data')
    @action(detail=False, methods=['POST'])
    def confidence_summary(self, request):
        """
        Generate a summary of geocoding confidence values for property and
        tax lot records.
        """
        body = dict(request.data)
        org_id = self.get_organization(request)
        property_view_ids = body.get('property_view_ids')
        taxlot_view_ids = body.get('taxlot_view_ids')

        result = {}

        if property_view_ids:
            property_views = PropertyView.objects.filter(
                id__in=property_view_ids,
                cycle__organization_id=org_id
            )
            result["properties"] = self._confidence_counts(
                PropertyState, property_views)

        if taxlot_view_ids:
            taxlot_views = TaxLotView.objects.filter(
                id__in=taxlot_view_ids,
                cycle__organization_id=org_id
            )
            result["tax_lots"] = self._confidence_counts(
                TaxLotState, taxlot_views)

        # a bare dict is fine here: the ajax_request_class decorator
        # serializes it to a JSON response
        return result
# Example 4
# 0
class TaxLotPropertyViewSet(GenericViewSet):
    """
    Viewset over the TaxLotProperty join table, returning joined property and
    tax lot records. It presently only serves the bulk ``export`` action
    (CSV / GeoJSON / XLSX), but should eventually become the viewset for any
    tax lot / property join API call.
    """
    renderer_classes = (JSONRenderer, )
    serializer_class = TaxLotPropertySerializer

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field("cycle_id", True, "Cycle ID"),
            AutoSchemaHelper.query_string_field(
                "inventory_type", False,
                "Either 'taxlots' or 'properties' and defaults to 'properties'."
            ),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'ids': ['integer'],
                'filename': 'string',
                'export_type': 'string',
                'profile_id': 'integer'
            },
            description='- ids: (View) IDs for records to be exported\n'
            '- filename: desired filename including extension (defaulting to \'ExportedData.{export_type}\')\n'
            '- export_types: \'csv\', \'geojson\', \'xlsx\' (defaulting to \'csv\')\n'
            '- profile_id: Column List Profile ID to use for customizing fields included in export'
        ),
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['POST'])
    def export(self, request):
        """
        Download a collection of the TaxLot and Properties in multiple formats.

        Query params: organization_id, cycle_id (required), inventory_type
        ('properties' default, or 'taxlots').
        Body: ids, filename, export_type ('csv' | 'geojson' | 'xlsx'),
        profile_id.
        Returns an attachment response, or an error JsonResponse.
        """
        cycle_pk = request.query_params.get('cycle_id', None)
        if not cycle_pk:
            return JsonResponse({
                'status': 'error',
                'message': 'Must pass in cycle_id as query parameter'
            })
        org_id = request.query_params['organization_id']

        # Normalize profile_id: missing, '' and the literal string 'None' all
        # mean "no column list profile".
        profile_id = request.data.get('profile_id')
        if profile_id in ('', 'None'):
            profile_id = None

        # get the class to operate on and the relationships
        view_klass_str = request.query_params.get('inventory_type',
                                                  'properties')
        view_klass = INVENTORY_MODELS[view_klass_str]

        # Set the first column to be the ID
        column_name_mappings = OrderedDict([('id', 'ID')])
        column_ids, add_column_name_mappings, columns_from_database = ColumnListProfile.return_columns(
            org_id, profile_id, view_klass_str)
        column_name_mappings.update(add_column_name_mappings)

        select_related = ['state', 'cycle']
        # Fix: prefetch_related was previously only assigned inside the two
        # branches below, raising NameError if neither branch was taken.
        prefetch_related = []
        ids = request.data.get('ids', [])
        # NOTE(review): both branches below REPLACE this dict, so the cycle
        # filter is dropped from the final query. Preserved as-is to avoid
        # changing query results — confirm whether cycle filtering is intended.
        filter_str = {'cycle': cycle_pk}
        if hasattr(view_klass, 'property'):
            select_related.append('property')
            prefetch_related = ['labels']
            filter_str = {'property__organization_id': org_id}
            if ids:
                filter_str['id__in'] = ids
            # always export the labels and notes
            column_name_mappings['property_notes'] = 'Property Notes'
            column_name_mappings['property_labels'] = 'Property Labels'

        elif hasattr(view_klass, 'taxlot'):
            select_related.append('taxlot')
            prefetch_related = ['labels']
            filter_str = {'taxlot__organization_id': org_id}
            if ids:
                filter_str['id__in'] = ids
            # always export the labels and notes
            column_name_mappings['taxlot_notes'] = 'Tax Lot Notes'
            column_name_mappings['taxlot_labels'] = 'Tax Lot Labels'

        model_views = view_klass.objects.select_related(
            *select_related).prefetch_related(*prefetch_related).filter(
                **filter_str).order_by('id')

        # get the data in a dict which includes the related data
        data = TaxLotProperty.get_related(model_views, column_ids,
                                          columns_from_database)

        # add labels and notes to each exported row; the iteration order of
        # model_views matches data (both ordered by id)
        for i, record in enumerate(model_views):
            label_string = [
                label.name for label in record.labels.all().order_by('name')
            ]
            note_string = [
                note.created.astimezone().strftime("%Y-%m-%d %I:%M:%S %p") +
                "\n" + note.text
                for note in record.notes.all().order_by('created')
            ]

            if hasattr(record, 'property'):
                data[i]['property_labels'] = ','.join(label_string)
                data[i]['property_notes'] = '\n----------\n'.join(note_string)
            elif hasattr(record, 'taxlot'):
                data[i]['taxlot_labels'] = ','.join(label_string)
                data[i]['taxlot_notes'] = '\n----------\n'.join(note_string)

        # force the data into the same order as the requested IDs
        if ids:
            order_dict = {obj_id: index for index, obj_id in enumerate(ids)}
            if view_klass_str == 'properties':
                view_id_str = 'property_view_id'
            else:
                view_id_str = 'taxlot_view_id'
            data.sort(key=lambda inventory_obj: order_dict[inventory_obj[
                view_id_str]])

        export_type = request.data.get('export_type', 'csv')
        filename = request.data.get('filename', f"ExportedData.{export_type}")

        if export_type == "csv":
            return self._csv_response(filename, data, column_name_mappings)
        elif export_type == "geojson":
            return self._json_response(filename, data, column_name_mappings)
        elif export_type == "xlsx":
            return self._spreadsheet_response(filename, data,
                                              column_name_mappings)
        # Robustness fix: an unknown export_type previously fell through and
        # returned None, producing an opaque server error.
        return JsonResponse({
            'status': 'error',
            'message': f'Unsupported export_type: {export_type}'
        }, status=400)

    def _csv_response(self, filename, data, column_name_mappings):
        """Render *data* as a CSV attachment using the given column mapping."""
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        writer = csv.writer(response)

        # Excel chokes on a CSV whose very first header cell is "ID" (any
        # casing), so lower-case it before writing the header row.
        header = list(column_name_mappings.values())
        if header[0] == 'ID':
            header[0] = 'id'
        writer.writerow(header)

        # walk the mapping (not the datum) so column order is preserved
        for datum in data:
            row = []
            for column in column_name_mappings:
                cell = datum.get(column, None)

                # fall back to the first related record for values the primary
                # record doesn't carry
                if cell is None and datum.get('related'):
                    cell = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON
                # Encoder, but that isn't here).
                if isinstance(cell, ureg.Quantity):
                    cell = cell.magnitude
                elif isinstance(cell, datetime.datetime):
                    cell = cell.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(cell, datetime.date):
                    cell = cell.strftime("%Y-%m-%d")

                row.append(cell)

            writer.writerow(row)

        return response

    def _spreadsheet_response(self, filename, data, column_name_mappings):
        """
        Render exported inventory *data* as a multi-tab XLSX attachment.

        Tabs: Properties (one row per record, columns per
        *column_name_mappings*), Measures, Scenarios, a Scenario/Measure join
        table, and scenario Meter Readings. Measures and scenarios are looked
        up per record via its 'property_state_id', so this export path
        assumes property data — TODO confirm it is never invoked for taxlots.

        :param filename: attachment filename sent in Content-Disposition
        :param data: list of dicts as produced by TaxLotProperty.get_related
        :param column_name_mappings: OrderedDict of db column -> display name
        :return: HttpResponse containing the finished workbook bytes
        """
        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        # Scenario attributes copied onto the Scenarios tab, in column order.
        scenario_keys = ('id', 'name', 'description',
                         'annual_site_energy_savings',
                         'annual_source_energy_savings', 'annual_cost_savings',
                         'analysis_state', 'analysis_state_message',
                         'annual_electricity_savings',
                         'annual_natural_gas_savings', 'annual_site_energy',
                         'annual_source_energy', 'annual_natural_gas_energy',
                         'annual_electricity_energy', 'annual_peak_demand',
                         'annual_site_energy_use_intensity',
                         'annual_source_energy_use_intensity')
        # Header aliases that append the unit to the column name.
        scenario_key_mappings = {
            'annual_site_energy_savings': 'annual_site_energy_savings_mmbtu',
            'annual_source_energy_savings':
            'annual_source_energy_savings_mmbtu',
            'annual_cost_savings': 'annual_cost_savings_dollars',
            'annual_site_energy': 'annual_site_energy_kbtu',
            'annual_site_energy_use_intensity':
            'annual_site_energy_use_intensity_kbtu_ft2',
            'annual_source_energy': 'annual_source_energy_kbtu',
            'annual_source_energy_use_intensity':
            'annual_source_energy_use_intensity_kbtu_ft2',
            'annual_natural_gas_energy': 'annual_natural_gas_energy_mmbtu',
            'annual_electricity_energy': 'annual_electricity_energy_mmbtu',
            'annual_peak_demand': 'annual_peak_demand_kw',
            'annual_electricity_savings': 'annual_electricity_savings_kbtu',
            'annual_natural_gas_savings': 'annual_natural_gas_savings_kbtu'
        }

        # PropertyMeasure attributes for the Measures tab, followed by
        # attributes read off the related Measure object.
        property_measure_keys = ('id', 'property_measure_name', 'measure_id',
                                 'cost_mv', 'cost_total_first',
                                 'cost_installation', 'cost_material',
                                 'cost_capital_replacement',
                                 'cost_residual_value')
        measure_keys = ('name', 'display_name', 'category',
                        'category_display_name')
        # find measures and scenarios
        for i, record in enumerate(data):
            measures = PropertyMeasure.objects.filter(
                property_state_id=record['property_state_id'])
            record['measures'] = measures

            scenarios = Scenario.objects.filter(
                property_state_id=record['property_state_id'])
            record['scenarios'] = scenarios

        # build the workbook in memory
        output = io.BytesIO()
        wb = xlsxwriter.Workbook(output, {'remove_timezone': True})

        # add tabs
        ws1 = wb.add_worksheet('Properties')
        ws2 = wb.add_worksheet('Measures')
        ws3 = wb.add_worksheet('Scenarios')
        ws4 = wb.add_worksheet('Scenario Measure Join Table')
        ws5 = wb.add_worksheet('Meter Readings')
        bold = wb.add_format({'bold': True})

        # Per-sheet write cursors: rowN/colN track the next cell on ws<N>.
        row = 0
        row2 = 0
        col2 = 0
        row3 = 0
        col3 = 0
        row4 = 0
        row5 = 0

        for index, val in enumerate(list(column_name_mappings.values())):
            # Do not write the first element as ID, this causes weird issues with Excel.
            if index == 0 and val == 'ID':
                ws1.write(row, index, 'id', bold)
            else:
                ws1.write(row, index, val, bold)

        # iterate over the results to preserve column order and write row.
        # Header flags so the Measures/Scenarios tab headers are written only
        # once, on the first record that actually has measures/scenarios.
        add_m_headers = True
        add_s_headers = True
        for datum in data:
            row += 1
            # NOTE(review): 'id' shadows the builtin; it holds the record id
            # for the Scenario/Measure join table below.
            id = None
            for index, column in enumerate(column_name_mappings):
                if column == 'id':
                    id = datum.get(column, None)

                row_result = datum.get(column, None)

                # Try grabbing the value out of the related field if not found yet.
                if row_result is None and datum.get('related'):
                    row_result = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON Encoder, but that isn't here).
                if isinstance(row_result, ureg.Quantity):
                    row_result = row_result.magnitude
                elif isinstance(row_result, datetime.datetime):
                    row_result = row_result.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(row_result, datetime.date):
                    row_result = row_result.strftime("%Y-%m-%d")
                ws1.write(row, index, row_result)

            # measures
            for index, m in enumerate(datum['measures']):
                if add_m_headers:
                    # grab headers
                    for key in property_measure_keys:
                        ws2.write(row2, col2, key, bold)
                        col2 += 1
                    for key in measure_keys:
                        ws2.write(row2, col2, 'measure ' + key, bold)
                        col2 += 1
                    add_m_headers = False

                row2 += 1
                col2 = 0
                for key in property_measure_keys:
                    ws2.write(row2, col2, getattr(m, key))
                    col2 += 1
                for key in measure_keys:
                    ws2.write(row2, col2, getattr(m.measure, key))
                    col2 += 1

            # scenarios (and join table)
            # join table
            # NOTE(review): these header cells are re-written on every datum;
            # harmless (same values) but could be hoisted out of the loop.
            ws4.write('A1', 'property_id', bold)
            ws4.write('B1', 'scenario_id', bold)
            ws4.write('C1', 'measure_id', bold)
            for index, s in enumerate(datum['scenarios']):
                scenario_id = s.id
                if add_s_headers:
                    # grab headers
                    for key in scenario_keys:
                        # double check scenario_key_mappings in case a different header is desired
                        if key in scenario_key_mappings.keys():
                            key = scenario_key_mappings[key]
                        ws3.write(row3, col3, key, bold)
                        col3 += 1
                    add_s_headers = False
                row3 += 1
                col3 = 0
                for key in scenario_keys:
                    ws3.write(row3, col3, getattr(s, key))
                    col3 += 1

                # one join-table row per measure attached to this scenario
                for sm in s.measures.all():
                    row4 += 1
                    ws4.write(row4, 0, id)
                    ws4.write(row4, 1, scenario_id)
                    ws4.write(row4, 2, sm.id)

            # scenario meter readings
            ws5.write('A1', 'scenario_id', bold)
            ws5.write('B1', 'meter_id', bold)
            ws5.write('C1', 'type', bold)
            ws5.write('D1', 'start_time', bold)
            ws5.write('E1', 'end_time', bold)
            ws5.write('F1', 'reading', bold)
            ws5.write('G1', 'units', bold)
            ws5.write('H1', 'is_virtual', bold)
            # datetime formatting
            date_format = wb.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'})

            for index, s in enumerate(datum['scenarios']):
                scenario_id = s.id
                # retrieve meters
                meters = Meter.objects.filter(scenario_id=scenario_id)
                for m in meters:
                    # retrieve readings
                    readings = MeterReading.objects.filter(
                        meter_id=m.id).order_by('start_time')
                    for r in readings:
                        row5 += 1
                        ws5.write(row5, 0, scenario_id)
                        ws5.write(row5, 1, m.id)
                        # map the stored enum value to its display label
                        the_type = next((item for item in Meter.ENERGY_TYPES
                                         if item[0] == m.type), None)
                        the_type = the_type[1] if the_type is not None else None
                        ws5.write(
                            row5, 2, the_type
                        )  # use energy type enum to determine reading type
                        ws5.write_datetime(row5, 3, r.start_time, date_format)
                        ws5.write_datetime(row5, 4, r.end_time, date_format)
                        ws5.write(row5, 5,
                                  r.reading)  # this is now a float field
                        ws5.write(row5, 6, r.source_unit)
                        ws5.write(row5, 7, m.is_virtual)

        wb.close()

        # xlsx_data contains the Excel file
        xlsx_data = output.getvalue()

        response.write(xlsx_data)
        return response

    def _json_response(self, filename, data, column_name_mappings):
        """
        Render *data* (plus its related records) as a GeoJSON
        FeatureCollection attachment.

        Polygon/point WKT fields become a GeometryCollection on each feature;
        everything else lands in 'properties' under its display name.

        :param filename: attachment filename sent in Content-Disposition
        :param data: list of dicts as produced by TaxLotProperty.get_related
        :param column_name_mappings: dict of db column -> display name
        :return: JsonResponse containing the FeatureCollection
        """
        polygon_fields = [
            "bounding_box", "centroid", "property_footprint",
            "taxlot_footprint", "long_lat"
        ]
        features = []

        # extract related records and export them alongside the primary ones
        related_records = self._extract_related(data)
        complete_data = data + related_records

        for datum in complete_data:
            feature = {"type": "Feature", "properties": {}}

            for key, value in datum.items():
                if value is None:
                    continue

                # normalize values JSON can't represent directly
                if isinstance(value, ureg.Quantity):
                    value = value.magnitude
                elif isinstance(value, datetime.datetime):
                    value = value.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(value, datetime.date):
                    value = value.strftime("%Y-%m-%d")

                if value and any(k in key for k in polygon_fields):
                    # Geometry field: the first one establishes the
                    # GeometryCollection; subsequent geometries are appended
                    # alongside it.
                    if key == 'long_lat':
                        # point
                        individual_geometry = {
                            "coordinates": self._serialized_point(value),
                            "type": "Point"
                        }
                    else:
                        # polygons
                        individual_geometry = {
                            "coordinates": [self._serialized_coordinates(value)],
                            "type": "Polygon"
                        }

                    if feature.get("geometry", None) is None:
                        feature["geometry"] = {
                            "type": "GeometryCollection",
                            "geometries": [individual_geometry]
                        }
                    else:
                        feature["geometry"]["geometries"].append(
                            individual_geometry)
                else:
                    # non-polygon data: rename per the export column mapping
                    display_key = column_name_mappings.get(key, key)
                    feature["properties"][display_key] = value

            # style hints consumed by GeoJSON viewers
            if feature["properties"].get("property_state_id") is not None:
                feature["properties"]["stroke"] = "#185189"  # buildings color
            elif feature["properties"].get("taxlot_state_id") is not None:
                feature["properties"]["stroke"] = "#10A0A0"  # tax lots color
            feature["properties"]["marker-color"] = "#E74C3C"
            feature["properties"]["fill-opacity"] = 0

            features.append(feature)

        # Fix: build the collection once, after the loop. Previously this dict
        # was (re)created inside the loop, which raised UnboundLocalError for
        # an empty export and did redundant work otherwise.
        response_dict = {
            "type": "FeatureCollection",
            "crs": {
                "type": "EPSG",
                "properties": {
                    "code": 4326
                }
            },
            "features": features
        }

        response = JsonResponse(response_dict)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        return response

    def _serialized_coordinates(self, polygon_wkt):
        """
        Convert a WKT polygon string into a list of [x, y] float pairs.

        NOTE: lstrip/rstrip use character-set semantics here, which sheds the
        'POLYGON' keyword together with any number of leading/trailing
        parentheses — do not replace with removeprefix/removesuffix.
        """
        inner = polygon_wkt.lstrip('POLYGON (').rstrip(')')
        return [
            [float(part) for part in pair.split(' ')]
            for pair in inner.split(', ')
        ]

    def _serialized_point(self, point_wkt):
        """
        Convert a WKT point string into an [x, y] float pair.

        Only the first comma-separated coordinate pair is used; lstrip/rstrip
        rely on character-set semantics to shed 'POINT' and parentheses.
        """
        first_pair = point_wkt.lstrip('POINT (').rstrip(')').split(', ')[0]
        return [float(part) for part in first_pair.split(' ')]

    def _extract_related(self, data):
        """
        Collect the 'related' records from every row and de-duplicate them.

        :param data: list of record dicts, each optionally carrying a
                     'related' list of dicts
        :return: list of unique related-record dicts

        Fixes: the original raised IndexError on empty *data* and NameError
        when the first row had neither 'property_state_id' nor
        'taxlot_state_id'; its property/taxlot probe was also dead code (both
        branches ran the identical de-duplication), so it has been removed.
        """
        if not data:
            return []

        related = []
        for datum in data:
            related.extend(datum.get("related") or [])

        # de-duplicate by full item contents (assumes values are hashable)
        return [dict(items) for items in set(tuple(r.items()) for r in related)]
# Example 5
# 0
class DatasetViewSet(viewsets.ViewSet):
    """CRUD endpoints for datasets (ImportRecord objects), scoped to an organization."""

    # raise PermissionDenied instead of redirecting when a permission check fails
    raise_exception = True

    @swagger_auto_schema_org_query_param
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_view_data')
    def list(self, request):
        """
        Retrieves all datasets for the user's organization.
        """

        org_id = request.query_params.get('organization_id', None)
        org = Organization.objects.get(pk=org_id)
        datasets = []
        # serialize each dataset together with its import files
        for d in ImportRecord.objects.filter(super_organization=org):
            importfiles = [obj_to_dict(f) for f in d.files]
            dataset = obj_to_dict(d)
            dataset['importfiles'] = importfiles
            if d.last_modified_by:
                dataset['last_modified_by'] = d.last_modified_by.email
            dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)
            datasets.append(dataset)

        return JsonResponse({
            'status': 'success',
            'datasets': datasets,
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field()
    ],
                         request_body=AutoSchemaHelper.schema_factory(
                             {'dataset': 'string'},
                             description='The new name for this dataset'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    def update(self, request, pk=None):
        """
            Updates the name of a dataset (ImportRecord).
        """

        # validate the organization_id query param by hand (no
        # @require_organization_id_class decorator on this method)
        organization_id = request.query_params.get('organization_id', None)
        if organization_id is None:
            return JsonResponse({
                'status':
                'error',
                'message':
                'Missing organization_id query parameter'
            })
        try:
            organization_id = int(organization_id)
        except ValueError:
            return JsonResponse({
                'status': 'error',
                'message': 'Bad (non-numeric) organization_id'
            })
        dataset_id = pk
        # NOTE(review): raises KeyError if the request body lacks 'dataset' —
        # presumably callers always send it; confirm
        name = request.data['dataset']

        # check if user has access to the dataset
        d = ImportRecord.objects.filter(super_organization_id=organization_id,
                                        pk=dataset_id)

        if not d.exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'user does not have permission to update dataset',
                },
                status=status.HTTP_400_BAD_REQUEST)
        d = d[0]
        d.name = name
        d.save()
        return JsonResponse({
            'status': 'success',
        })

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_view_data')
    def retrieve(self, request, pk=None):
        """
            Retrieves a dataset (ImportRecord).
        """

        organization_id = request.query_params.get('organization_id', None)
        if organization_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Missing organization_id query parameter'
                },
                status=status.HTTP_400_BAD_REQUEST)
        try:
            organization_id = int(organization_id)
        except ValueError:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Bad (non-numeric) organization_id'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # the user must belong to the requested organization
        valid_orgs = OrganizationUser.objects.filter(
            user_id=request.user.id).values_list(
                'organization_id', flat=True).order_by('organization_id')
        if organization_id not in valid_orgs:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Cannot access datasets for this organization id',
                },
                status=status.HTTP_403_FORBIDDEN)

        # check if dataset exists
        try:
            d = ImportRecord.objects.get(pk=pk)
        except ImportRecord.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'dataset with id {} does not exist'.format(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # the dataset must belong to the requested organization
        if d.super_organization_id != organization_id:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Organization ID mismatch between dataset and organization'
                },
                status=status.HTTP_400_BAD_REQUEST)

        dataset = obj_to_dict(d)
        importfiles = []
        # prefer the user-supplied filename, falling back to the stored one
        for f in d.files:
            importfile = obj_to_dict(f)
            if not f.uploaded_filename:
                importfile['name'] = f.filename_only
            else:
                importfile['name'] = f.uploaded_filename
            importfiles.append(importfile)

        dataset['importfiles'] = importfiles
        if d.last_modified_by:
            dataset['last_modified_by'] = d.last_modified_by.email
        dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)

        return JsonResponse({
            'status': 'success',
            'dataset': dataset,
        })

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def destroy(self, request, pk=None):
        """
            Deletes a dataset (ImportRecord).
        """
        # NOTE(review): unlike update/retrieve, the query param is not
        # validated here — int(None) raises TypeError if organization_id is
        # missing (no @require_organization_id_class on this method); confirm
        organization_id = int(request.query_params.get('organization_id',
                                                       None))
        dataset_id = pk
        # check if user has access to the dataset
        d = ImportRecord.objects.filter(super_organization_id=organization_id,
                                        pk=dataset_id)
        if not d.exists():
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'user does not have permission to deactivate dataset',
                },
                status=status.HTTP_403_FORBIDDEN)
        d = d[0]
        d.delete()
        return JsonResponse({'status': 'success'})

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {'name': 'string'},
            description='Name of the dataset to be created'),
        responses={
            200:
            AutoSchemaHelper.schema_factory({
                'id': 'integer',
                'name': 'string',
            })
        })
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    def create(self, request):
        """
        Creates a new empty dataset (ImportRecord).
        """

        body = request.data
        org_id = int(request.query_params.get('organization_id', None))

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            # NOTE(review): message says "organization_id not provided" but
            # this branch actually fires when the org does not exist — confirm
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization_id not provided'
                },
                status=status.HTTP_400_BAD_REQUEST)
        record = ImportRecord.objects.create(name=body['name'],
                                             app='seed',
                                             start_time=timezone.now(),
                                             created_at=timezone.now(),
                                             last_modified_by=request.user,
                                             super_organization=org,
                                             owner=request.user)

        return JsonResponse({
            'status': 'success',
            'id': record.pk,
            'name': record.name
        })

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        responses={
            200: AutoSchemaHelper.schema_factory({
                'datasets_count': 'integer',
            })
        })
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @require_organization_id_class
    @action(detail=False, methods=['GET'])
    def count(self, request):
        """
        Retrieves the number of datasets for an org.
        """
        # @require_organization_id_class presumably guarantees the param is
        # present here, so int() will not see None — confirm
        org_id = int(request.query_params.get('organization_id', None))

        datasets_count = ImportRecord.objects.filter(
            super_organization_id=org_id).count()
        return JsonResponse({
            'status': 'success',
            'datasets_count': datasets_count
        })
# --- Example 6 ---
class UploadViewSet(viewsets.ViewSet):
    """
    Endpoint to upload data files to, if uploading to local file storage.
    Valid source_type values are found in ``seed.models.SEED_DATA_SOURCES``

    Returns::

        {
            'import_record': True,
            'import_file_id': The ID of the newly-uploaded ImportFile
        }

    """
    parser_classes = (FormParser, MultiPartParser)

    @swagger_auto_schema(
        request_body=no_body,
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.upload_file_field(name='file',
                                               required=True,
                                               description='File to Upload'),
            AutoSchemaHelper.form_integer_field(
                name='import_record',
                required=True,
                description=
                'the dataset ID you want to associate this file with.'),
            AutoSchemaHelper.form_string_field(
                name='source_type',
                required=True,
                description=
                'the type of file (e.g. "Portfolio Raw" or "Assessed Raw")'),
            AutoSchemaHelper.form_string_field(
                name='source_program_version',
                required=False,
                description=
                'the version of the file as related to the source_type'),
        ])
    @has_perm_class('can_modify_data')
    @api_endpoint_class
    @ajax_request_class
    def create(self, request):
        """
        Upload a new file to an import_record. This is a multipart/form upload.
        """

        if len(request.FILES) == 0:
            return JsonResponse({
                'success':
                False,
                'message':
                "Must pass file in as a Multipart/Form post"
            })

        # Fineuploader requires the field to be qqfile it appears... so why not support both? ugh.
        if 'qqfile' in request.data:
            the_file = request.data['qqfile']
        else:
            the_file = request.data['file']
        filename = the_file.name
        path = os.path.join(settings.MEDIA_ROOT, "uploads", filename)

        # Get a unique filename using the get_available_name method in FileSystemStorage
        s = FileSystemStorage()
        path = s.get_available_name(path)

        # verify the directory exists
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))

        # save the file to disk chunk by chunk (avoids loading it all in memory)
        with open(path, 'wb+') as temp_file:
            for chunk in the_file.chunks():
                temp_file.write(chunk)
        org_id = request.query_params.get('organization_id', None)
        import_record_pk = request.POST.get('import_record',
                                            request.GET.get('import_record'))
        try:
            record = ImportRecord.objects.get(pk=import_record_pk,
                                              super_organization_id=org_id)
        except ImportRecord.DoesNotExist:
            # clean up the uploaded file
            os.unlink(path)
            return JsonResponse({
                'success':
                False,
                'message':
                "Import Record %s not found" % import_record_pk
            })

        source_type = request.POST.get('source_type',
                                       request.GET.get('source_type'))

        # Add Program & Version fields (empty string if not given)
        kw_fields = {
            field: request.POST.get(field, request.GET.get(field, ''))
            for field in ['source_program', 'source_program_version']
        }

        f = ImportFile.objects.create(import_record=record,
                                      uploaded_filename=filename,
                                      file=path,
                                      source_type=source_type,
                                      **kw_fields)

        return JsonResponse({'success': True, "import_file_id": f.pk})

    @staticmethod
    def _get_pint_var_from_pm_value_object(pm_value):
        """Coerce an ESPM value object (``{'@uom': ..., '#text': ...}``) into a
        pint quantity.

        :param pm_value: dict as parsed from a PM export
        :return: ``{'success': True, 'pint_value': <Quantity>}`` on success,
            ``{'success': False, 'message': ...}`` on a parse/unit error, or
            ``None`` when the dict lacks the '@uom'/'#text' keys entirely
        """
        units = pint.UnitRegistry()
        if '@uom' in pm_value and '#text' in pm_value:
            # this is the correct expected path for unit-based attributes
            string_value = pm_value['#text']
            try:
                float_value = float(string_value)
            except ValueError:
                return {
                    'success':
                    False,
                    'message':
                    'Could not cast value to float: \"%s\"' % string_value
                }
            original_unit_string = pm_value['@uom']
            if original_unit_string == 'kBtu':
                pint_val = float_value * units.kBTU
            elif original_unit_string == 'kBtu/ft²':
                pint_val = float_value * units.kBTU / units.sq_ft
            elif original_unit_string == 'Metric Tons CO2e':
                pint_val = float_value * units.metric_ton
            elif original_unit_string == 'kgCO2e/ft²':
                pint_val = float_value * units.kilogram / units.sq_ft
            else:
                return {
                    'success':
                    False,
                    'message':
                    'Unsupported units string: \"%s\"' % original_unit_string
                }
            return {'success': True, 'pint_value': pint_val}
        # make the previously-implicit fall-through explicit: value objects
        # without '@uom'/'#text' yield None
        return None

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'import_record_id':
                'integer',
                'properties': [{
                    'address_1': 'string',
                    'city': 'string',
                    'state_province': 'string',
                    'postal_code': 'string',
                    'county': 'string',
                    'country': 'string',
                    'property_name': 'string',
                    'property_id': 'integer',
                    'year_built': 'integer',
                }],
            },
            required=['import_record_id', 'properties'],
            description='An object containing meta data for a property'))
    @has_perm_class('can_modify_data')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=False, methods=['POST'], parser_classes=(JSONParser, ))
    def create_from_pm_import(self, request):
        """
        Create an import_record from a PM import request.
        TODO: The properties key here is going to be an enormous amount of XML data at times, need to change this
        This allows the PM import workflow to be treated essentially the same as a standard file upload
        The process comprises the following steps:

        * Get a unique file name for this portfolio manager import
        """

        doing_pint = False

        if 'properties' not in request.data:
            return JsonResponse(
                {
                    'success': False,
                    'message': "Must pass properties in the request body."
                },
                status=status.HTTP_400_BAD_REQUEST)

        # base file name (will be appended with a random string to ensure uniqueness if multiple on the same day)
        today_date = datetime.datetime.today().strftime('%Y-%m-%d')
        file_name = "pm_import_%s.csv" % today_date

        # create a folder to keep pm_import files
        path = os.path.join(settings.MEDIA_ROOT, "uploads", "pm_imports",
                            file_name)

        # Get a unique filename using the get_available_name method in FileSystemStorage
        s = FileSystemStorage()
        path = s.get_available_name(path)

        # verify the directory exists
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))

        # Column headings are taken from the PM keys as-is (see history below);
        # we scan the first property for the key list and map keys to themselves.
        pm_key_to_column_heading_map = {}

        # We will also create a list of values that are used in PM export to indicate a value wasn't available
        # When we import them into SEED here we will be sure to not write those values
        pm_flagged_bad_string_values = [
            'Not Available',
            'Unable to Check (not enough data)',
            'No Current Year Ending Date',
        ]

        # We will make a pass through the first property to get the list of unexpected keys
        for pm_property in request.data['properties']:
            for pm_key_name, _ in pm_property.items():
                if pm_key_name not in pm_key_to_column_heading_map:
                    pm_key_to_column_heading_map[pm_key_name] = pm_key_name
            break

        # Create the header row of the csv file first
        rows = []
        header_row = []
        for _, csv_header in pm_key_to_column_heading_map.items():
            header_row.append(csv_header)
        rows.append(header_row)

        num_properties = len(request.data['properties'])
        property_num = 0
        last_time = datetime.datetime.now()

        _log.debug("About to try to import %s properties from ESPM" %
                   num_properties)
        _log.debug("Starting at %s" % last_time)

        # Create a single row for each building
        for pm_property in request.data['properties']:

            # report some helpful info every 20 properties
            property_num += 1
            if property_num % 20 == 0:
                new_time = datetime.datetime.now()
                _log.debug("On property number %s; current time: %s" %
                           (property_num, new_time))

            this_row = []

            # Loop through all known PM variables
            for pm_variable, _ in pm_key_to_column_heading_map.items():

                # Initialize this to False for each pm_variable we will search through
                added = False

                # Check if this PM export has this variable in it
                if pm_variable in pm_property:

                    # If so, create a convenience variable to store it
                    this_pm_variable = pm_property[pm_variable]

                    # Next we need to check type.  If it is a string, we will add it here to avoid parsing numerics
                    # However, we need to be sure to not add the flagged bad strings.
                    # However, a flagged value *could* be a value property name, and we would want to allow that
                    # FIX: was isinstance(..., basestring) — a Python 2 builtin
                    # that raises NameError on Python 3
                    if isinstance(this_pm_variable, str):
                        if pm_variable == 'property_name':
                            this_row.append(this_pm_variable)
                            added = True
                        elif pm_variable == 'property_notes':
                            sanitized_string = this_pm_variable.replace(
                                '\n', ' ')
                            this_row.append(sanitized_string)
                            added = True
                        elif this_pm_variable not in pm_flagged_bad_string_values:
                            this_row.append(this_pm_variable)
                            added = True

                    # If it isn't a string, it should be a dictionary, storing numeric data and units, etc.
                    else:
                        # As long as it is a valid dictionary, try to get a meaningful value out of it
                        if this_pm_variable and '#text' in this_pm_variable and this_pm_variable[
                                '#text'] != 'Not Available':
                            # Coerce the value into a proper set of Pint units for us
                            if doing_pint:
                                new_var = UploadViewSet._get_pint_var_from_pm_value_object(
                                    this_pm_variable)
                                if new_var['success']:
                                    pint_value = new_var['pint_value']
                                    this_row.append(pint_value.magnitude)
                                    added = True
                                    # TODO: What to do with the pint_value.units here?
                            else:
                                this_row.append(
                                    float(this_pm_variable['#text']))
                                added = True

                # And finally, if we haven't set the added flag, give the csv column a blank value
                if not added:
                    this_row.append('')

            # Then add this property row of data
            rows.append(this_row)

        # Then write the actual data out as csv
        # newline='' is required by the csv module so it can control line endings
        with open(path, 'w', encoding='utf-8', newline='') as csv_file:
            pm_csv_writer = csv.writer(csv_file)
            for row in rows:
                pm_csv_writer.writerow(row)

        # Look up the import record (data set)
        org_id = request.data['organization_id']
        import_record_pk = request.data['import_record_id']
        try:
            record = ImportRecord.objects.get(pk=import_record_pk,
                                              super_organization_id=org_id)
        except ImportRecord.DoesNotExist:
            # clean up the uploaded file
            os.unlink(path)
            return JsonResponse({
                'success':
                False,
                'message':
                "Import Record %s not found" % import_record_pk
            })

        # Create a new import file object in the database
        f = ImportFile.objects.create(
            import_record=record,
            uploaded_filename=file_name,
            file=path,
            source_type=SEED_DATA_SOURCES[PORTFOLIO_RAW],
            **{
                'source_program': 'PortfolioManager',
                'source_program_version': '1.0'
            })

        # Return the newly created import file ID
        return JsonResponse({'success': True, 'import_file_id': f.pk})
# --- Example 7 — file: columns.py, project: ytgov/seed ---
class ColumnViewSet(OrgValidateMixin, SEEDOrgNoPatchOrOrgCreateModelViewSet,
                    OrgCreateUpdateMixin):
    """
    create:
        Create a new Column within a specified org or user's currently activated org.
    update:
        Update a column and modify which dataset it belongs to.
    delete:
        Deletes a single column.
    """
    # raise PermissionDenied instead of redirecting when a permission check fails
    raise_exception = True
    serializer_class = ColumnSerializer
    renderer_classes = (JSONRenderer, )
    model = Column
    pagination_class = None
    parser_classes = (JSONParser, FormParser)

    def get_queryset(self):
        # check if the request is properties or taxlots
        org_id = self.get_organization(self.request)
        return Column.objects.filter(organization_id=org_id)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(required=False),
            AutoSchemaHelper.query_string_field(
                name='inventory_type',
                required=False,
                description=
                'Which inventory type is being matched (for related fields and naming)'
                '\nDefault: "property"'),
            AutoSchemaHelper.query_boolean_field(
                name='only_used',
                required=False,
                description=
                'Determine whether or not to show only the used fields '
                '(i.e. only columns that have been mapped)'
                '\nDefault: "false"'),
            AutoSchemaHelper.query_boolean_field(
                name='display_units',
                required=False,
                description='If true, any columns that have units will have them'
                ' added as a suffix to the display_name'
                '\nDefault: "false"'),
        ], )
    @api_endpoint_class
    @ajax_request_class
    def list(self, request):
        """
        Retrieves all columns for the user's organization including the raw database columns. Will
        return all the columns across both the Property and Tax Lot tables. The related field will
        be true if the column came from the other table that is not the 'inventory_type' (which
        defaults to Property)
        """
        organization_id = self.get_organization(self.request)
        inventory_type = request.query_params.get('inventory_type', 'property')
        # json.loads turns the "true"/"false" query strings into real booleans
        only_used = json.loads(request.query_params.get('only_used', 'false'))
        columns = Column.retrieve_all(organization_id, inventory_type,
                                      only_used)
        organization = Organization.objects.get(pk=organization_id)
        if json.loads(request.query_params.get('display_units', 'false')):
            columns = [add_pint_unit_suffix(organization, x) for x in columns]
        return JsonResponse({
            'status': 'success',
            'columns': columns,
        })

    @swagger_auto_schema_org_query_param
    @ajax_request_class
    def retrieve(self, request, pk=None):
        """
        This API endpoint retrieves a Column
        """
        organization_id = self.get_organization(self.request)
        # check if column exists for the organization
        try:
            c = Column.objects.get(pk=pk)
        except Column.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'column with id {} does not exist'.format(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        if c.organization.id != organization_id:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Organization ID mismatch between column and organization'
                },
                status=status.HTTP_400_BAD_REQUEST)

        return JsonResponse({
            'status': 'success',
            'column': ColumnSerializer(c).data
        })

    @ajax_request_class
    @has_perm_class('can_modify_data')
    def update(self, request, pk=None):
        """Update a column, translating the camelCase payload key and enforcing
        ComStock-mapping uniqueness before delegating to the parent update."""
        organization_id = request.query_params.get('organization_id', None)

        # NOTE(review): raises KeyError if 'sharedFieldType' is absent and
        # mutates request.data in place — presumably the UI always sends it;
        # confirm
        request.data['shared_field_type'] = request.data['sharedFieldType']
        del request.data['sharedFieldType']

        # Ensure ComStock uniqueness across properties and taxlots together
        if request.data['comstock_mapping'] is not None:
            Column.objects.filter(organization_id=organization_id, comstock_mapping=request.data['comstock_mapping']) \
                .update(comstock_mapping=None)
        return super(ColumnViewSet, self).update(request, pk)

    @ajax_request_class
    @has_perm_class('can_modify_data')
    def destroy(self, request, pk=None):
        """Delete an extra_data column and scrub its key from all matching
        Property/TaxLot state records in the organization."""
        org_id = self.get_organization(request)
        try:
            column = Column.objects.get(id=pk, organization_id=org_id)
        except Column.DoesNotExist:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Cannot find column in org=%s with pk=%s' % (org_id, pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # only user-defined (extra_data) columns may be removed
        if not column.is_extra_data:
            return JsonResponse(
                {
                    'success': False,
                    'message': 'Only extra_data columns can be deleted'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Delete key from jsonb data: collect the states that actually carry
        # this key, for the relevant table
        if column.table_name == 'PropertyState':
            states = PropertyState.objects.filter(
                organization_id=org_id,
                data_state=DATA_STATE_MATCHING,
                extra_data__has_key=column.column_name)
            state_count = states.count()
            # far faster than iterating states, popping the key, and saving, but doesn't update the hash
            # with connection.cursor() as cursor:
            #     cursor.execute("UPDATE seed_propertystate "
            #                    "SET extra_data = extra_data - %s "
            #                    "WHERE organization_id = %s AND data_state = %s",
            #                    [column.column_name, org_id, DATA_STATE_MATCHING])

        elif column.table_name == 'TaxLotState':
            states = TaxLotState.objects.filter(
                organization_id=org_id,
                data_state=DATA_STATE_MATCHING,
                extra_data__has_key=column.column_name)
            state_count = states.count()
            # far faster than iterating states, popping the key, and saving, but doesn't update the hash
            # with connection.cursor() as cursor:
            #     cursor.execute("UPDATE seed_taxlotstate "
            #                    "SET extra_data = extra_data - %s "
            #                    "WHERE organization_id = %s AND data_state = %s",
            #                    [column.column_name, org_id, DATA_STATE_MATCHING])

        else:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Unexpected table_name \'%s\' for column with pk=%s' %
                    (column.table_name, pk)
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Pop the key and update the hash
        for state in states:
            state.extra_data.pop(column.column_name)
            state.save()

        # Delete all mappings from raw column names to the mapped column, then delete the mapped column
        ColumnMapping.objects.filter(column_mapped=column).delete()
        column.delete()

        # the in-memory `column` object remains readable after delete()
        table_display_name = column.table_name if state_count == 1 else column.table_name + 's'
        return JsonResponse(
            {
                'success':
                True,
                'message':
                'Removed \'%s\' from %s %s' %
                (column.column_name, state_count, table_display_name)
            },
            status=status.HTTP_200_OK)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory({
        'new_column_name':
        'string',
        'overwrite':
        'boolean'
    }))
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def rename(self, request, pk=None):
        """
        This API endpoint renames a Column
        """
        org_id = self.get_organization(request)
        try:
            column = Column.objects.get(id=pk, organization_id=org_id)
        except Column.DoesNotExist:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Cannot find column in org=%s with pk=%s' % (org_id, pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        new_column_name = request.data.get('new_column_name', None)
        overwrite = request.data.get('overwrite', False)
        if not new_column_name:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'You must specify the name of the new column as "new_column_name"'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # rename_column returns a (success, message) pair
        result = column.rename_column(new_column_name, overwrite)
        if not result[0]:
            return JsonResponse(
                {
                    'success':
                    False,
                    'message':
                    'Unable to rename column with message: "%s"' % result[1]
                },
                status=status.HTTP_400_BAD_REQUEST)
        else:
            return JsonResponse({'success': True, 'message': result[1]})

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_string_field(
            'inventory_type',
            required=True,
            description='Inventory Type, either "property" or "taxlot"')
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @action(detail=False, methods=['GET'])
    def mappable(self, request):
        """
        List only inventory columns that are mappable
        """
        organization_id = int(request.query_params.get('organization_id'))
        inventory_type = request.query_params.get('inventory_type')
        if inventory_type not in ['property', 'taxlot']:
            return JsonResponse({
                'status':
                'error',
                'message':
                'Query param `inventory_type` must be "property" or "taxlot"'
            })
        columns = Column.retrieve_mapping_columns(organization_id,
                                                  inventory_type)

        return JsonResponse({'status': 'success', 'columns': columns})
# --- Example 8 ---
class PortfolioManagerViewSet(GenericViewSet):
    """
    This viewset contains two API views: /template_list/ and /report/ that are used to interface SEED with ESPM
    """
    serializer_class = PortfolioManagerSerializer

    @staticmethod
    def _missing_field_response(field):
        """Build the 400 JsonResponse returned when a required request field is absent.

        :param field: name of the missing field ('username', 'password', or 'template')
        :returns: JsonResponse with status 'error' and HTTP 400
        """
        return JsonResponse(
            {
                'status': 'error',
                'message': 'Invalid call to PM worker: missing %s for PM account' % field
            },
            status=status.HTTP_400_BAD_REQUEST)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'username': '******',
            'password': '******'
        },
        description='ESPM account credentials.',
        required=['username', 'password']),
                         responses={
                             200:
                             AutoSchemaHelper.schema_factory({
                                 'status':
                                 'string',
                                 'templates': [{
                                     'template_information_1': 'string',
                                     'template_information_2': 'integer',
                                     '[other keys...]': 'string'
                                 }],
                                 'message':
                                 'string'
                             }),
                         })
    @action(detail=False, methods=['POST'])
    def template_list(self, request):
        """
        This API view makes a request to ESPM for the list of available report templates, including root templates and
        child data requests.
        ---
        This API responds with a JSON object with two keys: status, which will be a string -
        either error or success.  If successful, a second key, templates, will hold the list of templates from ESPM. If
        not successful, a second key, message, will include an error description that can be presented on the UI.
        """
        # Both credentials are required before we contact Portfolio Manager.
        for field in ('username', 'password'):
            if field not in request.data:
                return self._missing_field_response(field)
        username = request.data['username']
        password = request.data['password']
        pm = PortfolioManagerImport(username, password)
        try:
            possible_templates = pm.get_list_of_report_templates()
        except PMExcept as pme:
            # Known ESPM-interface failures carry a user-presentable message.
            return JsonResponse({
                'status': 'error',
                'message': str(pme)
            },
                                status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # BUG FIX: serialize the exception as a string — putting the raw
            # exception object in the payload makes JsonResponse raise a
            # TypeError instead of returning the intended 400.
            return JsonResponse({
                'status': 'error',
                'message': str(e)
            },
                                status=status.HTTP_400_BAD_REQUEST)
        return JsonResponse({
            'status': 'success',
            'templates': possible_templates
        })

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'username': '******',
            'password': '******',
            'template': {
                '[copy information from template_list]': 'string'
            }
        },
        description='ESPM account credentials.',
        required=['username', 'password']),
                         responses={
                             200:
                             AutoSchemaHelper.schema_factory({
                                 'status':
                                 'string',
                                 'properties': [{
                                     'properties_information_1': 'string',
                                     'properties_information_2': 'integer',
                                     '[other keys...]': 'string'
                                 }],
                                 'message':
                                 'string'
                             }),
                         })
    @action(detail=False, methods=['POST'])
    def report(self, request):
        """
        This API view makes a request to ESPM to generate and download a report based on a specific template.
        ---
        This API responds with a JSON object with two keys: status, which will be a string -
        either error or success.  If successful, a second key, properties, will hold the list of properties found in
        this generated report.  If not successful, a second key, message, will include an error description that can be
        presented on the UI.
        """
        # All three fields are required before we contact Portfolio Manager.
        for field in ('username', 'password', 'template'):
            if field not in request.data:
                return self._missing_field_response(field)
        username = request.data['username']
        password = request.data['password']
        template = request.data['template']
        pm = PortfolioManagerImport(username, password)
        try:
            try:
                if 'z_seed_child_row' not in template:
                    return JsonResponse(
                        {
                            'status':
                            'error',
                            'message':
                            'Invalid template formulation during portfolio manager data import'
                        },
                        status=status.HTTP_400_BAD_REQUEST)
                # Child data requests and root templates are downloaded through
                # different ESPM code paths.
                if template['z_seed_child_row']:
                    content = pm.generate_and_download_child_data_request_report(
                        template)
                else:
                    content = pm.generate_and_download_template_report(
                        template)
            except PMExcept as pme:
                return JsonResponse({
                    'status': 'error',
                    'message': str(pme)
                },
                                    status=status.HTTP_400_BAD_REQUEST)
            try:
                content_object = xmltodict.parse(content,
                                                 dict_constructor=dict)
            except Exception:  # catch all because xmltodict doesn't specify a class of Exceptions
                return JsonResponse(
                    {
                        'status': 'error',
                        'message': 'Malformed XML from template download'
                    },
                    status=status.HTTP_400_BAD_REQUEST)
            try:
                # A report with multiple rows parses to a list; a single row
                # parses to a dict and is normalized into a one-element list.
                possible_properties = content_object['report'][
                    'informationAndMetrics']['row']
                if isinstance(possible_properties, list):
                    properties = possible_properties
                elif isinstance(possible_properties, dict):
                    properties = [possible_properties]
                else:  # OrderedDict hints that a 'preview' report was generated, anything else is an unhandled case
                    return JsonResponse(
                        {
                            'status':
                            'error',
                            'message':
                            'Property list was not a list...was a preview report template used on accident?'
                        },
                        status=status.HTTP_400_BAD_REQUEST)
            except KeyError:
                return JsonResponse(
                    {
                        'status':
                        'error',
                        'message':
                        'Processed template successfully, but missing keys -- is the report empty on Portfolio Manager?'
                    },
                    status=status.HTTP_400_BAD_REQUEST)

            return JsonResponse({
                'status': 'success',
                'properties': properties
            })
        except Exception as e:
            # BUG FIX: str(e) so the error payload is JSON-serializable (the
            # raw exception object would make JsonResponse itself fail).
            return JsonResponse({
                'status': 'error',
                'message': str(e)
            },
                                status=status.HTTP_400_BAD_REQUEST)
# Example #9 — file: users.py, project: riahtu/seed
    return d.get(severity)


class EmailAndIDSerializer(serializers.Serializer):
    """Minimal user representation: email address plus primary key."""
    email = serializers.CharField(max_length=100)
    user_id = serializers.IntegerField()


class ListUsersResponseSerializer(serializers.Serializer):
    """Response shape for the user-list endpoint: a list of email/ID pairs."""
    users = EmailAndIDSerializer(many=True)


# this is used for swagger docs for some views below
# (the four user fields returned by the retrieve/update endpoints)
user_response_schema = AutoSchemaHelper.schema_factory({
    'first_name': 'string',
    'last_name': 'string',
    'email': 'string',
    'api_key': 'string',
})


class UserViewSet(viewsets.ViewSet):
    raise_exception = True

    def validate_request_user(self, pk, request):
        try:
            user = User.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return False, JsonResponse(
                {'status': 'error', 'message': "Could not find user with pk = " + str(pk)},
                status=status.HTTP_404_NOT_FOUND)
        if not user == request.user:
# Example #10 — file: users.py, project: riahtu/seed
class UserViewSet(viewsets.ViewSet):
    """Endpoints for creating, inspecting, and managing SEED user accounts."""
    # NOTE(review): presumably makes permission decorators raise instead of
    # redirecting to a login page — confirm against the decorator implementations.
    raise_exception = True

    def validate_request_user(self, pk, request):
        """Confirm the user with ``pk`` exists and is the requesting user.

        :param pk: primary key of the user being accessed
        :param request: the incoming request
        :returns: ``(True, user)`` on success, or ``(False, JsonResponse)``
            where the response is a ready-to-return 404/403 error
        """
        try:
            user = User.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return False, JsonResponse(
                {'status': 'error', 'message': "Could not find user with pk = " + str(pk)},
                status=status.HTTP_404_NOT_FOUND)
        # Users may only operate on their own account through these endpoints.
        if not user == request.user:
            return False, JsonResponse(
                {'status': 'error', 'message': "Cannot access user with pk = " + str(pk)},
                status=status.HTTP_403_FORBIDDEN)
        return True, user

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'org_name': 'string',
                'first_name': 'string',
                'last_name': 'string',
                'role': 'string',
                'email': 'string',
            },
            required=['first_name', 'last_name', 'role', 'email'],
            description='An object containing meta data for a new user:\n'
                        '-org_name: New organization name if creating a new organization for this user\n'
                        '-first_name: First name of new user\n'
                        '-last_name: Last name of new user\n'
                        '-role: one of owner, member, or viewer\n'
                        '-email: Email address of the new user'
        ),
        responses={
            200: AutoSchemaHelper.schema_factory(
                {
                    'status': 'string',
                    'message': 'string',
                    'org': 'string',
                    'org_created': 'boolean',
                    'username': '******',
                    'user_id': 'string',
                },
                description='Properties:\n'
                            '-org: name of new org (or existing org)\n'
                            '-org_created: true if new org created\n'
                            '-username: username of new user\n'
                            '-user_id: user id (pk) of new user'
            )
        }
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_owner')
    def create(self, request):
        """
        Creates a new SEED user.  One of 'organization_id' or 'org_name' is needed.
        Sends invitation email to the new user.
        """
        body = request.data
        org_name = body.get('org_name')
        org_id = request.query_params.get('organization_id', None)
        # Exactly one of org_name / organization_id must be supplied.
        if (org_name and org_id) or (not org_name and not org_id):
            return JsonResponse({
                'status': 'error',
                'message': 'Choose either an existing org or provide a new one'
            }, status=status.HTTP_409_CONFLICT)

        first_name = body['first_name']
        last_name = body['last_name']
        email = body['email']
        username = body['email']
        # Usernames are the lowercased email; get_or_create makes this
        # idempotent when the account already exists.
        user, created = User.objects.get_or_create(username=username.lower())

        if org_id:
            org = Organization.objects.get(pk=org_id)
            org_created = False
        else:
            org, _, _ = create_organization(user, org_name)
            org_created = True

        # Add the user to the org.  If this is the org's first user,
        # the user becomes the owner/admin automatically.
        # see Organization.add_member()
        if not org.is_member(user):
            org.add_member(user)

        if body.get('role'):
            # check if this is a dict, if so, grab the value out of 'value'
            role = body['role']
            try:
                _get_role_from_js(role)
            except Exception:
                return JsonResponse({'status': 'error', 'message': 'valid arguments for role are [viewer, member, '
                                                                   'owner]'},
                                    status=status.HTTP_400_BAD_REQUEST)

            OrganizationUser.objects.filter(
                organization_id=org.pk,
                user_id=user.pk
            ).update(role_level=_get_role_from_js(role))

        if created:
            # New accounts stay password-less until the invitation email below
            # is accepted.
            user.set_unusable_password()
            user.email = email
            user.first_name = first_name
            user.last_name = last_name
        user.save()

        try:
            domain = request.get_host()
        except Exception:
            domain = 'seed-platform.org'
        invite_to_seed(
            domain, user.email, default_token_generator.make_token(user), user.pk, first_name
        )

        return JsonResponse({
            'status': 'success',
            'message': user.email,
            'org': org.name,
            'org_created': org_created,
            'username': user.username,
            'user_id': user.id
        })

    @swagger_auto_schema(
        responses={
            200: ListUsersResponseSerializer,
        }
    )
    @ajax_request_class
    @has_perm_class('requires_superuser')
    def list(self, request):
        """
        Retrieves all users' email addresses and IDs.
        Only usable by superusers.
        """
        users = []
        for user in User.objects.only('id', 'email'):
            users.append({'email': user.email, 'user_id': user.id})
        return JsonResponse({'users': users})

    @ajax_request_class
    @api_endpoint_class
    @action(detail=False, methods=['GET'])
    def current(self, request):
        """
        Returns the id (primary key) for the current user to allow it
        to be passed to other user related endpoints
        ---
        type:
            pk:
                description: Primary key for the current user
                required: true
                type: string
        """
        return JsonResponse({'pk': request.user.id})

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'role': 'string',
            },
            description='new role for user',
        ),
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_owner')
    @action(detail=True, methods=['PUT'])
    def role(self, request, pk=None):
        """
        Updates a user's role within an organization.
        """
        body = request.data
        role = _get_role_from_js(body['role'])

        user_id = pk
        organization_id = request.query_params.get('organization_id', None)

        # Refuse if this user is the organization's only member.
        is_last_member = not OrganizationUser.objects.filter(
            organization_id=organization_id,
        ).exclude(user_id=user_id).exists()

        if is_last_member:
            return JsonResponse({
                'status': 'error',
                'message': 'an organization must have at least one member'
            }, status=status.HTTP_409_CONFLICT)

        # Refuse if this user is the organization's only owner-level member.
        is_last_owner = not OrganizationUser.objects.filter(
            organization_id=organization_id,
            role_level=ROLE_OWNER,
        ).exclude(user_id=user_id).exists()

        if is_last_owner:
            return JsonResponse({
                'status': 'error',
                'message': 'an organization must have at least one owner level member'
            }, status=status.HTTP_409_CONFLICT)

        OrganizationUser.objects.filter(
            user_id=user_id,
            organization_id=organization_id
        ).update(role_level=role)

        return JsonResponse({'status': 'success'})

    @swagger_auto_schema(
        responses={
            200: user_response_schema,
        }
    )
    @api_endpoint_class
    @ajax_request_class
    def retrieve(self, request, pk=None):
        """
        Retrieves the a user's first_name, last_name, email
        and api key if exists by user ID (pk).
        """

        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content
        return JsonResponse({
            'status': 'success',
            'first_name': user.first_name,
            'last_name': user.last_name,
            'email': user.email,
            'api_key': user.api_key,
        })

    @ajax_request_class
    @action(detail=True, methods=['POST'])
    def generate_api_key(self, request, pk=None):
        """
        Generates a new API key
        ---
        parameter_strategy: replace
        parameters:
            - name: pk
              description: User ID / primary key
              type: integer
              required: true
              paramType: path
        type:
            status:
                description: success or error
                type: string
                required: true
            api_key:
                description: the new API key for this user
                type: string
                required: true
        """
        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content
        user.generate_key()
        # NOTE(review): returns a plain dict rather than a JsonResponse —
        # presumably @ajax_request_class serializes it; confirm.
        return {
            'status': 'success',
            'api_key': User.objects.get(pk=pk).api_key
        }

    @swagger_auto_schema(
        request_body=AutoSchemaHelper.schema_factory({
            'first_name': 'string',
            'last_name': 'string',
            'email': 'string'
        }),
        description='An object containing meta data for a updated user: \n'
                    '- Required - first_name, last_name, email',
        responses={
            200: user_response_schema,
        }
    )
    @api_endpoint_class
    @ajax_request_class
    def update(self, request, pk=None):
        """
        Updates the user's first name, last name, and email
        """
        body = request.data
        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content
        json_user = body
        user.first_name = json_user.get('first_name')
        user.last_name = json_user.get('last_name')
        # The username is kept in sync with the email address.
        user.email = json_user.get('email')
        user.username = json_user.get('email')
        user.save()
        return JsonResponse({
            'status': 'success',
            'first_name': user.first_name,
            'last_name': user.last_name,
            'email': user.email,
            'api_key': user.api_key,
        })

    @swagger_auto_schema(
        request_body=AutoSchemaHelper.schema_factory(
            {
                'current_password': '******',
                'password_1': 'string',
                'password_2': 'string',
            },
            description='Fill in the current and new matching passwords'),
    )
    @ajax_request_class
    @action(detail=True, methods=['PUT'])
    def set_password(self, request, pk=None):
        """
        sets/updates a user's password, follows the min requirement of
        django password validation settings in config/settings/common.py
        """
        body = request.data
        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content
        current_password = body.get('current_password')
        p1 = body.get('password_1')
        p2 = body.get('password_2')
        # The current password must be correct and both new entries must match.
        if not user.check_password(current_password):
            return JsonResponse({'status': 'error', 'message': 'current password is not valid'},
                                status=status.HTTP_400_BAD_REQUEST)
        if p1 is None or p1 != p2:
            return JsonResponse({'status': 'error', 'message': 'entered password do not match'},
                                status=status.HTTP_400_BAD_REQUEST)
        try:
            validate_password(p2)
        except ValidationError as e:
            # Surface only the first validator failure message.
            return JsonResponse({'status': 'error', 'message': e.messages[0]},
                                status=status.HTTP_400_BAD_REQUEST)
        user.set_password(p1)
        user.save()
        return JsonResponse({'status': 'success'})

    @ajax_request_class
    def get_actions(self, request):
        """returns all actions"""
        return {
            'status': 'success',
            'actions': list(PERMS.keys()),
        }

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'actions': ['string'],
            },
            description='A list of actions to check: examples include (requires_parent_org_owner, '
                        'requires_owner, requires_member, requires_viewer, '
                        'requires_superuser, can_create_sub_org, can_remove_org)'
        ),
        responses={
            200: AutoSchemaHelper.schema_factory({
                'auth': {
                    'action_name': 'boolean'
                }
            })
        }
    )
    @ajax_request_class
    @action(detail=True, methods=['POST'])
    def is_authorized(self, request, pk=None):
        """
        Checks the auth for a given action, if user is the owner of the parent
        org then True is returned for each action
        """
        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content

        actions, org, error, message = self._parse_is_authenticated_params(request)
        if error:
            return JsonResponse({
                'status': 'error',
                'message': message
            }, status=status.HTTP_400_BAD_REQUEST)

        # If the only action requested is 'requires_superuser' no need to check an org affiliation
        if len(actions) == 1 and actions[0] == 'requires_superuser':
            return JsonResponse(
                {'status': 'success', 'auth': {'requires_superuser': user.is_superuser}})

        # Parent-org owners are granted every requested action.
        auth = self._try_parent_org_auth(user, org, actions)
        if auth:
            return JsonResponse({'status': 'success', 'auth': auth})

        try:
            ou = OrganizationUser.objects.get(user=user, organization=org)
        except OrganizationUser.DoesNotExist:
            return JsonResponse({'status': 'error', 'message': 'user does not exist'})

        auth = {action: PERMS[action](ou) for action in actions}
        return JsonResponse({'status': 'success', 'auth': auth})

    def _parse_is_authenticated_params(self, request):
        """checks if the org exists and if the actions are present

        :param request: the request
        :returns: tuple (actions, org, error, message)
        """
        error = False
        message = ""
        body = request.data
        if not body.get('actions'):
            message = 'no actions to check'
            error = True

        org_id = request.query_params.get('organization_id', None)
        # NOTE(review): a missing param yields org_id None, which falls into the
        # DoesNotExist branch below rather than this empty-string check — confirm
        # the 'organization does not exist' message is intended for that case.
        if org_id == '':
            message = 'organization id is undefined'
            error = True
            org = None
        else:
            try:
                org = Organization.objects.get(pk=org_id)
            except Organization.DoesNotExist:
                message = 'organization does not exist'
                error = True
                org = None

        return body.get('actions'), org, error, message

    def _try_parent_org_auth(self, user, organization, actions):
        """checks the parent org for permissions, if the user is not the owner of
        the parent org, then None is returned.

        :param user: the request user
        :param organization: org to check its parent
        :param actions: list of str actions to check
        :returns: a dict of action permission resolutions or None
        """
        try:
            ou = OrganizationUser.objects.get(
                user=user,
                organization=organization.parent_org,
                role_level=ROLE_OWNER
            )
        except OrganizationUser.DoesNotExist:
            return None

        # Owner of the parent org: every action resolves via 'requires_owner'.
        return {
            action: PERMS['requires_owner'](ou) for action in actions
        }

    @swagger_auto_schema(
        responses={
            200: AutoSchemaHelper.schema_factory({
                'show_shared_buildings': 'boolean'
            })
        }
    )
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def shared_buildings(self, request, pk=None):
        """
        Get the request user's ``show_shared_buildings`` attr
        """
        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content

        return JsonResponse({
            'status': 'success',
            'show_shared_buildings': user.show_shared_buildings,
        })

    @swagger_auto_schema_org_query_param
    @ajax_request_class
    @action(detail=True, methods=['PUT'])
    def default_organization(self, request, pk=None):
        """
        Sets the user's default organization
        """
        ok, content = self.validate_request_user(pk, request)
        if ok:
            user = content
        else:
            return content
        user.default_organization_id = request.query_params.get('organization_id', None)
        user.save()
        # NOTE(review): plain dict return — presumably serialized by
        # @ajax_request_class; confirm.
        return {'status': 'success'}

    @has_perm_class('requires_superuser')
    @ajax_request_class
    @action(detail=True, methods=['PUT'])
    def deactivate(self, request, pk=None):
        """
        Deactivates a user
        """
        try:
            user_id = pk
            user = SEEDUser.objects.get(
                id=user_id
            )
            user.deactivate_user()
            return JsonResponse({
                'status': 'successfully deactivated',
                'data': user.email
            })
        except Exception as e:
            # NOTE(review): bare HTTP_400_BAD_REQUEST (no `status.` prefix),
            # unlike the rest of this class — assumes it is imported at module
            # top; confirm.
            return JsonResponse({
                'status': 'error',
                'data': str(e),
            }, status=HTTP_400_BAD_REQUEST)
# Example #11
    CreateModelMixin,
)

from seed.decorators import ajax_request_class
from seed.lib.superperms.orgs.decorators import has_perm_class
from seed.lib.superperms.orgs.permissions import SEEDOrgPermissions
from seed.models.data_quality import DataQualityCheck, Rule
from seed.serializers.rules import RuleSerializer
from seed.utils.api import api_endpoint_class
from seed.utils.api_schema import AutoSchemaHelper
from seed.utils.viewsets import UpdateWithoutPatchModelMixin

_log = logging.getLogger(__name__)

# Swagger path parameter shared by the data-quality views decorated below.
nested_org_id_path_field = AutoSchemaHelper.base_field(
    "nested_organization_id", "IN_PATH",
    "Organization ID - identifier used to specify a DataQualityCheck and its Rules",
    True, "TYPE_INTEGER")


@method_decorator(
    name='list',
    decorator=swagger_auto_schema(manual_parameters=[nested_org_id_path_field])
)
@method_decorator(
    name='update',
    decorator=swagger_auto_schema(manual_parameters=[nested_org_id_path_field])
)
@method_decorator(
    name='destroy',
    decorator=swagger_auto_schema(manual_parameters=[nested_org_id_path_field])
)
# Example #12 — file: import_files.py, project: riahtu/seed
class ImportFileViewSet(viewsets.ViewSet):
    """Endpoints operating on ImportFile records: retrieval, first-row
    previews, column mapping, matching/geocoding, data quality checks,
    and deletion.
    """
    # NOTE(review): raise_exception is not a standard DRF ViewSet attribute —
    # presumably consumed by the permission decorators used below; confirm.
    raise_exception = True
    queryset = ImportFile.objects.all()

    @api_endpoint_class
    @ajax_request_class
    def retrieve(self, request, pk=None):
        """
        Retrieves details about an ImportFile.

        The file must belong to an ImportRecord in one of the requesting
        user's organizations; otherwise an error response is returned.
        """
        member_orgs = request.user.orgs.all()
        try:
            import_file = ImportFile.objects.get(pk=pk)
            record_match = ImportRecord.objects.filter(
                super_organization__in=member_orgs,
                pk=import_file.import_record_id)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not access an import file with this ID'
                },
                status=status.HTTP_403_FORBIDDEN)

        # ownership check: the record must live in one of the user's orgs
        if not record_match.exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not locate import file with this ID',
                    'import_file': {},
                },
                status=status.HTTP_400_BAD_REQUEST)

        payload = obj_to_dict(import_file)
        payload['name'] = import_file.filename_only
        if not import_file.uploaded_filename:
            payload['uploaded_filename'] = import_file.filename
        payload['dataset'] = obj_to_dict(import_file.import_record)

        return JsonResponse({'status': 'success', 'import_file': payload})

    @swagger_auto_schema_org_query_param
    @has_perm_class('requires_viewer')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def first_five_rows(self, request, pk=None):
        """
        Retrieves the first five rows of an ImportFile.
        """
        org_id = request.query_params.get('organization_id', None)
        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        if import_file.cached_second_to_fifth_row is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Internal problem occurred, import file first five rows not cached'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # cached_second_to_fifth_row holds the first five data lines, split on
        # newlines and delimited by ROW_DELIMITER.  Fields may themselves
        # contain newlines, and a file with a single data column contains no
        # ROW_DELIMITER at all, so the conversion helper handles re-assembly.
        header = import_file.cached_first_row.split(ROW_DELIMITER)
        data = import_file.cached_second_to_fifth_row
        return JsonResponse({
            'status': 'success',
            'first_five_rows': convert_first_five_rows_to_list(header, data),
        })

    @swagger_auto_schema_org_query_param
    @has_perm_class('requires_viewer')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def raw_column_names(self, request, pk=None):
        """
        Retrieves a list of all column names from an ImportFile.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            error_payload = {
                'status': 'error',
                'message': 'Could not find import file with pk=' + str(pk),
            }
            return JsonResponse(error_payload,
                                status=status.HTTP_400_BAD_REQUEST)

        return JsonResponse({
            'status': 'success',
            'raw_columns': import_file.first_row_columns,
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
    ],
                         responses={200: MappingResultsResponseSerializer})
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'], url_path='mapping_results')
    def mapping_results(self, request, pk=None):
        """
        Retrieves a paginated list of Properties and Tax Lots for an import file after mapping.

        NOTE(review): no pagination is applied in this body despite the summary
        above; all mapped states for the file are returned — confirm whether
        pagination happens elsewhere or the docstring is stale.

        The request body may include ``inventory_type`` ('properties',
        'taxlots', or 'all'; default 'all') to restrict which inventories are
        serialized. Returns a 400 JsonResponse when the import file cannot be
        found for the requested organization.
        """
        import_file_id = pk
        org_id = request.query_params.get('organization_id', None)
        org = Organization.objects.get(pk=org_id)

        try:
            # get the field names that were in the mapping
            import_file = ImportFile.objects.get(
                pk=import_file_id, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # List of the only fields to show
        field_names = import_file.get_cached_mapped_columns

        # set of fields
        fields = {
            'PropertyState': ['id', 'extra_data', 'lot_number'],
            'TaxLotState': ['id', 'extra_data']
        }
        columns_from_db = Column.retrieve_all(org_id)
        property_column_name_mapping = {}
        taxlot_column_name_mapping = {}
        # Build column_name -> display-name maps for each table and extend the
        # raw db field lists with any mapped non-extra-data columns.
        for field_name in field_names:
            # find the field from the columns in the database
            for column in columns_from_db:
                if column['table_name'] == 'PropertyState' and \
                        field_name[0] == 'PropertyState' and \
                        field_name[1] == column['column_name']:
                    property_column_name_mapping[
                        column['column_name']] = column['name']
                    if not column['is_extra_data']:
                        fields['PropertyState'].append(
                            field_name[1])  # save to the raw db fields
                    # NOTE(review): 'continue' keeps scanning the remaining
                    # columns for the same field after a match; confirm
                    # 'break' was not intended here (and below).
                    continue
                elif column['table_name'] == 'TaxLotState' and \
                        field_name[0] == 'TaxLotState' and \
                        field_name[1] == column['column_name']:
                    taxlot_column_name_mapping[
                        column['column_name']] = column['name']
                    if not column['is_extra_data']:
                        fields['TaxLotState'].append(
                            field_name[1])  # save to the raw db fields
                    continue

        inventory_type = request.data.get('inventory_type', 'all')

        result = {'status': 'success'}

        if inventory_type == 'properties' or inventory_type == 'all':
            # states still in the mapping/matching pipeline, excluding merges
            properties = PropertyState.objects.filter(
                import_file_id=import_file_id,
                data_state__in=[DATA_STATE_MAPPING, DATA_STATE_MATCHING],
                merge_state__in=[
                    MERGE_STATE_UNKNOWN, MERGE_STATE_NEW
                ]).only(*fields['PropertyState']).order_by('id')

            property_results = []
            for prop in properties:
                prop_dict = TaxLotProperty.model_to_dict_with_mapping(
                    prop,
                    property_column_name_mapping,
                    fields=fields['PropertyState'],
                    exclude=['extra_data'])

                prop_dict.update(
                    TaxLotProperty.extra_data_to_dict_with_mapping(
                        prop.extra_data,
                        property_column_name_mapping,
                        fields=prop.extra_data.keys(),
                    ).items())

                # convert stored values to the organization's display units
                prop_dict = apply_display_unit_preferences(org, prop_dict)
                property_results.append(prop_dict)

            result['properties'] = property_results

        if inventory_type == 'taxlots' or inventory_type == 'all':
            tax_lots = TaxLotState.objects.filter(
                import_file_id=import_file_id,
                data_state__in=[DATA_STATE_MAPPING, DATA_STATE_MATCHING],
                merge_state__in=[MERGE_STATE_UNKNOWN, MERGE_STATE_NEW
                                 ]).only(*fields['TaxLotState']).order_by('id')

            tax_lot_results = []
            for tax_lot in tax_lots:
                tax_lot_dict = TaxLotProperty.model_to_dict_with_mapping(
                    tax_lot,
                    taxlot_column_name_mapping,
                    fields=fields['TaxLotState'],
                    exclude=['extra_data'])
                tax_lot_dict.update(
                    TaxLotProperty.extra_data_to_dict_with_mapping(
                        tax_lot.extra_data,
                        taxlot_column_name_mapping,
                        fields=tax_lot.extra_data.keys(),
                    ).items())

                tax_lot_dict = apply_display_unit_preferences(
                    org, tax_lot_dict)
                tax_lot_results.append(tax_lot_dict)

            result['tax_lots'] = tax_lot_results

        # plain dict return — presumably serialized by @ajax_request_class;
        # other methods in this viewset return JsonResponse directly. Confirm.
        return result

    @staticmethod
    def has_coparent(state_id, inventory_type, fields=None):
        """
        Return the coparent of the current state id based on the inventory type. If fields
        are given (as a list), then it will only return the fields specified of the state model
        object as a dictionary.

        :param state_id: int, ID of PropertyState or TaxLotState
        :param inventory_type: string, either properties | taxlots
        :param fields: list, either None or list of fields to return
        :return: False when no coparent exists, a 500 JsonResponse when more
            than one merge record is found, otherwise the coparent entry
        """
        if inventory_type == 'properties':
            state_model = PropertyState
        else:
            state_model = TaxLotState

        # TODO: convert coparent to instance method, not class method
        audit_entry, audit_count = state_model.coparent(state_id)

        if not audit_count:
            return False

        if audit_count > 1:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Internal problem occurred, more than one merge record found'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        return audit_entry[0]

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
    ],
                         request_body=AutoSchemaHelper.schema_factory({
                             'remap': 'boolean',
                             'mark_as_done': 'boolean',
                         }))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def map(self, request, pk=None):
        """
        Starts a background task to convert imported raw data into
        PropertyState and TaxLotState, using user's column mappings.
        """
        remap = request.data.get('remap', False)
        mark_as_done = request.data.get('mark_as_done', True)
        org_id = request.query_params.get('organization_id', None)

        matching_files = ImportFile.objects.filter(
            pk=pk, import_record__super_organization_id=org_id)
        if not matching_files.exists():
            # NOTE: plain dict error return — presumably handled by
            # @ajax_request_class, matching the original behavior
            return {
                'status': 'error',
                'message': 'ImportFile {} does not exist'.format(pk)
            }

        return JsonResponse(map_data(pk, remap, mark_as_done))

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def start_system_matching_and_geocoding(self, request, pk=None):
        """
        Starts a background task to attempt automatic matching between buildings
        in an ImportFile with other existing buildings within the same org.
        """
        org_id = request.query_params.get('organization_id', None)

        # existence/ownership check only; the task itself only needs the pk
        file_exists = ImportFile.objects.filter(
            pk=pk, import_record__super_organization_id=org_id).exists()
        if not file_exists:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return geocode_and_match_buildings_task(pk)

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def start_data_quality_checks(self, request, pk=None):
        """
        Starts a background task to run data quality checks (do_checks) on the
        records imported from an ImportFile within the same org.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        check_result = do_checks(org_id, None, None, import_file.pk)
        # step 5: create a new model instance
        return JsonResponse({
            'progress_key': check_result['progress_key'],
            'progress': check_result,
        })

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def validate_use_cases(self, request, pk=None):
        """
        Starts a background task to call BuildingSync's use case validation
        tool.
        """
        org_id = request.query_params.get('organization_id', None)

        target_file = ImportFile.objects.filter(
            pk=pk, import_record__super_organization_id=org_id).first()
        if target_file is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return task_validate_use_cases(target_file.pk)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
    ],
                         request_body=AutoSchemaHelper.schema_factory(
                             {'cycle_id': 'string'}))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def start_save_data(self, request, pk=None):
        """
        Starts a background task to import raw data from an ImportFile
        into PropertyState objects as extra_data. If the cycle_id is set to
        year_ending then the cycle ID will be set to the year_ending column for each
        record in the uploaded file. Note that the year_ending flag is not yet enabled.

        Responds with a 400 error when the import file or cycle cannot be
        resolved; otherwise returns the background task's progress data.
        """
        body = request.data
        import_file_id = pk
        if not import_file_id:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'must pass file_id of file to save'
                },
                status=status.HTTP_400_BAD_REQUEST)

        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=import_file_id, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        cycle_id = body.get('cycle_id')
        if not cycle_id:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'must pass cycle_id of the cycle to save the data'
                },
                status=status.HTTP_400_BAD_REQUEST)
        elif cycle_id == 'year_ending':
            _log.error("NOT CONFIGURED FOR YEAR ENDING OPTION AT THE MOMENT")
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'SEED is unable to parse year_ending at the moment'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Look up the cycle. Previously an invalid cycle_id raised an uncaught
        # Cycle.DoesNotExist (HTTP 500), and the 'cycle_id was invalid' branch
        # was unreachable because .get() never returns None.
        try:
            cycle = Cycle.objects.get(id=cycle_id)
        except Cycle.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'cycle_id was invalid'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # assign the cycle to the import file before kicking off the save task
        import_file.cycle = cycle
        import_file.save()

        return JsonResponse(task_save_raw(import_file.id))

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def mapping_done(self, request, pk=None):
        """
        Tell the backend that the mapping is complete.
        """
        if not pk:
            return JsonResponse(
                {'status': 'error', 'message': 'must pass import_file_id'},
                status=status.HTTP_400_BAD_REQUEST)

        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {'status': 'error', 'message': 'no import file with given id'},
                status=status.HTTP_404_NOT_FOUND)

        # flag the file so downstream steps know mapping has finished
        import_file.mapping_done = True
        import_file.save()

        return JsonResponse({'status': 'success', 'message': ''})

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def matching_and_geocoding_results(self, request, pk=None):
        """
        Retrieves the number of matched and unmatched properties & tax lots for
        a given ImportFile record.  Specifically for new imports.

        Matching counts come from the file's ``matching_results_data`` JSON
        field; geocoding-confidence counts are computed here with database
        aggregation. Returns a 400 JsonResponse when the import file cannot be
        found for the requested organization.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # NOTE(review): this method previously walked the audit logs to build
        # properties_new/properties_matched and tax_lots_new/tax_lots_matched
        # lists, but none of those lists were referenced in the response below
        # (all matching counts come from matching_results_data), so that dead
        # per-state query loop has been removed.

        def _geocode_counts(state_model):
            # Count geocoding outcomes for this file's states of the given
            # model. .count() issues COUNT(*) in the database instead of
            # materializing every state object just to take len() of the list.
            base = state_model.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING)
            return {
                'high_confidence':
                base.filter(geocoding_confidence__startswith='High').count(),
                'low_confidence':
                base.filter(geocoding_confidence__startswith='Low').count(),
                'manual':
                base.filter(
                    geocoding_confidence='Manually geocoded (N/A)').count(),
                'missing_address_components':
                base.filter(
                    geocoding_confidence='Missing address components (N/A)'
                ).count(),
            }

        # Construct Geocode Results
        property_geocode_results = _geocode_counts(PropertyState)
        tax_lot_geocode_results = _geocode_counts(TaxLotState)

        # merge in any of the matching results from the JSON field; the plain
        # dict return is presumably serialized by @ajax_request_class (same
        # pattern as mapping_results above)
        matching_data = import_file.matching_results_data
        return {
            'status':
            'success',
            'import_file_records':
            matching_data.get('import_file_records', None),
            'properties': {
                'initial_incoming':
                matching_data.get('property_initial_incoming', None),
                'duplicates_against_existing':
                matching_data.get('property_duplicates_against_existing',
                                  None),
                'duplicates_within_file':
                matching_data.get('property_duplicates_within_file', None),
                'merges_against_existing':
                matching_data.get('property_merges_against_existing', None),
                'merges_between_existing':
                matching_data.get('property_merges_between_existing', None),
                'merges_within_file':
                matching_data.get('property_merges_within_file', None),
                'new':
                matching_data.get('property_new', None),
                'geocoded_high_confidence':
                property_geocode_results.get('high_confidence'),
                'geocoded_low_confidence':
                property_geocode_results.get('low_confidence'),
                'geocoded_manually':
                property_geocode_results.get('manual'),
                'geocode_not_possible':
                property_geocode_results.get('missing_address_components'),
            },
            'tax_lots': {
                'initial_incoming':
                matching_data.get('tax_lot_initial_incoming', None),
                'duplicates_against_existing':
                matching_data.get('tax_lot_duplicates_against_existing', None),
                'duplicates_within_file':
                matching_data.get('tax_lot_duplicates_within_file', None),
                'merges_against_existing':
                matching_data.get('tax_lot_merges_against_existing', None),
                'merges_between_existing':
                matching_data.get('tax_lot_merges_between_existing', None),
                'merges_within_file':
                matching_data.get('tax_lot_merges_within_file', None),
                'new':
                matching_data.get('tax_lot_new', None),
                'geocoded_high_confidence':
                tax_lot_geocode_results.get('high_confidence'),
                'geocoded_low_confidence':
                tax_lot_geocode_results.get('low_confidence'),
                'geocoded_manually':
                tax_lot_geocode_results.get('manual'),
                'geocode_not_possible':
                tax_lot_geocode_results.get('missing_address_components'),
            }
        }

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def mapping_suggestions(self, request, pk):
        """
        Returns suggested mappings from an uploaded file's headers to known
        data fields.

        The import file is resolved within the requesting user's organization;
        a 400 JsonResponse is returned when it cannot be found. Default
        mappings come from Portfolio Manager or BuildingSync when the file
        originated from those sources.
        """
        organization_id = request.query_params.get('organization_id', None)

        result = {'status': 'success'}

        # resolve the requesting user's membership to get the organization;
        # an unexpected org id raises OrganizationUser.DoesNotExist here
        membership = OrganizationUser.objects.select_related('organization') \
            .get(organization_id=organization_id, user=request.user)
        organization = membership.organization

        # For now, each organization holds their own mappings. This is non-ideal, but it is the
        # way it is for now. In order to move to parent_org holding, then we need to be able to
        # dynamically match columns based on the names and not the db id (or support many-to-many).
        # parent_org = organization.get_parent()

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=organization.pk)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Get a list of the database fields in a list, these are the db columns and the extra_data columns
        property_columns = Column.retrieve_mapping_columns(
            organization.pk, 'property')
        taxlot_columns = Column.retrieve_mapping_columns(
            organization.pk, 'taxlot')

        # If this is a portfolio manager file, then load in the PM mappings and if the column_mappings
        # are not in the original mappings, default to PM
        # NOTE(review): the PM/BuildingSync branches pass the raw query-param
        # organization_id to retrieve_all_by_tuple while the fallback branch
        # passes organization.pk — confirm both resolve identically.
        if import_file.from_portfolio_manager:
            pm_mappings = simple_mapper.get_pm_mapping(
                import_file.first_row_columns, resolve_duplicates=True)
            suggested_mappings = mapper.build_column_mapping(
                import_file.first_row_columns,
                Column.retrieve_all_by_tuple(organization_id),
                previous_mapping=get_column_mapping,
                map_args=[organization],
                default_mappings=pm_mappings,
                thresh=80)
        elif import_file.from_buildingsync:
            bsync_mappings = xml_mapper.build_column_mapping()
            suggested_mappings = mapper.build_column_mapping(
                import_file.first_row_columns,
                Column.retrieve_all_by_tuple(organization_id),
                previous_mapping=get_column_mapping,
                map_args=[organization],
                default_mappings=bsync_mappings,
                thresh=80)
        else:
            # All other input types
            suggested_mappings = mapper.build_column_mapping(
                import_file.first_row_columns,
                Column.retrieve_all_by_tuple(organization.pk),
                previous_mapping=get_column_mapping,
                map_args=[organization],
                thresh=
                80  # percentage match that we require. 80% is random value for now.
            )
            # replace None with empty string for column names and PropertyState for tables
            # TODO #239: Move this fix to build_column_mapping
            for m in suggested_mappings:
                table, destination_field, _confidence = suggested_mappings[m]
                if destination_field is None:
                    suggested_mappings[m][1] = ''

        # Fix the table name, eventually move this to the build_column_mapping
        for m in suggested_mappings:
            table, _destination_field, _confidence = suggested_mappings[m]
            # Do not return the campus, created, updated fields... that is force them to be in the property state
            if not table or table == 'Property':
                suggested_mappings[m][0] = 'PropertyState'
            elif table == 'TaxLot':
                suggested_mappings[m][0] = 'TaxLotState'

        result['suggested_column_mappings'] = suggested_mappings
        result['property_columns'] = property_columns
        result['taxlot_columns'] = taxlot_columns

        return JsonResponse(result)

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    def destroy(self, request, pk):
        """
        Deletes an import file.

        Requires an ``organization_id`` query parameter; the import file must
        belong to an import record of that organization.

        :param pk: ImportFile primary key (from the URL).
        :returns: JsonResponse - 'success', or an error payload (400/403).
        """
        org_id_param = request.query_params.get('organization_id')
        if org_id_param is None:
            # Previously int(None) raised a TypeError (HTTP 500) when the
            # query param was missing; reject it explicitly instead.
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Query param `organization_id` is required'
                },
                status=status.HTTP_400_BAD_REQUEST)
        organization_id = int(org_id_param)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=organization_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # check if the import record exists for the file and organization
        record_exists = ImportRecord.objects.filter(
            super_organization_id=organization_id,
            pk=import_file.import_record.pk).exists()
        if not record_exists:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'user does not have permission to delete file',
                },
                status=status.HTTP_403_FORBIDDEN)

        # This does not actually delete the object because it is a NonDeletableModel
        import_file.delete()
        return JsonResponse({'status': 'success'})

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_integer_field(
            'view_id', required=True, description='ID for property view')
    ])
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def greenbutton_meters_preview(self, request, pk):
        """
        Return validated type units and proposed imports for a GreenButton file.

        Reads ``organization_id`` and ``view_id`` from the query string, parses
        the uploaded GreenButton XML, and reports what a meter import would
        create. Also records the target property id on the import file for the
        subsequent import step.
        """
        organization_id = request.query_params.get('organization_id')
        property_view_id = request.query_params.get('view_id')

        # The import file must belong to the requesting organization.
        import_file = ImportFile.objects.filter(
            pk=pk,
            import_record__super_organization_id=organization_id).first()
        if import_file is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        raw_meter_data = list(
            reader.GreenButtonParser(import_file.local_file).data)

        try:
            property_view = PropertyView.objects.get(
                pk=property_view_id, cycle__organization_id=organization_id)
        except PropertyView.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not find property with pk=' + str(property_view_id)
                },
                status=status.HTTP_400_BAD_REQUEST)
        property_id = property_view.property_id

        meters_parser = MetersParser(organization_id,
                                     raw_meter_data,
                                     source_type=Meter.GREENBUTTON,
                                     property_id=property_id)

        result = {
            "validated_type_units": meters_parser.validated_type_units(),
            "proposed_imports": meters_parser.proposed_imports,
        }

        # Remember which property this file targets for the actual import.
        import_file.matching_results_data['property_id'] = property_id
        import_file.save()

        return result

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def pm_meters_preview(self, request, pk):
        """
        Return validated type units, proposed imports, and unlinkable PM ids
        for a Portfolio Manager meter spreadsheet.
        """
        organization_id = request.query_params.get('organization_id')

        import_file = ImportFile.objects.filter(
            pk=pk,
            import_record__super_organization_id=organization_id).first()
        if import_file is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Meter rows live on the 'Meter Entries' sheet of the PM workbook.
        sheet_parser = reader.MCMParser(import_file.local_file,
                                        sheet_name='Meter Entries')
        raw_meter_data = list(sheet_parser.data)

        meters_parser = MetersParser(organization_id, raw_meter_data)

        return {
            "validated_type_units": meters_parser.validated_type_units(),
            "proposed_imports": meters_parser.proposed_imports,
            "unlinkable_pm_ids": meters_parser.unlinkable_pm_ids,
        }
# --- Example #13 (scraper artifact separator) ---
    StatusLabel as Label, )
from seed.serializers.labels import (
    LabelSerializer, )
from seed.utils.api import drf_api_endpoint
from django.utils.decorators import method_decorator
from seed.utils.api_schema import AutoSchemaHelper
from seed.utils.labels import filter_labels_for_inv_type
from seed.utils.viewsets import SEEDOrgNoPatchOrOrgCreateModelViewSet


@method_decorator(
    name='retrieve',
    decorator=swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the users (default) current org id"
        )
    ]),
)
@method_decorator(
    name='list',
    decorator=swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the users (default) current org id"
        )
    ]),
)
@method_decorator(
# --- Example #14 (scraper artifact separator) ---
class DataQualityCheckViewSet(viewsets.ViewSet):
    """
    Handles Data Quality API operations within Inventory backend.
    (1) Post, wait, get…
    (2) Respond with what changed

    Results are cached in redis (not the database), keyed by the run id.
    """

    # Remove lookup_field once data_quality_check_id is used and "pk" can be used
    lookup_field = 'organization_id'

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.base_field(
                "organization_id", "IN_PATH",
                "Organization ID - identifier used to specify a DataQualityCheck",
                True, "TYPE_INTEGER")
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'property_view_ids': ['integer'],
                'taxlot_view_ids': ['integer'],
            },
            description='An object containing IDs of the records to perform'
            ' data quality checks on. Should contain two keys- '
            'property_view_ids and taxlot_view_ids, each of '
            'which is an array of appropriate IDs.',
        ),
        responses={
            200:
            AutoSchemaHelper.schema_factory({
                'num_properties': 'integer',
                'num_taxlots': 'integer',
                'progress_key': 'string',
                'progress': {},
            })
        })
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def start(self, request, organization_id):
        """
        This API endpoint will create a new data_quality check process in the background,
        on potentially a subset of properties/taxlots, and return back a query key
        """
        body = request.data
        property_view_ids = body['property_view_ids']
        taxlot_view_ids = body['taxlot_view_ids']

        # Checks run against states, so translate view ids to state ids first.
        property_state_ids = PropertyView.objects.filter(
            id__in=property_view_ids).values_list('state_id', flat=True)
        taxlot_state_ids = TaxLotView.objects.filter(
            id__in=taxlot_view_ids).values_list('state_id', flat=True)

        # For now, organization_id is the only key currently used to identify DataQualityChecks
        return_value = do_checks(organization_id, property_state_ids,
                                 taxlot_state_ids)

        return JsonResponse({
            'num_properties': len(property_state_ids),
            'num_taxlots': len(taxlot_state_ids),
            # TODO #239: Deprecate progress_key from here and just use the 'progess.progress_key'
            'progress_key': return_value['progress_key'],
            'progress': return_value,
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_integer_field("run_id", True,
                                             "Import file ID or cache key"),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['GET'])
    def results_csv(self, request):
        """
        Download a CSV of the results from a data quality run based on either the ID that was
        given during the creation of the data quality task or the ID of the
        import file which had it's records checked.
        Note that it is not related to objects in the database, since the results
        are stored in redis!
        """
        run_id = request.query_params.get('run_id')
        if run_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'must include Import file ID or cache key as run_id'
                },
                status=status.HTTP_400_BAD_REQUEST)

        data_quality_results = get_cache_raw(
            DataQualityCheck.cache_key(run_id))
        response = HttpResponse(content_type='text/csv')
        response[
            'Content-Disposition'] = 'attachment; filename="Data Quality Check Results.csv"'

        writer = csv.writer(response)
        if data_quality_results is None:
            # Still return a CSV so the download succeeds with an explanation.
            writer.writerow(['Error'])
            writer.writerow(['data quality results not found'])
            return response

        writer.writerow([
            'Table', 'Address Line 1', 'PM Property ID', 'Tax Lot ID',
            'Custom ID', 'Field', 'Applied Label', 'Condition',
            'Error Message', 'Severity'
        ])

        for row in data_quality_results:
            for result in row['data_quality_results']:
                writer.writerow([
                    # NOTE(review): every line of a row reuses the FIRST
                    # result's table_name; presumably all results of one row
                    # share a table — confirm, otherwise this should be
                    # result['table_name'].
                    row['data_quality_results'][0]['table_name'],
                    row['address_line_1'],
                    row.get('pm_property_id'),
                    row.get('jurisdiction_tax_lot_id'),
                    row['custom_id_1'],
                    result['formatted_field'],
                    result.get('label', None),
                    result['condition'],
                    # the detailed_message field can have units which has superscripts/subscripts, so unidecode it!
                    unidecode(result['detailed_message']),
                    result['severity']
                ])

        return response

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_integer_field("run_id", True,
                                             "Import file ID or cache key"),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['GET'])
    def results(self, request):
        """
        Return the results of a data quality run based on either the ID that was
        given during the creation of the data quality task or the ID of the
        import file which had it's records checked.
        Note that it is not related to objects in the database, since the results
        are stored in redis!
        """
        run_id = request.query_params.get('run_id')
        if run_id is None:
            # Previously an unchecked request.query_params['run_id'] raised a
            # KeyError (HTTP 500); validate the same way results_csv does.
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'must include Import file ID or cache key as run_id'
                },
                status=status.HTTP_400_BAD_REQUEST)

        data_quality_results = get_cache_raw(
            DataQualityCheck.cache_key(run_id))
        return JsonResponse({'data': data_quality_results})
# --- Example #15 (scraper artifact separator) ---
class OrganizationViewSet(viewsets.ViewSet):
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['DELETE'])
    def columns(self, request, pk=None):
        """
        Delete all columns for an organization. This method is typically not recommended if there
        are data in the inventory as it will invalidate all extra_data fields. This also removes
        all the column mappings that existed.

        ---
        parameters:
            - name: pk
              description: The organization_id
              required: true
              paramType: path
        type:
            status:
                description: success or error
                type: string
                required: true
            column_mappings_deleted_count:
                description: Number of column_mappings that were deleted
                type: integer
                required: true
            columns_deleted_count:
                description: Number of columns that were deleted
                type: integer
                required: true
        """
        # Keep the try block narrow: only the lookup can raise DoesNotExist,
        # so a failure in delete_all is not misreported as a missing org.
        try:
            org = Organization.objects.get(pk=pk)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    # fixed duplicated word in the message ("with with")
                    'message':
                    'organization with id {} does not exist'.format(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        c_count, cm_count = Column.delete_all(org)
        return JsonResponse({
            'status': 'success',
            'column_mappings_deleted_count': cm_count,
            'columns_deleted_count': c_count,
        })

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_integer_field('import_file_id',
                                                 required=True,
                                                 description='Import file id'),
            openapi.Parameter('id',
                              openapi.IN_PATH,
                              type=openapi.TYPE_INTEGER,
                              description='Organization id'),
        ],
        request_body=SaveColumnMappingsRequestPayloadSerializer,
        responses={200: 'success response'})
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def column_mappings(self, request, pk=None):
        """
        Saves the mappings between the raw headers of an ImportFile and the
        destination fields in the `to_table_name` model which should be either
        PropertyState or TaxLotState

        Valid source_type values are found in ``seed.models.SEED_DATA_SOURCES``
        """
        import_file_id = request.query_params.get('import_file_id')
        if import_file_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Query param `import_file_id` is required'
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Both records must exist before any mappings are written.
        if not ImportFile.objects.filter(pk=import_file_id).exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'No import file found'
                },
                status=status.HTTP_404_NOT_FOUND)
        try:
            organization = Organization.objects.get(pk=pk)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'No organization found'
                },
                status=status.HTTP_404_NOT_FOUND)

        saved = Column.create_mappings(
            request.data.get('mappings', []), organization, request.user,
            import_file_id)

        outcome = 'success' if saved else 'error'
        return JsonResponse({'status': outcome})

    def _start_whole_org_match_merge_link(self,
                                          org_id,
                                          state_class_name,
                                          proposed_columns=None):
        """
        Kick off a whole-org match/merge/link background task.

        :param org_id: organization id to run against.
        :param state_class_name: 'PropertyState' or 'TaxLotState'.
        :param proposed_columns: optional list of column names for a preview run.
        :returns: the ProgressData cache key for polling task progress.
        """
        # Fixed mutable-default-argument pitfall (was proposed_columns=[]).
        if proposed_columns is None:
            proposed_columns = []

        # Random identifier ties the result cache entry to the progress data.
        identifier = randint(100, 100000)
        result_key = _get_match_merge_link_key(identifier)
        set_cache_raw(result_key, {})

        progress_data = ProgressData(func_name='org_match_merge_link',
                                     unique_id=identifier)
        progress_data.delete()

        whole_org_match_merge_link.apply_async(
            args=(org_id, state_class_name, proposed_columns),
            link=cache_match_merge_link_result.s(identifier,
                                                 progress_data.key))

        return progress_data.key

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_boolean_field(
            'brief',
            required=False,
            description='If true, only return high-level organization details')
    ])
    @api_endpoint_class
    @ajax_request_class
    def list(self, request):
        """
        Retrieves all orgs the user has access to.

        With ``brief=true`` only high-level organization details are returned.
        Responds 401 when the user belongs to no organizations.
        """
        # if brief==true only return high-level organization details
        brief = json.loads(request.query_params.get('brief', 'false'))

        if brief:
            if request.user.is_superuser:
                qs = Organization.objects.only('id', 'name', 'parent_org_id')
            else:
                qs = request.user.orgs.only('id', 'name', 'parent_org_id')
            orgs = _dict_org_brief(request, qs)
        else:
            if request.user.is_superuser:
                qs = Organization.objects.all()
            else:
                qs = request.user.orgs.all()
            orgs = _dict_org(request, qs)

        # This identical error payload was previously duplicated in both the
        # brief and full branches.
        if len(orgs) == 0:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Your SEED account is not associated with any organizations. '
                    'Please contact a SEED administrator.'
                },
                status=status.HTTP_401_UNAUTHORIZED)

        return JsonResponse({'organizations': orgs})

    @method_decorator(permission_required('seed.can_access_admin'))
    @api_endpoint_class
    @ajax_request_class
    def destroy(self, request, pk=None):
        """
        Starts a background task to delete an organization and all related data.
        """
        # The task returns a JSON-serializable progress payload.
        payload = tasks.delete_organization(pk)
        return JsonResponse(payload)

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def retrieve(self, request, pk=None):
        """
        Retrieves a single organization by id.

        Responds 400 when no id is supplied, 404 when the organization does
        not exist, and 403 when the requesting user has no role in it.
        """
        if pk is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'no organization_id sent'
                },
                status=status.HTTP_400_BAD_REQUEST)

        try:
            org = Organization.objects.get(pk=pk)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization does not exist'
                },
                status=status.HTTP_404_NOT_FOUND)

        # Superusers bypass the membership check; everyone else must hold a
        # role (owner/member/viewer) in this organization.
        if not request.user.is_superuser:
            has_role = OrganizationUser.objects.filter(
                user=request.user,
                organization=org,
                role_level__in=[ROLE_OWNER, ROLE_MEMBER,
                                ROLE_VIEWER]).exists()
            if not has_role:
                # TODO: better permission and return 401 or 403
                return JsonResponse(
                    {
                        'status': 'error',
                        'message': 'user is not the owner of the org'
                    },
                    status=status.HTTP_403_FORBIDDEN)

        return JsonResponse({
            'status': 'success',
            'organization': _dict_org(request, [org])[0],
        })

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'organization_name': 'string',
            'user_id': 'integer',
        },
        required=['organization_name', 'user_id'],
        description='Properties:\n'
        '- organization_name: The new organization name\n'
        '- user_id: The user ID (primary key) to be used as the owner of the new organization'
    ))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_parent_org_owner')
    def create(self, request):
        """
        Creates a new organization.

        Expects ``organization_name`` and ``user_id`` in the request body; the
        referenced user becomes the owner of the new organization.
        """
        body = request.data
        org_name = body.get('organization_name')
        user_id = body.get('user_id')
        if org_name is None or user_id is None:
            # Previously missing keys raised a KeyError (HTTP 500).
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization_name and user_id are required'
                },
                status=status.HTTP_400_BAD_REQUEST)

        try:
            user = User.objects.get(pk=user_id)
        except User.DoesNotExist:
            # Previously an unknown user_id surfaced as an unhandled
            # DoesNotExist (HTTP 500).
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'user does not exist'
                },
                status=status.HTTP_400_BAD_REQUEST)

        if Organization.objects.filter(name=org_name).exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization name already exists'
                },
                status=status.HTTP_409_CONFLICT)

        org, _, _ = create_organization(user, org_name, org_name)
        return JsonResponse({
            'status': 'success',
            'message': 'organization created',
            'organization': _dict_org(request, [org])[0]
        })

    @api_endpoint_class
    @ajax_request_class
    @method_decorator(permission_required('seed.can_access_admin'))
    @action(detail=True, methods=['DELETE'])
    def inventory(self, request, pk=None):
        """
        Starts a background task to delete all properties & taxlots
        in an org.
        """
        # The task returns a JSON-serializable progress payload.
        task_result = tasks.delete_organization_inventory(pk)
        return JsonResponse(task_result)

    @swagger_auto_schema(
        request_body=SaveSettingsSerializer, )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_owner')
    @action(detail=True, methods=['PUT'])
    def save_settings(self, request, pk=None):
        """
        Saves an organization's settings: name, query threshold, shared fields, etc

        Only the keys present (and valid) in the posted 'organization' object
        are applied; invalid unit/sig-fig choices are logged and skipped.

        :param pk: organization primary key.
        :returns: JsonResponse - 'success', or 400 when the body has no
            'organization' object.
        """
        body = request.data
        org = Organization.objects.get(pk=pk)
        posted_org = body.get('organization', None)
        if posted_org is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'malformed request'
                },
                status=status.HTTP_400_BAD_REQUEST)

        desired_threshold = posted_org.get('query_threshold', None)
        if desired_threshold is not None:
            org.query_threshold = desired_threshold

        desired_name = posted_org.get('name', None)
        if desired_name is not None:
            org.name = desired_name

        def is_valid_choice(choice_tuples, s):
            """choice_tuples is std model ((value, label), ...)"""
            return (s is not None) and (s in [
                choice[0] for choice in choice_tuples
            ])

        def warn_bad_pint_spec(kind, unit_string):
            # Only log when the client actually sent a (bad) value.
            # Logger.warn is a deprecated alias; use warning().
            if unit_string is not None:
                _log.warning("got bad {0} unit string {1} for org {2}".format(
                    kind, unit_string, org.name))

        # An unused duplicate helper (warn_bad_units) was removed here.

        desired_display_units_eui = posted_org.get('display_units_eui')
        if is_valid_choice(Organization.MEASUREMENT_CHOICES_EUI,
                           desired_display_units_eui):
            org.display_units_eui = desired_display_units_eui
        else:
            warn_bad_pint_spec('eui', desired_display_units_eui)

        desired_display_units_area = posted_org.get('display_units_area')
        if is_valid_choice(Organization.MEASUREMENT_CHOICES_AREA,
                           desired_display_units_area):
            org.display_units_area = desired_display_units_area
        else:
            warn_bad_pint_spec('area', desired_display_units_area)

        desired_display_significant_figures = posted_org.get(
            'display_significant_figures')
        if isinstance(
                desired_display_significant_figures,
                int) and desired_display_significant_figures >= 0:  # noqa
            org.display_significant_figures = desired_display_significant_figures
        elif desired_display_significant_figures is not None:
            _log.warning("got bad sig figs {0} for org {1}".format(
                desired_display_significant_figures, org.name))

        desired_display_meter_units = posted_org.get('display_meter_units')
        if desired_display_meter_units:
            org.display_meter_units = desired_display_meter_units

        desired_thermal_conversion_assumption = posted_org.get(
            'thermal_conversion_assumption')
        if is_valid_choice(Organization.THERMAL_CONVERSION_ASSUMPTION_CHOICES,
                           desired_thermal_conversion_assumption):
            org.thermal_conversion_assumption = desired_thermal_conversion_assumption

        # Update MapQuest API Key if it's been changed
        mapquest_api_key = posted_org.get('mapquest_api_key', '')
        if mapquest_api_key != org.mapquest_api_key:
            org.mapquest_api_key = mapquest_api_key

        comstock_enabled = posted_org.get('comstock_enabled', False)
        if comstock_enabled != org.comstock_enabled:
            org.comstock_enabled = comstock_enabled

        org.save()

        # Update the selected exportable fields.
        new_public_column_names = posted_org.get('public_fields', None)
        if new_public_column_names is not None:
            old_public_columns = Column.objects.filter(
                organization=org, shared_field_type=Column.SHARED_PUBLIC)
            # turn off sharing in the old_pub_fields
            for col in old_public_columns:
                col.shared_field_type = Column.SHARED_NONE
                col.save()

            # for now just iterate over this to grab the new columns.
            for col in new_public_column_names:
                new_col = Column.objects.filter(organization=org, id=col['id'])
                if len(new_col) == 1:
                    new_col = new_col.first()
                    new_col.shared_field_type = Column.SHARED_PUBLIC
                    new_col.save()

        return JsonResponse({'status': 'success'})

    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def query_threshold(self, request, pk=None):
        """
        Returns the "query_threshold" for an org.  Searches from
        members of sibling orgs must return at least this many buildings
        from orgs they do not belong to, or else buildings from orgs they
        don't belong to will be removed from the results.
        """
        threshold = Organization.objects.get(pk=pk).query_threshold
        return JsonResponse({
            'status': 'success',
            'query_threshold': threshold
        })

    @swagger_auto_schema(responses={200: SharedFieldsReturnSerializer})
    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def shared_fields(self, request, pk=None):
        """
        Retrieves all fields marked as shared for the organization. Will only return used fields.
        """
        public_fields = [
            {
                'table_name': col['table_name'],
                'name': col['name'],
                'column_name': col['column_name'],
                # this is the field name in the db. The other name can have tax_
                'display_name': col['display_name']
            } for col in Column.retrieve_all(pk, 'property', True)
            if col['sharedFieldType'] == 'Public'
        ]

        return JsonResponse({
            'status': 'success',
            'public_fields': public_fields
        })

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'sub_org_name': 'string',
            'sub_org_owner_email': 'string',
        },
        required=['sub_org_name', 'sub_org_owner_email'],
        description='Properties:\n'
        '- sub_org_name: Name of the new sub organization\n'
        '- sub_org_owner_email: Email of the owner of the sub organization, which must already exist',
    ))
    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['POST'])
    def sub_org(self, request, pk=None):
        """
        Creates a child org of a parent org.

        The owner is looked up by the (lower-cased) email in the body and must
        already exist.
        """
        body = request.data
        parent_org = Organization.objects.get(pk=pk)
        owner_email = body['sub_org_owner_email'].lower()

        # username is the unique email on the User model.
        owner = User.objects.filter(username=owner_email).first()
        if owner is None:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'User with email address (%s) does not exist' % owner_email
                },
                status=status.HTTP_400_BAD_REQUEST)

        created, mess_or_org, _ = create_suborganization(
            owner, parent_org, body['sub_org_name'], ROLE_OWNER)
        if not created:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': mess_or_org
                },
                status=status.HTTP_409_CONFLICT)

        return JsonResponse({
            'status': 'success',
            'organization_id': mess_or_org.pk
        })

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def matching_criteria_columns(self, request, pk=None):
        """
        Retrieve all matching criteria columns for an org.

        Returns a mapping of table_name -> list of matching column names.
        """
        org = Organization.objects.filter(pk=pk).first()
        if org is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # Group the org's matching-criteria columns by table.
        criteria_qs = org.column_set.filter(is_matching_criteria=True)
        matching_criteria_column_names = dict(
            criteria_qs.values('table_name').annotate(
                column_names=ArrayAgg('column_name')).values_list(
                    'table_name', 'column_names'))

        return JsonResponse(matching_criteria_column_names)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {'inventory_type': 'string'},
        required=['inventory_type'],
        description='Properties:\n'
        '- inventory_type: either "properties" or "taxlots"'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def match_merge_link(self, request, pk=None):
        """
        Run match_merge_link for an org.

        Validates the requested inventory type and the org, then kicks off a
        whole-org match/merge/link job and returns its progress key.
        """
        inventory_type = request.data.get('inventory_type', None)
        if inventory_type not in ('properties', 'taxlots'):
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Provided inventory type should either be "properties" or "taxlots".'
                },
                status=status.HTTP_404_NOT_FOUND)

        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # Map the URL-facing inventory type to the backing state model name.
        if inventory_type == 'properties':
            state_class_name = 'PropertyState'
        else:
            state_class_name = 'TaxLotState'

        progress_key = self._start_whole_org_match_merge_link(
            org.id, state_class_name)

        return JsonResponse({'progress_key': progress_key})

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'inventory_type': 'string',
            'add': ['string'],
            'remove': ['string'],
        },
        required=['inventory_type'],
        description='Properties:\n'
        '- inventory_type: either "properties" or "taxlots"\n'
        '- add: list of column names\n'
        '- remove: list of column names'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def match_merge_link_preview(self, request, pk=None):
        """
        Run match_merge_link preview for an org and record type.

        Builds a proposed matching-criteria column set (current columns plus
        'add' minus 'remove'), validates the provided column names, then
        starts a preview job and returns its progress key.
        """
        inventory_type = request.data.get('inventory_type', None)
        if inventory_type not in ('properties', 'taxlots'):
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Provided inventory type should either be "properties" or "taxlots".'
                },
                status=status.HTTP_404_NOT_FOUND)

        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # Map the URL-facing inventory type to the backing state model name.
        if inventory_type == 'properties':
            state_class_name = 'PropertyState'
        else:
            state_class_name = 'TaxLotState'

        current_columns = matching_criteria_column_names(
            org.id, state_class_name)

        to_add = set(request.data.get('add', []))
        to_remove = set(request.data.get('remove', []))

        # Every requested add/remove name must resolve to a real column of
        # this org/table; otherwise reject the whole request.
        provided_columns = Column.objects.filter(
            column_name__in=to_add | to_remove,
            organization_id=org.id,
            table_name=state_class_name)
        if provided_columns.count() != len(to_add) + len(to_remove):
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Invalid column names provided.'
                },
                status=status.HTTP_404_NOT_FOUND)

        proposed_columns = current_columns.union(to_add).difference(to_remove)

        progress_key = self._start_whole_org_match_merge_link(
            org.id, state_class_name, list(proposed_columns))

        return JsonResponse({'progress_key': progress_key})

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_integer_field(
            'match_merge_link_id',
            required=True,
            description='ID of match merge link')
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def match_merge_link_result(self, request, pk=None):
        """
        Retrieve the cached result of a match/merge/link job.

        Returns a 404 JSON error for an unknown org, a 400 JSON error when
        the required `match_merge_link_id` query param is missing, otherwise
        the raw cached result for that job id.
        """
        try:
            Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # Previously a missing param raised KeyError (HTTP 500); report it
        # as a client error instead.
        identifier = request.query_params.get('match_merge_link_id')
        if identifier is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Missing required query parameter match_merge_link_id.'
                },
                status=status.HTTP_400_BAD_REQUEST)

        result_key = _get_match_merge_link_key(identifier)

        # using unsafe serialization b/c the result might not be a dict
        return JsonResponse(get_cache_raw(result_key), safe=False)

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def geocoding_columns(self, request, pk=None):
        """
        Retrieve all geocoding columns for an org.

        Responds with {'PropertyState': [...], 'TaxLotState': [...]} where
        each list holds column names in geocoding order, or a 404 JSON error
        when the org does not exist.
        """
        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # Columns with a positive geocoding_order participate in geocoding;
        # keep them sorted by that order.
        ordered_columns = (
            org.column_set
            .filter(geocoding_order__gt=0)
            .order_by('geocoding_order')
            .values('table_name', 'column_name')
        )

        buckets = {
            'PropertyState': [],
            'TaxLotState': [],
        }
        for entry in ordered_columns:
            buckets[entry['table_name']].append(entry['column_name'])

        return JsonResponse(buckets)

    def get_cycles(self, start, end, organization_id):
        if not isinstance(start, type(end)):
            raise TypeError('start and end not same types')
        # if of type int or convertable  assume they are cycle ids
        try:
            start = int(start)
            end = int(end)
        except ValueError as error:  # noqa
            # assume string is JS date
            if isinstance(start, basestring):
                start_datetime = dateutil.parser.parse(start)
                end_datetime = dateutil.parser.parse(end)
            else:
                raise Exception('Date is not a string')
        # get date times from cycles
        if isinstance(start, int):
            cycle = Cycle.objects.get(pk=start,
                                      organization_id=organization_id)
            start_datetime = cycle.start
            if start == end:
                end_datetime = cycle.end
            else:
                end_datetime = Cycle.objects.get(
                    pk=end, organization_id=organization_id).end
        return Cycle.objects.filter(
            start__gte=start_datetime,
            end__lte=end_datetime,
            organization_id=organization_id).order_by('start')

    def get_data(self, property_view, x_var, y_var):
        """
        Build one chart point from a property view's state.

        Returns {'id', 'x', 'y'} when both axis attributes are present and
        truthy on the state, otherwise None. Note that falsy values (0, '')
        are treated the same as missing attributes.
        """
        state = property_view.state
        x_val = getattr(state, x_var, None)
        y_val = getattr(state, y_var, None)
        if not x_val or not y_val:
            return None
        return {
            "id": property_view.property_id,
            "x": x_val,
            "y": y_val,
        }

    def get_raw_report_data(self, organization_id, cycles, x_var, y_var,
                            campus_only):
        """
        Build per-cycle report data for charting x_var vs y_var.

        For each cycle, returns a dict with the cycle id, the chart points
        (one per property with both axis values present), and counts of
        total properties vs properties that contributed a data point.
        When campus_only is true, campus properties are included in
        addition to non-campus ones; otherwise campuses are skipped.
        """
        all_property_views = PropertyView.objects.select_related(
            'property',
            'state').filter(property__organization_id=organization_id,
                            cycle_id__in=cycles)
        organization = Organization.objects.get(pk=organization_id)
        results = []
        for cycle in cycles:
            property_views = all_property_views.filter(cycle_id=cycle)
            count_total = []
            count_with_data = []
            data = []
            for property_view in property_views:
                property_pk = property_view.property_id
                # Campus records are counted only when campus_only was requested.
                if property_view.property.campus and campus_only:
                    count_total.append(property_pk)
                    result = self.get_data(property_view, x_var, y_var)
                    if result:
                        result['yr_e'] = cycle.end.strftime('%Y')
                        # NOTE(review): unlike the non-campus branch below, this
                        # branch does not run apply_display_unit_preferences --
                        # confirm whether campus rows should also be unit-converted.
                        data.append(result)
                        count_with_data.append(property_pk)
                elif not property_view.property.campus:
                    # Non-campus properties are always included.
                    count_total.append(property_pk)
                    result = self.get_data(property_view, x_var, y_var)
                    if result:
                        result['yr_e'] = cycle.end.strftime('%Y')
                        de_unitted_result = apply_display_unit_preferences(
                            organization, result)
                        data.append(de_unitted_result)
                        count_with_data.append(property_pk)
            result = {
                "cycle_id": cycle.pk,
                "chart_data": data,
                "property_counts": {
                    "yr_e": cycle.end.strftime('%Y'),
                    "num_properties": len(count_total),
                    "num_properties_w-data": len(count_with_data),
                },
            }
            results.append(result)
        return results

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var', required=True, description='Raw column name for y axis'),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
        AutoSchemaHelper.query_string_field(
            'campus_only',
            required=False,
            description='If true, includes campuses'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report(self, request, pk=None):
        """
        Retrieve a summary report for charting x vs y

        Validates the required query params, gathers per-cycle raw report
        data, and flattens it into parallel 'property_counts' and
        'chart_data' lists for the client. Missing params yield a 400.
        """
        campus_only = json.loads(
            request.query_params.get('campus_only', 'false'))
        params = {}
        missing_params = []
        error = ''
        for param in ['x_var', 'y_var', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
        else:
            cycles = self.get_cycles(params['start'], params['end'], pk)
            data = self.get_raw_report_data(pk, cycles, params['x_var'],
                                            params['y_var'], campus_only)
            # (Removed a leftover no-op loop that scanned
            # num_properties_w-data and broke without any effect -- it was
            # copied from report_aggregated but never set anything here.)
            property_counts = []
            chart_data = []
            for datum in data:
                property_counts.append(datum['property_counts'])
                chart_data.extend(datum['chart_data'])
            data = {
                'property_counts': property_counts,
                'chart_data': chart_data,
            }
            result = {'status': 'success', 'data': data}
            status_code = status.HTTP_200_OK
        return Response(result, status=status_code)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var',
            required=True,
            description=
            'Raw column name for y axis, must be one of: "gross_floor_area", "use_description", "year_built"'
        ),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
        AutoSchemaHelper.query_string_field(
            'campus_only',
            required=False,
            description='If true, includes campuses'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report_aggregated(self, request, pk=None):
        """
        Retrieve a summary report for charting x vs y aggregated by y_var

        Validates params (400 on error), returns 404 'No data found' when no
        cycle has any property with data, otherwise aggregates each cycle's
        chart points by y_var and returns them with per-cycle counts.
        """
        campus_only = json.loads(
            request.query_params.get('campus_only', 'false'))
        valid_y_values = ['gross_floor_area', 'use_description', 'year_built']
        params = {}
        missing_params = []
        empty = True
        error = ''
        for param in ['x_var', 'y_var', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            elif param == 'y_var' and val not in valid_y_values:
                error = "{} {} is not a valid value for {}.".format(
                    error, val, param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
        else:
            cycles = self.get_cycles(params['start'], params['end'], pk)
            x_var = params['x_var']
            y_var = params['y_var']
            data = self.get_raw_report_data(pk, cycles, x_var, y_var,
                                            campus_only)
            for datum in data:
                if datum['property_counts']['num_properties_w-data'] != 0:
                    empty = False
                    break
            if empty:
                result = {'status': 'error', 'message': 'No data found'}
                status_code = status.HTTP_404_NOT_FOUND
        # Bug fix: this condition was `not empty or not error`, which was True
        # whenever there was no error -- including the empty case -- so the
        # 404 'No data found' result above was always overwritten with an
        # empty 200 success. Aggregate only when there is data AND no error.
        if not empty and not error:
            chart_data = []
            property_counts = []
            for datum in data:
                buildings = datum['chart_data']
                yr_e = datum['property_counts']['yr_e']
                chart_data.extend(self.aggregate_data(yr_e, y_var, buildings))
                property_counts.append(datum['property_counts'])
            # Send back to client
            aggregated_data = {
                'chart_data': chart_data,
                'property_counts': property_counts
            }
            result = {
                'status': 'success',
                'aggregated_data': aggregated_data,
            }
            status_code = status.HTTP_200_OK
        return Response(result, status=status_code)

    def aggregate_data(self, yr_e, y_var, buildings):
        """
        Dispatch to the aggregation routine matching y_var.

        Supported y_var values: 'use_description', 'year_built',
        'gross_floor_area'. Any other value raises KeyError.
        """
        handlers = {
            'use_description': self.aggregate_use_description,
            'year_built': self.aggregate_year_built,
            'gross_floor_area': self.aggregate_gross_floor_area,
        }
        handler = handlers[y_var]
        return handler(yr_e, buildings)

    def aggregate_use_description(self, yr_e, buildings):
        """
        Aggregate chart points by case-insensitive use description.

        Each output point carries the median x of the group, the
        capitalized use as y, and the year-ending label.
        """
        by_use = defaultdict(list)
        for building in buildings:
            by_use[str(building['y']).lower()].append(building['x'])

        return [
            {'x': median(xs), 'y': use.capitalize(), 'yr_e': yr_e}
            for use, xs in by_use.items()
        ]

    def aggregate_year_built(self, yr_e, buildings):
        """
        Aggregate chart points by the decade of their year-built value.

        Each output point carries the median x of the decade group, a
        'YYY0-YYY9' decade label as y, and the year-ending label.
        """
        by_decade = defaultdict(list)
        for building in buildings:
            decade = '%s0' % str(building['y'])[:-1]
            by_decade[decade].append(building['x'])

        chart = []
        for decade, xs in by_decade.items():
            label = '%s-%s9' % (decade, str(decade)[:-1])  # e.g. 1990-1999
            chart.append({'x': median(xs), 'y': label, 'yr_e': yr_e})
        return chart

    def aggregate_gross_floor_area(self, yr_e, buildings):
        """
        Aggregate chart points into 100k gross-floor-area bins.

        Areas are rounded down to the nearest hundred thousand and capped at
        the top bin; each output point carries the median x of the bin, the
        bin's display label as y, and the year-ending label.
        """
        bin_labels = {
            0: '0-99k',
            100000: '100-199k',
            200000: '200k-299k',
            300000: '300k-399k',
            400000: '400-499k',
            500000: '500-599k',
            600000: '600-699k',
            700000: '700-799k',
            800000: '800-899k',
            900000: '900-999k',
            1000000: 'over 1,000k',
        }
        top_bin = max(bin_labels)

        # Bucket each building's x value by its (capped) area bin.
        binned = defaultdict(list)
        for building in buildings:
            gfa_bin = min(top_bin, round_down_hundred_thousand(building['y']))
            binned[gfa_bin].append(building['x'])

        return [
            {'x': median(xs), 'y': bin_labels[gfa_bin], 'yr_e': yr_e}
            for gfa_bin, xs in binned.items()
        ]

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field('x_label',
                                            required=True,
                                            description='Label for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var', required=True, description='Raw column name for y axis'),
        AutoSchemaHelper.query_string_field('y_label',
                                            required=True,
                                            description='Label for y axis'),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report_export(self, request, pk=None):
        """
        Export a report as a spreadsheet

        Builds an xlsx workbook with three sheets -- per-cycle property
        counts ('Counts'), the raw chart points ('Raw'), and points
        aggregated by y_var ('Agg') -- and returns it as an attachment.
        Missing query params yield a 400 JSON error instead.
        """
        params = {}
        missing_params = []
        error = ''
        # Collect required params; accumulate the names of any that are absent.
        for param in ['x_var', 'x_label', 'y_var', 'y_label', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
            return Response(result, status=status_code)

        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename="report-data"'

        # Create WB
        # The workbook is built in memory and written to the response at the end.
        output = BytesIO()
        wb = Workbook(output, {'remove_timezone': True})

        # Create sheets
        count_sheet = wb.add_worksheet('Counts')
        base_sheet = wb.add_worksheet('Raw')
        agg_sheet = wb.add_worksheet('Agg')

        # Enable bold format and establish starting cells
        bold = wb.add_format({'bold': True})
        data_row_start = 0
        data_col_start = 0

        # Write all headers across all sheets
        count_sheet.write(data_row_start, data_col_start, 'Year Ending', bold)
        count_sheet.write(data_row_start, data_col_start + 1,
                          'Properties with Data', bold)
        count_sheet.write(data_row_start, data_col_start + 2,
                          'Total Properties', bold)

        base_sheet.write(data_row_start, data_col_start, 'ID', bold)
        base_sheet.write(data_row_start, data_col_start + 1,
                         request.query_params.get('x_label'), bold)
        base_sheet.write(data_row_start, data_col_start + 2,
                         request.query_params.get('y_label'), bold)
        base_sheet.write(data_row_start, data_col_start + 3, 'Year Ending',
                         bold)

        agg_sheet.write(data_row_start, data_col_start,
                        request.query_params.get('x_label'), bold)
        agg_sheet.write(data_row_start, data_col_start + 1,
                        request.query_params.get('y_label'), bold)
        agg_sheet.write(data_row_start, data_col_start + 2, 'Year Ending',
                        bold)

        # Gather base data
        cycles = self.get_cycles(params['start'], params['end'], pk)
        data = self.get_raw_report_data(pk, cycles, params['x_var'],
                                        params['y_var'], False)

        # Per-sheet row cursors; each sheet grows independently below its header.
        base_row = data_row_start + 1
        agg_row = data_row_start + 1
        count_row = data_row_start + 1

        for cycle_results in data:
            total_count = cycle_results['property_counts']['num_properties']
            with_data_count = cycle_results['property_counts'][
                'num_properties_w-data']
            yr_e = cycle_results['property_counts']['yr_e']

            # Write Counts
            count_sheet.write(count_row, data_col_start, yr_e)
            count_sheet.write(count_row, data_col_start + 1, with_data_count)
            count_sheet.write(count_row, data_col_start + 2, total_count)

            count_row += 1

            # Write Base/Raw Data
            data_rows = cycle_results['chart_data']
            for datum in data_rows:
                base_sheet.write(base_row, data_col_start, datum.get('id'))
                base_sheet.write(base_row, data_col_start + 1, datum.get('x'))
                base_sheet.write(base_row, data_col_start + 2, datum.get('y'))
                base_sheet.write(base_row, data_col_start + 3,
                                 datum.get('yr_e'))

                base_row += 1

            # Gather and write Agg data
            for agg_datum in self.aggregate_data(yr_e, params['y_var'],
                                                 data_rows):
                agg_sheet.write(agg_row, data_col_start, agg_datum.get('x'))
                agg_sheet.write(agg_row, data_col_start + 1,
                                agg_datum.get('y'))
                agg_sheet.write(agg_row, data_col_start + 2,
                                agg_datum.get('yr_e'))

                agg_row += 1

        wb.close()

        xlsx_data = output.getvalue()

        response.write(xlsx_data)

        return response

    @has_perm_class('requires_member')
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def geocode_api_key_exists(self, request, pk=None):
        """
        Returns true if the organization has a mapquest api key
        """
        # Match the sibling endpoints: an unknown pk now yields a 404 JSON
        # error instead of an unhandled DoesNotExist (HTTP 500).
        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        # A missing key is stored as an empty string, which bool() maps to
        # False -- same result as the original if/else.
        return bool(org.mapquest_api_key)
# Example #16
class LabelInventoryViewSet(APIView):
    """API endpoint for viewing and creating labels.

            Returns::
                [
                    {
                        'id': Label's primary key
                        'name': Name given to label
                        'color': Color of label,
                        'organization_id': Id of organization label belongs to,
                        'is_applied': Will be empty array if not applied to property/taxlots
                    }
                ]

    ---
    """
    renderer_classes = (JSONRenderer, )
    parser_classes = (JSONParser, )
    # Maps the URL inventory_type segment to the view model it operates on.
    inventory_models = {'property': PropertyView, 'taxlot': TaxLotView}
    # Pre-built validation error states returned by put().
    errors = {
        'disjoint':
        ErrorState(
            status.HTTP_422_UNPROCESSABLE_ENTITY,
            'add_label_ids and remove_label_ids cannot contain elements in common'
        ),
        'missing_org':
        ErrorState(status.HTTP_422_UNPROCESSABLE_ENTITY,
                   'missing organization_id'),
        'missing_inventory_ids':
        ErrorState(status.HTTP_422_UNPROCESSABLE_ENTITY,
                   'inventory_ids cannot be undefined or empty')
    }

    @property
    def models(self):
        """
        Exposes Django's internal models for join table.

        Used for bulk_create operations.
        """
        return {
            'property': apps.get_model('seed', 'PropertyView_labels'),
            'taxlot': apps.get_model('seed', 'TaxLotView_labels')
        }

    def get_queryset(self, inventory_type, organization_id):
        """Return all label join rows whose label belongs to the organization."""
        Model = self.models[inventory_type]
        return Model.objects.filter(
            statuslabel__super_organization_id=organization_id)

    def get_label_desc(self, add_label_ids, remove_label_ids):
        """Return id/color/name for every label touched by this request."""
        return Label.objects.filter(pk__in=add_label_ids +
                                    remove_label_ids).values(
                                        'id', 'color', 'name')

    def get_inventory_id(self, q, inventory_type):
        """Return the property/taxlot view id stored on a join row."""
        return getattr(q, "{}view_id".format(inventory_type))

    def exclude(self, qs, inventory_type, label_ids):
        """Map each label id to the inventory ids that already carry that label."""
        exclude = {label: [] for label in label_ids}
        for q in qs:
            if q.statuslabel_id in label_ids:
                inventory_id = self.get_inventory_id(q, inventory_type)
                exclude[q.statuslabel_id].append(inventory_id)
        return exclude

    def filter_by_inventory(self, qs, inventory_type, inventory_ids):
        """Narrow the join-row queryset to the given inventory view ids."""
        if inventory_ids:
            filterdict = {
                "{}view__pk__in".format(inventory_type): inventory_ids
            }
            qs = qs.filter(**filterdict)
        return qs

    def label_factory(self, inventory_type, label_id, inventory_id):
        """Build (unsaved) a join row linking a label to an inventory view."""
        Model = self.models[inventory_type]

        # Ensure the the label org and inventory org are the same
        inventory_parent_org_id = getattr(Model, "{}view".format(inventory_type)).get_queryset().get(pk=inventory_id)\
            .cycle.organization.get_parent().id
        label_super_org_id = Model.statuslabel.get_queryset().get(
            pk=label_id).super_organization_id
        if inventory_parent_org_id == label_super_org_id:
            create_dict = {
                'statuslabel_id': label_id,
                "{}view_id".format(inventory_type): inventory_id
            }

            return Model(**create_dict)
        else:
            raise IntegrityError(
                'Label with super_organization_id={} cannot be applied to a record with parent '
                'organization_id={}.'.format(label_super_org_id,
                                             inventory_parent_org_id))

    def add_labels(self, qs, inventory_type, inventory_ids, add_label_ids):
        """Bulk-apply labels, skipping pairs that already exist; return touched ids."""
        added = []
        if add_label_ids:
            model = self.models[inventory_type]
            exclude = self.exclude(qs, inventory_type, add_label_ids)
            new_inventory_labels = [
                self.label_factory(inventory_type, label_id, pk)
                for label_id in add_label_ids for pk in inventory_ids
                if pk not in exclude[label_id]
            ]
            model.objects.bulk_create(new_inventory_labels)
            added = [
                self.get_inventory_id(m, inventory_type)
                for m in new_inventory_labels
            ]
        return added

    def remove_labels(self, qs, inventory_type, remove_label_ids):
        """Delete the given labels' join rows; return the affected inventory ids."""
        removed = []
        if remove_label_ids:
            rqs = qs.filter(statuslabel_id__in=remove_label_ids)
            removed = [self.get_inventory_id(q, inventory_type) for q in rqs]
            rqs.delete()
        return removed

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'add_label_ids': ['integer'],
                'remove_label_ids': ['integer'],
                'inventory_ids': ['integer'],
            },
            required=['inventory_ids'],
            description='Properties:\n'
            '- add_label_ids: label ids to add to the inventories\n'
            '- remove_label_ids: label ids to remove from the inventories\n'
            '- inventory_ids: List of inventory IDs'))
    @has_perm_class('can_modify_data')
    def put(self, request, inventory_type):
        """
        Updates label assignments to inventory items.

        Returns::

            {
                'status': {string}              'success' or 'error'
                'message': {string}             Error message if error
                'num_updated': {integer}        Number of properties/taxlots updated
                'labels': [                     List of labels affected.
                    {
                        'color': {string}
                        'id': {int}
                        'label': {'string'}
                        'name': {string}
                    }...
                ]
            }

        """
        add_label_ids = request.data.get('add_label_ids', [])
        remove_label_ids = request.data.get('remove_label_ids', [])
        inventory_ids = request.data.get('inventory_ids', [])
        # Bug fix: subscripting query_params raised KeyError (HTTP 500) when
        # the param was absent, making the 'missing_org' error state below
        # unreachable; .get() lets that validation actually run.
        organization_id = request.query_params.get('organization_id')
        error = None
        # ensure add_label_ids and remove_label_ids are different
        if not set(add_label_ids).isdisjoint(remove_label_ids):
            error = self.errors['disjoint']
        elif not organization_id:
            error = self.errors['missing_org']
        elif len(inventory_ids) == 0:
            error = self.errors['missing_inventory_ids']
        if error:
            result = {'status': 'error', 'message': str(error)}
            status_code = error.status_code
        else:
            qs = self.get_queryset(inventory_type, organization_id)
            qs = self.filter_by_inventory(qs, inventory_type, inventory_ids)
            removed = self.remove_labels(qs, inventory_type, remove_label_ids)
            added = self.add_labels(qs, inventory_type, inventory_ids,
                                    add_label_ids)
            num_updated = len(set(added).union(removed))
            labels = self.get_label_desc(add_label_ids, remove_label_ids)
            result = {
                'status': 'success',
                'num_updated': num_updated,
                'labels': labels
            }
            status_code = status.HTTP_200_OK
        return response.Response(result, status=status_code)
# Example #17
class UbidViewSet(viewsets.ViewSet, OrgMixin):
    """Endpoints for decoding and summarizing UBIDs/ULIDs on inventory records."""

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'property_view_ids': ['integer'],
                'taxlot_view_ids': ['integer'],
            },
            description=
            'IDs by inventory type for records to have their UBID decoded.'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['POST'])
    def decode_by_ids(self, request):
        """
        Submit a request to decode UBIDs for property and tax lot records.
        """
        payload = dict(request.data)
        org_id = self.get_organization(request)

        # Each inventory branch is skipped entirely when no ids of that
        # type were posted; views are scoped to the caller's organization.
        property_view_ids = payload.get('property_view_ids')
        if property_view_ids:
            view_qs = PropertyView.objects.filter(
                id__in=property_view_ids, cycle__organization_id=org_id)
            state_qs = PropertyState.objects.filter(
                id__in=Subquery(view_qs.values('state_id')))
            decode_unique_ids(state_qs)

        taxlot_view_ids = payload.get('taxlot_view_ids')
        if taxlot_view_ids:
            view_qs = TaxLotView.objects.filter(
                id__in=taxlot_view_ids, cycle__organization_id=org_id)
            state_qs = TaxLotState.objects.filter(
                id__in=Subquery(view_qs.values('state_id')))
            decode_unique_ids(state_qs)

        return JsonResponse({'status': 'success'})

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'property_view_ids': ['integer'],
                'taxlot_view_ids': ['integer'],
            },
            description=
            'IDs by inventory type for records to be used in building a UBID decoding summary.'
        ))
    @ajax_request_class
    @has_perm_class('can_view_data')
    @action(detail=False, methods=['POST'])
    def decode_results(self, request):
        """
        Generate a summary of populated, unpopulated, and decoded UBIDs for
        property and tax lot records.
        """
        payload = dict(request.data)
        org_id = self.get_organization(request)

        # All six counters default to zero so a summary is always complete
        # even when one (or both) inventory types were not requested.
        summary = {
            "ubid_unpopulated": 0,
            "ubid_successfully_decoded": 0,
            "ubid_not_decoded": 0,
            "ulid_unpopulated": 0,
            "ulid_successfully_decoded": 0,
            "ulid_not_decoded": 0,
        }

        property_view_ids = payload.get('property_view_ids')
        if property_view_ids:
            views = PropertyView.objects.filter(
                id__in=property_view_ids, cycle__organization_id=org_id)
            states = PropertyState.objects.filter(
                id__in=Subquery(views.values('state_id')))

            summary["ubid_unpopulated"] = states.filter(
                ubid__isnull=True).count()
            summary["ubid_successfully_decoded"] = states.filter(
                ubid__isnull=False,
                bounding_box__isnull=False,
                centroid__isnull=False).count()
            # bounding_box alone may be populated by a GeoJSON import, so
            # only a missing centroid marks a UBID that failed to decode
            summary["ubid_not_decoded"] = states.filter(
                ubid__isnull=False, centroid__isnull=True).count()

        taxlot_view_ids = payload.get('taxlot_view_ids')
        if taxlot_view_ids:
            views = TaxLotView.objects.filter(
                id__in=taxlot_view_ids, cycle__organization_id=org_id)
            states = TaxLotState.objects.filter(
                id__in=Subquery(views.values('state_id')))

            summary["ulid_unpopulated"] = states.filter(
                ulid__isnull=True).count()
            summary["ulid_successfully_decoded"] = states.filter(
                ulid__isnull=False,
                bounding_box__isnull=False,
                centroid__isnull=False).count()
            # same caveat as above: bounding_box can come from GeoJSON
            summary["ulid_not_decoded"] = states.filter(
                ulid__isnull=False, centroid__isnull=True).count()

        # returned as a plain dict; the ajax decorator converts it to JSON
        return summary
예제 #18
0
class ColumnMappingProfileViewSet(OrgMixin, ViewSet):
    """CRUD endpoints for an organization's column mapping profiles."""
    permission_classes = (SEEDOrgPermissions, )
    # req by SEEDOrgPermissions, but currently not used by any methods.
    queryset = ColumnMappingProfile.objects.none()

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(
                required=False,
                description=
                "Optional org id which overrides the users (default) current org id"
            )
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {'profile_type': ['string']},
            description=
            "Possible Types: 'Normal', 'BuildingSync Default', BuildingSync Custom'"
        ))
    @api_endpoint_class
    @action(detail=False,
            methods=['POST'])  # POST in order to provide array/list
    def filter(self, request):
        """
        Retrieves all profiles for an organization, optionally filtered by
        profile type.
        """
        try:
            profile_types = request.data.get('profile_type', [])
            profile_types = [
                ColumnMappingProfile.get_profile_type(pt)
                for pt in profile_types
            ]
            filter_params = {
                'organizations__pk': self.get_organization(request, True).id
            }
            if profile_types:
                filter_params['profile_type__in'] = profile_types
            profiles = ColumnMappingProfile.objects.filter(**filter_params)
            data = [ColumnMappingProfileSerializer(p).data for p in profiles]

            return JsonResponse({
                'status': 'success',
                'data': data,
            })
        except Exception as e:
            # Bug fix: a stray `raise e` previously made this response
            # unreachable; return the 400 error body like the sibling
            # endpoints (create/destroy/suggestions) do.
            return JsonResponse({
                'status': 'error',
                'data': str(e),
            },
                                status=HTTP_400_BAD_REQUEST)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the users (default) current org id"
        )
    ],
                         request_body=AutoSchemaHelper.schema_factory(
                             {
                                 'name':
                                 'string',
                                 'mappings': [{
                                     'to_field': 'string',
                                     'from_field': 'string',
                                     'from_units': 'string',
                                     'to_table_name': 'string',
                                 }]
                             },
                             description="Optional 'name' or 'mappings'.\n" +
                             mappings_description))
    @api_endpoint_class
    def update(self, request, pk=None):
        """
        Updates a profile given appropriate request data. The body should contain
        only valid fields for ColumnMappingProfile objects.

        Default BuildingSync profiles are read-only; custom BuildingSync
        profiles only allow editing to_field/from_units or removing mappings.
        """
        org_id = self.get_organization(request, True).id
        try:
            profile = ColumnMappingProfile.objects.get(
                organizations__pk=org_id, pk=pk)
        except ColumnMappingProfile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'data': 'No profile with given id'
                },
                status=HTTP_400_BAD_REQUEST)

        if profile.profile_type == ColumnMappingProfile.BUILDINGSYNC_DEFAULT:
            return JsonResponse(
                {
                    'status': 'error',
                    'data': 'Default BuildingSync profile are not editable'
                },
                status=HTTP_400_BAD_REQUEST)

        updated_name, updated_mappings = request.data.get(
            'name'), request.data.get('mappings')

        # update the name
        if updated_name is not None:
            profile.name = updated_name

        # update the mappings according to the profile type
        if updated_mappings is not None:
            if profile.profile_type == ColumnMappingProfile.BUILDINGSYNC_CUSTOM:
                # only allow these updates to the mappings
                # - changing the to_field or from_units
                # - removing mappings
                original_mappings_dict = {
                    m['from_field']: m.copy()
                    for m in profile.mappings
                }
                final_mappings = []
                for updated_mapping in updated_mappings:
                    from_field = updated_mapping['from_field']
                    original_mapping = original_mappings_dict.get(from_field)
                    if original_mapping is not None:
                        original_mapping['to_field'] = updated_mapping[
                            'to_field']
                        original_mapping['from_units'] = updated_mapping[
                            'from_units']
                        final_mappings.append(original_mapping)
                        # posted mappings absent from the original are
                        # silently ignored; originals not re-posted are
                        # dropped (i.e. removed)
                        del original_mappings_dict[from_field]

                profile.mappings = final_mappings
            elif updated_mappings:
                # indiscriminantly update the mappings
                profile.mappings = updated_mappings

        profile.save()
        return JsonResponse({
            'status':
            'success',
            'data':
            ColumnMappingProfileSerializer(profile).data,
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the users (default) current org id"
        )
    ],
                         request_body=AutoSchemaHelper.schema_factory(
                             {
                                 'name':
                                 'string',
                                 'mappings': [{
                                     'to_field': 'string',
                                     'from_field': 'string',
                                     'from_units': 'string',
                                     'to_table_name': 'string',
                                 }]
                             },
                             description=mappings_description,
                             required=['name', 'mappings']))
    @api_endpoint_class
    def create(self, request, pk=None):
        """
        Creates a new profile given appropriate request data. The body should
        contain only valid fields for ColumnMappingProfile objects.
        """
        org_id = self.get_organization(request, True).id
        try:
            profile_data = request.data
            profile_data['organizations'] = [org_id]
            ser_profile = ColumnMappingProfileSerializer(data=profile_data)
            if ser_profile.is_valid():
                profile = ser_profile.save()
                response_status = 'success'
                response_data = ColumnMappingProfileSerializer(profile).data
                response_code = HTTP_200_OK
            else:
                response_status = 'error'
                response_data = ser_profile.errors
                response_code = HTTP_400_BAD_REQUEST

            return JsonResponse(
                {
                    'status': response_status,
                    'data': response_data,
                },
                status=response_code)
        except Exception as e:
            return JsonResponse({
                'status': 'error',
                'data': str(e),
            },
                                status=HTTP_400_BAD_REQUEST)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the users (default) current org id"
        )
    ])
    @api_endpoint_class
    def destroy(self, request, pk=None):
        """
        Deletes a specific profile. Default BuildingSync profiles cannot be
        deleted.
        """
        org_id = self.get_organization(request, True).id
        try:
            profile = ColumnMappingProfile.objects.get(
                organizations__pk=org_id, pk=pk)
        except Exception as e:
            return JsonResponse({
                'status': 'error',
                'data': str(e),
            },
                                status=HTTP_400_BAD_REQUEST)

        if profile.profile_type == ColumnMappingProfile.BUILDINGSYNC_DEFAULT:
            return JsonResponse(
                {
                    'status': 'error',
                    'data': 'Not allowed to edit default BuildingSync profiles'
                },
                status=HTTP_400_BAD_REQUEST)
        else:
            profile.delete()
            return JsonResponse({
                'status': 'success',
                'data': "Successfully deleted",
            })

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(
                required=False,
                description=
                "Optional org id which overrides the users (default) current org id"
            )
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {'headers': ['string']},
            description=
            "Raw headers - the exact headers for columns in an import file.",
            required=['headers']))
    @api_endpoint_class
    @action(detail=False, methods=['POST'])
    def suggestions(self, request):
        """
        Retrieves suggestions given raw column headers.
        """
        try:
            org_id = self.get_organization(request, True).id
            raw_headers = request.data.get('headers', [])

            suggested_mappings = mapper.build_column_mapping(
                raw_headers,
                Column.retrieve_all_by_tuple(org_id),
                previous_mapping=None,
                map_args=None,
                thresh=
                80  # percentage match that we require. 80% is random value for now.
            )
            # replace None with empty string for column names and PropertyState for tables
            # TODO #239: Move this fix to build_column_mapping
            for m in suggested_mappings:
                table, destination_field, _confidence = suggested_mappings[m]
                if destination_field is None:
                    suggested_mappings[m][1] = ''

            # Fix the table name, eventually move this to the build_column_mapping
            for m in suggested_mappings:
                table, _destination_field, _confidence = suggested_mappings[m]
                # Do not return the campus, created, updated fields... that is force them to be in the property state
                if not table or table == 'Property':
                    suggested_mappings[m][0] = 'PropertyState'
                elif table == 'TaxLot':
                    suggested_mappings[m][0] = 'TaxLotState'

            return JsonResponse({
                'status': 'success',
                'data': suggested_mappings,
            })
        except Exception as e:
            return JsonResponse({
                'status': 'error',
                'data': str(e),
            },
                                status=HTTP_400_BAD_REQUEST)
예제 #19
0
class AnalysisViewSet(viewsets.ViewSet):
    """Endpoints for creating, listing, and controlling analyses."""
    serializer_class = AnalysisSerializer
    model = Analysis

    @staticmethod
    def _analysis_not_found():
        # Shared 409 payload for lookups that miss in the caller's org;
        # the message text is identical across all endpoints.
        return JsonResponse(
            {
                'status': 'error',
                'message':
                "Requested analysis doesn't exist in this organization."
            },
            status=HTTP_409_CONFLICT)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
        ],
        request_body=CreateAnalysisSerializer,
    )
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def create(self, request):
        """Create an analysis and kick off its preparation step."""
        serializer = CreateAnalysisSerializer(data=request.data)
        if not serializer.is_valid():
            # NOTE: invalid input returns an error body with HTTP 200
            return JsonResponse({
                'status': 'error',
                'message': 'Bad request',
                'errors': serializer.errors
            })

        analysis = serializer.save(
            user_id=request.user.id,
            organization_id=request.query_params['organization_id'])
        pipeline = AnalysisPipeline.factory(analysis)
        try:
            progress_data = pipeline.prepare_analysis(
                serializer.validated_data['property_view_ids'])
        except AnalysisPipelineException as e:
            return JsonResponse({'status': 'error', 'message': str(e)},
                                status=HTTP_409_CONFLICT)
        return JsonResponse({
            'status': 'success',
            'progress_key': progress_data['progress_key'],
            'progress': progress_data,
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(True),
        AutoSchemaHelper.query_integer_field('property_id', False,
                                             'Property ID')
    ])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def list(self, request):
        """List analyses for the org, optionally scoped to one property."""
        organization_id = request.query_params.get('organization_id', None)
        property_id = request.query_params.get('property_id', None)

        queryset = Analysis.objects.filter(organization=organization_id)
        if property_id is not None:
            queryset = queryset.filter(
                analysispropertyview__property=property_id).distinct()
        queryset = queryset.order_by('id')

        analyses = []
        for analysis in queryset:
            serialized = AnalysisSerializer(analysis).data
            serialized.update(analysis.get_property_view_info(property_id))
            analyses.append(serialized)

        return JsonResponse({'status': 'success', 'analyses': analyses})

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field(True)])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def retrieve(self, request, pk):
        """Fetch a single analysis with its property view summary."""
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
        except Analysis.DoesNotExist:
            return self._analysis_not_found()

        serialized = AnalysisSerializer(analysis).data
        serialized.update(analysis.get_property_view_info())
        return JsonResponse({
            'status': 'success',
            'analysis': serialized
        })

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['post'])
    def start(self, request, pk):
        """Start a previously prepared analysis."""
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
            progress_data = AnalysisPipeline.factory(analysis).start_analysis()
        except Analysis.DoesNotExist:
            return self._analysis_not_found()
        except AnalysisPipelineException as e:
            return JsonResponse({'status': 'error', 'message': str(e)},
                                status=HTTP_409_CONFLICT)
        return JsonResponse({
            'status': 'success',
            'progress_key': progress_data['progress_key'],
            'progress': progress_data,
        })

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['post'])
    def stop(self, request, pk):
        """Stop a running analysis."""
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
            AnalysisPipeline.factory(analysis).stop()
        except Analysis.DoesNotExist:
            return self._analysis_not_found()
        return JsonResponse({
            'status': 'success',
        })

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def destroy(self, request, pk):
        """Delete an analysis via its pipeline."""
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
            AnalysisPipeline.factory(analysis).delete()
        except Analysis.DoesNotExist:
            return self._analysis_not_found()
        return JsonResponse({
            'status': 'success',
        })
예제 #20
0
class ColumnListProfileViewSet(OrgValidateMixin,
                               SEEDOrgNoPatchOrOrgCreateModelViewSet):
    """
    API endpoint for returning Column List Profiles

    create:
        Create a new list profile. The list of columns is an array of column primary keys. If using Swagger, then
        this will be enters as a list with returns between each primary key.

        JSON POST Example:

            {
                "name": "some new name 3",
                "profile_location": "List View Profile",
                "inventory_type": "Tax Lot",
                "columns": [
                    {"id": 1, "pinned": false, "order": 10},
                    {"id": 5, "pinned": true, "order": 14},
                    {"id": 7, "pinned": true, "order": 14}
                ]
            }

    """
    serializer_class = ColumnListProfileSerializer
    model = ColumnListProfile
    filter_backends = (ColumnListProfileFilterBackend, )
    pagination_class = None

    # force_parent = True  # Ideally the column list profiles would inherit from the parent,
    # but not yet.

    # Overridden to augment with protected ComStock list profile if enabled
    def retrieve(self, request, *args, **kwargs):
        """Retrieve one profile; serve the synthetic ComStock profile when
        the org has ComStock enabled and pk is the literal string 'null'."""
        org_id = self.get_organization(self.request)

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'organization with id %s does not exist' % org_id
                },
                status=status.HTTP_404_NOT_FOUND)

        if not org.comstock_enabled or kwargs['pk'] != 'null':
            # Bug fix: forward the captured args/kwargs properly instead of
            # passing the tuple and dict as two positional arguments.
            return super().retrieve(request, *args, **kwargs)

        # Synthetic, read-only ComStock profile (id None, property list view)
        result = {
            'status': 'success',
            'data': {
                'id': None,
                'name': 'ComStock',
                'profile_location': VIEW_LOCATION_TYPES[VIEW_LIST][1],
                'inventory_type':
                VIEW_LIST_INVENTORY_TYPE[VIEW_LIST_PROPERTY][1],
                'columns': self.list_comstock_columns(org_id)
            }
        }

        return JsonResponse(result, status=status.HTTP_200_OK)

    # Overridden to augment with protected ComStock list profile if enabled
    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(
            required=False,
            description=
            "Optional org id which overrides the users (default) current org id"
        ),
        AutoSchemaHelper.query_string_field(
            name='inventory_type',
            required=True,
            description="'Property' or 'Tax Lot' for filtering."),
        AutoSchemaHelper.query_string_field(
            name='profile_location',
            required=True,
            description=
            "'List View Profile' or 'Detail View Profile' for filtering."),
    ])
    def list(self, request, *args, **kwargs):
        """List profiles; append the synthetic ComStock profile when the org
        has ComStock enabled and the request is for property list views."""
        org_id = self.get_organization(self.request)

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'organization with id %s does not exist' % org_id
                },
                status=status.HTTP_404_NOT_FOUND)

        inventory_type = request.query_params.get('inventory_type')
        profile_location = request.query_params.get('profile_location')
        if not org.comstock_enabled or inventory_type == 'Tax Lot' or profile_location == 'Detail View Profile':
            # Bug fix: forward the captured args/kwargs properly instead of
            # passing the tuple and dict as two positional arguments.
            return super().list(request, *args, **kwargs)

        queryset = self.filter_queryset(self.get_queryset())

        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)

        results = list(queryset)
        base_profiles = self.get_serializer(results, many=True).data

        # Add ComStock columns
        base_profiles.append({
            "id": None,
            "name": "ComStock",
            "profile_location": profile_location,
            "inventory_type": inventory_type,
            "columns": self.list_comstock_columns(org_id)
        })

        return Response(base_profiles)

    @staticmethod
    def list_comstock_columns(org_id):
        """Return the org's ComStock-mapped columns as list-profile column
        dicts, ordered by their ComStock mapping (1-based `order`)."""
        comstock_columns = Column.objects.filter(organization_id=org_id, comstock_mapping__isnull=False) \
            .order_by('comstock_mapping')

        results = []
        for index, column in enumerate(comstock_columns):
            results.append({
                "id": column.id,
                "pinned": False,
                "order": index + 1,
                "column_name": column.column_name,
                "table_name": column.table_name,
                "comstock_mapping": column.comstock_mapping
            })

        return results
예제 #21
0
class TaxlotViewSet(viewsets.ViewSet, OrgMixin, ProfileIdMixin):
    renderer_classes = (JSONRenderer,)
    serializer_class = TaxLotSerializer
    parser_classes = (JSONParser,)
    _organization = None

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field(required=True)],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'selected': ['integer'],
            },
            description='IDs for taxlots to be checked for which labels are applied.'
        )
    )
    @action(detail=False, methods=['POST'])
    def labels(self, request):
        """
        Return every label of the parent organization; the is_applied field in
        the response reflects which labels are applied to each taxlot_view.
        """
        parent_org = self.get_parent_org(self.request)
        label_qs = (StatusLabel.objects
                    .filter(super_organization=parent_org)
                    .order_by("name")
                    .distinct())
        super_organization = self.get_organization(request)
        # TODO: refactor to avoid passing request here
        return get_labels(request, label_qs, super_organization, 'taxlot_view')

    def _get_filtered_results(self, request, profile_id):
        """Return a paginated JSON listing of taxlot views for an org/cycle.

        :param request: expects query params organization_id (required),
            cycle (optional cycle id), page, per_page and profile_id; the
            request body may carry 'taxlot_view_ids' to restrict the results
        :param profile_id: fallback column list profile id used when no
            'profile_id' query parameter is supplied; -1 means all columns,
            None means no column filtering
        :return: JsonResponse with pagination metadata, cycle_id and results
        """
        page = request.query_params.get('page', 1)
        per_page = request.query_params.get('per_page', 1)
        org_id = request.query_params.get('organization_id', None)
        cycle_id = request.query_params.get('cycle')
        # check if there is a query parameter for the profile_id. If so, then use that one
        profile_id = request.query_params.get('profile_id', profile_id)
        if not org_id:
            return JsonResponse(
                {'status': 'error', 'message': 'Need to pass organization_id as query parameter'},
                status=status.HTTP_400_BAD_REQUEST)

        if cycle_id:
            # an invalid cycle id previously raised Cycle.DoesNotExist and
            # surfaced as a 500; treat it like a missing cycle instead
            cycle = Cycle.objects.filter(organization_id=org_id, pk=cycle_id).first()
        else:
            # fall back to the first cycle (ordered by name) for the org
            cycle = Cycle.objects.filter(organization_id=org_id).order_by('name').first()
        if not cycle:
            return JsonResponse({
                'status': 'error',
                'message': 'Could not locate cycle',
                'pagination': {
                    'total': 0
                },
                'cycle_id': None,
                'results': []
            })

        # Return taxlot views limited to the 'taxlot_view_ids' list.  Otherwise, if selected is empty, return all
        taxlot_views_list = TaxLotView.objects.select_related('taxlot', 'state', 'cycle') \
            .filter(taxlot__organization_id=org_id, cycle=cycle) \
            .order_by('id')
        if request.data.get('taxlot_view_ids'):
            taxlot_views_list = taxlot_views_list.filter(id__in=request.data['taxlot_view_ids'])

        paginator = Paginator(taxlot_views_list, per_page)

        try:
            taxlot_views = paginator.page(page)
            page = int(page)
        except PageNotAnInteger:
            taxlot_views = paginator.page(1)
            page = 1
        except EmptyPage:
            taxlot_views = paginator.page(paginator.num_pages)
            page = paginator.num_pages

        org = Organization.objects.get(pk=org_id)

        # Retrieve all the columns that are in the db for this organization
        columns_from_database = Column.retrieve_all(org_id, 'taxlot', False)

        # This uses an old method of returning the show_columns. There is a new method that
        # is preferred in v2.1 API with the ProfileIdMixin.
        if profile_id is None:
            show_columns = None
        elif profile_id == -1:
            # -1 means show every column in the organization
            show_columns = list(Column.objects.filter(
                organization_id=org_id
            ).values_list('id', flat=True))
        else:
            try:
                profile = ColumnListProfile.objects.get(
                    organization=org,
                    id=profile_id,
                    profile_location=VIEW_LIST,
                    inventory_type=VIEW_LIST_TAXLOT
                )
                show_columns = list(ColumnListProfileColumn.objects.filter(
                    column_list_profile_id=profile.id
                ).values_list('column_id', flat=True))
            except ColumnListProfile.DoesNotExist:
                show_columns = None

        related_results = TaxLotProperty.get_related(taxlot_views, show_columns,
                                                     columns_from_database)

        # collapse units here so we're only doing the last page; we're already a
        # realized list by now and not a lazy queryset
        unit_collapsed_results = [apply_display_unit_preferences(org, x) for x in related_results]

        response = {
            'pagination': {
                # reuse the already-fetched page object instead of re-slicing
                # with paginator.page(page) once per field
                'page': page,
                'start': taxlot_views.start_index(),
                'end': taxlot_views.end_index(),
                'num_pages': paginator.num_pages,
                'has_next': taxlot_views.has_next(),
                'has_previous': taxlot_views.has_previous(),
                'total': paginator.count
            },
            'cycle_id': cycle.id,
            'results': unit_collapsed_results
        }

        return JsonResponse(response)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'cycle',
                required=True,
                description='The ID of the cycle to get taxlots'
            ),
            AutoSchemaHelper.query_integer_field(
                'page',
                required=False,
                description='The current page of taxlots to return'
            ),
            AutoSchemaHelper.query_integer_field(
                'per_page',
                required=False,
                description='The number of items per page to return'
            ),
            AutoSchemaHelper.query_integer_field(
                'profile_id',
                required=False,
                description='The ID of the column profile to use'
            )
        ]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    def list(self, request):
        """
        List all the tax lots for the organization and cycle given in the
        query parameters, using profile_id -1 (all columns) by default.
        """
        return self._get_filtered_results(request, profile_id=-1)

    @swagger_auto_schema(
        request_body=AutoSchemaHelper.schema_factory(
            {
                'organization_id': 'integer',
                'profile_id': 'integer',
                'cycle_ids': ['integer'],
            },
            required=['organization_id', 'cycle_ids'],
            description='Properties:\n'
                        '- organization_id: ID of organization\n'
                        '- profile_id: Either an id of a list settings profile, '
                        'or undefined\n'
                        '- cycle_ids: The IDs of the cycle to get taxlots'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @action(detail=False, methods=['POST'])
    def filter_by_cycle(self, request):
        """
        List all the taxlots with all columns across the requested cycles.

        :param request: POST body with organization_id (required), profile_id
            (optional, defaults to -1 meaning all columns) and cycle_ids
            (required list of cycle ids)
        """
        # NOTE: we are using a POST http method b/c swagger and django handle
        # arrays differently in query parameters. ie this is just simpler
        org_id = request.data.get('organization_id', None)
        profile_id = request.data.get('profile_id', -1)
        cycle_ids = request.data.get('cycle_ids', [])

        if not org_id:
            # the value is read from the POST body above, so the error message
            # now says "request body" (it previously said "query parameter")
            return JsonResponse(
                {'status': 'error', 'message': 'Need to pass organization_id in the request body'},
                status=status.HTTP_400_BAD_REQUEST)

        response = taxlots_across_cycles(org_id, profile_id, cycle_ids)

        return JsonResponse(response)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'cycle',
                required=True,
                description='The ID of the cycle to get tax lots'
            ),
            AutoSchemaHelper.query_integer_field(
                'page',
                required=False,
                description='The current page of taxlots to return'
            ),
            AutoSchemaHelper.query_integer_field(
                'per_page',
                required=False,
                description='The number of items per page to return'
            ),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'profile_id': 'integer',
                'taxlot_view_ids': ['integer'],
            },
            required=['profile_id'],
            description='Properties:\n'
                        '- profile_id: Either an id of a list settings profile, or undefined\n'
                        '- taxlot_view_ids: List of taxlot view ids'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @action(detail=False, methods=['POST'])
    def filter(self, request):
        """
        List all the tax lots, optionally restricted to the taxlot view ids in
        the request body and rendered through the requested column profile.
        """
        # absent or the literal string 'None' both mean "no profile"
        raw_profile_id = request.data.get('profile_id')
        profile_id = None if raw_profile_id == 'None' else raw_profile_id

        return self._get_filtered_results(request, profile_id=profile_id)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'taxlot_view_ids': ['integer']
            },
            required=['taxlot_view_ids'],
            description='Properties:\n'
                        '- taxlot_view_ids: Array containing tax lot state ids to merge'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['POST'])
    def merge(self, request):
        """
        Merge multiple tax lot records into a single new record, and run this
        new record through a match and merge round within it's current Cycle.

        :param request: body key 'taxlot_view_ids' lists the view ids whose
            states are merged, in the given order; query param organization_id
            is required
        :return: dict with 'status' plus match/link counts on success
        """
        body = request.data
        organization_id = int(request.query_params.get('organization_id', None))

        taxlot_view_ids = body.get('taxlot_view_ids', [])
        taxlot_states = TaxLotView.objects.filter(
            id__in=taxlot_view_ids,
            cycle__organization_id=organization_id
        ).values('id', 'state_id')
        # get the state ids in order according to the given view ids
        taxlot_states_dict = {t['id']: t['state_id'] for t in taxlot_states}
        taxlot_state_ids = [
            taxlot_states_dict[view_id]
            for view_id in taxlot_view_ids if view_id in taxlot_states_dict
        ]

        if len(taxlot_state_ids) != len(taxlot_view_ids):
            # previously returned a bare dict with no HTTP status; use an
            # explicit 404 JsonResponse, consistent with the error below
            return JsonResponse({
                'status': 'error',
                'message': 'All records not found.'
            }, status=status.HTTP_404_NOT_FOUND)

        # Check the number of taxlot_state_ids to merge
        if len(taxlot_state_ids) < 2:
            return JsonResponse({
                'status': 'error',
                'message': 'At least two ids are necessary to merge'
            }, status=status.HTTP_400_BAD_REQUEST)

        merged_state = merge_taxlots(taxlot_state_ids, organization_id, 'Manual Match')

        # the resulting view id is not part of the merge response
        merge_count, link_count, _view_id = match_merge_link(merged_state.taxlotview_set.first().id, 'TaxLotState')

        return {
            'status': 'success',
            'match_merged_count': merge_count,
            'match_link_count': link_count,
        }

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def unmerge(self, request, pk=None):
        """
        Unmerge a taxlot view into two taxlot views

        Splits the merged state back into its two recorded parent states, each
        attached to a freshly cloned TaxLot and a new TaxLotView in the same
        cycle. Labels, notes and property pairings of the merged view are
        copied onto both new views; the merged view and its audit-log entry
        are deleted.

        :param pk: id of the merged TaxLotView; query param organization_id
            is required
        :return: dict with 'status' and, on success, the id of the first new
            view under 'view_id'
        """
        try:
            old_view = TaxLotView.objects.select_related(
                'taxlot', 'cycle', 'state'
            ).get(
                id=pk,
                taxlot__organization_id=self.request.GET['organization_id']
            )
        except TaxLotView.DoesNotExist:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} does not exist'.format(pk)
            }

        # Duplicate pairing: remember the property views paired to the merged
        # view so both new views can be paired to them at the end
        paired_view_ids = list(TaxLotProperty.objects.filter(taxlot_view_id=old_view.id)
                               .order_by('property_view_id').values_list('property_view_id',
                                                                         flat=True))

        # Capture previous associated labels
        label_ids = list(old_view.labels.all().values_list('id', flat=True))

        # Detach notes from the view being deleted; copies are re-attached to
        # the new views below.
        # NOTE(review): the None assignment is in-memory only (no save() here)
        # -- presumably relying on old_view.delete() plus the copies below;
        # confirm this is intentional.
        notes = old_view.notes.all()
        for note in notes:
            note.taxlot_view = None

        merged_state = old_view.state
        if merged_state.data_state != DATA_STATE_MATCHING or merged_state.merge_state != MERGE_STATE_MERGED:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} is not a merged taxlot view'.format(pk)
            }

        # The latest audit log entry for the merged state records the two
        # parent states that were merged together
        log = TaxLotAuditLog.objects.select_related('parent_state1', 'parent_state2').filter(
            state=merged_state
        ).order_by('-id').first()

        if log.parent_state1 is None or log.parent_state2 is None:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} must have two parent states'.format(pk)
            }

        state1 = log.parent_state1
        state2 = log.parent_state2
        cycle_id = old_view.cycle_id

        # Clone the taxlot record twice
        # (clearing the pk and saving makes Django INSERT a new row)
        old_taxlot = old_view.taxlot
        new_taxlot = old_taxlot
        new_taxlot.id = None
        new_taxlot.save()

        new_taxlot_2 = TaxLot.objects.get(pk=new_taxlot.pk)
        new_taxlot_2.id = None
        new_taxlot_2.save()

        # If the canonical TaxLot is NOT associated to another -View, it
        # becomes orphaned once old_view is deleted, so remove it now
        if not TaxLotView.objects.filter(taxlot_id=old_view.taxlot_id).exclude(pk=old_view.id).exists():
            TaxLot.objects.get(pk=old_view.taxlot_id).delete()

        # Create the views (saved below, after the merged view is removed)
        new_view1 = TaxLotView(
            cycle_id=cycle_id,
            taxlot_id=new_taxlot.id,
            state=state1
        )
        new_view2 = TaxLotView(
            cycle_id=cycle_id,
            taxlot_id=new_taxlot_2.id,
            state=state2
        )

        # Mark the merged state as deleted
        merged_state.merge_state = MERGE_STATE_DELETE
        merged_state.save()

        # Change the merge_state of the individual states
        if log.parent1.name in ['Import Creation',
                                'Manual Edit'] and log.parent1.import_filename is not None:
            # State belongs to a new record
            state1.merge_state = MERGE_STATE_NEW
        else:
            state1.merge_state = MERGE_STATE_MERGED
        if log.parent2.name in ['Import Creation',
                                'Manual Edit'] and log.parent2.import_filename is not None:
            # State belongs to a new record
            state2.merge_state = MERGE_STATE_NEW
        else:
            state2.merge_state = MERGE_STATE_MERGED
        # In most cases data_state will already be 3 (DATA_STATE_MATCHING), but if one of the parents was a
        # de-duplicated record then data_state will be 0. This step ensures that the new states will be 3.
        state1.data_state = DATA_STATE_MATCHING
        state2.data_state = DATA_STATE_MATCHING
        state1.save()
        state2.save()

        # Delete the audit log entry for the merge
        log.delete()

        # Remove the merged view before saving its two replacements
        old_view.delete()
        new_view1.save()
        new_view2.save()

        # Associate labels
        label_objs = StatusLabel.objects.filter(pk__in=label_ids)
        new_view1.labels.set(label_objs)
        new_view2.labels.set(label_objs)

        # Duplicate notes to the new views (same Note instance is reused by
        # clearing its pk twice, producing two inserted copies)
        for note in notes:
            created = note.created
            updated = note.updated
            note.id = None
            note.taxlot_view = new_view1
            note.save()
            ids = [note.id]
            note.id = None
            note.taxlot_view = new_view2
            note.save()
            ids.append(note.id)
            # Correct the created and updated times to match the original note
            Note.objects.filter(id__in=ids).update(created=created, updated=updated)

        # Re-create the property pairings on both new views
        for paired_view_id in paired_view_ids:
            TaxLotProperty(primary=True,
                           cycle_id=cycle_id,
                           taxlot_view_id=new_view1.id,
                           property_view_id=paired_view_id).save()
            TaxLotProperty(primary=True,
                           cycle_id=cycle_id,
                           taxlot_view_id=new_view2.id,
                           property_view_id=paired_view_id).save()

        return {
            'status': 'success',
            'view_id': new_view1.id
        }

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['GET'])
    def links(self, request, pk=None):
        """
        Get taxlot details for each linked taxlot across org cycles

        :param pk: id of the base TaxLotView; query param organization_id is
            required
        :return: JsonResponse with a 'data' list of serialized states (newest
            cycle first), or an error payload when the view is not in the org
        """
        organization_id = request.query_params.get('organization_id', None)
        base_view = TaxLotView.objects.select_related('cycle').filter(
            pk=pk,
            cycle__organization_id=organization_id
        )

        if base_view.exists():
            result = {'data': []}

            # all views sharing the canonical taxlot, most recent cycle first
            linked_views = TaxLotView.objects.select_related('cycle').filter(
                taxlot_id=base_view.get().taxlot_id,
                cycle__organization_id=organization_id
            ).order_by('-cycle__start')
            for linked_view in linked_views:
                state_data = TaxLotStateSerializer(linked_view.state).data

                state_data['cycle_id'] = linked_view.cycle.id
                state_data['view_id'] = linked_view.id
                result['data'].append(state_data)

            return JsonResponse(result, status=status.HTTP_200_OK)
        else:
            result = {
                'status': 'error',
                # this endpoint operates on taxlot views; the message
                # previously said "property view" by copy-paste error
                'message': 'taxlot view with id {} does not exist in given organization'.format(pk)
            }
            return JsonResponse(result)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def match_merge_link(self, request, pk=None):
        """
        Runs match merge link for an individual taxlot.

        Note that this method can return a view_id of None if the given -View
        was not involved in a merge.

        :param pk: id of the TaxLotView; query param organization_id required
        :return: JsonResponse with view_id and match/link counts, or a 404
            error payload when the view is not found in the organization
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            taxlot_view = TaxLotView.objects.get(
                pk=pk,
                cycle__organization_id=org_id
            )
        except TaxLotView.DoesNotExist:
            # previously an unknown id raised and surfaced as a 500
            return JsonResponse({
                'status': 'error',
                'message': 'taxlot view with id {} does not exist'.format(pk)
            }, status=status.HTTP_404_NOT_FOUND)

        merge_count, link_count, view_id = match_merge_link(taxlot_view.pk, 'TaxLotState')

        result = {
            'view_id': view_id,
            'match_merged_count': merge_count,
            'match_link_count': link_count,
        }

        return JsonResponse(result)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'property_id',
                required=True,
                description='The property id to pair up with this taxlot'
            )
        ]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['PUT'])
    def pair(self, request, pk=None):
        """
        Pair a property to this taxlot
        """
        taxlot_id = int(pk)
        property_id = int(request.query_params.get('property_id'))
        organization_id = int(request.query_params.get('organization_id'))
        # final True flag selects the "pair" action of the shared helper
        return pair_unpair_property_taxlot(property_id, taxlot_id, organization_id, True)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'property_id',
                required=True,
                description='The property id to unpair from this taxlot'
            )
        ]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['PUT'])
    def unpair(self, request, pk=None):
        """
        Unpair a property from this taxlot
        """
        taxlot_id = int(pk)
        property_id = int(request.query_params.get('property_id'))
        organization_id = int(request.query_params.get('organization_id'))
        # final False flag selects the "unpair" action of the shared helper
        return pair_unpair_property_taxlot(property_id, taxlot_id, organization_id, False)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'taxlot_view_ids': ['integer']
            },
            required=['taxlot_view_ids'],
            description='A list of taxlot view ids to delete'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['DELETE'])
    def batch_delete(self, request):
        """
        Batch delete several tax lots
        """
        org_id = request.query_params.get('organization_id', None)
        view_ids = request.data.get('taxlot_view_ids', [])

        # resolve the states backing the requested views within the org
        state_id_qs = TaxLotView.objects.filter(
            id__in=view_ids,
            cycle__organization_id=org_id
        ).values_list('state_id', flat=True)

        deleted_total, deleted_by_model = TaxLotState.objects.filter(
            pk__in=Subquery(state_id_qs)
        ).delete()

        if deleted_total == 0:
            return JsonResponse({'status': 'warning', 'message': 'No action was taken'})

        return JsonResponse({'status': 'success', 'taxlots': deleted_by_model['seed.TaxLotState']})

    def _get_taxlot_view(self, taxlot_pk):
        """Look up a TaxLotView by id within the requesting organization.

        :param taxlot_pk: id of the TaxLotView
        :return: {'status': 'success', 'taxlot_view': view} when found,
            otherwise an error payload with a message
        """
        try:
            view = TaxLotView.objects.select_related(
                'taxlot', 'cycle', 'state'
            ).get(
                id=taxlot_pk,
                taxlot__organization_id=self.request.GET['organization_id']
            )
        except TaxLotView.DoesNotExist:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} does not exist'.format(taxlot_pk)
            }
        return {
            'status': 'success',
            'taxlot_view': view
        }

    def get_history(self, taxlot_view):
        """Return history in reverse order"""

        # access the history from the taxlot state
        history, master = taxlot_view.state.history()

        def _serialize_entry(entry):
            # swap the raw state data for its serialized representation
            entry['state'] = TaxLotStateSerializer(entry['state_data']).data
            del entry['state_data']
            del entry['state_id']

        _serialize_entry(master)
        for record in history:
            _serialize_entry(record)

        return history, master

    def _get_properties(self, taxlot_view_pk):
        """Serialize every property view paired to the given taxlot view."""
        paired_pks = TaxLotProperty.objects.filter(
            taxlot_view_id=taxlot_view_pk
        ).values_list('property_view_id', flat=True)
        paired_views = PropertyView.objects.filter(
            pk__in=paired_pks
        ).select_related('cycle', 'state')
        return [PropertyViewSerializer(view).data for view in paired_views]

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_view_data')
    def retrieve(self, request, pk):
        """
        Get taxlot details
        """
        result = self._get_taxlot_view(pk)
        if result.get('status', None) == 'error':
            return JsonResponse(result, status=status.HTTP_404_NOT_FOUND)

        taxlot_view = result.pop('taxlot_view')
        result.update(TaxLotViewSerializer(taxlot_view).data)
        # remove TaxLotView id from result
        result.pop('id')

        result['state'] = TaxLotStateSerializer(taxlot_view.state).data
        result['properties'] = self._get_properties(taxlot_view.pk)
        result['history'], master = self.get_history(taxlot_view)
        result = update_result_with_master(result, master)
        return JsonResponse(result, status=status.HTTP_200_OK)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=UpdateTaxLotPayloadSerializer,
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    def update(self, request, pk):
        """
        Update a taxlot and run the updated record through a match and merge
        round within it's current Cycle.

        The request body carries the new values under 'state'; empty strings
        are normalized to None before comparison. If the current state came
        straight from an import ('Import Creation'), an exact copy of it is
        created first so the imported values are preserved in the audit
        history before the edits are applied.

        :param pk: id of the TaxLotView to update; query param organization_id
            is required
        """
        data = request.data

        result = self._get_taxlot_view(pk)
        if result.get('status', 'error') != 'error':
            taxlot_view = result.pop('taxlot_view')
            taxlot_state_data = TaxLotStateSerializer(taxlot_view.state).data

            # get the taxlot state information from the request
            new_taxlot_state_data = data['state']

            # set empty strings to None
            for key, val in new_taxlot_state_data.items():
                if val == '':
                    new_taxlot_state_data[key] = None

            changed_fields, previous_data = get_changed_fields(taxlot_state_data, new_taxlot_state_data)
            if not changed_fields:
                # nothing changed -- report success without writing anything
                result.update(
                    {'status': 'success', 'message': 'Records are identical'}
                )
                return JsonResponse(result, status=status.HTTP_204_NO_CONTENT)
            else:
                # Not sure why we are going through the pain of logging this all right now... need to
                # reevaluate this.
                log = TaxLotAuditLog.objects.select_related().filter(
                    state=taxlot_view.state
                ).order_by('-id').first()

                # if checks above pass, create an exact copy of the current state for historical purposes
                if log.name == 'Import Creation':
                    # Add new state by removing the existing ID.
                    taxlot_state_data.pop('id')
                    # Remove the import_file_id for the first edit of a new record
                    # If the import file has been deleted and this value remains the serializer won't be valid
                    taxlot_state_data.pop('import_file')
                    new_taxlot_state_serializer = TaxLotStateSerializer(
                        data=taxlot_state_data
                    )
                    if new_taxlot_state_serializer.is_valid():
                        # create the new property state, and perform an initial save / moving relationships
                        new_state = new_taxlot_state_serializer.save()

                        # then assign this state to the property view and save the whole view
                        taxlot_view.state = new_state
                        taxlot_view.save()

                        # record the copy as a 'Manual Edit' descended from the import log
                        TaxLotAuditLog.objects.create(organization=log.organization,
                                                      parent1=log,
                                                      parent2=None,
                                                      parent_state1=log.state,
                                                      parent_state2=None,
                                                      state=new_state,
                                                      name='Manual Edit',
                                                      description=None,
                                                      import_filename=log.import_filename,
                                                      record_type=AUDIT_USER_EDIT)

                        result.update(
                            {'state': new_taxlot_state_serializer.data}
                        )

                        # save the property view so that the datetime gets updated on the property.
                        taxlot_view.save()
                    else:
                        result.update({
                            'status': 'error',
                            'message': 'Invalid update data with errors: {}'.format(
                                new_taxlot_state_serializer.errors)}
                        )
                        return JsonResponse(result, status=status.HTTP_422_UNPROCESSABLE_ENTITY)

                # redo assignment of this variable in case this was an initial edit
                taxlot_state_data = TaxLotStateSerializer(taxlot_view.state).data

                # merge extra_data sub-dict separately so existing keys survive
                if 'extra_data' in new_taxlot_state_data:
                    taxlot_state_data['extra_data'].update(
                        new_taxlot_state_data['extra_data']
                    )

                taxlot_state_data.update(
                    {k: v for k, v in new_taxlot_state_data.items() if k != 'extra_data'}
                )

                # re-fetch: the copy above may have added a newer log entry
                log = TaxLotAuditLog.objects.select_related().filter(
                    state=taxlot_view.state
                ).order_by('-id').first()

                if log.name in ['Manual Edit', 'Manual Match', 'System Match', 'Merge current state in migration']:
                    # Convert this to using the serializer to save the data. This will override the
                    # previous values in the state object.

                    # Note: We should be able to use partial update here and pass in the changed
                    # fields instead of the entire state_data.
                    updated_taxlot_state_serializer = TaxLotStateSerializer(
                        taxlot_view.state,
                        data=taxlot_state_data
                    )
                    if updated_taxlot_state_serializer.is_valid():
                        # create the new property state, and perform an initial save / moving
                        # relationships
                        updated_taxlot_state_serializer.save()

                        result.update(
                            {'state': updated_taxlot_state_serializer.data}
                        )

                        # save the property view so that the datetime gets updated on the property.
                        taxlot_view.save()

                        # record the edit as a note for audit purposes
                        Note.create_from_edit(request.user.id, taxlot_view, new_taxlot_state_data, previous_data)

                        merge_count, link_count, view_id = match_merge_link(taxlot_view.id, 'TaxLotState')

                        result.update({
                            'view_id': view_id,
                            'match_merged_count': merge_count,
                            'match_link_count': link_count,
                        })

                        return JsonResponse(result, status=status.HTTP_200_OK)
                    else:
                        result.update({
                            'status': 'error',
                            'message': 'Invalid update data with errors: {}'.format(
                                updated_taxlot_state_serializer.errors)}
                        )
                        return JsonResponse(result, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
                else:
                    result = {
                        'status': 'error',
                        'message': 'Unrecognized audit log name: ' + log.name
                    }
                    return JsonResponse(result, status=status.HTTP_204_NO_CONTENT)
        else:
            return JsonResponse(result, status=status.HTTP_404_NOT_FOUND)