Example #1
class DataQualityCheckViewSet(viewsets.ViewSet):
    """
    Handles Data Quality API operations within the Inventory backend:
    (1) POST to start a check, wait on the progress key, then fetch results.
    (2) Respond with what changed.
    """

    # Remove lookup_field once data_quality_check_id is in use and "pk" can take its place
    lookup_field = 'organization_id'

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.base_field(
                "organization_id", "IN_PATH",
                "Organization ID - identifier used to specify a DataQualityCheck",
                True, "TYPE_INTEGER")
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'property_view_ids': ['integer'],
                'taxlot_view_ids': ['integer'],
            },
            description='An object containing IDs of the records to perform'
            ' data quality checks on. Should contain two keys: '
            'property_view_ids and taxlot_view_ids, each of '
            'which is an array of appropriate IDs.',
        ),
        responses={
            200:
            AutoSchemaHelper.schema_factory({
                'num_properties': 'integer',
                'num_taxlots': 'integer',
                'progress_key': 'string',
                'progress': {},
            })
        })
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def start(self, request, organization_id):
        """
        This API endpoint will create a new data_quality check process in the background,
        on potentially a subset of properties/taxlots, and return back a query key
        """
        body = request.data
        property_view_ids = body['property_view_ids']
        taxlot_view_ids = body['taxlot_view_ids']

        property_state_ids = PropertyView.objects.filter(
            id__in=property_view_ids).values_list('state_id', flat=True)
        taxlot_state_ids = TaxLotView.objects.filter(
            id__in=taxlot_view_ids).values_list('state_id', flat=True)

        # For now, organization_id is the only key currently used to identify DataQualityChecks
        return_value = do_checks(organization_id, property_state_ids,
                                 taxlot_state_ids)

        return JsonResponse({
            'num_properties': len(property_state_ids),
            'num_taxlots': len(taxlot_state_ids),
            # TODO #239: Deprecate progress_key from here and just use the 'progress.progress_key'
            'progress_key': return_value['progress_key'],
            'progress': return_value,
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_integer_field("run_id", True,
                                             "Import file ID or cache key"),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['GET'])
    def results_csv(self, request):
        """
        Download a CSV of the results from a data quality run, identified
        either by the ID given when the data quality task was created or by
        the ID of the import file whose records were checked.
        Note that this is not related to objects in the database, since the
        results are stored in Redis!
        """
        run_id = request.query_params.get('run_id')
        if run_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'must include Import file ID or cache key as run_id'
                },
                status=status.HTTP_400_BAD_REQUEST)

        data_quality_results = get_cache_raw(
            DataQualityCheck.cache_key(run_id))
        response = HttpResponse(content_type='text/csv')
        response[
            'Content-Disposition'] = 'attachment; filename="Data Quality Check Results.csv"'

        writer = csv.writer(response)
        if data_quality_results is None:
            writer.writerow(['Error'])
            writer.writerow(['data quality results not found'])
            return response

        writer.writerow([
            'Table', 'Address Line 1', 'PM Property ID', 'Tax Lot ID',
            'Custom ID', 'Field', 'Applied Label', 'Condition',
            'Error Message', 'Severity'
        ])

        for row in data_quality_results:
            for result in row['data_quality_results']:
                writer.writerow([
                    result['table_name'],
                    row['address_line_1'],
                    row.get('pm_property_id'),
                    row.get('jurisdiction_tax_lot_id'),
                    row['custom_id_1'],
                    result['formatted_field'],
                    result.get('label'),
                    result['condition'],
                    # detailed_message can contain units with superscripts or
                    # subscripts, so unidecode it
                    unidecode(result['detailed_message']),
                    result['severity']
                ])

        return response

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_integer_field("run_id", True,
                                             "Import file ID or cache key"),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['GET'])
    def results(self, request):
        """
        Return the results of a data quality run, identified either by the ID
        given when the data quality task was created or by the ID of the
        import file whose records were checked.
        Note that this is not related to objects in the database, since the
        results are stored in Redis!
        """
        run_id = request.query_params.get('run_id')
        if run_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'must include Import file ID or cache key as run_id'
                },
                status=status.HTTP_400_BAD_REQUEST)

        data_quality_results = get_cache_raw(DataQualityCheck.cache_key(run_id))
        return JsonResponse({'data': data_quality_results})
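
A minimal client-side sketch of driving the endpoints above with the requests library. The host, API prefix, and auth header are assumptions for illustration; the route names follow the viewset actions.

import requests

BASE = 'http://localhost:8000/api/v3'  # hypothetical host and prefix
HEADERS = {'Authorization': 'Bearer <token>'}  # hypothetical auth scheme

# start a data quality check for organization 1 (the org id is the path pk)
resp = requests.post(
    f'{BASE}/data_quality_checks/1/start/',
    json={'property_view_ids': [10, 11], 'taxlot_view_ids': []},
    headers=HEADERS)
progress_key = resp.json()['progress_key']
# poll the progress API with progress_key until the background task completes

# then download the results as a CSV
csv_resp = requests.get(
    f'{BASE}/data_quality_checks/results_csv/',
    params={'organization_id': 1, 'run_id': 1},
    headers=HEADERS)
with open('dq_results.csv', 'wb') as f:
    f.write(csv_resp.content)
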
Example #2
class TaxLotPropertyViewSet(GenericViewSet):
    """
    The TaxLotProperty field is used to return the properties and tax lots from the join table.
    This viewset presently only handles exports (CSV, XLSX, or GeoJSON), but
    should eventually be extended to cover any tax lot / property join API call.
    """
    renderer_classes = (JSONRenderer, )
    serializer_class = TaxLotPropertySerializer

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field("cycle_id", True, "Cycle ID"),
            AutoSchemaHelper.query_string_field(
                "inventory_type", False,
                "Either 'taxlots' or 'properties' and defaults to 'properties'."
            ),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'ids': ['integer'],
                'filename': 'string',
                'export_type': 'string',
                'profile_id': 'integer'
            },
            description='- ids: (View) IDs for records to be exported\n'
            '- filename: desired filename including extension (defaulting to \'ExportedData.{export_type}\')\n'
            '- export_type: \'csv\', \'geojson\', \'xlsx\' (defaulting to \'csv\')\n'
            '- profile_id: Column List Profile ID to use for customizing fields included in export'
        ),
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['POST'])
    def export(self, request):
        """
        Download a collection of the TaxLot and Properties in multiple formats.
        """
        cycle_pk = request.query_params.get('cycle_id', None)
        if not cycle_pk:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Must pass in cycle_id as query parameter'
                },
                status=400)
        org_id = request.query_params['organization_id']
        profile_id = request.data.get('profile_id')
        if profile_id in ('None', ''):
            profile_id = None

        # get the class to operate on and the relationships
        view_klass_str = request.query_params.get('inventory_type',
                                                  'properties')
        view_klass = INVENTORY_MODELS[view_klass_str]

        # Set the first column to be the ID
        column_name_mappings = OrderedDict([('id', 'ID')])
        column_ids, add_column_name_mappings, columns_from_database = ColumnListProfile.return_columns(
            org_id, profile_id, view_klass_str)
        column_name_mappings.update(add_column_name_mappings)
        select_related = ['state', 'cycle']
        prefetch_related = []
        ids = request.data.get('ids', [])
        filter_str = {'cycle': cycle_pk}
        if hasattr(view_klass, 'property'):
            select_related.append('property')
            prefetch_related = ['labels']
            # scope to the organization without dropping the cycle filter
            filter_str['property__organization_id'] = org_id
            if ids:
                filter_str['id__in'] = ids
            # always export the labels and notes
            column_name_mappings['property_notes'] = 'Property Notes'
            column_name_mappings['property_labels'] = 'Property Labels'

        elif hasattr(view_klass, 'taxlot'):
            select_related.append('taxlot')
            prefetch_related = ['labels']
            filter_str['taxlot__organization_id'] = org_id
            if ids:
                filter_str['id__in'] = ids
            # always export the labels and notes
            column_name_mappings['taxlot_notes'] = 'Tax Lot Notes'
            column_name_mappings['taxlot_labels'] = 'Tax Lot Labels'

        model_views = view_klass.objects.select_related(
            *select_related).prefetch_related(*prefetch_related).filter(
                **filter_str).order_by('id')

        # get the data in a dict which includes the related data
        data = TaxLotProperty.get_related(model_views, column_ids,
                                          columns_from_database)

        # add labels and notes
        for i, record in enumerate(model_views):
            label_string = []
            note_string = []
            for label in list(record.labels.all().order_by('name')):
                label_string.append(label.name)
            for note in list(record.notes.all().order_by('created')):
                note_string.append(note.created.astimezone().strftime(
                    "%Y-%m-%d %I:%M:%S %p") + "\n" + note.text)

            if hasattr(record, 'property'):
                data[i]['property_labels'] = ','.join(label_string)
                data[i]['property_notes'] = '\n----------\n'.join(note_string)
            elif hasattr(record, 'taxlot'):
                data[i]['taxlot_labels'] = ','.join(label_string)
                data[i]['taxlot_notes'] = '\n----------\n'.join(note_string)

        # force the data into the same order as the IDs
        if ids:
            order_dict = {obj_id: index for index, obj_id in enumerate(ids)}
            if view_klass_str == 'properties':
                view_id_str = 'property_view_id'
            else:
                view_id_str = 'taxlot_view_id'
            data.sort(key=lambda inventory_obj: order_dict[inventory_obj[
                view_id_str]])

        export_type = request.data.get('export_type', 'csv')

        filename = request.data.get('filename', f"ExportedData.{export_type}")

        if export_type == "csv":
            return self._csv_response(filename, data, column_name_mappings)
        elif export_type == "geojson":
            return self._json_response(filename, data, column_name_mappings)
        elif export_type == "xlsx":
            return self._spreadsheet_response(filename, data,
                                              column_name_mappings)

    def _csv_response(self, filename, data, column_name_mappings):
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        writer = csv.writer(response)

        # Check the first item in the header: Excel misreads a CSV whose first
        # cell is literally 'ID' (it sniffs the file as SYLK), so write 'id'.
        header = list(column_name_mappings.values())
        if header[0] == 'ID':
            header[0] = 'id'
        writer.writerow(header)

        # iterate over the results to preserve column order and write row.
        for datum in data:
            row = []
            for column in column_name_mappings:
                row_result = datum.get(column, None)

                # Try grabbing the value out of the related field if not found yet.
                if row_result is None and datum.get('related'):
                    row_result = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON Encoder, but that isn't here).
                if isinstance(row_result, ureg.Quantity):
                    row_result = row_result.magnitude
                elif isinstance(row_result, datetime.datetime):
                    row_result = row_result.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(row_result, datetime.date):
                    row_result = row_result.strftime("%Y-%m-%d")
                row.append(row_result)

            writer.writerow(row)

        return response

    def _spreadsheet_response(self, filename, data, column_name_mappings):
        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        scenario_keys = ('id', 'name', 'description',
                         'annual_site_energy_savings',
                         'annual_source_energy_savings', 'annual_cost_savings',
                         'analysis_state', 'analysis_state_message',
                         'annual_electricity_savings',
                         'annual_natural_gas_savings', 'annual_site_energy',
                         'annual_source_energy', 'annual_natural_gas_energy',
                         'annual_electricity_energy', 'annual_peak_demand',
                         'annual_site_energy_use_intensity',
                         'annual_source_energy_use_intensity')
        scenario_key_mappings = {
            'annual_site_energy_savings': 'annual_site_energy_savings_mmbtu',
            'annual_source_energy_savings':
            'annual_source_energy_savings_mmbtu',
            'annual_cost_savings': 'annual_cost_savings_dollars',
            'annual_site_energy': 'annual_site_energy_kbtu',
            'annual_site_energy_use_intensity':
            'annual_site_energy_use_intensity_kbtu_ft2',
            'annual_source_energy': 'annual_source_energy_kbtu',
            'annual_source_energy_use_intensity':
            'annual_source_energy_use_intensity_kbtu_ft2',
            'annual_natural_gas_energy': 'annual_natural_gas_energy_mmbtu',
            'annual_electricity_energy': 'annual_electricity_energy_mmbtu',
            'annual_peak_demand': 'annual_peak_demand_kw',
            'annual_electricity_savings': 'annual_electricity_savings_kbtu',
            'annual_natural_gas_savings': 'annual_natural_gas_savings_kbtu'
        }

        property_measure_keys = ('id', 'property_measure_name', 'measure_id',
                                 'cost_mv', 'cost_total_first',
                                 'cost_installation', 'cost_material',
                                 'cost_capital_replacement',
                                 'cost_residual_value')
        measure_keys = ('name', 'display_name', 'category',
                        'category_display_name')
        # find measures and scenarios
        for i, record in enumerate(data):
            measures = PropertyMeasure.objects.filter(
                property_state_id=record['property_state_id'])
            record['measures'] = measures

            scenarios = Scenario.objects.filter(
                property_state_id=record['property_state_id'])
            record['scenarios'] = scenarios

        output = io.BytesIO()
        wb = xlsxwriter.Workbook(output, {'remove_timezone': True})

        # add tabs
        ws1 = wb.add_worksheet('Properties')
        ws2 = wb.add_worksheet('Measures')
        ws3 = wb.add_worksheet('Scenarios')
        ws4 = wb.add_worksheet('Scenario Measure Join Table')
        ws5 = wb.add_worksheet('Meter Readings')
        bold = wb.add_format({'bold': True})

        row = 0
        row2 = 0
        col2 = 0
        row3 = 0
        col3 = 0
        row4 = 0
        row5 = 0

        for index, val in enumerate(list(column_name_mappings.values())):
            # Do not write the first element as 'ID'; it causes weird issues with Excel.
            if index == 0 and val == 'ID':
                ws1.write(row, index, 'id', bold)
            else:
                ws1.write(row, index, val, bold)

        # iterate over the results to preserve column order and write row.
        add_m_headers = True
        add_s_headers = True
        for datum in data:
            row += 1
            record_id = None  # avoid shadowing the id() builtin
            for index, column in enumerate(column_name_mappings):
                if column == 'id':
                    record_id = datum.get(column, None)

                row_result = datum.get(column, None)

                # Try grabbing the value out of the related field if not found yet.
                if row_result is None and datum.get('related'):
                    row_result = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON Encoder, but that isn't here).
                if isinstance(row_result, ureg.Quantity):
                    row_result = row_result.magnitude
                elif isinstance(row_result, datetime.datetime):
                    row_result = row_result.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(row_result, datetime.date):
                    row_result = row_result.strftime("%Y-%m-%d")
                ws1.write(row, index, row_result)

            # measures
            for index, m in enumerate(datum['measures']):
                if add_m_headers:
                    # grab headers
                    for key in property_measure_keys:
                        ws2.write(row2, col2, key, bold)
                        col2 += 1
                    for key in measure_keys:
                        ws2.write(row2, col2, 'measure ' + key, bold)
                        col2 += 1
                    add_m_headers = False

                row2 += 1
                col2 = 0
                for key in property_measure_keys:
                    ws2.write(row2, col2, getattr(m, key))
                    col2 += 1
                for key in measure_keys:
                    ws2.write(row2, col2, getattr(m.measure, key))
                    col2 += 1

            # scenarios (and join table)
            # join table headers (harmlessly rewritten on each pass)
            ws4.write('A1', 'property_id', bold)
            ws4.write('B1', 'scenario_id', bold)
            ws4.write('C1', 'measure_id', bold)
            for index, s in enumerate(datum['scenarios']):
                scenario_id = s.id
                if add_s_headers:
                    # grab headers
                    for key in scenario_keys:
                        # double check scenario_key_mappings in case a different header is desired
                        if key in scenario_key_mappings.keys():
                            key = scenario_key_mappings[key]
                        ws3.write(row3, col3, key, bold)
                        col3 += 1
                    add_s_headers = False
                row3 += 1
                col3 = 0
                for key in scenario_keys:
                    ws3.write(row3, col3, getattr(s, key))
                    col3 += 1

                for sm in s.measures.all():
                    row4 += 1
                    ws4.write(row4, 0, record_id)
                    ws4.write(row4, 1, scenario_id)
                    ws4.write(row4, 2, sm.id)

            # scenario meter readings
            ws5.write('A1', 'scenario_id', bold)
            ws5.write('B1', 'meter_id', bold)
            ws5.write('C1', 'type', bold)
            ws5.write('D1', 'start_time', bold)
            ws5.write('E1', 'end_time', bold)
            ws5.write('F1', 'reading', bold)
            ws5.write('G1', 'units', bold)
            ws5.write('H1', 'is_virtual', bold)
            # datetime formatting
            date_format = wb.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'})

            for index, s in enumerate(datum['scenarios']):
                scenario_id = s.id
                # retrieve meters
                meters = Meter.objects.filter(scenario_id=scenario_id)
                for m in meters:
                    # retrieve readings
                    readings = MeterReading.objects.filter(
                        meter_id=m.id).order_by('start_time')
                    for r in readings:
                        row5 += 1
                        ws5.write(row5, 0, scenario_id)
                        ws5.write(row5, 1, m.id)
                        the_type = next((item for item in Meter.ENERGY_TYPES
                                         if item[0] == m.type), None)
                        the_type = the_type[1] if the_type is not None else None
                        # use the energy type enum to determine the reading type
                        ws5.write(row5, 2, the_type)
                        ws5.write_datetime(row5, 3, r.start_time, date_format)
                        ws5.write_datetime(row5, 4, r.end_time, date_format)
                        ws5.write(row5, 5, r.reading)  # reading is a float field
                        ws5.write(row5, 6, r.source_unit)
                        ws5.write(row5, 7, m.is_virtual)

        wb.close()

        # xlsx_data contains the Excel file
        xlsx_data = output.getvalue()

        response.write(xlsx_data)
        return response

    def _json_response(self, filename, data, column_name_mappings):
        polygon_fields = [
            "bounding_box", "centroid", "property_footprint",
            "taxlot_footprint", "long_lat"
        ]
        features = []

        # extract related records
        related_records = self._extract_related(data)

        # append related_records to data
        complete_data = data + related_records

        for datum in complete_data:
            feature = {"type": "Feature", "properties": {}}

            for key, value in datum.items():
                if value is None:
                    continue

                if isinstance(value, ureg.Quantity):
                    value = value.magnitude
                elif isinstance(value, datetime.datetime):
                    value = value.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(value, datetime.date):
                    value = value.strftime("%Y-%m-%d")

                if value and any(k in key for k in polygon_fields):
                    """
                    If object is a polygon and is populated, add the 'geometry'
                    key-value-pair in the appropriate GeoJSON format.
                    When the first geometry is added, the correct format is
                    established. When/If a second geometry is added, this is
                    appended alongside the previous geometry.
                    """
                    individual_geometry = {}

                    # long_lat
                    if key == 'long_lat':
                        coordinates = self._serialized_point(value)
                        # point
                        individual_geometry = {
                            "coordinates": coordinates,
                            "type": "Point"
                        }
                    else:
                        # polygons
                        coordinates = self._serialized_coordinates(value)
                        individual_geometry = {
                            "coordinates": [coordinates],
                            "type": "Polygon"
                        }

                    if feature.get("geometry", None) is None:
                        feature["geometry"] = {
                            "type": "GeometryCollection",
                            "geometries": [individual_geometry]
                        }
                    else:
                        feature["geometry"]["geometries"].append(
                            individual_geometry)
                else:
                    """
                    Non-polygon data
                    """
                    display_key = column_name_mappings.get(key, key)
                    feature["properties"][display_key] = value

                    # # store point geometry in case you need it
                    # if display_key == "Longitude":
                    #     point_geometry[0] = value
                    # if display_key == "Latitude":
                    #     point_geometry[1] = value
            """
            Before appending feature, ensure that if there is no geometry recorded.
            Note that the GeoJson will not render if no lat/lng
            """

            # add style information
            if feature["properties"].get("property_state_id") is not None:
                feature["properties"]["stroke"] = "#185189"  # buildings color
            elif feature["properties"].get("taxlot_state_id") is not None:
                feature["properties"]["stroke"] = "#10A0A0"  # buildings color
            feature["properties"]["marker-color"] = "#E74C3C"
            # feature["properties"]["stroke-width"] = 3
            feature["properties"]["fill-opacity"] = 0

            # append feature
            features.append(feature)

        # build the collection once, after the loop (this also avoids a
        # NameError when there are no records)
        response_dict = {
            "type": "FeatureCollection",
            "crs": {
                "type": "EPSG",
                "properties": {
                    "code": 4326
                }
            },
            "features": features
        }

        response = JsonResponse(response_dict)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(
            filename)

        return response

    def _serialized_coordinates(self, polygon_wkt):
        # str.lstrip/rstrip strip a *set of characters*, not a prefix/suffix,
        # so peel off the WKT wrapper explicitly instead.
        inner = polygon_wkt.replace('POLYGON ', '').strip('()')

        coordinates = []
        for coord_pair in inner.split(', '):
            float_coords = [float(coord) for coord in coord_pair.split(' ')]
            coordinates.append(float_coords)

        return coordinates

    def _serialized_point(self, point_wkt):
        inner = point_wkt.replace('POINT ', '').strip('()')

        coordinates = []
        for coord in inner.split(', ')[0].split(' '):
            coordinates.append(float(coord))

        return coordinates
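
    # Illustrative round-trips for the two helpers above (hypothetical inputs):
    #   _serialized_point('POINT (-104.98 39.74)')
    #       -> [-104.98, 39.74]
    #   _serialized_coordinates('POLYGON ((0 0, 1 0, 1 1, 0 0))')
    #       -> [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]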

    def _extract_related(self, data):
        # extract all related records into a separate array
        related = []

        if not data:
            return []

        for datum in data:
            if datum.get("related", None) is not None:
                for record in datum["related"]:
                    related.append(record)

        # make the array unique; property and taxlot records deduplicate the
        # same way
        unique = [dict(p) for p in set(tuple(i.items()) for i in related)]

        return unique
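
A sketch of calling the export action above; the URL prefix and auth header are assumptions, and the query and body parameters mirror the swagger schema on the viewset.

import requests

BASE = 'http://localhost:8000/api/v3'  # hypothetical prefix
resp = requests.post(
    f'{BASE}/tax_lot_properties/export/',
    params={'organization_id': 1, 'cycle_id': 2,
            'inventory_type': 'properties'},
    json={'ids': [10, 11], 'export_type': 'xlsx',
          'filename': 'MyExport.xlsx'},
    headers={'Authorization': 'Bearer <token>'})  # hypothetical auth
with open('MyExport.xlsx', 'wb') as f:
    f.write(resp.content)
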
Example #3
class AnalysisViewSet(viewsets.ViewSet):
    serializer_class = AnalysisSerializer
    model = Analysis

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
        ],
        request_body=CreateAnalysisSerializer,
    )
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def create(self, request):
        serializer = CreateAnalysisSerializer(data=request.data)
        if not serializer.is_valid():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Bad request',
                    'errors': serializer.errors
                },
                status=400)

        analysis = serializer.save(
            user_id=request.user.id,
            organization_id=request.query_params['organization_id'])
        pipeline = AnalysisPipeline.factory(analysis)
        try:
            progress_data = pipeline.prepare_analysis(
                serializer.validated_data['property_view_ids'])
            return JsonResponse({
                'status': 'success',
                'progress_key': progress_data['progress_key'],
                'progress': progress_data,
            })
        except AnalysisPipelineException as e:
            return JsonResponse({
                'status': 'error',
                'message': str(e)
            },
                                status=HTTP_409_CONFLICT)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(True),
        AutoSchemaHelper.query_integer_field('property_id', False,
                                             'Property ID')
    ])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def list(self, request):
        organization_id = request.query_params.get('organization_id', None)
        property_id = request.query_params.get('property_id', None)
        analyses = []
        if property_id is not None:
            analyses_queryset = (
                Analysis.objects.filter(
                    organization=organization_id,
                    analysispropertyview__property=property_id
                ).distinct().order_by('id'))
        else:
            analyses_queryset = (Analysis.objects.filter(
                organization=organization_id).order_by('id'))
        for analysis in analyses_queryset:
            property_view_info = analysis.get_property_view_info(property_id)
            serialized_analysis = AnalysisSerializer(analysis).data
            serialized_analysis.update(property_view_info)
            analyses.append(serialized_analysis)

        return JsonResponse({'status': 'success', 'analyses': analyses})

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field(True)])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def retrieve(self, request, pk):
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
        except Analysis.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    "Requested analysis doesn't exist in this organization."
                },
                status=HTTP_409_CONFLICT)
        serialized_analysis = AnalysisSerializer(analysis).data
        property_view_info = analysis.get_property_view_info()
        serialized_analysis.update(property_view_info)

        return JsonResponse({
            'status': 'success',
            'analysis': serialized_analysis
        })

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['post'])
    def start(self, request, pk):
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
            pipeline = AnalysisPipeline.factory(analysis)
            progress_data = pipeline.start_analysis()
            return JsonResponse({
                'status': 'success',
                'progress_key': progress_data['progress_key'],
                'progress': progress_data,
            })
        except Analysis.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Requested analysis doesn\'t exist in this organization.'
                },
                status=HTTP_409_CONFLICT)
        except AnalysisPipelineException as e:
            return JsonResponse({
                'status': 'error',
                'message': str(e)
            },
                                status=HTTP_409_CONFLICT)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['post'])
    def stop(self, request, pk):
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
            pipeline = AnalysisPipeline.factory(analysis)
            pipeline.stop()
            return JsonResponse({
                'status': 'success',
            })
        except Analysis.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Requested analysis doesn\'t exist in this organization.'
                },
                status=HTTP_409_CONFLICT)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @require_organization_id_class
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def destroy(self, request, pk):
        organization_id = int(request.query_params.get('organization_id', 0))
        try:
            analysis = Analysis.objects.get(id=pk,
                                            organization_id=organization_id)
            pipeline = AnalysisPipeline.factory(analysis)
            pipeline.delete()
            return JsonResponse({
                'status': 'success',
            })
        except Analysis.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Requested analysis doesn\'t exist in this organization.'
                },
                status=HTTP_409_CONFLICT)
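
The create/start/retrieve/stop lifecycle above can be sketched as follows; the routes and auth header are illustrative assumptions, and the create body is defined by CreateAnalysisSerializer (only property_view_ids is confirmed by the code; other fields are hypothetical).

import requests

BASE = 'http://localhost:8000/api/v3'  # hypothetical prefix
HEADERS = {'Authorization': 'Bearer <token>'}  # hypothetical auth

# create an analysis; the response carries a progress key to poll
created = requests.post(
    f'{BASE}/analyses/', params={'organization_id': 1},
    json={'name': 'My analysis', 'property_view_ids': [10, 11]},
    headers=HEADERS).json()
progress_key = created['progress_key']

# start it, then poll the progress key until the pipeline finishes
analysis_id = 1  # obtained separately, e.g. from the list endpoint
requests.post(f'{BASE}/analyses/{analysis_id}/start/',
              params={'organization_id': 1}, headers=HEADERS)

# fetch the serialized analysis once it completes
detail = requests.get(f'{BASE}/analyses/{analysis_id}/',
                      params={'organization_id': 1},
                      headers=HEADERS).json()
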
Example #4
class OrganizationViewSet(viewsets.ViewSet):
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['DELETE'])
    def columns(self, request, pk=None):
        """
        Delete all columns for an organization. This is typically not
        recommended when there is data in the inventory, as it will invalidate
        all extra_data fields. It also removes all existing column mappings.

        ---
        parameters:
            - name: pk
              description: The organization_id
              required: true
              paramType: path
        type:
            status:
                description: success or error
                type: string
                required: true
            column_mappings_deleted_count:
                description: Number of column_mappings that were deleted
                type: integer
                required: true
            columns_deleted_count:
                description: Number of columns that were deleted
                type: integer
                required: true
        """
        try:
            org = Organization.objects.get(pk=pk)
            c_count, cm_count = Column.delete_all(org)
            return JsonResponse({
                'status': 'success',
                'column_mappings_deleted_count': cm_count,
                'columns_deleted_count': c_count,
            })
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'organization with id {} does not exist'.format(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_integer_field('import_file_id',
                                                 required=True,
                                                 description='Import file id'),
            openapi.Parameter('id',
                              openapi.IN_PATH,
                              type=openapi.TYPE_INTEGER,
                              description='Organization id'),
        ],
        request_body=SaveColumnMappingsRequestPayloadSerializer,
        responses={200: 'success response'})
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def column_mappings(self, request, pk=None):
        """
        Saves the mappings between the raw headers of an ImportFile and the
        destination fields in the `to_table_name` model, which should be
        either PropertyState or TaxLotState.

        Valid source_type values are found in ``seed.models.SEED_DATA_SOURCES``
        """
        import_file_id = request.query_params.get('import_file_id')
        if import_file_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Query param `import_file_id` is required'
                },
                status=status.HTTP_400_BAD_REQUEST)
        try:
            _ = ImportFile.objects.get(pk=import_file_id)
            organization = Organization.objects.get(pk=pk)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'No import file found'
                },
                status=status.HTTP_404_NOT_FOUND)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'No organization found'
                },
                status=status.HTTP_404_NOT_FOUND)

        result = Column.create_mappings(
            request.data.get('mappings', []), organization, request.user,
            import_file_id)

        if result:
            return JsonResponse({'status': 'success'})
        else:
            return JsonResponse({'status': 'error'})

    def _start_whole_org_match_merge_link(self,
                                          org_id,
                                          state_class_name,
                                          proposed_columns=None):
        # use None rather than a mutable default argument
        proposed_columns = proposed_columns if proposed_columns is not None else []
        identifier = randint(100, 100000)
        result_key = _get_match_merge_link_key(identifier)
        set_cache_raw(result_key, {})

        progress_data = ProgressData(func_name='org_match_merge_link',
                                     unique_id=identifier)
        progress_data.delete()

        whole_org_match_merge_link.apply_async(
            args=(org_id, state_class_name, proposed_columns),
            link=cache_match_merge_link_result.s(identifier,
                                                 progress_data.key))

        return progress_data.key
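
    # The random `identifier` above ties together the cached result key, the
    # ProgressData record, and the celery callback
    # (cache_match_merge_link_result); callers poll the returned progress key
    # and then read the cached result via the match_merge_link_result action
    # below.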

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_boolean_field(
            'brief',
            required=False,
            description='If true, only return high-level organization details')
    ])
    @api_endpoint_class
    @ajax_request_class
    def list(self, request):
        """
        Retrieves all orgs the user has access to.
        """

        # if brief==true only return high-level organization details
        brief = json.loads(request.query_params.get('brief', 'false'))

        if brief:
            if request.user.is_superuser:
                qs = Organization.objects.only('id', 'name', 'parent_org_id')
            else:
                qs = request.user.orgs.only('id', 'name', 'parent_org_id')

            orgs = _dict_org_brief(request, qs)
            if len(orgs) == 0:
                return JsonResponse(
                    {
                        'status':
                        'error',
                        'message':
                        'Your SEED account is not associated with any organizations. '
                        'Please contact a SEED administrator.'
                    },
                    status=status.HTTP_401_UNAUTHORIZED)
            else:
                return JsonResponse({'organizations': orgs})
        else:
            if request.user.is_superuser:
                qs = Organization.objects.all()
            else:
                qs = request.user.orgs.all()

            orgs = _dict_org(request, qs)
            if len(orgs) == 0:
                return JsonResponse(
                    {
                        'status':
                        'error',
                        'message':
                        'Your SEED account is not associated with any organizations. '
                        'Please contact a SEED administrator.'
                    },
                    status=status.HTTP_401_UNAUTHORIZED)
            else:
                return JsonResponse({'organizations': orgs})

    @method_decorator(permission_required('seed.can_access_admin'))
    @api_endpoint_class
    @ajax_request_class
    def destroy(self, request, pk=None):
        """
        Starts a background task to delete an organization and all related data.
        """

        return JsonResponse(tasks.delete_organization(pk))

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    def retrieve(self, request, pk=None):
        """
        Retrieves a single organization by id.
        """
        org_id = pk

        if org_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'no organization_id sent'
                },
                status=status.HTTP_400_BAD_REQUEST)

        try:
            org = Organization.objects.get(pk=org_id)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization does not exist'
                },
                status=status.HTTP_404_NOT_FOUND)
        if (not request.user.is_superuser
                and not OrganizationUser.objects.filter(
                    user=request.user,
                    organization=org,
                    role_level__in=[ROLE_OWNER, ROLE_MEMBER, ROLE_VIEWER
                                    ]).exists()):
            # TODO: better permission and return 401 or 403
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'user is not the owner of the org'
                },
                status=status.HTTP_403_FORBIDDEN)

        return JsonResponse({
            'status': 'success',
            'organization': _dict_org(request, [org])[0],
        })

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'organization_name': 'string',
            'user_id': 'integer',
        },
        required=['organization_name', 'user_id'],
        description='Properties:\n'
        '- organization_name: The new organization name\n'
        '- user_id: The user ID (primary key) to be used as the owner of the new organization'
    ))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_parent_org_owner')
    def create(self, request):
        """
        Creates a new organization.
        """
        body = request.data
        user = User.objects.get(pk=body['user_id'])
        org_name = body['organization_name']

        if Organization.objects.filter(name=org_name).exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'organization name already exists'
                },
                status=status.HTTP_409_CONFLICT)

        org, _, _ = create_organization(user, org_name, org_name)
        return JsonResponse({
            'status': 'success',
            'message': 'organization created',
            'organization': _dict_org(request, [org])[0]
        })

    @api_endpoint_class
    @ajax_request_class
    @method_decorator(permission_required('seed.can_access_admin'))
    @action(detail=True, methods=['DELETE'])
    def inventory(self, request, pk=None):
        """
        Starts a background task to delete all properties & taxlots
        in an org.
        """
        return JsonResponse(tasks.delete_organization_inventory(pk))

    @swagger_auto_schema(
        request_body=SaveSettingsSerializer, )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_owner')
    @action(detail=True, methods=['PUT'])
    def save_settings(self, request, pk=None):
        """
        Saves an organization's settings: name, query threshold, shared fields, etc.
        """
        body = request.data
        org = Organization.objects.get(pk=pk)
        posted_org = body.get('organization', None)
        if posted_org is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'malformed request'
                },
                status=status.HTTP_400_BAD_REQUEST)

        desired_threshold = posted_org.get('query_threshold', None)
        if desired_threshold is not None:
            org.query_threshold = desired_threshold

        desired_name = posted_org.get('name', None)
        if desired_name is not None:
            org.name = desired_name

        def is_valid_choice(choice_tuples, s):
            """choice_tuples is std model ((value, label), ...)"""
            return (s is not None) and (s in [
                choice[0] for choice in choice_tuples
            ])

        def warn_bad_pint_spec(kind, unit_string):
            if unit_string is not None:
                _log.warning("got bad {0} unit string {1} for org {2}".format(
                    kind, unit_string, org.name))

        desired_display_units_eui = posted_org.get('display_units_eui')
        if is_valid_choice(Organization.MEASUREMENT_CHOICES_EUI,
                           desired_display_units_eui):
            org.display_units_eui = desired_display_units_eui
        else:
            warn_bad_pint_spec('eui', desired_display_units_eui)

        desired_display_units_area = posted_org.get('display_units_area')
        if is_valid_choice(Organization.MEASUREMENT_CHOICES_AREA,
                           desired_display_units_area):
            org.display_units_area = desired_display_units_area
        else:
            warn_bad_pint_spec('area', desired_display_units_area)

        desired_display_significant_figures = posted_org.get(
            'display_significant_figures')
        if isinstance(
                desired_display_significant_figures,
                int) and desired_display_significant_figures >= 0:  # noqa
            org.display_significant_figures = desired_display_significant_figures
        elif desired_display_significant_figures is not None:
            _log.warning("got bad sig figs {0} for org {1}".format(
                desired_display_significant_figures, org.name))

        desired_display_meter_units = posted_org.get('display_meter_units')
        if desired_display_meter_units:
            org.display_meter_units = desired_display_meter_units

        desired_thermal_conversion_assumption = posted_org.get(
            'thermal_conversion_assumption')
        if is_valid_choice(Organization.THERMAL_CONVERSION_ASSUMPTION_CHOICES,
                           desired_thermal_conversion_assumption):
            org.thermal_conversion_assumption = desired_thermal_conversion_assumption

        # Update MapQuest API Key if it's been changed
        mapquest_api_key = posted_org.get('mapquest_api_key', '')
        if mapquest_api_key != org.mapquest_api_key:
            org.mapquest_api_key = mapquest_api_key

        comstock_enabled = posted_org.get('comstock_enabled', False)
        if comstock_enabled != org.comstock_enabled:
            org.comstock_enabled = comstock_enabled

        org.save()

        # Update the selected exportable fields.
        new_public_column_names = posted_org.get('public_fields', None)
        if new_public_column_names is not None:
            old_public_columns = Column.objects.filter(
                organization=org, shared_field_type=Column.SHARED_PUBLIC)
            # turn off sharing on the old public columns
            for col in old_public_columns:
                col.shared_field_type = Column.SHARED_NONE
                col.save()

            # for now just iterate over this to grab the new columns.
            for col in new_public_column_names:
                new_col = Column.objects.filter(organization=org,
                                                id=col['id']).first()
                if new_col:
                    new_col.shared_field_type = Column.SHARED_PUBLIC
                    new_col.save()

        return JsonResponse({'status': 'success'})

    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def query_threshold(self, request, pk=None):
        """
        Returns the "query_threshold" for an org.  Searches from
        members of sibling orgs must return at least this many buildings
        from orgs they do not belong to, or else buildings from orgs they
        don't belong to will be removed from the results.
        """
        org = Organization.objects.get(pk=pk)
        return JsonResponse({
            'status': 'success',
            'query_threshold': org.query_threshold
        })

    @swagger_auto_schema(responses={200: SharedFieldsReturnSerializer})
    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def shared_fields(self, request, pk=None):
        """
        Retrieves all fields marked as shared for the organization. Will only return used fields.
        """
        result = {'status': 'success', 'public_fields': []}

        columns = Column.retrieve_all(pk, 'property', True)
        for c in columns:
            if c['sharedFieldType'] == 'Public':
                new_column = {
                    'table_name': c['table_name'],
                    'name': c['name'],
                    'column_name': c['column_name'],
                    # column_name is the field name in the db; the other
                    # name can have a tax_ prefix
                    'display_name': c['display_name']
                }
                result['public_fields'].append(new_column)

        return JsonResponse(result)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'sub_org_name': 'string',
            'sub_org_owner_email': 'string',
        },
        required=['sub_org_name', 'sub_org_owner_email'],
        description='Properties:\n'
        '- sub_org_name: Name of the new sub organization\n'
        '- sub_org_owner_email: Email of the owner of the sub organization, which must already exist',
    ))
    @has_perm_class('requires_member')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['POST'])
    def sub_org(self, request, pk=None):
        """
        Creates a child org of a parent org.
        """
        body = request.data
        org = Organization.objects.get(pk=pk)
        email = body['sub_org_owner_email'].lower()
        try:
            user = User.objects.get(username=email)
        except User.DoesNotExist:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'User with email address (%s) does not exist' % email
                },
                status=status.HTTP_400_BAD_REQUEST)

        created, mess_or_org, _ = create_suborganization(
            user, org, body['sub_org_name'], ROLE_OWNER)
        if created:
            return JsonResponse({
                'status': 'success',
                'organization_id': mess_or_org.pk
            })
        else:
            return JsonResponse({
                'status': 'error',
                'message': mess_or_org
            },
                                status=status.HTTP_409_CONFLICT)

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def matching_criteria_columns(self, request, pk=None):
        """
        Retrieve all matching criteria columns for an org.
        """
        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        matching_criteria_column_names = dict(
            org.column_set.filter(is_matching_criteria=True)
            .values('table_name')
            .annotate(column_names=ArrayAgg('column_name'))
            .values_list('table_name', 'column_names'))
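        # Illustrative response shape (column names hypothetical):
        #   {"PropertyState": ["address_line_1", "pm_property_id"],
        #    "TaxLotState": ["jurisdiction_tax_lot_id"]}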

        return JsonResponse(matching_criteria_column_names)

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {'inventory_type': 'string'},
        required=['inventory_type'],
        description='Properties:\n'
        '- inventory_type: either "properties" or "taxlots"'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def match_merge_link(self, request, pk=None):
        """
        Run match_merge_link for an org.
        """
        inventory_type = request.data.get('inventory_type', None)
        if inventory_type not in ['properties', 'taxlots']:
            return JsonResponse(
                {
                    'status':
                    'error',
                    'message':
                    'Provided inventory type should either be "properties" or "taxlots".'
                },
                status=status.HTTP_404_NOT_FOUND)

        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        state_class_name = 'PropertyState' if inventory_type == 'properties' else 'TaxLotState'

        progress_key = self._start_whole_org_match_merge_link(
            org.id, state_class_name)

        return JsonResponse({'progress_key': progress_key})

    @swagger_auto_schema(request_body=AutoSchemaHelper.schema_factory(
        {
            'inventory_type': 'string',
            'add': ['string'],
            'remove': ['string'],
        },
        required=['inventory_type'],
        description='Properties:\n'
        '- inventory_type: either "properties" or "taxlots"\n'
        '- add: list of column names\n'
        '- remove: list of column names'))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['POST'])
    def match_merge_link_preview(self, request, pk=None):
        """
        Run match_merge_link preview for an org and record type.
        """
        inventory_type = request.data.get('inventory_type', None)
        if inventory_type not in ['properties', 'taxlots']:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Provided inventory type should either be "properties" or "taxlots".'
                },
                status=status.HTTP_400_BAD_REQUEST)

        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        state_class_name = 'PropertyState' if inventory_type == 'properties' else 'TaxLotState'

        current_columns = matching_criteria_column_names(
            org.id, state_class_name)

        add = set(request.data.get('add', []))
        remove = set(request.data.get('remove', []))

        provided_columns = Column.objects.filter(
            column_name__in=add.union(remove),
            organization_id=org.id,
            table_name=state_class_name)
        if provided_columns.count() != (len(add) + len(remove)):
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Invalid column names provided.'
                },
                status=status.HTTP_400_BAD_REQUEST)

        proposed_columns = current_columns.union(add).difference(remove)

        progress_key = self._start_whole_org_match_merge_link(
            org.id, state_class_name, list(proposed_columns))

        return JsonResponse({'progress_key': progress_key})
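
    # The proposed criteria are plain set arithmetic over column names, e.g.
    # (values are illustrative only):
    #
    #   current = {'address_line_1', 'pm_property_id'}
    #   add, remove = {'ubid'}, {'pm_property_id'}
    #   proposed = current.union(add).difference(remove)
    #   # -> {'address_line_1', 'ubid'}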

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_integer_field(
            'match_merge_link_id',
            required=True,
            description='ID of match merge link')
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def match_merge_link_result(self, request, pk=None):
        try:
            Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        identifier = request.query_params['match_merge_link_id']
        result_key = _get_match_merge_link_key(identifier)

        # using unsafe serialization b/c the result might not be a dict
        return JsonResponse(get_cache_raw(result_key), safe=False)

    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def geocoding_columns(self, request, pk=None):
        """
        Retrieve all geocoding columns for an org.
        """
        try:
            org = Organization.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        geocoding_columns_qs = (
            org.column_set
            .filter(geocoding_order__gt=0)
            .order_by('geocoding_order')
            .values('table_name', 'column_name'))

        geocoding_columns = {
            'PropertyState': [],
            'TaxLotState': [],
        }

        for col in geocoding_columns_qs:
            geocoding_columns[col['table_name']].append(col['column_name'])

        return JsonResponse(geocoding_columns)
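
    # Example response shape (hypothetical column names), with each list
    # ordered by the columns' geocoding_order within its table:
    #
    #   {
    #       "PropertyState": ["address_line_1", "city", "state", "postal_code"],
    #       "TaxLotState": ["address_line_1"]
    #   }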

    def get_cycles(self, start, end, organization_id):
        if not isinstance(start, type(end)):
            raise TypeError('start and end are not the same type')
        # if of type int (or convertible to int), assume they are cycle ids
        try:
            start = int(start)
            end = int(end)
        except ValueError:
            # otherwise assume they are date strings (e.g. from JS)
            if isinstance(start, str):
                start_datetime = dateutil.parser.parse(start)
                end_datetime = dateutil.parser.parse(end)
            else:
                raise TypeError('start and end must be cycle ids or date strings')
        # get date times from cycles
        if isinstance(start, int):
            cycle = Cycle.objects.get(pk=start,
                                      organization_id=organization_id)
            start_datetime = cycle.start
            if start == end:
                end_datetime = cycle.end
            else:
                end_datetime = Cycle.objects.get(
                    pk=end, organization_id=organization_id).end
        return Cycle.objects.filter(
            start__gte=start_datetime,
            end__lte=end_datetime,
            organization_id=organization_id).order_by('start')
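
    # get_cycles accepts either cycle ids or date strings, e.g. (assuming
    # cycles 3 and 7 exist for organization 1):
    #
    #   self.get_cycles(3, 7, 1)  # ints are treated as cycle ids
    #   self.get_cycles('2018-01-01T00:00:00-08:00',
    #                   '2018-12-31T23:53:00-08:00', 1)  # parsed as dates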

    def get_data(self, property_view, x_var, y_var):
        result = None
        state = property_view.state
        # truthiness check: values of None, 0, and '' are all treated as missing
        if getattr(state, x_var, None) and getattr(state, y_var, None):
            result = {
                "id": property_view.property_id,
                "x": getattr(state, x_var),
                "y": getattr(state, y_var),
            }
        return result

    def get_raw_report_data(self, organization_id, cycles, x_var, y_var,
                            campus_only):
        all_property_views = PropertyView.objects.select_related(
            'property',
            'state').filter(property__organization_id=organization_id,
                            cycle_id__in=cycles)
        organization = Organization.objects.get(pk=organization_id)
        results = []
        for cycle in cycles:
            property_views = all_property_views.filter(cycle_id=cycle)
            count_total = []
            count_with_data = []
            data = []
            for property_view in property_views:
                property_pk = property_view.property_id
                if property_view.property.campus and campus_only:
                    count_total.append(property_pk)
                    result = self.get_data(property_view, x_var, y_var)
                    if result:
                        result['yr_e'] = cycle.end.strftime('%Y')
                        data.append(result)
                        count_with_data.append(property_pk)
                elif not property_view.property.campus:
                    count_total.append(property_pk)
                    result = self.get_data(property_view, x_var, y_var)
                    if result:
                        result['yr_e'] = cycle.end.strftime('%Y')
                        de_unitted_result = apply_display_unit_preferences(
                            organization, result)
                        data.append(de_unitted_result)
                        count_with_data.append(property_pk)
            result = {
                "cycle_id": cycle.pk,
                "chart_data": data,
                "property_counts": {
                    "yr_e": cycle.end.strftime('%Y'),
                    "num_properties": len(count_total),
                    "num_properties_w-data": len(count_with_data),
                },
            }
            results.append(result)
        return results
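
    # Each element of the returned list summarizes one cycle; a single entry
    # looks roughly like this (values are illustrative):
    #
    #   {
    #       "cycle_id": 3,
    #       "chart_data": [{"id": 11, "x": 1999, "y": 52000, "yr_e": "2018"}],
    #       "property_counts": {
    #           "yr_e": "2018",
    #           "num_properties": 10,
    #           "num_properties_w-data": 1,
    #       },
    #   }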

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var', required=True, description='Raw column name for y axis'),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
        AutoSchemaHelper.query_string_field(
            'campus_only',
            required=False,
            description='If true, includes campuses'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report(self, request, pk=None):
        """
        Retrieve a summary report for charting x vs y
        """
        campus_only = json.loads(
            request.query_params.get('campus_only', 'false'))
        params = {}
        missing_params = []
        error = ''
        for param in ['x_var', 'y_var', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
        else:
            cycles = self.get_cycles(params['start'], params['end'], pk)
            data = self.get_raw_report_data(pk, cycles, params['x_var'],
                                            params['y_var'], campus_only)
            property_counts = []
            chart_data = []
            for datum in data:
                property_counts.append(datum['property_counts'])
                chart_data.extend(datum['chart_data'])
            data = {
                'property_counts': property_counts,
                'chart_data': chart_data,
            }
            result = {'status': 'success', 'data': data}
            status_code = status.HTTP_200_OK
        return Response(result, status=status_code)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var',
            required=True,
            description=
            'Raw column name for y axis, must be one of: "gross_floor_area", "use_description", "year_built"'
        ),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
        AutoSchemaHelper.query_string_field(
            'campus_only',
            required=False,
            description='If true, includes campuses'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report_aggregated(self, request, pk=None):
        """
        Retrieve a summary report for charting x vs y aggregated by y_var
        """
        campus_only = json.loads(
            request.query_params.get('campus_only', 'false'))
        valid_y_values = ['gross_floor_area', 'use_description', 'year_built']
        params = {}
        missing_params = []
        empty = True
        error = ''
        for param in ['x_var', 'y_var', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            elif param == 'y_var' and val not in valid_y_values:
                error = "{} {} is not a valid value for {}.".format(
                    error, val, param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
        else:
            cycles = self.get_cycles(params['start'], params['end'], pk)
            x_var = params['x_var']
            y_var = params['y_var']
            data = self.get_raw_report_data(pk, cycles, x_var, y_var,
                                            campus_only)
            for datum in data:
                if datum['property_counts']['num_properties_w-data'] != 0:
                    empty = False
                    break
            if empty:
                result = {'status': 'error', 'message': 'No data found'}
                status_code = status.HTTP_404_NOT_FOUND
        if not empty and not error:
            chart_data = []
            property_counts = []
            for datum in data:
                buildings = datum['chart_data']
                yr_e = datum['property_counts']['yr_e']
                chart_data.extend(self.aggregate_data(yr_e, y_var, buildings))
                property_counts.append(datum['property_counts'])
            # Send back to client
            aggregated_data = {
                'chart_data': chart_data,
                'property_counts': property_counts
            }
            result = {
                'status': 'success',
                'aggregated_data': aggregated_data,
            }
            status_code = status.HTTP_200_OK
        return Response(result, status=status_code)

    def aggregate_data(self, yr_e, y_var, buildings):
        aggregation_method = {
            'use_description': self.aggregate_use_description,
            'year_built': self.aggregate_year_built,
            'gross_floor_area': self.aggregate_gross_floor_area,
        }
        return aggregation_method[y_var](yr_e, buildings)

    def aggregate_use_description(self, yr_e, buildings):
        # Group buildings in this year_ending group into uses
        chart_data = []
        grouped_uses = defaultdict(list)
        for b in buildings:
            grouped_uses[str(b['y']).lower()].append(b)

        # Now iterate over use groups to make each chart item
        for use, buildings_in_uses in grouped_uses.items():
            chart_data.append({
                'x': median([b['x'] for b in buildings_in_uses]),
                'y': use.capitalize(),
                'yr_e': yr_e
            })
        return chart_data
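
    # Worked example (illustrative values): 'Office' and 'office' collapse
    # into one group whose x becomes the median of the members' x values:
    #
    #   in:  [{'x': 10, 'y': 'Office'}, {'x': 30, 'y': 'office'},
    #         {'x': 5, 'y': 'Retail'}]
    #   out: [{'x': 20, 'y': 'Office', 'yr_e': yr_e},
    #         {'x': 5, 'y': 'Retail', 'yr_e': yr_e}]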

    def aggregate_year_built(self, yr_e, buildings):
        # Group buildings in this year_ending group into decades
        chart_data = []
        grouped_decades = defaultdict(list)
        for b in buildings:
            grouped_decades['%s0' % str(b['y'])[:-1]].append(b)

        # Now iterate over decade groups to make each chart item
        for decade, buildings_in_decade in grouped_decades.items():
            chart_data.append({
                'x': median([b['x'] for b in buildings_in_decade]),
                'y': '%s-%s' % (decade, '%s9' % str(decade)[:-1]),  # e.g. 1990-1999
                'yr_e': yr_e
            })
        return chart_data
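
    # The decade key is derived textually: the last digit of the year is
    # replaced with a 0, which assumes y is a four-digit year, e.g.:
    #
    #   '%s0' % str(1987)[:-1]  # -> '1980', labeled '1980-1989'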

    def aggregate_gross_floor_area(self, yr_e, buildings):
        chart_data = []
        y_display_map = {
            0: '0-99k',
            100000: '100-199k',
            200000: '200-299k',
            300000: '300-399k',
            400000: '400-499k',
            500000: '500-599k',
            600000: '600-699k',
            700000: '700-799k',
            800000: '800-899k',
            900000: '900-999k',
            1000000: 'over 1,000k',
        }
        max_bin = max(y_display_map)

        # Group buildings in this year_ending group into ranges
        grouped_ranges = defaultdict(list)
        for b in buildings:
            area = b['y']
            # make sure anything greater than the biggest bin gets put in
            # the biggest bin
            range_bin = min(max_bin, round_down_hundred_thousand(area))
            grouped_ranges[range_bin].append(b)

        # Now iterate over range groups to make each chart item
        for range_floor, buildings_in_range in grouped_ranges.items():
            chart_data.append({
                'x': median([b['x'] for b in buildings_in_range]),
                'y': y_display_map[range_floor],
                'yr_e': yr_e
            })
        return chart_data
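
    # Binning example (illustrative, assuming round_down_hundred_thousand
    # floors to the nearest 100,000):
    #
    #   area = 437512
    #   range_bin = min(max_bin, round_down_hundred_thousand(area))  # -> 400000
    #   y_display_map[range_bin]  # -> '400-499k'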

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_string_field(
            'x_var', required=True, description='Raw column name for x axis'),
        AutoSchemaHelper.query_string_field('x_label',
                                            required=True,
                                            description='Label for x axis'),
        AutoSchemaHelper.query_string_field(
            'y_var', required=True, description='Raw column name for y axis'),
        AutoSchemaHelper.query_string_field('y_label',
                                            required=True,
                                            description='Label for y axis'),
        AutoSchemaHelper.query_string_field(
            'start',
            required=True,
            description='Start time, in the format "2018-12-31T23:53:00-08:00"'
        ),
        AutoSchemaHelper.query_string_field(
            'end',
            required=True,
            description='End time, in the format "2018-12-31T23:53:00-08:00"'),
    ])
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def report_export(self, request, pk=None):
        """
        Export a report as a spreadsheet
        """
        params = {}
        missing_params = []
        error = ''
        for param in ['x_var', 'x_label', 'y_var', 'y_label', 'start', 'end']:
            val = request.query_params.get(param, None)
            if not val:
                missing_params.append(param)
            else:
                params[param] = val
        if missing_params:
            error = "{} Missing params: {}".format(error,
                                                   ", ".join(missing_params))
        if error:
            status_code = status.HTTP_400_BAD_REQUEST
            result = {'status': 'error', 'message': error}
            return Response(result, status=status_code)

        response = HttpResponse(
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename="report-data.xlsx"'

        # Create WB
        output = BytesIO()
        wb = Workbook(output, {'remove_timezone': True})

        # Create sheets
        count_sheet = wb.add_worksheet('Counts')
        base_sheet = wb.add_worksheet('Raw')
        agg_sheet = wb.add_worksheet('Agg')

        # Enable bold format and establish starting cells
        bold = wb.add_format({'bold': True})
        data_row_start = 0
        data_col_start = 0

        # Write all headers across all sheets
        count_sheet.write(data_row_start, data_col_start, 'Year Ending', bold)
        count_sheet.write(data_row_start, data_col_start + 1,
                          'Properties with Data', bold)
        count_sheet.write(data_row_start, data_col_start + 2,
                          'Total Properties', bold)

        base_sheet.write(data_row_start, data_col_start, 'ID', bold)
        base_sheet.write(data_row_start, data_col_start + 1,
                         request.query_params.get('x_label'), bold)
        base_sheet.write(data_row_start, data_col_start + 2,
                         request.query_params.get('y_label'), bold)
        base_sheet.write(data_row_start, data_col_start + 3, 'Year Ending',
                         bold)

        agg_sheet.write(data_row_start, data_col_start,
                        request.query_params.get('x_label'), bold)
        agg_sheet.write(data_row_start, data_col_start + 1,
                        request.query_params.get('y_label'), bold)
        agg_sheet.write(data_row_start, data_col_start + 2, 'Year Ending',
                        bold)

        # Gather base data
        cycles = self.get_cycles(params['start'], params['end'], pk)
        data = self.get_raw_report_data(pk, cycles, params['x_var'],
                                        params['y_var'], False)

        base_row = data_row_start + 1
        agg_row = data_row_start + 1
        count_row = data_row_start + 1

        for cycle_results in data:
            total_count = cycle_results['property_counts']['num_properties']
            with_data_count = cycle_results['property_counts'][
                'num_properties_w-data']
            yr_e = cycle_results['property_counts']['yr_e']

            # Write Counts
            count_sheet.write(count_row, data_col_start, yr_e)
            count_sheet.write(count_row, data_col_start + 1, with_data_count)
            count_sheet.write(count_row, data_col_start + 2, total_count)

            count_row += 1

            # Write Base/Raw Data
            data_rows = cycle_results['chart_data']
            for datum in data_rows:
                base_sheet.write(base_row, data_col_start, datum.get('id'))
                base_sheet.write(base_row, data_col_start + 1, datum.get('x'))
                base_sheet.write(base_row, data_col_start + 2, datum.get('y'))
                base_sheet.write(base_row, data_col_start + 3,
                                 datum.get('yr_e'))

                base_row += 1

            # Gather and write Agg data
            for agg_datum in self.aggregate_data(yr_e, params['y_var'],
                                                 data_rows):
                agg_sheet.write(agg_row, data_col_start, agg_datum.get('x'))
                agg_sheet.write(agg_row, data_col_start + 1,
                                agg_datum.get('y'))
                agg_sheet.write(agg_row, data_col_start + 2,
                                agg_datum.get('yr_e'))

                agg_row += 1

        wb.close()

        xlsx_data = output.getvalue()

        response.write(xlsx_data)

        return response

    @has_perm_class('requires_member')
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def geocode_api_key_exists(self, request, pk=None):
        """
        Returns true if the organization has a MapQuest API key
        """
        try:
            org = Organization.objects.get(id=pk)
        except Organization.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not retrieve organization at pk = ' + str(pk)
                },
                status=status.HTTP_404_NOT_FOUND)

        return bool(org.mapquest_api_key)
Exemplo n.º 5
class ImportFileViewSet(viewsets.ViewSet):
    raise_exception = True
    queryset = ImportFile.objects.all()

    @api_endpoint_class
    @ajax_request_class
    def retrieve(self, request, pk=None):
        """
        Retrieves details about an ImportFile.
        """
        import_file_id = pk
        orgs = request.user.orgs.all()
        try:
            import_file = ImportFile.objects.get(pk=import_file_id)
            d = ImportRecord.objects.filter(super_organization__in=orgs,
                                            pk=import_file.import_record_id)
        except ObjectDoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not access an import file with this ID'
                },
                status=status.HTTP_403_FORBIDDEN)
        # check if user has access to the import file
        if not d.exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not locate import file with this ID',
                    'import_file': {},
                },
                status=status.HTTP_400_BAD_REQUEST)

        f = obj_to_dict(import_file)
        f['name'] = import_file.filename_only
        if not import_file.uploaded_filename:
            f['uploaded_filename'] = import_file.filename
        f['dataset'] = obj_to_dict(import_file.import_record)

        return JsonResponse({
            'status': 'success',
            'import_file': f,
        })

    @swagger_auto_schema_org_query_param
    @has_perm_class('requires_viewer')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def first_five_rows(self, request, pk=None):
        """
        Retrieves the first five rows of an ImportFile.
        """
        org_id = request.query_params.get('organization_id', None)
        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)
        if import_file.cached_second_to_fifth_row is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Internal problem occurred, import file first five rows not cached'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # import_file.cached_second_to_fifth_row is a field that contains the
        # first 5 lines of data from the file, split on newlines and delimited
        # by ROW_DELIMITER. This becomes an issue when fields have newlines in
        # them, so the following is to handle newlines in the fields.
        # In the case of only one data column there will be no ROW_DELIMITER.
        header = import_file.cached_first_row.split(ROW_DELIMITER)
        data = import_file.cached_second_to_fifth_row
        return JsonResponse({
            'status': 'success',
            'first_five_rows': convert_first_five_rows_to_list(header, data)
        })
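
    # Sketch of the cached layout (ROW_DELIMITER shown here as '|' purely for
    # illustration; the real delimiter is defined by the importer):
    #
    #   cached_first_row:           'Address|City|Sq Ft'
    #   cached_second_to_fifth_row: '1 Main St|Boston|1000\n2 Elm St|Salem|2500'
    #
    # convert_first_five_rows_to_list re-assembles these into row dicts keyed
    # by the header names, accounting for newlines embedded in field values.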

    @swagger_auto_schema_org_query_param
    @has_perm_class('requires_viewer')
    @api_endpoint_class
    @ajax_request_class
    @action(detail=True, methods=['GET'])
    def raw_column_names(self, request, pk=None):
        """
        Retrieves a list of all column names from an ImportFile.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return JsonResponse({
            'status': 'success',
            'raw_columns': import_file.first_row_columns
        })

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
    ],
                         responses={200: MappingResultsResponseSerializer})
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'], url_path='mapping_results')
    def mapping_results(self, request, pk=None):
        """
        Retrieves a paginated list of Properties and Tax Lots for an import file after mapping.
        """
        import_file_id = pk
        org_id = request.query_params.get('organization_id', None)
        org = Organization.objects.get(pk=org_id)

        try:
            # get the field names that were in the mapping
            import_file = ImportFile.objects.get(
                pk=import_file_id, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # the field names that were in the mapping are the only fields to show
        field_names = import_file.get_cached_mapped_columns

        # set of fields
        fields = {
            'PropertyState': ['id', 'extra_data', 'lot_number'],
            'TaxLotState': ['id', 'extra_data']
        }
        columns_from_db = Column.retrieve_all(org_id)
        property_column_name_mapping = {}
        taxlot_column_name_mapping = {}
        for field_name in field_names:
            # find the field from the columns in the database
            for column in columns_from_db:
                if column['table_name'] == 'PropertyState' and \
                        field_name[0] == 'PropertyState' and \
                        field_name[1] == column['column_name']:
                    property_column_name_mapping[
                        column['column_name']] = column['name']
                    if not column['is_extra_data']:
                        fields['PropertyState'].append(
                            field_name[1])  # save to the raw db fields
                    continue
                elif column['table_name'] == 'TaxLotState' and \
                        field_name[0] == 'TaxLotState' and \
                        field_name[1] == column['column_name']:
                    taxlot_column_name_mapping[
                        column['column_name']] = column['name']
                    if not column['is_extra_data']:
                        fields['TaxLotState'].append(
                            field_name[1])  # save to the raw db fields
                    continue

        inventory_type = request.data.get('inventory_type', 'all')

        result = {'status': 'success'}

        if inventory_type == 'properties' or inventory_type == 'all':
            properties = PropertyState.objects.filter(
                import_file_id=import_file_id,
                data_state__in=[DATA_STATE_MAPPING, DATA_STATE_MATCHING],
                merge_state__in=[
                    MERGE_STATE_UNKNOWN, MERGE_STATE_NEW
                ]).only(*fields['PropertyState']).order_by('id')

            property_results = []
            for prop in properties:
                prop_dict = TaxLotProperty.model_to_dict_with_mapping(
                    prop,
                    property_column_name_mapping,
                    fields=fields['PropertyState'],
                    exclude=['extra_data'])

                prop_dict.update(
                    TaxLotProperty.extra_data_to_dict_with_mapping(
                        prop.extra_data,
                        property_column_name_mapping,
                        fields=prop.extra_data.keys(),
                    ).items())

                prop_dict = apply_display_unit_preferences(org, prop_dict)
                property_results.append(prop_dict)

            result['properties'] = property_results

        if inventory_type == 'taxlots' or inventory_type == 'all':
            tax_lots = TaxLotState.objects.filter(
                import_file_id=import_file_id,
                data_state__in=[DATA_STATE_MAPPING, DATA_STATE_MATCHING],
                merge_state__in=[MERGE_STATE_UNKNOWN, MERGE_STATE_NEW
                                 ]).only(*fields['TaxLotState']).order_by('id')

            tax_lot_results = []
            for tax_lot in tax_lots:
                tax_lot_dict = TaxLotProperty.model_to_dict_with_mapping(
                    tax_lot,
                    taxlot_column_name_mapping,
                    fields=fields['TaxLotState'],
                    exclude=['extra_data'])
                tax_lot_dict.update(
                    TaxLotProperty.extra_data_to_dict_with_mapping(
                        tax_lot.extra_data,
                        taxlot_column_name_mapping,
                        fields=tax_lot.extra_data.keys(),
                    ).items())

                tax_lot_dict = apply_display_unit_preferences(
                    org, tax_lot_dict)
                tax_lot_results.append(tax_lot_dict)

            result['tax_lots'] = tax_lot_results

        return result

    @staticmethod
    def has_coparent(state_id, inventory_type, fields=None):
        """
        Return the coparent of the current state id based on the inventory type. If fields
        are given (as a list), then it will only return the fields specified of the state model
        object as a dictionary.

        :param state_id: int, ID of PropertyState or TaxLotState
        :param inventory_type: string, either properties | taxlots
        :param fields: list, either None or list of fields to return
        :return: dict or state object; if fields is None, the state object is returned
        """
        state_model = PropertyState if inventory_type == 'properties' else TaxLotState

        # TODO: convert coparent to instance method, not class method
        audit_entry, audit_count = state_model.coparent(state_id)

        if audit_count == 0:
            return False

        if audit_count > 1:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Internal problem occurred, more than one merge record found'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        return audit_entry[0]

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
    ],
                         request_body=AutoSchemaHelper.schema_factory({
                             'remap': 'boolean',
                             'mark_as_done': 'boolean',
                         }))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def map(self, request, pk=None):
        """
        Starts a background task to convert imported raw data into
        PropertyState and TaxLotState, using user's column mappings.
        """

        body = request.data

        remap = body.get('remap', False)
        mark_as_done = body.get('mark_as_done', True)

        org_id = request.query_params.get('organization_id', None)
        import_file = ImportFile.objects.filter(
            pk=pk, import_record__super_organization_id=org_id)
        if not import_file.exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'ImportFile {} does not exist'.format(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return JsonResponse(map_data(pk, remap, mark_as_done))

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def start_system_matching_and_geocoding(self, request, pk=None):
        """
        Starts a background task to attempt automatic matching between buildings
        in an ImportFile with other existing buildings within the same org.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            ImportFile.objects.get(pk=pk,
                                   import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return geocode_and_match_buildings_task(pk)

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def start_data_quality_checks(self, request, pk=None):
        """
        Starts a background task to attempt automatic matching between buildings
        in an ImportFile with other existing buildings within the same org.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return_value = do_checks(org_id, None, None, import_file.pk)
        return JsonResponse({
            'progress_key': return_value['progress_key'],
            'progress': return_value,
        })

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def validate_use_cases(self, request, pk=None):
        """
        Starts a background task to call BuildingSync's use case validation
        tool.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        return task_validate_use_cases(import_file.pk)

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
    ],
                         request_body=AutoSchemaHelper.schema_factory(
                             {'cycle_id': 'string'}))
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def start_save_data(self, request, pk=None):
        """
        Starts a background task to import raw data from an ImportFile
        into PropertyState objects as extra_data. If the cycle_id is set to
        year_ending then the cycle ID will be set to the year_ending column for each
        record in the uploaded file. Note that the year_ending flag is not yet enabled.
        """
        body = request.data
        import_file_id = pk
        if not import_file_id:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'must pass file_id of file to save'
                },
                status=status.HTTP_400_BAD_REQUEST)

        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=import_file_id, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        cycle_id = body.get('cycle_id')
        if not cycle_id:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'must pass cycle_id of the cycle to save the data'
                },
                status=status.HTTP_400_BAD_REQUEST)
        elif cycle_id == 'year_ending':
            _log.error("NOT CONFIGURED FOR YEAR ENDING OPTION AT THE MOMENT")
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'SEED is unable to parse year_ending at the moment'
                },
                status=status.HTTP_400_BAD_REQUEST)
        else:
            # find the cycle; .get() raises DoesNotExist rather than returning None
            try:
                cycle = Cycle.objects.get(id=cycle_id)
            except Cycle.DoesNotExist:
                return JsonResponse(
                    {
                        'status': 'error',
                        'message': 'cycle_id was invalid'
                    },
                    status=status.HTTP_400_BAD_REQUEST)
            # assign the cycle to the import file object
            import_file.cycle = cycle
            import_file.save()

        return JsonResponse(task_save_raw(import_file.id))
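
    # Illustrative client-side sketch (host, ids, route prefix, and auth are
    # placeholders):
    #
    #   import requests
    #   resp = requests.post(
    #       'http://localhost:8000/api/import_files/42/start_save_data/',
    #       params={'organization_id': 1},
    #       json={'cycle_id': 3},
    #       headers={'Authorization': 'Bearer <token>'})
    #   progress = resp.json()  # includes a progress key to poll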

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def mapping_done(self, request, pk=None):
        """
        Tell the backend that the mapping is complete.
        """
        import_file_id = pk
        if not import_file_id:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'must pass import_file_id'
                },
                status=status.HTTP_400_BAD_REQUEST)

        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=import_file_id, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'no import file with given id'
                },
                status=status.HTTP_404_NOT_FOUND)

        import_file.mapping_done = True
        import_file.save()

        return JsonResponse({'status': 'success', 'message': ''})

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def matching_and_geocoding_results(self, request, pk=None):
        """
        Retrieves the number of matched and unmatched properties & tax lots for
        a given ImportFile record, specifically for new imports.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # property views associated with this imported file (including merges)
        properties_new = []
        properties_matched = list(
            PropertyState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                merge_state=MERGE_STATE_MERGED,
            ).values_list('id', flat=True))

        # Check audit log in case PropertyStates are listed as "new" but were merged into a different property
        properties = list(
            PropertyState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                merge_state=MERGE_STATE_NEW,
            ))

        for state in properties:
            audit_creation_id = PropertyAuditLog.objects.only('id').exclude(
                import_filename=None).get(state_id=state.id,
                                          name='Import Creation')
            if PropertyAuditLog.objects.exclude(
                    record_type=AUDIT_USER_EDIT).filter(
                        parent1_id=audit_creation_id).exists():
                properties_matched.append(state.id)
            else:
                properties_new.append(state.id)

        tax_lots_new = []
        tax_lots_matched = list(
            TaxLotState.objects.only('id').filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                merge_state=MERGE_STATE_MERGED,
            ).values_list('id', flat=True))

        # Check audit log in case TaxLotStates are listed as "new" but were merged into a different tax lot
        taxlots = list(
            TaxLotState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                merge_state=MERGE_STATE_NEW,
            ))

        for state in taxlots:
            audit_creation_id = TaxLotAuditLog.objects.only('id').exclude(
                import_filename=None).get(state_id=state.id,
                                          name='Import Creation')
            if TaxLotAuditLog.objects.exclude(
                    record_type=AUDIT_USER_EDIT).filter(
                        parent1_id=audit_creation_id).exists():
                tax_lots_matched.append(state.id)
            else:
                tax_lots_new.append(state.id)

        # Construct Geocode Results
        # use count() so the database does the counting, rather than len()
        # loading every matching row into memory
        property_geocode_results = {
            'high_confidence': PropertyState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence__startswith='High').count(),
            'low_confidence': PropertyState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence__startswith='Low').count(),
            'manual': PropertyState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence='Manually geocoded (N/A)').count(),
            'missing_address_components': PropertyState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence='Missing address components (N/A)').count(),
        }

        tax_lot_geocode_results = {
            'high_confidence': TaxLotState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence__startswith='High').count(),
            'low_confidence': TaxLotState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence__startswith='Low').count(),
            'manual': TaxLotState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence='Manually geocoded (N/A)').count(),
            'missing_address_components': TaxLotState.objects.filter(
                import_file__pk=import_file.pk,
                data_state=DATA_STATE_MATCHING,
                geocoding_confidence='Missing address components (N/A)').count(),
        }

        # merge in any of the matching results from the JSON field
        mrd = import_file.matching_results_data
        return {
            'status': 'success',
            'import_file_records': mrd.get('import_file_records', None),
            'properties': {
                'initial_incoming': mrd.get('property_initial_incoming', None),
                'duplicates_against_existing': mrd.get('property_duplicates_against_existing', None),
                'duplicates_within_file': mrd.get('property_duplicates_within_file', None),
                'merges_against_existing': mrd.get('property_merges_against_existing', None),
                'merges_between_existing': mrd.get('property_merges_between_existing', None),
                'merges_within_file': mrd.get('property_merges_within_file', None),
                'new': mrd.get('property_new', None),
                'geocoded_high_confidence': property_geocode_results.get('high_confidence'),
                'geocoded_low_confidence': property_geocode_results.get('low_confidence'),
                'geocoded_manually': property_geocode_results.get('manual'),
                'geocode_not_possible': property_geocode_results.get('missing_address_components'),
            },
            'tax_lots': {
                'initial_incoming': mrd.get('tax_lot_initial_incoming', None),
                'duplicates_against_existing': mrd.get('tax_lot_duplicates_against_existing', None),
                'duplicates_within_file': mrd.get('tax_lot_duplicates_within_file', None),
                'merges_against_existing': mrd.get('tax_lot_merges_against_existing', None),
                'merges_between_existing': mrd.get('tax_lot_merges_between_existing', None),
                'merges_within_file': mrd.get('tax_lot_merges_within_file', None),
                'new': mrd.get('tax_lot_new', None),
                'geocoded_high_confidence': tax_lot_geocode_results.get('high_confidence'),
                'geocoded_low_confidence': tax_lot_geocode_results.get('low_confidence'),
                'geocoded_manually': tax_lot_geocode_results.get('manual'),
                'geocode_not_possible': tax_lot_geocode_results.get('missing_address_components'),
            }
        }

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def mapping_suggestions(self, request, pk):
        """
        Returns suggested mappings from an uploaded file's headers to known
        data fields.
        """
        organization_id = request.query_params.get('organization_id', None)

        result = {'status': 'success'}

        membership = OrganizationUser.objects.select_related('organization') \
            .get(organization_id=organization_id, user=request.user)
        organization = membership.organization

        # For now, each organization holds their own mappings. This is non-ideal, but it is the
        # way it is for now. In order to move to parent_org holding, then we need to be able to
        # dynamically match columns based on the names and not the db id (or support many-to-many).
        # parent_org = organization.get_parent()

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=organization.pk)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # Get a list of the database fields in a list, these are the db columns and the extra_data columns
        property_columns = Column.retrieve_mapping_columns(
            organization.pk, 'property')
        taxlot_columns = Column.retrieve_mapping_columns(
            organization.pk, 'taxlot')

        # If this is a portfolio manager file, then load in the PM mappings and if the column_mappings
        # are not in the original mappings, default to PM
        if import_file.from_portfolio_manager:
            pm_mappings = simple_mapper.get_pm_mapping(
                import_file.first_row_columns, resolve_duplicates=True)
            suggested_mappings = mapper.build_column_mapping(
                import_file.first_row_columns,
                Column.retrieve_all_by_tuple(organization_id),
                previous_mapping=get_column_mapping,
                map_args=[organization],
                default_mappings=pm_mappings,
                thresh=80)
        elif import_file.from_buildingsync:
            bsync_mappings = xml_mapper.build_column_mapping()
            suggested_mappings = mapper.build_column_mapping(
                import_file.first_row_columns,
                Column.retrieve_all_by_tuple(organization_id),
                previous_mapping=get_column_mapping,
                map_args=[organization],
                default_mappings=bsync_mappings,
                thresh=80)
        else:
            # All other input types
            suggested_mappings = mapper.build_column_mapping(
                import_file.first_row_columns,
                Column.retrieve_all_by_tuple(organization.pk),
                previous_mapping=get_column_mapping,
                map_args=[organization],
                thresh=80  # percentage match that we require; 80% is an arbitrary value for now.
            )
            # replace None with empty string for column names and PropertyState for tables
            # TODO #239: Move this fix to build_column_mapping
            for m in suggested_mappings:
                table, destination_field, _confidence = suggested_mappings[m]
                if destination_field is None:
                    suggested_mappings[m][1] = ''

        # Fix the table name, eventually move this to the build_column_mapping
        for m in suggested_mappings:
            table, _destination_field, _confidence = suggested_mappings[m]
            # Do not return the campus, created, updated fields; instead, force them into the property state
            if not table or table == 'Property':
                suggested_mappings[m][0] = 'PropertyState'
            elif table == 'TaxLot':
                suggested_mappings[m][0] = 'TaxLotState'

        result['suggested_column_mappings'] = suggested_mappings
        result['property_columns'] = property_columns
        result['taxlot_columns'] = taxlot_columns

        return JsonResponse(result)
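
    # suggested_mappings maps each raw file header to a [table, column,
    # confidence] triple, e.g. (hypothetical headers and scores):
    #
    #   {
    #       'Property Id': ['PropertyState', 'pm_property_id', 100],
    #       'Sq. Ft.': ['PropertyState', 'gross_floor_area', 90],
    #       'Mystery Col': ['PropertyState', '', 0],
    #   }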

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    def destroy(self, request, pk):
        """
        Deletes an import file
        """
        organization_id = request.query_params.get('organization_id', None)
        if organization_id is None:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Must pass organization_id as a query parameter'
                },
                status=status.HTTP_400_BAD_REQUEST)
        organization_id = int(organization_id)
        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=organization_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        # check if the import record exists for the file and organization
        d = ImportRecord.objects.filter(super_organization_id=organization_id,
                                        pk=import_file.import_record.pk)

        if not d.exists():
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'user does not have permission to delete file',
                },
                status=status.HTTP_403_FORBIDDEN)

        # This does not actually delete the object because it is a NonDeletableModel
        import_file.delete()
        return JsonResponse({'status': 'success'})

    @swagger_auto_schema(manual_parameters=[
        AutoSchemaHelper.query_org_id_field(),
        AutoSchemaHelper.query_integer_field(
            'view_id', required=True, description='ID for property view')
    ])
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def greenbutton_meters_preview(self, request, pk):
        """
        Returns validated type units and proposed imports
        """
        org_id = request.query_params.get('organization_id')
        view_id = request.query_params.get('view_id')

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        parser = reader.GreenButtonParser(import_file.local_file)
        raw_meter_data = list(parser.data)

        try:
            property_id = PropertyView.objects.get(
                pk=view_id, cycle__organization_id=org_id).property_id
        except PropertyView.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message':
                    'Could not find property with pk=' + str(view_id)
                },
                status=status.HTTP_400_BAD_REQUEST)

        meters_parser = MetersParser(org_id,
                                     raw_meter_data,
                                     source_type=Meter.GREENBUTTON,
                                     property_id=property_id)

        result = {
            'validated_type_units': meters_parser.validated_type_units(),
            'proposed_imports': meters_parser.proposed_imports,
        }

        import_file.matching_results_data['property_id'] = property_id
        import_file.save()

        return result
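
    # Hedged usage sketch (URL prefix is an assumption). Note that returning
    # a bare dict works here only because ajax_request_class wraps
    # non-HttpResponse return values in a JSON response:
    #
    #   import requests
    #   resp = requests.get(
    #       'http://localhost:8000/api/v3/import_files/42/greenbutton_meters_preview/',
    #       params={'organization_id': 1, 'view_id': 7},
    #   )
    #   proposed = resp.json()['proposed_imports']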

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()])
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=True, methods=['GET'])
    def pm_meters_preview(self, request, pk):
        """
        Returns validated type units, proposed imports, and unlinkable PM ids
        """
        org_id = request.query_params.get('organization_id')

        try:
            import_file = ImportFile.objects.get(
                pk=pk, import_record__super_organization_id=org_id)
        except ImportFile.DoesNotExist:
            return JsonResponse(
                {
                    'status': 'error',
                    'message': 'Could not find import file with pk=' + str(pk)
                },
                status=status.HTTP_400_BAD_REQUEST)

        parser = reader.MCMParser(import_file.local_file,
                                  sheet_name='Meter Entries')
        raw_meter_data = list(parser.data)

        meters_parser = MetersParser(org_id, raw_meter_data)

        result = {
            'validated_type_units': meters_parser.validated_type_units(),
            'proposed_imports': meters_parser.proposed_imports,
            'unlinkable_pm_ids': meters_parser.unlinkable_pm_ids,
        }

        return result
Example No. 6
class TaxlotViewSet(viewsets.ViewSet, OrgMixin, ProfileIdMixin):
    renderer_classes = (JSONRenderer,)
    serializer_class = TaxLotSerializer
    parser_classes = (JSONParser,)
    _organization = None

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field(required=True)],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'selected': ['integer'],
            },
            description='IDs of the taxlots to check for applied labels.'
        )
    )
    @action(detail=False, methods=['POST'])
    def labels(self, request):
        """
        Returns a list of all labels; the is_applied field in the response
        indicates whether each label is applied to the given taxlot views
        """
        labels = StatusLabel.objects.filter(
            super_organization=self.get_parent_org(self.request)
        ).order_by("name").distinct()
        super_organization = self.get_organization(request)
        # TODO: refactor to avoid passing request here
        return get_labels(request, labels, super_organization, 'taxlot_view')

    def _get_filtered_results(self, request, profile_id):
        page = request.query_params.get('page', 1)
        per_page = request.query_params.get('per_page', 1)
        org_id = request.query_params.get('organization_id', None)
        cycle_id = request.query_params.get('cycle')
        # check if there is a query parameter for the profile_id; if so, use that one
        profile_id = request.query_params.get('profile_id', profile_id)
        if not org_id:
            return JsonResponse(
                {'status': 'error', 'message': 'Need to pass organization_id as query parameter'},
                status=status.HTTP_400_BAD_REQUEST)

        if cycle_id:
            cycle = Cycle.objects.get(organization_id=org_id, pk=cycle_id)
        else:
            cycle = Cycle.objects.filter(organization_id=org_id).order_by('name').first()
            if not cycle:
                return JsonResponse({
                    'status': 'error',
                    'message': 'Could not locate cycle',
                    'pagination': {
                        'total': 0
                    },
                    'cycle_id': None,
                    'results': []
                })

        # Return taxlot views limited to the 'taxlot_view_ids' list; otherwise, if the list is empty or absent, return all
        if 'taxlot_view_ids' in request.data and request.data['taxlot_view_ids']:
            taxlot_views_list = TaxLotView.objects.select_related('taxlot', 'state', 'cycle') \
                .filter(id__in=request.data['taxlot_view_ids'], taxlot__organization_id=org_id,
                        cycle=cycle) \
                .order_by('id')
        else:
            taxlot_views_list = TaxLotView.objects.select_related('taxlot', 'state', 'cycle') \
                .filter(taxlot__organization_id=org_id, cycle=cycle) \
                .order_by('id')

        paginator = Paginator(taxlot_views_list, per_page)

        try:
            taxlot_views = paginator.page(page)
            page = int(page)
        except PageNotAnInteger:
            taxlot_views = paginator.page(1)
            page = 1
        except EmptyPage:
            taxlot_views = paginator.page(paginator.num_pages)
            page = paginator.num_pages

        org = Organization.objects.get(pk=org_id)

        # Retrieve all the columns that are in the db for this organization
        columns_from_database = Column.retrieve_all(org_id, 'taxlot', False)

        # This uses an old method of returning the show_columns. There is a new method that
        # is preferred in v2.1 API with the ProfileIdMixin.
        if profile_id is None:
            show_columns = None
        elif profile_id == -1:
            show_columns = list(Column.objects.filter(
                organization_id=org_id
            ).values_list('id', flat=True))
        else:
            try:
                profile = ColumnListProfile.objects.get(
                    organization=org,
                    id=profile_id,
                    profile_location=VIEW_LIST,
                    inventory_type=VIEW_LIST_TAXLOT
                )
                show_columns = list(ColumnListProfileColumn.objects.filter(
                    column_list_profile_id=profile.id
                ).values_list('column_id', flat=True))
            except ColumnListProfile.DoesNotExist:
                show_columns = None

        related_results = TaxLotProperty.get_related(taxlot_views, show_columns,
                                                     columns_from_database)

        # collapse units here so we only process the current page; by now we have
        # a realized list, not a lazy queryset
        unit_collapsed_results = [apply_display_unit_preferences(org, x) for x in related_results]

        response = {
            'pagination': {
                'page': page,
                'start': taxlot_views.start_index(),
                'end': taxlot_views.end_index(),
                'num_pages': paginator.num_pages,
                'has_next': taxlot_views.has_next(),
                'has_previous': taxlot_views.has_previous(),
                'total': paginator.count
            },
            'cycle_id': cycle.id,
            'results': unit_collapsed_results
        }

        return JsonResponse(response)
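
    # Worked example of the fallback logic above (illustrative): with 25
    # matching taxlot views and per_page=10, paginator.num_pages == 3, so
    #   page='2'   -> rows 11-20,
    #   page='abc' -> PageNotAnInteger, clamped to page 1,
    #   page=99    -> EmptyPage, clamped to page 3 (the last page).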

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'cycle',
                required=True,
                description='The ID of the cycle from which to return taxlots'
            ),
            AutoSchemaHelper.query_integer_field(
                'page',
                required=False,
                description='The current page of taxlots to return'
            ),
            AutoSchemaHelper.query_integer_field(
                'per_page',
                required=False,
                description='The number of items per page to return'
            ),
            AutoSchemaHelper.query_integer_field(
                'profile_id',
                required=False,
                description='The ID of the column profile to use'
            )
        ]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    def list(self, request):
        """
        List all the tax lots
        """
        return self._get_filtered_results(request, profile_id=-1)

    @swagger_auto_schema(
        request_body=AutoSchemaHelper.schema_factory(
            {
                'organization_id': 'integer',
                'profile_id': 'integer',
                'cycle_ids': ['integer'],
            },
            required=['organization_id', 'cycle_ids'],
            description='Properties:\n'
                        '- organization_id: ID of organization\n'
                        '- profile_id: Either an id of a list settings profile, '
                        'or undefined\n'
                        '- cycle_ids: The IDs of the cycles from which to return taxlots'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @action(detail=False, methods=['POST'])
    def filter_by_cycle(self, request):
        """
        List all the taxlots with all columns
        """
        # NOTE: this uses a POST http method because swagger and django handle
        # arrays in query parameters differently; a request body is simply easier here
        org_id = request.data.get('organization_id', None)
        profile_id = request.data.get('profile_id', -1)
        cycle_ids = request.data.get('cycle_ids', [])

        if not org_id:
            return JsonResponse(
                {'status': 'error', 'message': 'Need to pass organization_id in the request body'},
                status=status.HTTP_400_BAD_REQUEST)

        response = taxlots_across_cycles(org_id, profile_id, cycle_ids)

        return JsonResponse(response)
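
    # Hedged usage sketch (URL prefix is an assumption):
    #
    #   import requests
    #   resp = requests.post(
    #       'http://localhost:8000/api/v3/taxlots/filter_by_cycle/',
    #       json={'organization_id': 1, 'profile_id': -1, 'cycle_ids': [2, 3]},
    #   )
    #   taxlots_by_cycle = resp.json()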

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'cycle',
                required=True,
                description='The ID of the cycle from which to return tax lots'
            ),
            AutoSchemaHelper.query_integer_field(
                'page',
                required=False,
                description='The current page of taxlots to return'
            ),
            AutoSchemaHelper.query_integer_field(
                'per_page',
                required=False,
                description='The number of items per page to return'
            ),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'profile_id': 'integer',
                'taxlot_view_ids': ['integer'],
            },
            required=['profile_id'],
            description='Properties:\n'
                        '- profile_id: Either an id of a list settings profile, or undefined\n'
                        '- taxlot_view_ids: List of taxlot view ids'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_viewer')
    @action(detail=False, methods=['POST'])
    def filter(self, request):
        """
        List all the tax lots
        """
        profile_id = request.data.get('profile_id')
        if profile_id == 'None':
            profile_id = None

        return self._get_filtered_results(request, profile_id=profile_id)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
        ],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'taxlot_view_ids': ['integer']
            },
            required=['taxlot_view_ids'],
            description='Properties:\n'
                        '- taxlot_view_ids: Array containing taxlot view ids to merge'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['POST'])
    def merge(self, request):
        """
        Merge multiple tax lot records into a single new record, and run this
        new record through a match and merge round within its current Cycle.
        """
        body = request.data
        organization_id = request.query_params.get('organization_id')
        if organization_id is None:
            return JsonResponse({
                'status': 'error',
                'message': 'Missing required organization_id query parameter'
            }, status=status.HTTP_400_BAD_REQUEST)
        organization_id = int(organization_id)

        taxlot_view_ids = body.get('taxlot_view_ids', [])
        taxlot_states = TaxLotView.objects.filter(
            id__in=taxlot_view_ids,
            cycle__organization_id=organization_id
        ).values('id', 'state_id')
        # get the state ids in order according to the given view ids
        taxlot_states_dict = {t['id']: t['state_id'] for t in taxlot_states}
        taxlot_state_ids = [
            taxlot_states_dict[view_id]
            for view_id in taxlot_view_ids if view_id in taxlot_states_dict
        ]

        if len(taxlot_state_ids) != len(taxlot_view_ids):
            return JsonResponse({
                'status': 'error',
                'message': 'Not all records could be found.'
            }, status=status.HTTP_404_NOT_FOUND)

        # Check the number of taxlot_state_ids to merge
        if len(taxlot_state_ids) < 2:
            return JsonResponse({
                'status': 'error',
                'message': 'At least two ids are necessary to merge'
            }, status=status.HTTP_400_BAD_REQUEST)

        merged_state = merge_taxlots(taxlot_state_ids, organization_id, 'Manual Match')

        merge_count, link_count, _view_id = match_merge_link(merged_state.taxlotview_set.first().id, 'TaxLotState')

        return {
            'status': 'success',
            'match_merged_count': merge_count,
            'match_link_count': link_count,
        }
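
    # Worked example of the order-preserving lookup above (illustrative):
    # given taxlot_view_ids = [7, 3, 9] and queryset rows
    #   [{'id': 3, 'state_id': 30}, {'id': 7, 'state_id': 70}, {'id': 9, 'state_id': 90}]
    # the dict comprehension yields {3: 30, 7: 70, 9: 90} and the list
    # comprehension re-orders the state ids to match the request: [70, 30, 90].
    # The order is preserved because merge_taxlots presumably treats it as
    # merge precedence.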

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def unmerge(self, request, pk=None):
        """
        Unmerge a taxlot view into two taxlot views
        """
        try:
            old_view = TaxLotView.objects.select_related(
                'taxlot', 'cycle', 'state'
            ).get(
                id=pk,
                taxlot__organization_id=self.request.GET['organization_id']
            )
        except TaxLotView.DoesNotExist:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} does not exist'.format(pk)
            }

        # Duplicate pairing
        paired_view_ids = list(TaxLotProperty.objects.filter(taxlot_view_id=old_view.id)
                               .order_by('property_view_id').values_list('property_view_id',
                                                                         flat=True))

        # Capture previous associated labels
        label_ids = list(old_view.labels.all().values_list('id', flat=True))

        # Detach the notes from the merged view before it is deleted
        notes = old_view.notes.all()
        for note in notes:
            note.taxlot_view = None
            note.save()

        merged_state = old_view.state
        if merged_state.data_state != DATA_STATE_MATCHING or merged_state.merge_state != MERGE_STATE_MERGED:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} is not a merged taxlot view'.format(pk)
            }

        log = TaxLotAuditLog.objects.select_related('parent_state1', 'parent_state2').filter(
            state=merged_state
        ).order_by('-id').first()

        if log is None or log.parent_state1 is None or log.parent_state2 is None:
            return {
                'status': 'error',
                'message': 'taxlot view with id {} must have two parent states'.format(pk)
            }

        state1 = log.parent_state1
        state2 = log.parent_state2
        cycle_id = old_view.cycle_id

        # Clone the taxlot record twice
        old_taxlot = old_view.taxlot
        new_taxlot = old_taxlot
        new_taxlot.id = None
        new_taxlot.save()

        new_taxlot_2 = TaxLot.objects.get(pk=new_taxlot.pk)
        new_taxlot_2.id = None
        new_taxlot_2.save()

        # If the canonical TaxLot is not associated with any other -View, delete it
        if not TaxLotView.objects.filter(taxlot_id=old_view.taxlot_id).exclude(pk=old_view.id).exists():
            TaxLot.objects.get(pk=old_view.taxlot_id).delete()

        # Create the views
        new_view1 = TaxLotView(
            cycle_id=cycle_id,
            taxlot_id=new_taxlot.id,
            state=state1
        )
        new_view2 = TaxLotView(
            cycle_id=cycle_id,
            taxlot_id=new_taxlot_2.id,
            state=state2
        )

        # Mark the merged state as deleted
        merged_state.merge_state = MERGE_STATE_DELETE
        merged_state.save()

        # Change the merge_state of the individual states
        if log.parent1.name in ['Import Creation',
                                'Manual Edit'] and log.parent1.import_filename is not None:
            # State belongs to a new record
            state1.merge_state = MERGE_STATE_NEW
        else:
            state1.merge_state = MERGE_STATE_MERGED
        if log.parent2.name in ['Import Creation',
                                'Manual Edit'] and log.parent2.import_filename is not None:
            # State belongs to a new record
            state2.merge_state = MERGE_STATE_NEW
        else:
            state2.merge_state = MERGE_STATE_MERGED
        # In most cases data_state will already be 3 (DATA_STATE_MATCHING), but if one of the parents was a
        # de-duplicated record then data_state will be 0. This step ensures that the new states will be 3.
        state1.data_state = DATA_STATE_MATCHING
        state2.data_state = DATA_STATE_MATCHING
        state1.save()
        state2.save()

        # Delete the audit log entry for the merge
        log.delete()

        old_view.delete()
        new_view1.save()
        new_view2.save()

        # Associate labels
        label_objs = StatusLabel.objects.filter(pk__in=label_ids)
        new_view1.labels.set(label_objs)
        new_view2.labels.set(label_objs)

        # Duplicate notes to the new views
        for note in notes:
            created = note.created
            updated = note.updated
            note.id = None
            note.taxlot_view = new_view1
            note.save()
            ids = [note.id]
            note.id = None
            note.taxlot_view = new_view2
            note.save()
            ids.append(note.id)
            # Correct the created and updated times to match the original note
            Note.objects.filter(id__in=ids).update(created=created, updated=updated)

        for paired_view_id in paired_view_ids:
            TaxLotProperty(primary=True,
                           cycle_id=cycle_id,
                           taxlot_view_id=new_view1.id,
                           property_view_id=paired_view_id).save()
            TaxLotProperty(primary=True,
                           cycle_id=cycle_id,
                           taxlot_view_id=new_view2.id,
                           property_view_id=paired_view_id).save()

        return {
            'status': 'success',
            'view_id': new_view1.id
        }
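
    # Hedged sketch of the cloning idiom used above: in Django, clearing a
    # model instance's primary key and saving INSERTs a new row rather than
    # updating the old one.
    #
    #   lot = TaxLot.objects.get(pk=1)
    #   lot.id = None
    #   lot.save()   # lot.pk is now a fresh id; the original row is untouched
    #
    # unmerge() relies on this twice to mint the two replacement TaxLot
    # records, and on the same pattern per note to duplicate notes onto both
    # new views.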

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['GET'])
    def links(self, request, pk=None):
        """
        Get taxlot details for each linked taxlot across org cycles
        """
        organization_id = request.query_params.get('organization_id', None)
        base_view = TaxLotView.objects.select_related('cycle').filter(
            pk=pk,
            cycle__organization_id=organization_id
        )

        if base_view.exists():
            result = {'data': []}

            linked_views = TaxLotView.objects.select_related('cycle').filter(
                taxlot_id=base_view.get().taxlot_id,
                cycle__organization_id=organization_id
            ).order_by('-cycle__start')
            for linked_view in linked_views:
                state_data = TaxLotStateSerializer(linked_view.state).data

                state_data['cycle_id'] = linked_view.cycle.id
                state_data['view_id'] = linked_view.id
                result['data'].append(state_data)

            return JsonResponse(result, status=status.HTTP_200_OK)
        else:
            result = {
                'status': 'error',
                'message': 'taxlot view with id {} does not exist in the given organization'.format(pk)
            }
            return JsonResponse(result, status=status.HTTP_404_NOT_FOUND)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['POST'])
    def match_merge_link(self, request, pk=None):
        """
        Runs match merge link for an individual taxlot.

        Note that this method can return a view_id of None if the given -View
        was not involved in a merge.
        """
        org_id = request.query_params.get('organization_id', None)

        try:
            taxlot_view = TaxLotView.objects.get(
                pk=pk,
                cycle__organization_id=org_id
            )
        except TaxLotView.DoesNotExist:
            return JsonResponse({
                'status': 'error',
                'message': 'taxlot view with id {} does not exist'.format(pk)
            }, status=status.HTTP_404_NOT_FOUND)
        merge_count, link_count, view_id = match_merge_link(taxlot_view.pk, 'TaxLotState')

        result = {
            'view_id': view_id,
            'match_merged_count': merge_count,
            'match_link_count': link_count,
        }

        return JsonResponse(result)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'property_id',
                required=True,
                description='The property id to pair up with this taxlot'
            )
        ]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['PUT'])
    def pair(self, request, pk=None):
        """
        Pair a property to this taxlot
        """
        organization_id = int(request.query_params.get('organization_id'))
        property_id = int(request.query_params.get('property_id'))
        taxlot_id = int(pk)
        return pair_unpair_property_taxlot(property_id, taxlot_id, organization_id, True)

    @swagger_auto_schema(
        manual_parameters=[
            AutoSchemaHelper.query_org_id_field(),
            AutoSchemaHelper.query_integer_field(
                'property_id',
                required=True,
                description='The property id to unpair from this taxlot'
            )
        ]
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=True, methods=['PUT'])
    def unpair(self, request, pk=None):
        """
        Unpair a property from this taxlot
        """
        organization_id = int(request.query_params.get('organization_id'))
        property_id = int(request.query_params.get('property_id'))
        taxlot_id = int(pk)
        return pair_unpair_property_taxlot(property_id, taxlot_id, organization_id, False)
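
    # Hedged usage sketch covering both endpoints (URL prefix is an
    # assumption):
    #
    #   import requests
    #   requests.put(
    #       'http://localhost:8000/api/v3/taxlots/5/pair/',
    #       params={'organization_id': 1, 'property_id': 12},
    #   )
    #   requests.put(
    #       'http://localhost:8000/api/v3/taxlots/5/unpair/',
    #       params={'organization_id': 1, 'property_id': 12},
    #   )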

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=AutoSchemaHelper.schema_factory(
            {
                'taxlot_view_ids': ['integer']
            },
            required=['taxlot_view_ids'],
            description='A list of taxlot view ids to delete'
        )
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    @action(detail=False, methods=['DELETE'])
    def batch_delete(self, request):
        """
        Batch delete several tax lots
        """
        org_id = request.query_params.get('organization_id', None)

        taxlot_view_ids = request.data.get('taxlot_view_ids', [])
        taxlot_state_ids = TaxLotView.objects.filter(
            id__in=taxlot_view_ids,
            cycle__organization_id=org_id
        ).values_list('state_id', flat=True)
        resp = TaxLotState.objects.filter(pk__in=Subquery(taxlot_state_ids)).delete()

        if resp[0] == 0:
            return JsonResponse({'status': 'warning', 'message': 'No action was taken'})

        return JsonResponse({'status': 'success', 'taxlots': resp[1]['seed.TaxLotState']})
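
    # Hedged usage sketch (URL prefix is an assumption). Deleting the
    # TaxLotStates, rather than the views, presumably lets the database
    # cascade the removal of the dependent TaxLotView rows:
    #
    #   import requests
    #   resp = requests.delete(
    #       'http://localhost:8000/api/v3/taxlots/batch_delete/',
    #       params={'organization_id': 1},
    #       json={'taxlot_view_ids': [12, 13, 14]},
    #   )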

    def _get_taxlot_view(self, taxlot_pk):
        try:
            taxlot_view = TaxLotView.objects.select_related(
                'taxlot', 'cycle', 'state'
            ).get(
                id=taxlot_pk,
                taxlot__organization_id=self.request.GET['organization_id']
            )
            result = {
                'status': 'success',
                'taxlot_view': taxlot_view
            }
        except TaxLotView.DoesNotExist:
            result = {
                'status': 'error',
                'message': 'taxlot view with id {} does not exist'.format(taxlot_pk)
            }
        return result

    def get_history(self, taxlot_view):
        """Return history in reverse order"""

        # access the history from the taxlot state
        history, master = taxlot_view.state.history()

        # convert the history and master states to StateSerializers
        master['state'] = TaxLotStateSerializer(master['state_data']).data
        del master['state_data']
        del master['state_id']

        for h in history:
            h['state'] = TaxLotStateSerializer(h['state_data']).data
            del h['state_data']
            del h['state_id']

        return history, master

    def _get_properties(self, taxlot_view_pk):
        property_view_pks = TaxLotProperty.objects.filter(
            taxlot_view_id=taxlot_view_pk
        ).values_list('property_view_id', flat=True)
        property_views = PropertyView.objects.filter(
            pk__in=property_view_pks
        ).select_related('cycle', 'state')
        properties = []
        for property_view in property_views:
            properties.append(PropertyViewSerializer(property_view).data)
        return properties

    @swagger_auto_schema_org_query_param
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_view_data')
    def retrieve(self, request, pk):
        """
        Get taxlot details
        """
        result = self._get_taxlot_view(pk)
        if result.get('status', None) != 'error':
            taxlot_view = result.pop('taxlot_view')
            result.update(TaxLotViewSerializer(taxlot_view).data)
            # remove TaxLotView id from result
            result.pop('id')

            result['state'] = TaxLotStateSerializer(taxlot_view.state).data
            result['properties'] = self._get_properties(taxlot_view.pk)
            result['history'], master = self.get_history(taxlot_view)
            result = update_result_with_master(result, master)
            return JsonResponse(result, status=status.HTTP_200_OK)
        else:
            return JsonResponse(result, status=status.HTTP_404_NOT_FOUND)

    @swagger_auto_schema(
        manual_parameters=[AutoSchemaHelper.query_org_id_field()],
        request_body=UpdateTaxLotPayloadSerializer,
    )
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('can_modify_data')
    def update(self, request, pk):
        """
        Update a taxlot and run the updated record through a match and merge
        round within its current Cycle.
        """
        data = request.data

        result = self._get_taxlot_view(pk)
        if result.get('status', 'error') != 'error':
            taxlot_view = result.pop('taxlot_view')
            taxlot_state_data = TaxLotStateSerializer(taxlot_view.state).data

            # get the taxlot state information from the request
            new_taxlot_state_data = data['state']

            # set empty strings to None
            for key, val in new_taxlot_state_data.items():
                if val == '':
                    new_taxlot_state_data[key] = None

            changed_fields, previous_data = get_changed_fields(taxlot_state_data, new_taxlot_state_data)
            if not changed_fields:
                result.update(
                    {'status': 'success', 'message': 'Records are identical'}
                )
                return JsonResponse(result, status=status.HTTP_204_NO_CONTENT)
            else:
                # Not sure why we are going through the pain of logging this all right now... need to
                # reevaluate this.
                log = TaxLotAuditLog.objects.select_related().filter(
                    state=taxlot_view.state
                ).order_by('-id').first()

                # if checks above pass, create an exact copy of the current state for historical purposes
                if log.name == 'Import Creation':
                    # Add new state by removing the existing ID.
                    taxlot_state_data.pop('id')
                    # Remove the import_file_id for the first edit of a new record
                    # If the import file has been deleted and this value remains the serializer won't be valid
                    taxlot_state_data.pop('import_file')
                    new_taxlot_state_serializer = TaxLotStateSerializer(
                        data=taxlot_state_data
                    )
                    if new_taxlot_state_serializer.is_valid():
                        # create the new taxlot state, and perform an initial save / moving relationships
                        new_state = new_taxlot_state_serializer.save()

                        # then assign this state to the taxlot view and save the whole view
                        taxlot_view.state = new_state
                        taxlot_view.save()

                        TaxLotAuditLog.objects.create(organization=log.organization,
                                                      parent1=log,
                                                      parent2=None,
                                                      parent_state1=log.state,
                                                      parent_state2=None,
                                                      state=new_state,
                                                      name='Manual Edit',
                                                      description=None,
                                                      import_filename=log.import_filename,
                                                      record_type=AUDIT_USER_EDIT)

                        result.update(
                            {'state': new_taxlot_state_serializer.data}
                        )

                        # save the taxlot view so that the datetime gets updated on the taxlot.
                        taxlot_view.save()
                    else:
                        result.update({
                            'status': 'error',
                            'message': 'Invalid update data with errors: {}'.format(
                                new_taxlot_state_serializer.errors)}
                        )
                        return JsonResponse(result, status=status.HTTP_422_UNPROCESSABLE_ENTITY)

                # redo assignment of this variable in case this was an initial edit
                taxlot_state_data = TaxLotStateSerializer(taxlot_view.state).data

                if 'extra_data' in new_taxlot_state_data:
                    taxlot_state_data['extra_data'].update(
                        new_taxlot_state_data['extra_data']
                    )

                taxlot_state_data.update(
                    {k: v for k, v in new_taxlot_state_data.items() if k != 'extra_data'}
                )

                log = TaxLotAuditLog.objects.select_related().filter(
                    state=taxlot_view.state
                ).order_by('-id').first()

                if log.name in ['Manual Edit', 'Manual Match', 'System Match', 'Merge current state in migration']:
                    # Convert this to using the serializer to save the data. This will override the
                    # previous values in the state object.

                    # Note: We should be able to use partial update here and pass in the changed
                    # fields instead of the entire state_data.
                    updated_taxlot_state_serializer = TaxLotStateSerializer(
                        taxlot_view.state,
                        data=taxlot_state_data
                    )
                    if updated_taxlot_state_serializer.is_valid():
                        # update the existing taxlot state in place and save,
                        # moving relationships as needed
                        updated_taxlot_state_serializer.save()

                        result.update(
                            {'state': updated_taxlot_state_serializer.data}
                        )

                        # save the taxlot view so that the datetime gets updated on the taxlot.
                        taxlot_view.save()

                        Note.create_from_edit(request.user.id, taxlot_view, new_taxlot_state_data, previous_data)

                        merge_count, link_count, view_id = match_merge_link(taxlot_view.id, 'TaxLotState')

                        result.update({
                            'view_id': view_id,
                            'match_merged_count': merge_count,
                            'match_link_count': link_count,
                        })

                        return JsonResponse(result, status=status.HTTP_200_OK)
                    else:
                        result.update({
                            'status': 'error',
                            'message': 'Invalid update data with errors: {}'.format(
                                updated_taxlot_state_serializer.errors)}
                        )
                        return JsonResponse(result, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
                else:
                    result = {
                        'status': 'error',
                        'message': 'Unrecognized audit log name: ' + log.name
                    }
                    return JsonResponse(result, status=status.HTTP_400_BAD_REQUEST)
        else:
            return JsonResponse(result, status=status.HTTP_404_NOT_FOUND)
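

# Standalone sketch (illustrative, not part of SEED) of the two preprocessing
# steps inside update() above: empty strings are coerced to None, then changed
# fields are computed by diffing the old and new state dicts. SEED's own
# get_changed_fields is more involved; this diff only approximates it.
def _normalize(state):
    # empty strings become None, mirroring the loop in update()
    return {k: (None if v == '' else v) for k, v in state.items()}


def _changed_fields(old, new):
    new = _normalize(new)
    return {k: v for k, v in new.items() if old.get(k) != v}


old = {'address_line_1': '100 Main St', 'city': 'Boulder'}
new = {'address_line_1': '100 Main St', 'city': ''}
print(_changed_fields(old, new))  # {'city': None}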