Example #1
 def test_titlecase(self):
     self.assertEqual(titlecase("return_nicely"), "Return Nicely")
     self.assertEqual(titlecase("return_nicely's_home"),
                      "Return Nicely's Home")
     self.assertEqual(titlecase("return nicely's home"),
                      "Return Nicely's Home")
     self.assertEqual(titlecase("3rd_role"), "3rd Role")
Example #2
 def to_reso_dict(self, sub_name=False):
     """
     Return a dict where keys are RESO Green Verification compatible names.
     RESO Green Verification field names may optionally contain the type
     (i.e. the name), e.g. GreenVerification[Type]Body.
     :param sub_name: add the name to the key
     :type sub_name: bool or str
     """
     if isinstance(sub_name, basestring):
         sub = sub_name
     elif sub_name:
         sub = "".join(
             titlecase(word) for word in self.name.split())
     else:
         sub = ''
     url_field = 'GreenVerification{}URL'.format(sub)
     reso_dict = {}
     for key, val in self.MAPPING.items():
         field = val[0]
         if field == 'GreenBuildingVerificationType':
             reso_dict[field] = self.name
         elif field:
             field = field.format(sub)
             reso_dict[field] = getattr(self, key)
     reso_dict[url_field] = [url.url for url in self.urls.all()]
     return reso_dict
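A hypothetical usage sketch; `verification` stands in for an instance of the model that defines to_reso_dict(), and the name shown is made up:

# Assuming `verification` is a saved instance with name "Home Energy Score":
reso = verification.to_reso_dict(sub_name=True)
# Keys are then built with sub == "HomeEnergyScore", e.g. the URL list lands
# under "GreenVerificationHomeEnergyScoreURL".
reso_plain = verification.to_reso_dict()  # sub == '', plain field names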
Example #3
 def get_details(self):
     """Generate details."""
     rtc = self.fake.random_element(
         elements=GreenAssessment.RECOGNITION_TYPE_CHOICES)
     color = titlecase(self.fake.safe_color_name())
     nelem = '' if rtc[1].startswith('Zero') else self.fake.random_element(
         elements=('Energy', 'Efficiency', 'Sustainability', 'Building'))
     award = '{} {}{}'.format(color, nelem, rtc[1])
     return {
         'name': award,
         'award_body': "{} {}".format(award, self.fake.company_suffix()),
         'recognition_type': rtc[0],
         'description': 'Fake Award',
         'is_numeric_score': True,
         'validity_duration': None,
         'organization': self.organization
     }
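A hedged usage sketch; `factory` is a hypothetical instance of the class that defines get_details(), and the exact values depend on Faker's random state:

details = factory.get_details()
# The recognition type code always comes from the configured choices,
# and the description is fixed.
assert details['recognition_type'] in [c[0] for c in GreenAssessment.RECOGNITION_TYPE_CHOICES]
assert details['description'] == 'Fake Award'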
Example #4
 def to_representation(self, value):
     """Serialize PropertyView"""
     property_view = PropertyView.objects.get(id=value.pk)
     state = property_view.state
     cycle = property_view.cycle
     start = '{}-{}-{}'.format(cycle.start.year, cycle.start.month,
                               cycle.start.day)
     end = '{}-{}-{}'.format(cycle.end.year, cycle.end.month, cycle.end.day)
     cycle_dict = OrderedDict(
         (('id', cycle.id), ('start', start), ('end', end)))
     address_line_1 = titlecase(
         state.normalized_address) if state.normalized_address else None
     return OrderedDict(
         (('id', value.pk), ('address_line_1', address_line_1),
          ('address_line_2', state.address_line_2), ('city', state.city),
          ('state', state.state), ('postal_code', state.postal_code),
          ('property', property_view.property.id), ('cycle', cycle_dict)))
Example #5
    def test_property_view_field(self):
        """Test PropertyViewField"""
        property_view = self.property_view_factory.get_property_view()
        state = property_view.state
        cycle = OrderedDict(
            (('id', property_view.cycle.id),
             ('start', self.get_date_string(property_view.cycle.start)),
             ('end', self.get_date_string(property_view.cycle.end))))
        expected = OrderedDict(
            (('id', property_view.pk), ('address_line_1',
                                        titlecase(state.normalized_address)),
             ('address_line_2', state.address_line_2), ('city', state.city),
             ('state', state.state), ('postal_code', state.postal_code),
             ('property', property_view.property.id), ('cycle', cycle)))

        field = PropertyViewField(read_only=True)
        result = field.to_representation(property_view)
        self.assertEqual(expected, result)
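The test relies on a get_date_string() helper that is not shown; a plausible sketch, assuming it mirrors the '{year}-{month}-{day}' formatting used by to_representation() in Example #4:

    def get_date_string(self, dt):
        # Unpadded month/day, matching '{}-{}-{}'.format(...) above.
        return '{}-{}-{}'.format(dt.year, dt.month, dt.day)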
Example #6
    def retrieve_all(org_id, inventory_type):
        """
        # Retrieve all the columns for an organization. First, grab the columns from the
        # VIEW_COLUMNS_PROPERTY schema which defines the database columns with added data for
        # various reasons. Then query the database for all extra data columns and add in the
        # data as appropriate ensuring that duplicates that are taken care of (albeit crudely).

        # Note: this method should retrieve the columns from MappingData and then have a method
        # to return for JavaScript (i.e. UI-Grid) or native (standard JSON)

        :param org_id: Organization ID
        :param inventory_type: Inventory Type (property|taxlot)

        :return: dict
        """

        # Grab the default columns and their details
        columns = Column._retrieve_db_columns()

        # Clean up the columns
        for c in columns:
            if c['table'] == INVENTORY_MAP[inventory_type.lower()]:
                c['related'] = False
                if c.get('pinIfNative', False):
                    c['pinnedLeft'] = True
            else:
                c['related'] = True
                # For now, a related field has a prepended value to make the columns unique.
                if c.get('duplicateNameInOtherTable', False):
                    c['name'] = "{}_{}".format(
                        INVENTORY_MAP_PREPEND[inventory_type.lower()],
                        c['name'])

            # Remove some keys that are not needed for the API
            c.pop('pinIfNative', None)
            c.pop('duplicateNameInOtherTable', None)
            c.pop('dbField', None)

        # Add in all the extra columns
        # don't return columns that have no table_name as these are the columns of the import files
        extra_data_columns = Column.objects.filter(
            organization_id=org_id,
            is_extra_data=True).exclude(table_name='').exclude(table_name=None)

        for edc in extra_data_columns:
            name = edc.column_name
            table = edc.table_name

            # Important: an extra-data column named 'id' would collide with the primary key field, so rename it
            if name == 'id':
                name += '_extra'

            # Check if the column name is already defined in the list. For example,
            # gross_floor_area is a core field, but it can also be an extra field in taxlot,
            # meaning that the other one needs to be tagged something else.
            # Append _extra while the name already exists in the list for a different table.
            while any(col['name'] == name and col['table'] != table
                      for col in columns):
                name += '_extra'

            # TODO: need to check if the column name is already in the list and if it is then
            # overwrite the data

            columns.append({
                'name': name,
                'table': edc.table_name,
                'displayName': titlecase(edc.column_name),
                # 'dataType': 'string',  # TODO: how to check dataTypes on extra_data!
                'related': edc.table_name != INVENTORY_MAP[inventory_type.lower()],
                'extraData': True,
            })

        # validate that the column names are unique
        uniq = set()
        for c in columns:
            if c['name'] in uniq:
                raise Exception("Duplicate name '{}' found in columns".format(
                    c['name']))
            else:
                uniq.add(c['name'])

        return columns
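A hypothetical call site; `org.id` is assumed to be a valid organization primary key:

# retrieve_all appears to be a static/class-level method on Column.
columns = Column.retrieve_all(org.id, 'property')
related_names = [c['name'] for c in columns if c['related']]
extra_names = [c['name'] for c in columns if c.get('extraData')]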
Example #7
    def retrieve_all(org_id, inventory_type, only_used):
        """
        # Retrieve all the columns for an organization. First, grab the columns from the
        # VIEW_COLUMNS_PROPERTY schema which defines the database columns with added data for
        # various reasons. Then query the database for all extra data columns and add in the
        # data as appropriate ensuring that duplicates that are taken care of (albeit crudely).

        # Note: this method should retrieve the columns from MappingData and then have a method
        # to return for JavaScript (i.e. UI-Grid) or native (standard JSON)

        :param org_id: Organization ID
        :param inventory_type: Inventory Type (property|taxlot)
        :param only_used: View only the used columns that exist in the Column's table

        :return: dict
        """

        # Grab the default columns and their details
        columns = Column._retrieve_db_columns()
        remove_columns = []
        # Clean up the columns
        for index, c in enumerate(columns):
            # set the raw db name as well. Eventually we will want the table/db_name to be the unique id
            c['dbName'] = c['name']

            # check if the column is in the database; if it is, add in the other
            # information stored there
            db_col = Column.objects.filter(organization_id=org_id, is_extra_data=False,
                                           table_name=c['table'], column_name=c['name'])
            if len(db_col) == 1:
                db_col = db_col.first()
                c['sharedFieldType'] = db_col.get_shared_field_type_display()
            elif len(db_col) == 0:
                if only_used:
                    remove_columns.append(index)
                else:
                    c['sharedFieldType'] = 'None'

            if c['table'] and (inventory_type.lower() in c['table'].lower()):
                c['related'] = False
                if c.get('pinIfNative', False):
                    c['pinnedLeft'] = True
            else:
                c['related'] = True
                # For now, a related field has a prepended value to make the columns unique.
                if c.get('duplicateNameInOtherTable', False):
                    c['name'] = "{}_{}".format(INVENTORY_MAP_PREPEND[inventory_type.lower()], c['name'])

            # Remove some keys that are not needed for the API
            c.pop('pinIfNative', None)
            c.pop('duplicateNameInOtherTable', None)
            c.pop('dbField', None)

        # reverse the remove_columns list and remove the indexes from the columns
        for remove_column in remove_columns[::-1]:
            del columns[remove_column]

        # Add in all the extra columns
        # don't return columns that have no table_name as these are the columns of the import files
        extra_data_columns = Column.objects.filter(
            organization_id=org_id, is_extra_data=True
        ).exclude(table_name='').exclude(table_name=None)

        for edc in extra_data_columns:
            name = edc.column_name
            table = edc.table_name
            # set the raw db name as well. Eventually we will want the table/db_name to be the unique id
            db_name = name

            # An extra-data column named 'id' would collide with the primary key field, so rename it
            if name == 'id':
                name += '_extra'

            # Check if the column name is already defined in the list. For example, gross_floor_area
            # is a core field, but it can also be an extra field in taxlot, meaning that the other one
            # needs to be tagged something else.
            # Append _extra while the name already exists in the list for a different table.
            while any(col['name'] == name and col['table'] != table for col in columns):
                name += '_extra'

            # TODO: need to check if the column name is already in the list and if it is then overwrite the data

            columns.append(
                {
                    'name': name,
                    'dbName': db_name,
                    'table': edc.table_name,
                    'displayName': titlecase(edc.column_name),
                    # 'dataType': 'string',  # TODO: how to check dataTypes on extra_data!
                    'related': not (inventory_type.lower() in edc.table_name.lower()),
                    'extraData': True,
                    'sharedFieldType': edc.get_shared_field_type_display(),
                }
            )

        # validate that the (table, name) pairs are unique
        uniq = set()
        for c in columns:
            if (c['table'], c['name']) in uniq:
                raise Exception("Duplicate name '{}' found in columns".format(c['name']))
            else:
                uniq.add((c['table'], c['name']))

        return columns
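A hypothetical comparison of the only_used flag; org.id is again assumed to be valid:

# With only_used=True, default columns that have no matching Column row
# for this organization are dropped from the result.
used_columns = Column.retrieve_all(org.id, 'taxlot', only_used=True)
all_columns = Column.retrieve_all(org.id, 'taxlot', only_used=False)
assert len(used_columns) <= len(all_columns)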
Example #8
def get_columns(org_id, all_fields=False):
    """
    Get default columns, to be overridden in future

    Returns::

        title: HTML presented title of column
        sort_column: semantic name used by js and for searching DB
        class: HTML CSS class for row td elements
        title_class: HTML CSS class for column td elements
        type: 'string', 'number', 'date'
        min, max: the Django filter keys, e.g. gross_floor_area__gte
        field_type: assessor, pm, or compliance (currently not used)
        sortable: determines if the column is sortable
        checked: initial state of "edit columns" modal
        static: True if the option can be toggled (ID is False because it is
            always needed to link to the building detail page)
        link: signifies that the cell's data should link to a building detail
            page

    """
    cols = []
    translator = {
        '': 'string',
        'date': 'date',
        'float': 'number',
        'string': 'string',
        'decimal': 'number',
        'datetime': 'date',
        'foreignkey': 'number'
    }
    field_types = {}
    for k, v in get_mappable_types().items():
        d = {
            "title": titlecase(k),
            "sort_column": k,
            "type": translator[v],
            "class": "is_aligned_right",
            "sortable": True,
            "checked": False,
            "static": False,
            "field_type": field_types.get(k),
            "link": True if '_id' in k or 'address' in k.lower() else False,
        }
        if d['sort_column'] == 'gross_floor_area':
            d['type'] = 'floor_area'
            d['subtitle'] = u"ft" + u"\u00B2"
        if d['type'] != 'string':
            d["min"] = "{0}__gte".format(k)
            d["max"] = "{0}__lte".format(k)

        cols.append(d)

    for col in cols:
        if col['sort_column'] in ASSESSOR_FIELDS_BY_COLUMN:
            assessor_field = ASSESSOR_FIELDS_BY_COLUMN[col['sort_column']]
            col['field_type'] = assessor_field['field_type']

    if all_fields:
        qs = models.Column.objects.filter(is_extra_data=True).filter(
            Q(organization=None)
            | Q(mapped_mappings__super_organization=org_id)).select_related(
                'unit').distinct()
    else:
        qs = models.Column.objects.filter(is_extra_data=True).filter(
            mapped_mappings__super_organization=org_id).select_related(
                'unit').distinct()
    for c in qs:
        t = c.unit.get_unit_type_display().lower() if c.unit else 'string'
        link = False
        if '_id' in c.column_name or 'address' in c.column_name.lower():
            link = True
        d = {
            "title": c.column_name,
            "sort_column": c.column_name,
            "type": translator[t],
            "class": "is_aligned_right",
            "field_type": "assessor",
            "sortable": True,
            "checked": False,
            "static": False,
            "link": link,
            "is_extra_data": True,
        }
        if d['type'] != 'string':
            d["min"] = "{0}__gte".format(c.column_name)
            d["max"] = "{0}__lte".format(c.column_name)
        cols.append(d)

    cols.sort(key=lambda x: x['title'])
    columns = {
        'fields': cols,
    }

    return columns
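A hypothetical usage; org.pk is assumed to be a valid organization ID:

columns = get_columns(org.pk, all_fields=True)
# The result is a dict with a single 'fields' key holding the sorted column list.
titles = [col['title'] for col in columns['fields']]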