def get_project(request):
    """
    Retrieves details about a project.

    :GET: Expects the project's identifier (slug) as project_slug in the
        query string. Expects an organization_id (to which the project
        belongs) in the query string.

    Returns::

        {
         'name': project's name,
         'slug': project's identifier,
         'status': 'active',
         'number_of_buildings': Count of buildings associated with project,
         'last_modified': Timestamp when project last changed,
         'last_modified_by': {
            'first_name': first name of user that made last change,
            'last_name': last name,
            'email': email address,
            },
         'is_compliance': True if project is a compliance project,
         'compliance_type': Description of compliance type,
         'deadline_date': Timestamp of when compliance is due,
         'end_date': Timestamp of end of project
        }

    """
    project_slug = request.GET.get('project_slug', '')
    project = Project.objects.get(slug=project_slug)
    if project.super_organization_id != int(request.GET['organization_id']):
        return {'status': 'error', 'message': 'Permission denied'}

    # copy so the deletions below don't mutate the model instance itself
    project_dict = project.__dict__.copy()
    project_dict['is_compliance'] = project.has_compliance
    if project_dict['is_compliance']:
        c = project.get_compliance()
        project_dict['end_date'] = convert_to_js_timestamp(c.end_date)
        project_dict['deadline_date'] = convert_to_js_timestamp(c.deadline_date)
        project_dict['compliance_type'] = c.compliance_type
    del project_dict['_state']
    del project_dict['modified']
    del project_dict['created']
    return {'status': 'success', 'project': project_dict}
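# Every view in this section calls convert_to_js_timestamp(), whose definition
# is not shown here. A minimal sketch of such a helper, assuming it returns
# milliseconds since the Unix epoch (the same arithmetic that get_projects()
# below inlines as int(p.modified.strftime("%s")) * 1000); the real helper in
# SEED may differ:
import calendar
import datetime


def convert_to_js_timestamp(value):
    """Sketch: convert a date/datetime to a JS-style millisecond timestamp."""
    if value is None:
        return None
    if isinstance(value, datetime.datetime):
        # calendar.timegm avoids the platform-specific strftime("%s")
        return calendar.timegm(value.utctimetuple()) * 1000
    # plain dates are treated as midnight UTC
    return calendar.timegm(value.timetuple()) * 1000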
def get_dataset(request):
    """Returns an array of import files for a data set.

    The data set/import record id comes in as a GET param.

    Returns::

        importfiles = [
            {
                name: "DC_CoveredBuildings_50k.csv",
                number_of_buildings: 511,
                number_of_mappings: 511,
                number_of_cleanings: 1349,
                source_type: "AssessorRaw",
                number_of_matchings: 403,
                id: 1
            },
            {
                name: "DC_ESPM_Report.csv",
                number_of_buildings: 511,
                number_of_matchings: 403,
                source_type: "PMRaw",
                id: 2
            }
        ];

    """
    from seed.models import obj_to_dict

    dataset_id = request.GET.get('dataset_id', '')
    orgs = request.user.orgs.all()
    # check if user has access to the dataset
    d = ImportRecord.objects.filter(
        super_organization__in=orgs, pk=dataset_id
    )
    if d.exists():
        d = d[0]
    else:
        return {
            'status': 'success',
            'dataset': {},
        }

    dataset = obj_to_dict(d)
    importfiles = []
    for f in d.files:
        importfile = obj_to_dict(f)
        importfile['name'] = f.filename_only
        importfiles.append(importfile)

    dataset['importfiles'] = importfiles
    if d.last_modified_by:
        dataset['last_modified_by'] = d.last_modified_by.email
    dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(
        import_file__in=d.files
    ).count()
    dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)

    return {
        'status': 'success',
        'dataset': dataset,
    }
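# seed.models.obj_to_dict is imported above but its body is not shown. A
# plausible sketch, assuming it follows the same pattern as
# serialize_building_snapshot() below (copy __dict__, convert dates, drop
# Django's internal _state attribute); the real implementation may differ:
import datetime


def obj_to_dict(obj):
    """Sketch: return a JSON-safe dict for a Django model instance."""
    d = obj.__dict__.copy()
    d.pop('_state', None)
    for key, val in d.items():
        if isinstance(val, (datetime.datetime, datetime.date)):
            d[key] = convert_to_js_timestamp(val)  # helper sketched above
    return d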
def get_datasets(request):
    """Returns an array of datasets for a user's organization.

    Example::

        importfiles = [
            {
                name: "DC_CoveredBuildings_50k.csv",
                number_of_buildings: 511,
                number_of_mappings: 511,
                number_of_cleanings: 1349,
                source_type: "AssessorRaw",
                number_of_matchings: 403,
                id: 1
            },
            {
                name: "DC_ESPM_Report.csv",
                number_of_buildings: 511,
                number_of_matchings: 403,
                source_type: "PMRaw",
                id: 2
            }
        ];

        datasets = [
            {
                name: "DC 2013 data",
                last_modified: (new Date()).getTime(),
                last_modified_by: "*****@*****.**",
                number_of_buildings: 89,
                id: 1,
                importfiles: mock_importfiles
            },
            ...
        ];

    """
    from seed.models import obj_to_dict

    org = Organization.objects.get(pk=request.GET.get('organization_id'))
    datasets = []
    for d in ImportRecord.objects.filter(super_organization=org):
        importfiles = [obj_to_dict(f) for f in d.files]
        dataset = obj_to_dict(d)
        dataset['importfiles'] = importfiles
        if d.last_modified_by:
            dataset['last_modified_by'] = d.last_modified_by.email
        dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(
            import_file__in=d.files,
            canonicalbuilding__active=True,
        ).count()
        dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)
        datasets.append(dataset)

    return {
        'status': 'success',
        'datasets': datasets,
    }
def serialize_building_snapshot(b, pm_cb, building):
    """Returns a dict that's safe to JSON serialize."""
    # note: the `building` parameter is currently unused here
    b_as_dict = b.__dict__.copy()
    for key, val in b_as_dict.items():
        if isinstance(val, (datetime.datetime, datetime.date)):
            b_as_dict[key] = time.convert_to_js_timestamp(val)
    del b_as_dict['_state']

    # check if they're matched
    if b.canonical_building == pm_cb:
        b_as_dict['matched'] = True
    else:
        b_as_dict['matched'] = False

    if '_canonical_building_cache' in b_as_dict:
        del b_as_dict['_canonical_building_cache']
    return b_as_dict
def record_dict(log):
    filename = None if not log.import_filename else path.basename(log.import_filename)
    if filename:
        # Attempt to remove NamedTemporaryFile suffix
        name, ext = path.splitext(filename)
        pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')
        match = pattern.match(name)
        if match:
            filename = match.groups()[0] + ext
    return {
        'state': PropertyStateSerializer(log.state).data,
        'date_edited': convert_to_js_timestamp(log.created),
        'source': log.get_record_type_display(),
        'filename': filename,
        # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None
    }
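# A quick demonstration of the NamedTemporaryFile suffix stripping in
# record_dict(): the regex removes a trailing underscore plus exactly seven
# alphanumerics before the extension (the filename below is made up):
import re
from os import path

name, ext = path.splitext('DC_ESPM_Report_a1B2c3D.csv')
match = re.compile('(.*?)(_[a-zA-Z0-9]{7})$').match(name)
print(match.groups()[0] + ext)  # -> DC_ESPM_Report.csv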
def list(self, request):
    """
    Retrieves all datasets for the user's organization.
    ---
    type:
        status:
            required: true
            type: string
            description: Either success or error
        datasets:
            required: true
            type: array[dataset]
            description: Returns an array where each item is a full dataset structure,
                         including keys 'name', 'number_of_buildings', 'id', 'updated_at',
                         'last_modified_by', 'importfiles', ...
    parameters:
        - name: organization_id
          description: The organization_id for this user's organization
          required: true
          paramType: query
    """
    org_id = request.query_params.get('organization_id', None)
    org = Organization.objects.get(pk=org_id)
    datasets = []
    for d in ImportRecord.objects.filter(super_organization=org):
        importfiles = [obj_to_dict(f) for f in d.files]
        dataset = obj_to_dict(d)
        dataset['importfiles'] = importfiles
        if d.last_modified_by:
            dataset['last_modified_by'] = d.last_modified_by.email
        dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(
            import_file__in=d.files,
            canonicalbuilding__active=True,
        ).count()
        dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)
        datasets.append(dataset)

    return JsonResponse({
        'status': 'success',
        'datasets': datasets,
    })
def list(self, request):
    """
    Retrieves all datasets for the user's organization.
    """
    org_id = request.query_params.get('organization_id', None)
    org = Organization.objects.get(pk=org_id)
    datasets = []
    for d in ImportRecord.objects.filter(super_organization=org):
        importfiles = [obj_to_dict(f) for f in d.files]
        dataset = obj_to_dict(d)
        dataset['importfiles'] = importfiles
        if d.last_modified_by:
            dataset['last_modified_by'] = d.last_modified_by.email
        dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)
        datasets.append(dataset)

    return JsonResponse({
        'status': 'success',
        'datasets': datasets,
    })
def get_PM_building(request):
    """Returns a single BuildingSnapshot, serialized for JSON, along with its
    match status and confidence.

    Expects a JSON body containing the building_id.
    """
    body = json.loads(request.body)
    b = BuildingSnapshot.objects.get(pk=body['building_id'])

    # convert dates for JSON serialization
    building = b.__dict__.copy()
    for key, val in building.items():
        if isinstance(val, (datetime.datetime, datetime.date)):
            building[key] = convert_to_js_timestamp(val)
    del building['_state']

    c = b.canonical_building
    if c and c.canonical_snapshot:
        building['matched'] = True
        building['confidence'] = c.canonical_snapshot.confidence
    else:
        building['matched'] = False

    return {
        'status': 'success',
        'building': building,
    }
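# An illustrative request body for get_PM_building (the id is hypothetical):
#
#   {"building_id": 123}
#
# On success, the snapshot's date fields come back as JS timestamps, and
# 'matched'/'confidence' reflect the canonical snapshot, mirroring
# serialize_building_snapshot() above.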
def history(self):
    """
    Return the history of the property state by parsing through the audit log.
    Returns only the ids of the parent states and some descriptions.

          master
          /    \
         /      \
      parent1  parent2

    In the records, parent2 is the most recent, so navigate parent2 first,
    since we are returning the data in reverse order (that is, most recent
    changes first).

    :return: list, history as a list, and the master record
    """
    history = []
    master = {
        'state_id': self.id,
        'state_data': self,
        'date_edited': None,
    }

    def record_dict(log):
        filename = None if not log.import_filename else path.basename(
            log.import_filename)
        if filename:
            # Attempt to remove NamedTemporaryFile suffix
            name, ext = path.splitext(filename)
            pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')
            match = pattern.match(name)
            if match:
                filename = match.groups()[0] + ext
        return {
            'state_id': log.state.id,
            'state_data': log.state,
            'date_edited': convert_to_js_timestamp(log.created),
            'source': log.get_record_type_display(),
            'filename': filename,
            # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None
        }

    log = PropertyAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(
        state_id=self.id).order_by('-id').first()

    if log:
        master = {
            'state_id': log.state.id,
            'state_data': log.state,
            'date_edited': convert_to_js_timestamp(log.created),
        }

        # Traverse parents and add to history
        if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:
            done_searching = False
            while not done_searching:
                # if there are no parents, then break out immediately
                if (log.parent1_id is None and log.parent2_id is None) or \
                        log.name == 'Manual Edit':
                    break

                # initialize the tree to None every time; if no new tree is
                # found, then we will not iterate
                tree = None

                # Check if parent2 has any other parents or is the original import
                # creation. Start with parent2 because parent2 will be the most
                # recent import file.
                if log.parent2:
                    if log.parent2.name in ['Import Creation', 'Manual Edit']:
                        record = record_dict(log.parent2)
                        history.append(record)
                    elif log.parent2.name == 'System Match' and \
                            log.parent2.parent1.name == 'Import Creation' and \
                            log.parent2.parent2.name == 'Import Creation':
                        # Handle case where an import file matches within itself,
                        # and proceeds to match with existing records
                        record = record_dict(log.parent2.parent2)
                        history.append(record)
                        record = record_dict(log.parent2.parent1)
                        history.append(record)
                    else:
                        tree = log.parent2

                if log.parent1:
                    if log.parent1.name in ['Import Creation', 'Manual Edit']:
                        record = record_dict(log.parent1)
                        history.append(record)
                    elif log.parent1.name == 'System Match' and \
                            log.parent1.parent1.name == 'Import Creation' and \
                            log.parent1.parent2.name == 'Import Creation':
                        # Handle case where an import file matches within itself,
                        # and proceeds to match with existing records
                        record = record_dict(log.parent1.parent2)
                        history.append(record)
                        record = record_dict(log.parent1.parent1)
                        history.append(record)
                    else:
                        tree = log.parent1

                if not tree:
                    done_searching = True
                else:
                    log = tree
        elif log.name == 'Manual Edit':
            record = record_dict(log.parent1)
            history.append(record)
        elif log.name == 'Import Creation':
            record = record_dict(log)
            history.append(record)

    return history, master
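# Illustrative use of history() (the pk is hypothetical; requires a database):
#
#   state = PropertyState.objects.get(pk=123)
#   history, master = state.history()
#
# master['date_edited'] stays None when the state has no audit log entry, and
# history[0], when present, is the most recently merged parent record.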
def retrieve(self, request, pk=None):
    """
    Retrieves a dataset (ImportRecord).
    ---
    type:
        status:
            required: true
            type: string
            description: Either success or error
        dataset:
            required: true
            type: dictionary
            description: A dictionary of a full dataset structure, including keys 'name',
                         'id', 'updated_at', 'last_modified_by', 'importfiles', ...
    parameter_strategy: replace
    parameters:
        - name: pk
          description: The ID of the dataset to retrieve
          required: true
          paramType: path
        - name: organization_id
          description: The organization_id for this user's organization
          required: true
          paramType: query
    """
    organization_id = request.query_params.get('organization_id', None)
    if organization_id is None:
        return JsonResponse(
            {'status': 'error', 'message': 'Missing organization_id query parameter'},
            status=status.HTTP_400_BAD_REQUEST)
    try:
        organization_id = int(organization_id)
    except ValueError:
        return JsonResponse(
            {'status': 'error', 'message': 'Bad (non-numeric) organization_id'},
            status=status.HTTP_400_BAD_REQUEST)

    valid_orgs = OrganizationUser.objects.filter(
        user_id=request.user.id
    ).values_list('organization_id', flat=True).order_by('organization_id')
    if organization_id not in valid_orgs:
        return JsonResponse(
            {'status': 'error', 'message': 'Cannot access datasets for this organization id'},
            status=status.HTTP_403_FORBIDDEN)

    # check if dataset exists
    try:
        d = ImportRecord.objects.get(pk=pk)
    except ImportRecord.DoesNotExist:
        return JsonResponse(
            {'status': 'error', 'message': 'dataset with id {} does not exist'.format(pk)},
            status=status.HTTP_404_NOT_FOUND)

    if d.super_organization_id != organization_id:
        return JsonResponse(
            {'status': 'error', 'message': 'Organization ID mismatch between dataset and organization'},
            status=status.HTTP_400_BAD_REQUEST)

    dataset = obj_to_dict(d)
    importfiles = []
    for f in d.files:
        importfile = obj_to_dict(f)
        if not f.uploaded_filename:
            importfile['name'] = f.filename_only
        else:
            importfile['name'] = f.uploaded_filename
        importfiles.append(importfile)

    dataset['importfiles'] = importfiles
    if d.last_modified_by:
        dataset['last_modified_by'] = d.last_modified_by.email
    dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)

    return JsonResponse({
        'status': 'success',
        'dataset': dataset,
    })
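# An illustrative request against this endpoint, assuming the ViewSet is
# routed under /api/v2/datasets/ (the routing is not shown here):
#
#   GET /api/v2/datasets/1/?organization_id=1
#
# A missing or non-numeric organization_id yields HTTP 400, an organization
# the user does not belong to yields HTTP 403, an unknown pk yields HTTP 404,
# and a dataset/organization mismatch yields HTTP 400.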
def get_history(self, taxlot_view):
    """Return history in reverse order."""
    history = []

    def record_dict(log):
        filename = None if not log.import_filename else path.basename(
            log.import_filename)
        if filename:
            # Attempt to remove NamedTemporaryFile suffix
            name, ext = path.splitext(filename)
            pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')
            match = pattern.match(name)
            if match:
                filename = match.groups()[0] + ext
        return {
            'state': TaxLotStateSerializer(log.state).data,
            'date_edited': convert_to_js_timestamp(log.created),
            'source': log.get_record_type_display(),
            'filename': filename,
            # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None
        }

    log = TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(
        state_id=taxlot_view.state_id).order_by('-id').first()
    master = {
        'state': TaxLotStateSerializer(log.state).data,
        'date_edited': convert_to_js_timestamp(log.created),
    }

    # Traverse parents and add to history
    if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:
        done_searching = False
        while not done_searching:
            if (log.parent1_id is None and log.parent2_id is None) or \
                    log.name == 'Manual Edit':
                done_searching = True
            elif log.name == 'Merge current state in migration':
                record = record_dict(log.parent1)
                history.append(record)
                if log.parent1.name == 'Import Creation':
                    done_searching = True
                else:
                    tree = log.parent1
                    log = tree
            else:
                tree = None
                if log.parent2:
                    if log.parent2.name in ['Import Creation', 'Manual Edit']:
                        record = record_dict(log.parent2)
                        history.append(record)
                    elif log.parent2.name == 'System Match' and \
                            log.parent2.parent1.name == 'Import Creation' and \
                            log.parent2.parent2.name == 'Import Creation':
                        # Handle case where an import file matches within itself,
                        # and proceeds to match with existing records
                        record = record_dict(log.parent2.parent2)
                        history.append(record)
                        record = record_dict(log.parent2.parent1)
                        history.append(record)
                    else:
                        tree = log.parent2

                if log.parent1.name in ['Import Creation', 'Manual Edit']:
                    record = record_dict(log.parent1)
                    history.append(record)
                else:
                    tree = log.parent1

                if not tree:
                    done_searching = True
                else:
                    log = tree
    elif log.name == 'Manual Edit':
        record = record_dict(log.parent1)
        history.append(record)
    elif log.name == 'Import Creation':
        record = record_dict(log)
        history.append(record)

    return history, master
def retrieve(self, request, pk=None):
    """
    Retrieves a dataset (ImportRecord).
    ---
    type:
        status:
            required: true
            type: string
            description: Either success or error
        dataset:
            required: true
            type: dictionary
            description: A dictionary of a full dataset structure, including keys 'name',
                         'number_of_buildings', 'id', 'updated_at', 'last_modified_by',
                         'importfiles', ...
    parameter_strategy: replace
    parameters:
        - name: pk
          description: "Primary Key"
          required: true
          paramType: path
        - name: organization_id
          description: The organization_id for this user's organization
          required: true
          paramType: query
    """
    organization_id = request.query_params.get('organization_id', None)
    if organization_id is None:
        return JsonResponse({
            'status': 'error',
            'message': 'Missing organization_id query parameter'
        })
    try:
        organization_id = int(organization_id)
    except ValueError:
        return JsonResponse({
            'status': 'error',
            'message': 'Bad (non-numeric) organization_id'
        })

    dataset_id = pk
    # check if user has access to the dataset
    d = ImportRecord.objects.filter(
        super_organization_id=organization_id, pk=dataset_id
    )
    if d.exists():
        d = d[0]
    else:
        return JsonResponse({
            'status': 'success',
            'dataset': {},
        })

    dataset = obj_to_dict(d)
    importfiles = []
    for f in d.files:
        importfile = obj_to_dict(f)
        if not f.uploaded_filename:
            importfile['name'] = f.filename_only
        else:
            importfile['name'] = f.uploaded_filename
        importfiles.append(importfile)

    dataset['importfiles'] = importfiles
    if d.last_modified_by:
        dataset['last_modified_by'] = d.last_modified_by.email
    dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(
        import_file__in=d.files
    ).count()
    dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)

    return JsonResponse({
        'status': 'success',
        'dataset': dataset,
    })
def get_projects(request):
    """
    Retrieves all projects for a given organization.

    :GET: Expects organization_id in the query string.

    Returns::

        {
         'status': 'success',
         'projects': [
            {
             'name': project's name,
             'slug': project's identifier,
             'status': 'active',
             'number_of_buildings': Count of buildings associated with project,
             'last_modified': Timestamp when project last changed,
             'last_modified_by': {
                'first_name': first name of user that made last change,
                'last_name': last name,
                'email': email address,
                },
             'is_compliance': True if project is a compliance project,
             'compliance_type': Description of compliance type,
             'deadline_date': Timestamp of when compliance is due,
             'end_date': Timestamp of end of project
            }...
          ]
        }

    """
    organization_id = request.GET['organization_id']
    projects = []

    for p in Project.objects.filter(
            super_organization_id=organization_id,
    ).distinct():
        if p.last_modified_by:
            first_name = p.last_modified_by.first_name
            last_name = p.last_modified_by.last_name
            email = p.last_modified_by.email
        else:
            first_name = None
            last_name = None
            email = None
        p_as_json = {
            'name': p.name,
            'slug': p.slug,
            'status': 'active',
            'number_of_buildings': p.project_building_snapshots.count(),
            # convert to JS timestamp
            'last_modified': int(p.modified.strftime("%s")) * 1000,
            'last_modified_by': {
                'first_name': first_name,
                'last_name': last_name,
                'email': email,
            },
            'is_compliance': p.has_compliance,
        }
        if p.has_compliance:
            compliance = p.get_compliance()
            p_as_json['end_date'] = convert_to_js_timestamp(compliance.end_date)
            p_as_json['deadline_date'] = convert_to_js_timestamp(compliance.deadline_date)
            p_as_json['compliance_type'] = compliance.compliance_type
        projects.append(p_as_json)

    return {'status': 'success', 'projects': projects}
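# The inline conversion above, int(p.modified.strftime("%s")) * 1000, depends
# on the non-portable "%s" format code and the server's local timezone. A
# portable equivalent sketch, assuming timestamps are stored in UTC:
import calendar
import datetime

modified = datetime.datetime(2013, 5, 1, 12, 0, 0)  # example value
js_timestamp = calendar.timegm(modified.utctimetuple()) * 1000
print(js_timestamp)  # 1367409600000 ms since the Unix epoch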