Example 1
def remap_data(import_file_pk):
    """"Delete mapped buildings for current import file, re-map them."""
    import_file = ImportFile.objects.get(pk=import_file_pk)
    # Check to ensure that the building has not already been merged.
    mapping_cache_key = get_prog_key('map_data', import_file.pk)
    if import_file.matching_done or import_file.matching_completion:
        result = {
            'status': 'warning',
            'progress': 100,
            'message': 'Mapped buildings already merged'
        }
        set_cache(mapping_cache_key, result['status'], result)
        return result

    # Make sure that our mapping cache progress is reset before the
    # asynchronous remap starts reporting its own progress.
    result = {
        'status': 'success',
        'progress': 0,
        'message': 'Remapping buildings'
    }
    set_cache(mapping_cache_key, result['status'], result)

    _remap_data.delay(import_file_pk)

    # Also return the mapping cache key so that the front end can follow
    # the progress.
    return {
        'status': 'success',
        'progress_key': mapping_cache_key
    }
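These examples all lean on the same trio of helpers: get_prog_key builds a namespaced progress key, set_cache writes a status payload under it, and get_cache reads it back. A minimal sketch of what they might look like, assuming Django's cache framework; the key format and timeout here are assumptions, not SEED's actual implementation:

from django.core.cache import cache


def get_prog_key(func_name, pk):
    """Build a namespaced progress key, e.g. 'map_data__3__prog'."""
    # hypothetical key layout; the real format may differ
    return '%s__%s__prog' % (func_name, pk)


def set_cache(key, status, data, timeout=86400):
    """Store a progress payload; callers pass either a dict or a bare number."""
    if not isinstance(data, dict):
        data = {'progress': data}
    data['status'] = status
    cache.set(key, data, timeout)


def get_cache(key):
    """Read the payload back, defaulting to an empty record."""
    return cache.get(key) or {'status': 'unknown', 'progress': 0}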
Example 2
def _finish_delete(results, org_pk, prog_key):
    result = {
        'status': 'success',
        'progress': 100,
        'progress_key': prog_key
    }
    set_cache(prog_key, result['status'], result)
Example 3
def delete_organization(org_pk, deleting_cache_key, chunk_size=100, *args,
                        **kwargs):
    result = {
        'status': 'success',
        'progress': 0,
        'progress_key': deleting_cache_key
    }

    set_cache(deleting_cache_key, result['status'], result)

    if CanonicalBuilding.objects.filter(
            canonical_snapshot__super_organization=org_pk).exists():
        _delete_canonical_buildings.delay(org_pk)

    if BuildingSnapshot.objects.filter(super_organization=org_pk).exists():
        ids = list(
            BuildingSnapshot.objects.filter(
                super_organization=org_pk).values_list('id', flat=True)
        )

        step = float(chunk_size) / len(ids)
        tasks = []
        for del_ids in batch(ids, chunk_size):
            # we could also use .s instead of .subtask and not wrap the *args
            tasks.append(
                _delete_organization_buildings_chunk.subtask(
                    (del_ids, deleting_cache_key, step, org_pk)
                )
            )
        chord(tasks, interval=15)(_delete_organization_related_data.subtask(
            [org_pk, deleting_cache_key]))
    else:
        _delete_organization_related_data(None, org_pk, deleting_cache_key)
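delete_organization slices the id list with a batch helper and precomputes step = chunk_size / len(ids), the fraction of the total work that one chunk represents (with 1,000 ids and chunk_size=100, each finished chunk should advance the progress bar by 10 percent). A plausible sketch of batch, written to accept both lists and queryset iterators since later examples pass both; the real helper may differ:

from itertools import islice


def batch(iterable, size):
    """Yield lists of at most `size` items from any iterable."""
    it = iter(iterable)
    while True:
        chunk = list(islice(it, size))
        if not chunk:
            return
        yield chunk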
Example 4
def _delete_organization_buildings(org_pk, chunk_size=100, *args, **kwargs):
    """Deletes all BuildingSnapshot instances within an organization

    :param org_pk: int, str, the organization pk
    """
    qs = BuildingSnapshot.objects.filter(super_organization=org_pk)
    ids = qs.values_list('id', flat=True)
    deleting_cache_key = get_prog_key(
        'delete_organization_buildings',
        org_pk
    )
    if not ids:
        set_cache(deleting_cache_key, 'success', 100)
        return

    # delete the canonical buildings
    can_ids = CanonicalBuilding.objects.filter(
        canonical_snapshot__super_organization=org_pk
    ).values_list('id', flat=True)
    _delete_canonical_buildings.delay(can_ids)

    step = float(chunk_size) / len(ids)
    set_cache(deleting_cache_key, 'success', 0)
    tasks = []
    for del_ids in batch(ids, chunk_size):
        # we could also use .s instead of .subtask and not wrap the *args
        tasks.append(
            _delete_organization_buildings_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)
            )
        )
    chord(tasks, interval=15)(finish_delete.subtask([org_pk]))
Example 5
def delete_organization_inventory(org_pk,
                                  deleting_cache_key,
                                  chunk_size=100,
                                  *args,
                                  **kwargs):
    """Deletes all properties & taxlots within an organization."""
    sys.setrecursionlimit(5000)  # default is 1000

    result = {
        'status': 'success',
        'progress_key': deleting_cache_key,
        'progress': 0
    }

    property_ids = list(
        Property.objects.filter(organization_id=org_pk).values_list('id',
                                                                    flat=True))
    property_state_ids = list(
        PropertyState.objects.filter(organization_id=org_pk).values_list(
            'id', flat=True))
    taxlot_ids = list(
        TaxLot.objects.filter(organization_id=org_pk).values_list('id',
                                                                  flat=True))
    taxlot_state_ids = list(
        TaxLotState.objects.filter(organization_id=org_pk).values_list(
            'id', flat=True))

    total = (len(property_ids) + len(property_state_ids) +
             len(taxlot_ids) + len(taxlot_state_ids))

    if total == 0:
        result['progress'] = 100
        set_cache(deleting_cache_key, result['status'], result)
        return

    set_cache(deleting_cache_key, result['status'], result)

    step = float(chunk_size) / total
    tasks = []
    # we could also use .s instead of .subtask and not wrap the *args
    for del_ids in batch(property_ids, chunk_size):
        tasks.append(
            _delete_organization_property_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)))
    for del_ids in batch(property_state_ids, chunk_size):
        tasks.append(
            _delete_organization_property_state_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)))
    for del_ids in batch(taxlot_ids, chunk_size):
        tasks.append(
            _delete_organization_taxlot_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)))
    for del_ids in batch(taxlot_state_ids, chunk_size):
        tasks.append(
            _delete_organization_taxlot_state_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)))
    chord(tasks,
          interval=15)(_finish_delete.subtask([org_pk, deleting_cache_key]))
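Each _delete_organization_*_chunk task presumably deletes its slice and then bumps the cached progress by its share, which is why step travels with every subtask. A hypothetical sketch of one such worker; the name matches the calls above, but the body is an assumption:

from celery import shared_task


@shared_task
def _delete_organization_property_chunk(del_ids, prog_key, step, org_pk,
                                        *args, **kwargs):
    # delete this slice of properties
    Property.objects.filter(organization_id=org_pk, pk__in=del_ids).delete()

    # advance the cached progress by this chunk's share of the total
    data = get_cache(prog_key)
    data['progress'] = min(data.get('progress', 0) + step * 100, 100)
    set_cache(prog_key, data['status'], data)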
Example 6
    def save(self):
        """Save the data to the cache"""
        # save some member variables
        self.data['total'] = self.total

        set_cache(self.key, self.data['status'], self.data)

        return get_cache(self.key)
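The save method above clearly belongs to a small progress-tracking class whose other members are not shown. A sketch of the shape it implies; only key, data, and total are evidenced by the snippet, so everything else here is an assumption:

class ProgressData(object):

    def __init__(self, func_name, pk):
        self.key = get_prog_key(func_name, pk)
        self.total = 0
        self.data = {
            'status': 'not-started',
            'progress': 0,
            'progress_key': self.key,
            'total': self.total,
        }

    def save(self):
        """Save the data to the cache"""
        self.data['total'] = self.total
        set_cache(self.key, self.data['status'], self.data)
        return get_cache(self.key)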
Example 7
def _save_raw_data(file_pk, *args, **kwargs):
    """Chunk up the CSV or XLSX file and save the raw data into the DB BuildingSnapshot table."""

    result = {'status': 'success', 'progress': 100}
    prog_key = get_prog_key('save_raw_data', file_pk)
    try:
        import_file = ImportFile.objects.get(pk=file_pk)
        if import_file.raw_save_done:
            result['status'] = 'warning'
            result['message'] = 'Raw data already saved'
            set_cache(prog_key, result['status'], result)
            return result

        if import_file.source_type == "Green Button Raw":
            return _save_raw_green_button_data(file_pk, *args, **kwargs)

        parser = reader.MCMParser(import_file.local_file)
        cache_first_rows(import_file, parser)
        rows = parser.next()
        import_file.num_rows = 0
        import_file.num_columns = parser.num_columns()

        # accumulate the total row count as the file is chunked
        tasks = []
        for chunk in batch(rows, 100):
            import_file.num_rows += len(chunk)
            tasks.append(_save_raw_data_chunk.s(chunk, file_pk, prog_key))

        import_file.save()

        # need to rework how the progress keys are implemented here
        tasks = add_cache_increment_parameter(tasks)
        if tasks:
            chord(tasks, interval=15)(finish_raw_save.s(file_pk))
        else:
            # no chunks were produced; finish the raw save directly
            finish_raw_save.delay(None, file_pk)

    except StopIteration:
        result['status'] = 'error'
        result['message'] = 'StopIteration Exception'
        result['stacktrace'] = traceback.format_exc()
    except Error as e:
        result['status'] = 'error'
        result['message'] = 'File Content Error: ' + e.message
        result['stacktrace'] = traceback.format_exc()
    except KeyError as e:
        result['status'] = 'error'
        result['message'] = 'Invalid Column Name: "' + e.message + '"'
        result['stacktrace'] = traceback.format_exc()
    except Exception as e:
        result['status'] = 'error'
        result['message'] = 'Unhandled Error: ' + str(e.message)
        result['stacktrace'] = traceback.format_exc()

    set_cache(prog_key, result['status'], result)
    return result
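The control flow in _save_raw_data (build a list of task signatures, then fire them as a chord whose callback finalizes the cache) is a standard Celery pattern. Stripped of the SEED specifics, with purely illustrative task names:

from celery import chord, shared_task


@shared_task
def process_chunk(chunk, prog_key):
    # per-chunk work would go here
    return len(chunk)


@shared_task
def finalize(results, prog_key):
    # a chord callback receives the list of chunk results as its first arg
    set_cache(prog_key, 'success',
              {'status': 'success', 'progress': 100, 'progress_key': prog_key})


def run(chunks, prog_key):
    tasks = [process_chunk.s(chunk, prog_key) for chunk in chunks]
    if tasks:
        # interval=15 makes the chord poll for completion every 15 seconds
        chord(tasks, interval=15)(finalize.s(prog_key))
    else:
        # nothing to fan out; finish directly
        finalize.delay([], prog_key)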
Example 8
    def _row_cb(i):
        data = get_cache("export_buildings__%s" % export_id)
        data['buildings_processed'] = i

        if not data['total_buildings']:
            data['progress'] = 100
        else:
            data['progress'] = (i * 100) / data['total_buildings']

        set_cache("export_buildings__%s" % export_id, data['status'], data)
Example 9
def _finish_delete(results, org_pk, prog_key):
    result = {
        'status': 'success',
        'progress': 100,
        'progress_key': prog_key
    }

    # set recursion limits back to 1000
    sys.setrecursionlimit(1000)
    set_cache(prog_key, result['status'], result)
Example 10
def finish_mapping(results, file_pk):
    import_file = ImportFile.objects.get(pk=file_pk)
    import_file.mapping_done = True
    import_file.save()
    finish_import_record(import_file.import_record.pk)
    prog_key = get_prog_key('map_data', file_pk)
    set_cache(prog_key, 'success', 100)

    # now call cleansing
    _cleanse_data(file_pk)
Example 11
def _map_data(file_pk, *args, **kwargs):
    """Get all of the raw data and process it using appropriate mapping.
    @lock_and_track returns a progress_key

    :param file_pk: int, the id of the import_file we're working with.

    """

    import_file = ImportFile.objects.get(pk=file_pk)
    # Don't perform this task if it's already been completed.
    if import_file.mapping_done:
        prog_key = get_prog_key('map_data', file_pk)
        result = {
            'status': 'warning',
            'progress': 100,
            'message': 'mapping already complete'
        }
        set_cache(prog_key, result['status'], result)
        return result

    # If we haven't finished saving, we shouldn't proceed with mapping
    # Re-queue this task.
    if not import_file.raw_save_done:
        map_data.apply_async(args=[file_pk], countdown=60, expires=120)
        return {'status': 'error', 'message': 'waiting for raw data save.'}

    source_type_dict = {
        'Portfolio Raw': PORTFOLIO_RAW,
        'Assessed Raw': ASSESSED_RAW,
        'Green Button Raw': GREEN_BUTTON_RAW,
    }
    source_type = source_type_dict.get(import_file.source_type, ASSESSED_RAW)

    qs = BuildingSnapshot.objects.filter(
        import_file=import_file,
        source_type=source_type,
    ).iterator()

    prog_key = get_prog_key('map_data', file_pk)
    tasks = []
    for chunk in batch(qs, 100):
        serialized_data = [obj.extra_data for obj in chunk]
        tasks.append(map_row_chunk.s(serialized_data, file_pk, source_type, prog_key))

    # need to rework how the progress keys are implemented here, but at
    # least the method gets called above for cleansing
    tasks = add_cache_increment_parameter(tasks)
    if tasks:
        chord(tasks, interval=15)(finish_mapping.subtask([file_pk]))
    else:
        # no chunks were produced; finish the mapping step directly
        finish_mapping.delay(None, file_pk)

    return {'status': 'success'}
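The re-queue guard near the top of _map_data (and again in match_buildings further down) is worth isolating: if a prerequisite flag is not yet set, the task reschedules itself instead of failing. A minimal sketch of the idiom, with an illustrative task name:

from celery import shared_task


@shared_task
def dependent_task(file_pk):
    import_file = ImportFile.objects.get(pk=file_pk)
    if not import_file.raw_save_done:
        # prerequisite not met: retry in 60s; the retry is dropped entirely
        # if it has not started within 120s
        dependent_task.apply_async(args=[file_pk], countdown=60, expires=120)
        return {'status': 'error', 'message': 'waiting for raw data save.'}
    # safe to proceed with the real work here
    return {'status': 'success'}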
Example 12
def finish_raw_save(results, file_pk):
    """
    Finish importing the raw file.

    :param results: results from the other tasks before the chord ran
    :param file_pk: ID of the file that was being imported
    :return: None
    """
    import_file = ImportFile.objects.get(pk=file_pk)
    import_file.raw_save_done = True
    import_file.save()
    prog_key = get_prog_key('save_raw_data', file_pk)
    set_cache(prog_key, 'success', 100)
Example 13
def finish_cleansing(file_pk):
    """
    Chord callback that runs after the cleansing is complete

    :param file_pk: import file primary key
    :return:
    """

    prog_key = get_prog_key('cleanse_data', file_pk)
    result = {
        'status': 'success',
        'progress': 100,
        'message': 'cleansing complete'
    }
    set_cache(prog_key, result['status'], result)
Example 14
def delete_organization(org_pk,
                        deleting_cache_key,
                        chunk_size=100,
                        *args,
                        **kwargs):
    result = {
        'status': 'success',
        'progress': 0,
        'progress_key': deleting_cache_key
    }
    set_cache(deleting_cache_key, result['status'], result)

    chain(delete_organization_inventory.si(org_pk, deleting_cache_key),
          _delete_organization_related_data.si(org_pk, deleting_cache_key),
          _finish_delete.si(None, org_pk, deleting_cache_key))()
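This version of delete_organization chains the steps with .si(...), immutable signatures, so no step's return value is forwarded to the next; that is why _finish_delete is handed None for its results argument explicitly. A toy illustration of the difference between .s and .si:

from celery import chain, shared_task


@shared_task
def step_one(x):
    return x + 1


@shared_task
def step_two(result, x):
    return (result or 0) + x


# .s forwards step_one's return value as step_two's first argument;
# .si freezes the arguments, so nothing is forwarded.
chain(step_one.s(1), step_two.s(10))()          # step_two runs as (2, 10)
chain(step_one.si(1), step_two.si(None, 10))()  # step_two runs as (None, 10)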
Example 15
def _delete_organization_related_data(org_pk, prog_key):
    # Get all org users
    user_ids = OrganizationUser.objects.filter(
        organization_id=org_pk).values_list('user_id', flat=True)
    users = list(User.objects.filter(pk__in=user_ids))

    Organization.objects.get(pk=org_pk).delete()

    # TODO: Delete measures in BRICR branch

    # Delete any abandoned users.
    for user in users:
        if not OrganizationUser.objects.filter(user_id=user.pk).exists():
            user.delete()

    result = {'status': 'success', 'progress': 100, 'progress_key': prog_key}
    set_cache(prog_key, result['status'], result)
Example 16
def _delete_organization_related_data(chain, org_pk, prog_key):
    # Get all org users
    user_ids = OrganizationUser.objects.filter(
        organization_id=org_pk).values_list('user_id', flat=True)
    users = list(User.objects.filter(pk__in=user_ids))

    Organization.objects.get(pk=org_pk).delete()

    # Delete any abandoned users.
    for user in users:
        if not OrganizationUser.objects.filter(user_id=user.pk).exists():
            user.delete()

    result = {
        'status': 'success',
        'progress': 100,
        'progress_key': prog_key
    }
    set_cache(prog_key, result['status'], result)
Example 17
def match_buildings(file_pk, user_pk):
    """kicks off system matching, returns progress key #NL -- this seems to return a JSON--not a progress key?"""
    import_file = ImportFile.objects.get(pk=file_pk)
    if import_file.matching_done:
        prog_key = get_prog_key('match_buildings', file_pk)
        set_cache(prog_key, 'warning', 100)
        return {'status': 'warning', 'message': 'matching already complete'}

    if not import_file.mapping_done:
        # Re-add to the queue, hopefully our mapping will be done by then.
        match_buildings.apply_async(
            args=[file_pk, user_pk], countdown=10, expires=20
        )
        return {
            'status': 'error',
            'message': 'waiting for mapping to complete'
        }

    _match_buildings.delay(file_pk, user_pk)

    return {'status': 'success'}
Example 18
def _save_raw_green_button_data(file_pk, *args, **kwargs):
    """
    Pulls identifying information out of the xml data, find_or_creates
    a building_snapshot for the data, parses and stores the timeseries
    meter data and associates it with the building snapshot.
    """

    import_file = ImportFile.objects.get(pk=file_pk)

    import_file.raw_save_done = True
    import_file.save()

    res = xml_importer.import_xml(import_file)

    prog_key = get_prog_key('save_raw_data', file_pk)
    set_cache(prog_key, 'success', 100)

    if res:
        return {'status': 'success', 'progress': 100}

    return {
        'status': 'error',
        'message': 'data failed to import'
    }
Example 19
def delete_organization_buildings(org_pk, deleting_cache_key, chunk_size=100,
                                  *args, **kwargs):
    """Deletes all BuildingSnapshot instances within an organization."""
    result = {
        'status': 'success',
        'progress_key': deleting_cache_key
    }

    if not BuildingSnapshot.objects.filter(super_organization=org_pk).exists():
        result['progress'] = 100
    else:
        result['progress'] = 0

    set_cache(deleting_cache_key, result['status'], result)

    if result['progress'] == 100:
        return

    _delete_canonical_buildings.delay(org_pk)

    ids = list(
        BuildingSnapshot.objects.filter(super_organization=org_pk).values_list(
            'id', flat=True)
    )

    step = float(chunk_size) / len(ids)
    tasks = []
    for del_ids in batch(ids, chunk_size):
        # we could also use .s instead of .subtask and not wrap the *args
        tasks.append(
            _delete_organization_buildings_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)
            )
        )
    chord(tasks, interval=15)(
        _finish_delete.subtask([org_pk, deleting_cache_key]))
Example 20
def add_buildings(project_slug, project_dict, user_pk):
    """adds buildings to a project. if a user has selected all buildings,
       then the the search parameters within project_dict are used to determine
       the total set
       of buildings.
       also creates a Compliance inst. if satisfying params are present

       :param str project_slug: a project's slug used to get the project
       :param dict project_dict: contains search params, and browser state
       information
       :user_pk int or str: the user's pk or id

    """
    project = Project.objects.get(slug=project_slug)

    # Initialize the progress cache
    prog_key = project.adding_buildings_status_percentage_cache_key
    data = {
        'status': 'processing',
        'progress': 0,
        'progress_key': prog_key,
        'numerator': 0,
        'denominator': 0,
    }
    set_cache(prog_key, data['status'], data)

    user = User.objects.get(pk=user_pk)
    project.last_modified_by = user
    project.save()

    # Perform the appropriate filtering to get the raw list of buildings.
    params = search.process_search_params(project_dict, user,
                                          is_api_request=False)
    buildings_queryset = search.orchestrate_search_filter_sort(
        params=params,
        user=user,
    )

    # Get selected buildings based on either individual selection or select-all
    # selection.
    if project_dict.get('select_all_checkbox'):
        selected_buildings = buildings_queryset
    else:
        selected_buildings = buildings_queryset.filter(
            id__in=project_dict.get('selected_buildings', []),
        )

    denominator = len(selected_buildings)

    # Loop over the buildings adding them to the project and updating the
    # progress cache.
    for idx, bs in enumerate(selected_buildings):
        data = {
            'status': 'processing',
            'progress': (float(idx) / denominator * 100),
            'progress_key': prog_key,
            'numerator': idx,
            'denominator': denominator
        }
        set_cache(prog_key, data['status'], data)
        ProjectBuilding.objects.get_or_create(
            project=project, building_snapshot=bs
        )

    # Mark the progress cache as complete.
    result = {
        'status': 'completed',
        'progress': 100,
        'progress_key': prog_key,
        'numerator': denominator,
        'denominator': denominator
    }
    set_cache(prog_key, result['status'], result)

    deadline_date = time_utils.parse_datetime(
        project_dict.get('deadline_date'))

    end_date = time_utils.parse_datetime(project_dict.get('end_date'))

    if end_date:
        last_day_of_month = calendar.monthrange(
            end_date.year, end_date.month
        )[1]
        end_date = datetime.datetime(
            end_date.year, end_date.month, last_day_of_month
        )

    if project_dict.get('compliance_type'):
        compliance = Compliance.objects.create(
            compliance_type=project_dict.get('compliance_type'),
            end_date=end_date,
            deadline_date=deadline_date,
            project=project
        )
        compliance.save()
Example 21
def remove_buildings(project_slug, project_dict, user_pk):
    """adds buildings to a project. if a user has selected all buildings,
       then the the search parameters within project_dict are used to determine
       the total set of buildings.

       :param str project_slug: a project's slug used to get the project
       :param dict project_dict: contains search params, and browser state
           information
       :user_pk int or str: the user's pk or id
    """
    project = Project.objects.get(slug=project_slug)
    user = User.objects.get(pk=user_pk)
    project.last_modified_by = user
    project.save()

    selected_buildings = project_dict.get('selected_buildings', [])
    prog_key = project.removing_buildings_status_percentage_cache_key
    data = {
        'status': 'processing',
        'progress': 0,
        'progress_key': prog_key,
        'numerator': 0,
        'denominator': 0,
    }
    set_cache(prog_key, data['status'], data)
    i = 0
    denominator = 1
    if not project_dict.get('select_all_checkbox', False):
        for sfid in selected_buildings:
            i += 1
            denominator = len(selected_buildings)
            data = {
                'status': 'processing',
                'progress': (float(i) / max(len(selected_buildings), 1) * 100),
                'progress_key': prog_key,
                'numerator': i,
                'denominator': denominator
            }
            set_cache(prog_key, data['status'], data)
            ab = BuildingSnapshot.objects.get(pk=sfid)
            ProjectBuilding.objects.get(project=project,
                                        building_snapshot=ab).delete()
    else:
        query_buildings = get_search_query(user, project_dict)
        denominator = query_buildings.count() - len(selected_buildings)
        data = {
            'status': 'processing',
            'progress': 10,
            'progress_key': prog_key,
            'numerator': i,
            'denominator': denominator
        }
        set_cache(prog_key, data['status'], data)
        for b in query_buildings:
            ProjectBuilding.objects.get(project=project,
                                        building_snapshot=b).delete()
        data = {
            'status': 'processing',
            'progress': 50,
            'progress_key': prog_key,
            'numerator': denominator - len(selected_buildings),
            'denominator': denominator
        }
        set_cache(prog_key, data['status'], data)
        for building in selected_buildings:
            i += 1
            ab = BuildingSnapshot.objects.get(source_facility_id=building)
            ProjectBuilding.objects.create(project=project,
                                           building_snapshot=ab)
            data = {
                'status': 'processing',
                'progress': (float(denominator - len(selected_buildings) + i) /
                             denominator * 100),
                'progress_key': prog_key,
                'numerator': denominator - len(selected_buildings) + i,
                'denominator': denominator
            }
            set_cache(prog_key, data['status'], data)

    result = {
        'status': 'complete',
        'progress': 100,
        'progress_key': prog_key,
        'numerator': i,
        'denominator': denominator
    }
    set_cache(prog_key, result['status'], result)
Example 22
def add_buildings(project_slug, project_dict, user_pk):
    """adds buildings to a project. if a user has selected all buildings,
       then the the search parameters within project_dict are used to determine
       the total set
       of buildings.
       also creates a Compliance inst. if satisfying params are present

       :param str project_slug: a project's slug used to get the project
       :param dict project_dict: contains search params, and browser state
       information
       :user_pk int or str: the user's pk or id

    """
    project = Project.objects.get(slug=project_slug)

    # Initialize the progress cache
    prog_key = project.adding_buildings_status_percentage_cache_key
    data = {
        'status': 'processing',
        'progress': 0,
        'progress_key': prog_key,
        'numerator': 0,
        'denominator': 0,
    }
    set_cache(prog_key, data['status'], data)

    user = User.objects.get(pk=user_pk)
    project.last_modified_by = user
    project.save()

    # Perform the appropriate filtering to get the raw list of buildings.
    params = search.process_search_params(project_dict,
                                          user,
                                          is_api_request=False)
    buildings_queryset = search.orchestrate_search_filter_sort(
        params=params,
        user=user,
    )

    # Get selected buildings based on either individual selection or select-all
    # selection.
    if project_dict.get('select_all_checkbox'):
        selected_buildings = buildings_queryset
    else:
        selected_buildings = buildings_queryset.filter(id__in=project_dict.get(
            'selected_buildings', []), )

    denominator = len(selected_buildings)

    # Loop over the buildings adding them to the project and updating the
    # progress cache.
    for idx, bs in enumerate(selected_buildings):
        data = {
            'status': 'processing',
            'progress': (float(idx) / denominator * 100),
            'progress_key': prog_key,
            'numerator': idx,
            'denominator': denominator
        }
        set_cache(prog_key, data['status'], data)
        ProjectBuilding.objects.get_or_create(project=project,
                                              building_snapshot=bs)

    # Mark the progress cache as complete.
    result = {
        'status': 'completed',
        'progress': 100,
        'progress_key': prog_key,
        'numerator': denominator,
        'denominator': denominator
    }
    set_cache(prog_key, result['status'], result)

    deadline_date = time_utils.parse_datetime(
        project_dict.get('deadline_date'))

    end_date = time_utils.parse_datetime(project_dict.get('end_date'))

    if end_date:
        last_day_of_month = calendar.monthrange(end_date.year,
                                                end_date.month)[1]
        end_date = datetime.datetime(end_date.year, end_date.month,
                                     last_day_of_month)

    if project_dict.get('compliance_type'):
        compliance = Compliance.objects.create(
            compliance_type=project_dict.get('compliance_type'),
            end_date=end_date,
            deadline_date=deadline_date,
            project=project)
        compliance.save()
Example 23
def remove_buildings(project_slug, project_dict, user_pk):
    """adds buildings to a project. if a user has selected all buildings,
       then the the search parameters within project_dict are used to determine
       the total set of buildings.

       :param str project_slug: a project's slug used to get the project
       :param dict project_dict: contains search params, and browser state
           information
       :user_pk int or str: the user's pk or id
    """
    project = Project.objects.get(slug=project_slug)
    user = User.objects.get(pk=user_pk)
    project.last_modified_by = user
    project.save()

    selected_buildings = project_dict.get('selected_buildings', [])
    prog_key = project.removing_buildings_status_percentage_cache_key
    data = {
        'status': 'processing',
        'progress': 0,
        'progress_key': prog_key,
        'numerator': 0,
        'denominator': 0,
    }
    set_cache(prog_key, data['status'], data)
    i = 0
    denominator = 1
    if not project_dict.get('select_all_checkbox', False):
        for sfid in selected_buildings:
            i += 1
            denominator = len(selected_buildings)
            data = {
                'status': 'processing',
                'progress': (float(i) / max(len(selected_buildings), 1) * 100),
                'progress_key': prog_key,
                'numerator': i,
                'denominator': denominator
            }
            set_cache(prog_key, data['status'], data)
            ab = BuildingSnapshot.objects.get(pk=sfid)
            ProjectBuilding.objects.get(
                project=project, building_snapshot=ab
            ).delete()
    else:
        query_buildings = get_search_query(user, project_dict)
        denominator = query_buildings.count() - len(selected_buildings)
        data = {
            'status': 'processing',
            'progress': 10,
            'progress_key': prog_key,
            'numerator': i,
            'denominator': denominator
        }
        set_cache(prog_key, data['status'], data)
        for b in query_buildings:
            ProjectBuilding.objects.get(
                project=project, building_snapshot=b
            ).delete()
        data = {
            'status': 'processing',
            'progress': 50,
            'progress_key': prog_key,
            'numerator': denominator - len(selected_buildings),
            'denominator': denominator
        }
        set_cache(prog_key, data['status'], data)
        for building in selected_buildings:
            i += 1
            ab = BuildingSnapshot.objects.get(source_facility_id=building)
            ProjectBuilding.objects.create(
                project=project, building_snapshot=ab
            )
            data = {
                'status': 'processing',
                'progress': (float(denominator - len(
                    selected_buildings) + i) / denominator * 100),
                'progress_key': prog_key,
                'numerator': denominator - len(selected_buildings) + i,
                'denominator': denominator
            }
            set_cache(prog_key, data['status'], data)

    result = {
        'status': 'complete',
        'progress': 100,
        'progress_key': prog_key,
        'numerator': i,
        'denominator': denominator
    }
    set_cache(prog_key, result['status'], result)
Example 24
def finish_delete(results, org_pk):
    prog_key = get_prog_key('delete_organization_buildings', org_pk)
    set_cache(prog_key, 'success', 100)
Example 25
def delete_organization_inventory(org_pk, deleting_cache_key, chunk_size=100, *args, **kwargs):
    """Deletes all properties & taxlots within an organization."""
    result = {
        'status': 'success',
        'progress_key': deleting_cache_key,
        'progress': 0
    }

    property_ids = list(
        Property.objects.filter(organization_id=org_pk).values_list('id', flat=True)
    )
    property_state_ids = list(
        PropertyState.objects.filter(organization_id=org_pk).values_list('id', flat=True)
    )
    taxlot_ids = list(
        TaxLot.objects.filter(organization_id=org_pk).values_list('id', flat=True)
    )
    taxlot_state_ids = list(
        TaxLotState.objects.filter(organization_id=org_pk).values_list('id', flat=True)
    )

    total = len(property_ids) + len(property_state_ids) + len(taxlot_ids) + len(taxlot_state_ids)

    if total == 0:
        result['progress'] = 100
        set_cache(deleting_cache_key, result['status'], result)
        return

    set_cache(deleting_cache_key, result['status'], result)

    step = float(chunk_size) / total
    tasks = []
    # we could also use .s instead of .subtask and not wrap the *args
    for del_ids in batch(property_ids, chunk_size):
        tasks.append(
            _delete_organization_property_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)
            )
        )
    for del_ids in batch(property_state_ids, chunk_size):
        tasks.append(
            _delete_organization_property_state_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)
            )
        )
    for del_ids in batch(taxlot_ids, chunk_size):
        tasks.append(
            _delete_organization_taxlot_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)
            )
        )
    for del_ids in batch(taxlot_state_ids, chunk_size):
        tasks.append(
            _delete_organization_taxlot_state_chunk.subtask(
                (del_ids, deleting_cache_key, step, org_pk)
            )
        )
    chord(tasks, interval=15)(
        _finish_delete.subtask([org_pk, deleting_cache_key]))
Example 26
def _finish_matching(import_file, progress_key):
    import_file.matching_done = True
    import_file.mapping_completion = 100
    import_file.save()
    set_cache(progress_key, 'success', 100)