def __call__(self):
        """Handle an uploaded statement file.

        Validates the upload metadata, imports the statement (Excel is
        the only format handled here), auto-reconciles it, records an
        OwnerLog entry, and returns the serialized statement.
        """
        request = self.request
        dbsession = request.dbsession
        owner = request.owner
        owner_id = owner.id

        # Validate the JSON request body against the upload schema.
        schema = StatementUploadSchema()
        try:
            self.appstruct = appstruct = schema.deserialize(request.json)
        except colander.Invalid as e:
            handle_invalid(e, schema=schema)

        # Extract the lowercased filename extension (including the dot).
        name = appstruct['name']
        pos = name.rfind('.')
        if pos >= 0:
            ext = name[pos:].lower()
        else:
            ext = ''

        # Strip any MIME parameters (e.g. "; charset=...") from the type.
        content_type = appstruct['type'].split(';')[0]
        if content_type in self.excel_types or ext in self.excel_extensions:
            # handle_excel() is expected to set self.statement on success.
            self.handle_excel()

        if self.statement is None:
            raise HTTPBadRequest(
                json_body={
                    'error':
                    'file_type_not_supported',
                    'error_description': ("File type not supported: %s (%s)" %
                                          (ext, appstruct['type'])),
                })

        # Auto-reconcile the statement to the extent possible.
        configure_dblog(request=request, event_type='statement_auto_reco')
        auto_reco_statement(dbsession=dbsession,
                            owner=owner,
                            period=self.context.period,
                            statement=self.statement)

        # Count the imported account entries for the log record.
        entry_count = (dbsession.query(
            func.count(1)).select_from(AccountEntry).filter(
                AccountEntry.statement_id == self.statement.id).scalar())

        dbsession.add(
            OwnerLog(
                owner_id=owner_id,
                personal_id=request.personal_id,
                event_type='statement_upload',
                remote_addr=request.remote_addr,
                user_agent=request.user_agent,
                content={
                    'statement_id': self.statement.id,
                    'filename': appstruct['name'],
                    'content_type': appstruct['type'],
                    'size': appstruct['size'],
                    'entry_count': entry_count,
                },
            ))

        return {'statement': serialize_statement(self.statement)}
Beispiel #2
0
    def _get_profile_id_for_token(self, request, token):
        """Resolve an access token to a profile ID, using the token cache.

        Returns the profile ID, or None when the token is missing or no
        longer valid.  As a side effect, sets ``request.wallet_info``
        whenever the token resolves successfully.
        """
        if not token:
            return None

        now = datetime.datetime.utcnow()
        entry = self.token_cache.get(token)
        if entry is not None:
            if now < entry['valid_until']:
                # Cache hit; no need to re-verify the token.
                request.wallet_info = entry['wallet_info']
                return entry['id']

            wallet_info = self._request_wallet_info(request, token)
            if wallet_info:
                # This token hasn't actually expired yet.
                # Refresh the cache entry.
                profile_info = wallet_info['profile']
                profile_id = profile_info['id']
                self.token_cache[token] = {
                    'id': profile_id,
                    'valid_until': now + self.cache_duration,
                    'wallet_info': wallet_info,
                }
                request.wallet_info = wallet_info
                return profile_id

            else:
                # This token expired.
                # Take an opportunity to clean up the token cache.
                # Fix: use distinct loop variables; the previous code
                # shadowed the ``token`` parameter in this loop.
                to_delete = [
                    cached_token
                    for cached_token, cached_entry in self.token_cache.items()
                    if now >= cached_entry['valid_until']
                ]
                for cached_token in to_delete:
                    self.token_cache.pop(cached_token, None)
                return None

        wallet_info = self._request_wallet_info(request, token)
        if wallet_info is not None:
            profile_info = wallet_info['profile']
            profile_id = profile_info['id']
            self.token_cache[token] = {
                'id': profile_id,
                'valid_until': now + self.cache_duration,
                'wallet_info': wallet_info,
            }
            request.wallet_info = wallet_info

            request.owner  # Add the Owner to the database
            request.dbsession.add(OwnerLog(
                owner_id=profile_id,
                personal_id=request.personal_id,
                event_type='access',
                remote_addr=request.remote_addr,
                user_agent=request.user_agent,
                content={'title': profile_info['title']},
            ))

            return profile_id

        return None
Beispiel #3
0
def owner(request):
    """Get or create the Owner row for the authenticated profile."""
    profile_id = request.authenticated_userid
    if not profile_id:
        return None

    dbsession = request.dbsession
    row = (
        dbsession.query(Owner)
        .filter_by(id=profile_id)
        .first())

    if row is not None:
        # The owner already exists; refresh the title and username at
        # most once every 15 minutes.
        now = datetime.datetime.utcnow()
        if now - row.last_update >= datetime.timedelta(seconds=60 * 15):
            profile_info = request.wallet_info['profile']
            if row.title != profile_info['title']:
                row.title = profile_info['title']
            username = profile_info['username'] or ''
            if row.username != username:
                row.username = username
            row.last_update = now
        return row

    # Insert without creating a conflict with concurrent requests.
    owner_info = request.wallet_info['profile']
    stmt = (
        sqlalchemy.dialects.postgresql.insert(
            Owner.__table__, bind=dbsession)
        .values(
            id=owner_info['id'],
            title=owner_info['title'],
            username=owner_info['username'] or '',
        )
        .on_conflict_do_nothing())
    dbsession.execute(stmt)

    # Now the owner should exist.
    row = (
        dbsession.query(Owner)
        .filter_by(id=profile_id)
        .one())

    dbsession.add(OwnerLog(
        owner_id=row.id,
        personal_id=request.personal_id,
        event_type='created',
        remote_addr=request.remote_addr,
        user_agent=request.user_agent,
        content={'title': row.title},
    ))

    return row
def add_open_period(request, file_id, event_type):
    """Add a new period.

    Base it on the end date and end balances of the period with the
    newest end date.

    :param request: the current request (provides dbsession and owner).
    :param file_id: the file the new period belongs to.
    :param event_type: the OwnerLog event type to record.
    :return: the newly created (flushed) Period.
    """
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id

    # Find the period with the newest end date for this file.
    # NOTE(review): bare ``null`` (not ``null()``) in the comparison —
    # confirm this renders as "end_date IS NOT NULL" with the project's
    # SQLAlchemy setup.
    prev = (dbsession.query(Period).filter(
        Period.owner_id == owner_id,
        Period.file_id == file_id,
        Period.end_date != null,
    ).order_by(Period.end_date.desc()).first())

    if prev is not None:
        # Start the new period the day after the previous one ends.
        next_start_date = prev.end_date + datetime.timedelta(days=1)
        if prev.closed:
            # A closed period carries its final balances directly.
            next_start_circ = prev.end_circ
            next_start_surplus = prev.end_surplus
        else:
            # Compute the previous end_circ and end_surplus.
            totals = compute_period_totals(dbsession=dbsession,
                                           owner_id=owner_id,
                                           period_ids=[prev.id])[prev.id]
            next_start_circ = totals['end']['circ']
            next_start_surplus = totals['end']['surplus']
    else:
        # No dated period exists yet; start open-ended with zero balances.
        next_start_date = None
        next_start_circ = 0
        next_start_surplus = 0

    period = Period(owner_id=owner_id,
                    file_id=file_id,
                    start_date=next_start_date,
                    start_circ=next_start_circ,
                    start_surplus=next_start_surplus)
    dbsession.add(period)
    dbsession.flush()  # Assign period.id

    dbsession.add(
        OwnerLog(owner_id=owner_id,
                 personal_id=request.personal_id,
                 event_type=event_type,
                 content={
                     'period_id': period.id,
                     'file_id': file_id,
                     'start_date': next_start_date,
                     'start_circ': next_start_circ,
                     'start_surplus': next_start_surplus,
                 }))

    return period
Beispiel #5
0
def file_unarchive(context, request):
    """Unarchive the file.

    Clears the archived flag and records the change in the owner log.
    """
    file = context.file
    file.archived = False

    log_entry = OwnerLog(
        owner_id=request.owner.id,
        personal_id=request.personal_id,
        event_type='unarchive_file',
        content={
            'file_id': file.id,
            'archived': False,
        })
    request.dbsession.add(log_entry)

    return serialize_file(file)
Beispiel #6
0
def file_archive(context, request):
    """Archive the file.

    This merely marks the file as archived, preventing changes until
    the user unarchives it.
    """
    file = context.file
    file.archived = True

    log_entry = OwnerLog(
        owner_id=request.owner.id,
        personal_id=request.personal_id,
        event_type='archive_file',
        content={
            'file_id': file.id,
            'archived': True,
        })
    request.dbsession.add(log_entry)

    return serialize_file(file)
Beispiel #7
0
def file_save(context, request):
    """Change the file.

    Updates the title (and auto_enable_loops for closed_circ files) and
    optionally reinterprets all movements in the file.
    """
    file = context.file

    schema = FileSaveSchema()
    try:
        appstruct = schema.deserialize(request.json)
    except colander.Invalid as e:
        handle_invalid(e, schema=schema)

    file.title = appstruct['title']

    if file.file_type == 'closed_circ':
        file.auto_enable_loops = appstruct['auto_enable_loops']

    reinterpret = appstruct['reinterpret']
    if reinterpret:
        # Reinterpret all the movements in this file by discarding the
        # FileSync rows and re-running the sync. This may make some
        # movements reconcileable and may make reconciliation
        # unavailable for some unreconciled movements. It does not
        # remove reconciled movements.
        dbsession = request.dbsession
        (dbsession.query(FileSync)
            .filter(FileSync.file_id == file.id)
            .delete(synchronize_session=False))

        dbsession.expire_all()
        SyncBase(request).sync_missing()

    request.dbsession.add(OwnerLog(
        owner_id=request.owner.id,
        personal_id=request.personal_id,
        event_type='edit_file',
        content={
            'file_id': file.id,
            'title': file.title,
            'auto_enable_loops': appstruct.get('auto_enable_loops'),
            'reinterpret': reinterpret,
        }))

    return serialize_file(file)
def set_tzname(request):
    """Change the owner's time zone name and return the settings."""
    tzname = request.json.get('tzname')
    if tzname not in pytz.all_timezones:
        raise HTTPBadRequest(json_body={
            'error': 'unrecognized_time_zone',
        })

    owner = request.owner
    owner.tzname = tzname

    log_entry = OwnerLog(
        owner_id=owner.id,
        personal_id=request.personal_id,
        event_type='tzname_change',
        remote_addr=request.remote_addr,
        user_agent=request.user_agent,
        content={
            'tzname': tzname,
        },
    )
    request.dbsession.add(log_entry)

    return settings_api(request)
def statement_add_blank(context, request):
    """Add a blank statement."""
    dbsession = request.dbsession
    period = context.period
    owner_id = request.owner.id

    schema = StatementAddBlankSchema()
    try:
        appstruct = schema.deserialize(request.json)
    except colander.Invalid as e:
        handle_invalid(e, schema=schema)

    source = appstruct['source']
    statement = Statement(
        owner_id=owner_id,
        period_id=period.id,
        file_id=period.file_id,
        source=source,
    )
    dbsession.add(statement)
    dbsession.flush()  # Assign statement.id

    log_entry = OwnerLog(
        owner_id=owner_id,
        personal_id=request.personal_id,
        event_type='statement_add_blank',
        remote_addr=request.remote_addr,
        user_agent=request.user_agent,
        content={
            'statement_id': statement.id,
            'source': source,
        },
    )
    dbsession.add(log_entry)

    return {
        'statement': serialize_statement(statement),
    }
Beispiel #10
0
    def set_tzname(self):
        """If the owner doesn't have a tzname yet, try to set it."""
        owner = self.owner
        if owner.tzname:
            return

        request = self.request
        try:
            params = request.json
        except Exception:
            params = {}

        tzname = params.get('tzname', '').strip()
        if not tzname or tzname not in pytz.all_timezones:
            return

        owner.tzname = tzname
        request.dbsession.add(
            OwnerLog(
                owner_id=owner.id,
                personal_id=request.personal_id,
                event_type='tzname_init',
                remote_addr=request.remote_addr,
                user_agent=request.user_agent,
                content={
                    'tzname': tzname,
                },
            ))
def statement_delete(context, request):
    """Delete a statement and the contained account entries.

    Movements and other statements' entries reconciled with this
    statement's entries are unreconciled first.  Account entry log rows
    are preserved.  Raises HTTPBadRequest if the statement is missing
    or has delete conflicts.
    """
    period = context.period
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id

    schema = StatementDeleteSchema()
    try:
        appstruct = schema.deserialize(request.json)
    except colander.Invalid as e:
        handle_invalid(e, schema=schema)

    # Look up the statement, scoped to this owner and file.
    statement = (dbsession.query(Statement).filter(
        Statement.owner_id == owner_id,
        Statement.id == appstruct['id'],
        Statement.file_id == period.file_id,
    ).first())

    if statement is None:
        raise HTTPBadRequest(
            json_body={
                'error': 'statement_not_found',
                'error_description': ("Statement %s not found." %
                                      appstruct['id']),
            })

    # Refuse to delete if anything still depends on the statement.
    delete_conflicts = get_delete_conflicts(dbsession=dbsession,
                                            statement=statement)

    if delete_conflicts:
        raise HTTPBadRequest(
            json_body={
                'error':
                'statement_delete_conflict',
                'error_description': (
                    "The statement can not be deleted for the following "
                    "reasons: %s" % delete_conflicts),
            })

    # Indicate that entries are being deleted and movements are being
    # changed because the statement is being deleted.
    configure_dblog(request=request, event_type='statement_delete')

    # reco_ids represents the list of recos to empty.
    reco_ids = (dbsession.query(AccountEntry.reco_id).filter(
        AccountEntry.statement_id == statement.id, ).distinct().subquery(
            name='reco_ids_subq'))

    # Cancel the reco_id of movements reconciled with any entry
    # in the statement.
    (dbsession.query(FileMovement).filter(
        FileMovement.reco_id.in_(reco_ids), ).update(
            {
                'reco_id': None,
                # Also reset the surplus_delta for each movement.
                'surplus_delta': -FileMovement.wallet_delta,
            },
            synchronize_session='fetch'))

    # Cancel the reco_id of account entries on other statements
    # reconciled with any entry in the statement.
    (dbsession.query(AccountEntry).filter(
        AccountEntry.reco_id.in_(reco_ids),
        AccountEntry.statement_id != statement.id,
    ).update({
        'reco_id': None,
    }, synchronize_session='fetch'))

    # Delete the account entries, but leave the account entry logs.
    (dbsession.query(AccountEntry).filter(
        AccountEntry.statement_id == statement.id, ).delete(
            synchronize_session='fetch'))

    # Delete the statement.
    (dbsession.query(Statement).filter(
        Statement.id == statement.id, ).delete(synchronize_session='fetch'))

    request.dbsession.add(
        OwnerLog(
            owner_id=owner_id,
            personal_id=request.personal_id,
            event_type='statement_delete',
            remote_addr=request.remote_addr,
            user_agent=request.user_agent,
            content=appstruct,
        ))

    return {}
def statement_save(context, request):
    """Save changes to a statement.

    Supports changing the statement's source description and moving the
    statement (with its account entries and recos) to another open
    period in the same file.
    """
    period = context.period
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id

    schema = StatementSaveSchema()
    try:
        appstruct = schema.deserialize(request.json)
    except colander.Invalid as e:
        handle_invalid(e, schema=schema)

    # Look up the statement, scoped to this owner and file.
    statement = (dbsession.query(Statement).filter(
        Statement.owner_id == owner_id,
        Statement.id == appstruct['id'],
        Statement.file_id == period.file_id,
    ).first())

    if statement is None:
        raise HTTPBadRequest(
            json_body={
                'error': 'statement_not_found',
                'error_description': ("Statement %s not found." %
                                      appstruct['id']),
            })

    # If the period is changing, the target must be an open period
    # in the same file.
    new_period = None
    if statement.period_id != appstruct['period_id']:
        new_period = (dbsession.query(Period).filter(
            Period.owner_id == owner_id,
            Period.file_id == period.file_id,
            ~Period.closed,
            Period.id == appstruct['period_id'],
        ).first())
        if new_period is None:
            raise HTTPBadRequest(
                json_body={
                    'error':
                    'invalid_period_id',
                    'error_description': (
                        "The selected period is closed or not available."),
                })

    # NOTE(review): ``changes`` is collected but never used afterward;
    # the OwnerLog records the full appstruct instead.
    changes = {}

    if statement.source != appstruct['source']:
        changes['source'] = appstruct['source']
        statement.source = appstruct['source']

    if new_period is not None:
        changes['period_id'] = appstruct['period_id']
        old_period_id = statement.period_id
        statement.period_id = appstruct['period_id']

        # Change the period of the statement's account entries and recos that
        # should move with the statement.
        configure_dblog(request=request,
                        event_type='reassign_statement_period')

        reassign_statement_period(dbsession=dbsession,
                                  statement=statement,
                                  old_period_id=old_period_id,
                                  new_period_id=statement.period_id)

    dbsession.add(
        OwnerLog(
            owner_id=owner_id,
            personal_id=request.personal_id,
            event_type='statement_save',
            remote_addr=request.remote_addr,
            user_agent=request.user_agent,
            content=appstruct,
        ))

    return {
        'statement': serialize_statement(statement),
    }
Beispiel #13
0
    def __call__(self):
        """Download and import the next batch of transfers from OPN.

        Maintains incremental sync state on the owner row
        (first_sync_ts, last_sync_ts, last_sync_transfer_id, sync_total,
        sync_done), archives the raw download, logs the sync, and
        returns progress info for the client.
        """
        request = self.request
        owner = self.owner

        self.set_tzname()

        if owner.first_sync_ts is None:
            # Start a new sync. Download transfers created or changed
            # after 5 minutes before the last sync. (Add 5 minutes in
            # case some transfers showed up out of order.)
            if owner.last_sync_ts is not None:
                sync_ts = (owner.last_sync_ts -
                           datetime.timedelta(seconds=60 * 5))
            else:
                sync_ts = datetime.datetime(1970, 1, 1)
            sync_transfer_id = None
            count_remain = True
        else:
            # A sync was started but not finished. Download the next batch.
            sync_ts = owner.last_sync_ts
            sync_transfer_id = owner.last_sync_transfer_id
            count_remain = False
        sync_ts_iso = sync_ts.isoformat() + 'Z'

        transfers_download = self.download_batch(
            sync_ts_iso=sync_ts_iso,
            sync_transfer_id=sync_transfer_id,
            count_remain=count_remain)

        dbsession = request.dbsession
        more = transfers_download['more']
        now = datetime.datetime.utcnow()

        if more:
            # More batches remain: advance the incremental sync state.
            len_results = len(transfers_download['results'])
            if owner.first_sync_ts is None:
                owner.first_sync_ts = to_datetime(
                    transfers_download['first_sync_ts'])
                owner.sync_total = len_results + transfers_download['remain']
                owner.sync_done = len_results
            else:
                owner.sync_done += len_results
            owner.last_sync_ts = to_datetime(
                transfers_download['last_sync_ts'])
            owner.last_sync_transfer_id = (
                transfers_download['results'][-1]['id'])
            # Note: avoid division by zero.
            progress_percent = min(
                99,
                int(100.0 * owner.sync_done /
                    owner.sync_total if owner.sync_total else 0.0))
        else:
            # Sync is complete: reset the incremental sync state.
            owner.first_sync_ts = None
            owner.last_sync_transfer_id = None
            owner.last_sync_ts = now
            owner.sync_total = 0
            owner.sync_done = 0
            progress_percent = 100

        # Archive the raw download content.
        opn_download = OPNDownload(
            owner_id=owner.id,
            content={
                'transfers': transfers_download,
                'more': more,
            },
        )
        dbsession.add(opn_download)
        dbsession.flush()

        self.opn_download_id = opn_download.id

        dbsession.add(
            OwnerLog(
                owner_id=owner.id,
                personal_id=request.personal_id,
                event_type='opn_sync',
                remote_addr=request.remote_addr,
                user_agent=request.user_agent,
                content={
                    'sync_ts': sync_ts_iso,
                    'progress_percent': progress_percent,
                    'change_count': len(self.change_log),
                    'transfers': {
                        'ids':
                        sorted(t['id'] for t in transfers_download['results']),
                        'count':
                        len(transfers_download['results']),
                        'more':
                        more,
                        'first_sync_ts':
                        transfers_download['first_sync_ts'],
                        'last_sync_ts':
                        transfers_download['last_sync_ts'],
                    }
                },
            ))

        try:
            self.import_transfer_records(transfers_download)
            if not more:
                self.sync_missing()
        except VerificationFailure as e:
            # HTTP Error 507 is reasonably close to 'data verification error'.
            raise HTTPInsufficientStorage(json_body={
                'error': 'verification_failure',
                'error_description': str(e),
            })

        return {
            'progress_percent': progress_percent,
            'change_count': len(self.change_log),
            'download_count': len(transfers_download['results']),
            'more': more,
            'first_sync_ts': transfers_download['first_sync_ts'],
            'last_sync_ts': transfers_download['last_sync_ts'],
        }
Beispiel #14
0
def push_unreco(request, period, op):
    """Push the unreconciled movements or entries to other open periods.

    Create a new period if necessary.

    :param request: the current request (provides dbsession and owner).
    :param period: the period to push items out of.
    :param op: an operation descriptor providing ``table``, ``date_c``,
        ``id_c``, and ``plural`` for either movements or entries.
    :return: the number of items reassigned.
    """
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id
    assert period.owner_id == owner_id

    # NOTE(review): bare ``null`` (not ``null()``) in the comparison —
    # confirm it renders as "reco_id IS NULL" with this project's setup.
    item_filter = and_(
        op.table.owner_id == owner_id,
        op.table.file_id == period.file_id,
        op.table.period_id == period.id,
        op.table.reco_id == null,
    )

    # List the dates of all items in the period.
    unreco_query = (dbsession.query(
        op.date_c.label('day'),
        op.id_c.label('item_id'),
    ).filter(item_filter))
    unreco_rows = unreco_query.all()

    if not unreco_rows:
        # There are no unreconciled items in the period.
        return 0

    # List the other open periods for the file.
    period_list = (dbsession.query(Period).filter(
        Period.owner_id == owner_id, Period.file_id == period.file_id,
        ~Period.closed, Period.id != period.id).all())

    # List the items to reassign.
    days = set()
    item_ids = []
    for day, item_id in unreco_rows:
        days.add(day)
        if item_id is not None:
            item_ids.append(item_id)

    # Map the days to periods.
    day_periods, day_period_cte, missing_period = make_day_period_cte(
        days=sorted(days), period_list=period_list)

    # If no period is available for some of the items,
    # create a new period.
    if missing_period:
        new_period = add_open_period(
            request=request,
            file_id=period.file_id,
            event_type='add_period_for_push_unreco_%s' % op.plural)
        new_period_id = new_period.id
    else:
        new_period_id = None

    # Reassign the items. Items whose day has no mapped period fall
    # back to the newly created period (if any).
    subq = (dbsession.query(day_period_cte.c.period_id).filter(
        day_period_cte.c.day == op.date_c).as_scalar())
    (dbsession.query(op.table).filter(item_filter).update(
        {'period_id': func.coalesce(subq, new_period_id)},
        synchronize_session='fetch'))

    dbsession.add(
        OwnerLog(owner_id=owner_id,
                 personal_id=request.personal_id,
                 event_type='push_unreco_%s' % op.plural,
                 content={
                     'period_id': period.id,
                     'file_id': period.file_id,
                     'item_ids': item_ids,
                     'day_periods': day_periods,
                     'new_period_id': new_period_id,
                 }))

    return len(item_ids)
Beispiel #15
0
    def import_peer(self, peer_id, info):
        """Import a peer from a transfer record or other source.

        Creates the Peer row if it does not exist, otherwise merges the
        provided ``info`` into the existing row.  Changes are appended
        to ``self.change_log`` and recorded as OwnerLog rows.
        """
        if not peer_id:
            # A transfer's sender or recipient is not yet known.
            # There's nothing to import.
            return

        if not self.write_enabled:
            # This method doesn't need to do anything when writing is
            # disabled.
            return

        if peer_id == self.owner_id:
            # Get better info from the owner profile.
            info = {
                'title': self.owner.title,
                'screen_name': self.owner.username,
                'is_dfi_account': False,
                'is_own_dfi_account': False,
            }

        else:
            # Is the peer an account held by the user? If so, get
            # better info from the account map.
            account = self.account_map.get(peer_id)
            if account:
                title = '%s at %s' % (
                    account['redacted_account_num'],
                    account['rdfi_name'],
                )
                if account['alias']:
                    title += ' (%s)' % account['alias']

                info = {
                    'title': title,
                    'screen_name': '',
                    'is_dfi_account': True,
                    'is_own_dfi_account': True,
                }

        dbsession = self.request.dbsession

        peer = self.peers.get(peer_id)
        if peer is None:
            # New peer: insert a row and log the addition.
            peer = Peer(
                owner_id=self.owner_id,
                peer_id=peer_id,
                title=info.get('title'),
                username=info.get('screen_name'),
                is_dfi_account=info.get('is_dfi_account'),
                is_own_dfi_account=info.get('is_own_dfi_account'),
                last_update=now_func,
            )
            dbsession.add(peer)
            self.change_log.append({
                'event_type': 'peer_add',
                'peer_id': peer_id,
            })
            self.peers[peer_id] = peer

            dbsession.add(OwnerLog(
                owner_id=self.owner_id,
                personal_id=self.request.personal_id,
                event_type='peer_add',
                content={
                    'peer_id': peer_id,
                    'info': info,
                }))

        else:
            # Existing peer: merge the attributes that have values.
            attrs_found = 0
            changes = {}

            # Changeable attrs
            attrs = (
                ('title', 'title'),
                ('screen_name', 'username'),
            )
            for source_attr, dest_attr in attrs:
                value = info.get(source_attr)
                if value:
                    attrs_found += 1
                    if getattr(peer, dest_attr) != value:
                        changes[dest_attr] = value
                        setattr(peer, dest_attr, value)

            # One-shot boolean attrs (once set, stay set)
            attrs = (
                ('is_dfi_account', 'is_dfi_account'),
                ('is_own_dfi_account', 'is_own_dfi_account'),
            )
            for source_attr, dest_attr in attrs:
                value = info.get(source_attr)
                if value is not None:
                    attrs_found += 1
                    if value and not getattr(peer, dest_attr):
                        changes[dest_attr] = True
                        setattr(peer, dest_attr, True)

            if attrs_found:
                # Touch last_update whenever info supplied any attrs,
                # even if nothing actually changed.
                peer.last_update = now_func
                if changes:
                    self.change_log.append({
                        'event_type': 'peer_update',
                        'peer_id': peer_id,
                    })
                    dbsession.add(OwnerLog(
                        owner_id=self.owner_id,
                        personal_id=self.request.personal_id,
                        event_type='peer_update',
                        content={
                            'peer_id': peer_id,
                            'changes': changes,
                        }))
Beispiel #16
0
def push_recos(request, period):
    """Push all the recos in a period to other open periods.

    Create a new period if necessary.

    This is done in preparation for deleting the period.

    :return: the number of recos reassigned.
    """
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id
    assert period.owner_id == owner_id

    # Correlated subquery: earliest account entry date for each reco.
    entry_date_c = (dbsession.query(func.min(AccountEntry.entry_date)).filter(
        AccountEntry.reco_id == Reco.id).correlate(Reco).as_scalar())

    # Correlated subquery: earliest movement date for each reco,
    # converted to the owner's time zone.
    movement_date_c = (dbsession.query(
        func.date(
            func.timezone(get_tzname(owner),
                          func.timezone('UTC', func.min(FileMovement.ts))))
    ).select_from(FileMovement).filter(
        FileMovement.reco_id == Reco.id).correlate(Reco).as_scalar())

    future = datetime.date.today() + datetime.timedelta(days=366 * 100)

    # reco_date_c provides the date of each reco.
    # Recos with no entries or movements have no date, so fall back to
    # an arbitrary date 100+ years in the future as the reco date.
    # The arbitrary date does not get stored (unless the user has created
    # a period for 100+ years in the future.)
    reco_date_c = func.coalesce(entry_date_c, movement_date_c, future)

    # List the dates of all recos in this period.
    reco_rows = (dbsession.query(
        reco_date_c,
        Reco.id).select_from(Reco).filter(Reco.period_id == period.id).all())

    if not reco_rows:
        # There are no reconciliations in the period.
        return 0

    # List the other open periods for the peer.
    period_list = (dbsession.query(Period).filter(
        Period.owner_id == owner_id, Period.file_id == period.file_id,
        ~Period.closed, Period.id != period.id).all())

    # List the recos to reassign.
    days = set()
    reco_ids = []
    for day, reco_id in reco_rows:
        days.add(day)
        if reco_id is not None:
            reco_ids.append(reco_id)

    # Map the days to periods.
    day_periods, day_period_cte, missing_period = make_day_period_cte(
        days=sorted(days), period_list=period_list)

    # If no period is available for some of the recos,
    # create a new period.
    # NOTE(review): add_open_period is called with has_vault here —
    # confirm the current add_open_period signature accepts it.
    if missing_period:
        new_period = add_open_period(request=request,
                                     file_id=period.file_id,
                                     event_type='add_period_for_push_reco',
                                     has_vault=period.has_vault)
        new_period_id = new_period.id
    else:
        new_period_id = None

    # Reassign the recos.
    subq = (dbsession.query(day_period_cte.c.period_id).filter(
        day_period_cte.c.day == reco_date_c).as_scalar())
    (dbsession.query(Reco).filter(Reco.id.in_(reco_ids)).update(
        {'period_id': func.coalesce(subq, new_period_id)},
        synchronize_session='fetch'))

    # Reassign the period_id of affected movements.
    subq = (dbsession.query(
        Reco.period_id).filter(Reco.id == FileMovement.reco_id).as_scalar())
    (dbsession.query(FileMovement).filter(
        FileMovement.reco_id.in_(reco_ids)).update(
            {'period_id': subq}, synchronize_session='fetch'))

    # Reassign the period_id of affected account entries.
    subq = (dbsession.query(
        Reco.period_id).filter(Reco.id == AccountEntry.reco_id).as_scalar())
    (dbsession.query(AccountEntry).filter(
        AccountEntry.reco_id.in_(reco_ids)).update(
            {'period_id': subq}, synchronize_session='fetch'))

    dbsession.add(
        OwnerLog(owner_id=owner_id,
                 personal_id=request.personal_id,
                 event_type='push_recos',
                 content={
                     'period_id': period.id,
                     'file_id': period.file_id,
                     'reco_ids': reco_ids,
                     'day_periods': day_periods,
                     'new_period_id': new_period_id,
                 }))

    return len(reco_ids)
Beispiel #17
0
def pull_recos(request, period):
    """Pull recos from other open periods into this period.

    Finds recos in open sibling periods (same file, same owner) whose
    effective date falls inside this period, reassigns them here, keeps
    the period_id of their movements and account entries in sync, and
    records the change in the owner log.  Returns the number of recos
    pulled.
    """
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id
    assert period.owner_id == owner_id

    # Candidate recos: same owner, same file, sitting in some other
    # open period.
    candidate_filter = and_(
        Reco.owner_id == owner_id,
        Reco.period_id != period.id,
        Period.file_id == period.file_id,
        ~Period.closed,
    )

    # Correlated scalar: earliest account entry date for a reco.
    min_entry_date = (dbsession.query(
        func.min(AccountEntry.entry_date)).filter(
            AccountEntry.reco_id == Reco.id).correlate(Reco).as_scalar())

    # Correlated scalar: earliest movement timestamp, converted from
    # UTC to the owner's time zone and truncated to a date.
    min_movement_date = (dbsession.query(
        func.date(
            func.timezone(get_tzname(owner),
                          func.timezone('UTC', func.min(FileMovement.ts))))
    ).filter(FileMovement.reco_id == Reco.id).correlate(Reco).as_scalar())

    # A reco's effective date: entry date first, movement date as a
    # fallback.  Recos with neither stay None and must never be pulled.
    reco_day = func.coalesce(min_entry_date, min_movement_date)

    # Distinct dates of candidate recos in other open periods.
    date_rows = (dbsession.query(reco_day).select_from(Reco).join(
        Period, Period.id == Reco.period_id).filter(
            candidate_filter).distinct().all())

    if not date_rows:
        # Nothing to pull.
        return 0

    period_list = [period]

    # Keep only the dates this period should own.
    pull_days = [
        day for (day, ) in date_rows
        if day is not None and get_period_for_day(
            period_list, day, default_endless=False) is period
    ]

    if not pull_days:
        # All candidate recos belong to other periods.
        return 0

    # Build a day -> period mapping restricted to this period.
    # (Days belonging to other periods are absent from the CTE.)
    day_periods, day_period_cte, _missing = make_day_period_cte(
        days=sorted(pull_days),
        period_list=period_list,
        default_endless=False)

    # Identify the recos whose effective date maps into this period.
    id_rows = (dbsession.query(Reco.id).join(
        Period, Period.id == Reco.period_id).join(
            day_period_cte,
            day_period_cte.c.day == reco_day).filter(candidate_filter).all())

    reco_ids = [row[0] for row in id_rows]

    # Move the recos, then keep the dependent rows consistent.
    (dbsession.query(Reco).filter(Reco.id.in_(reco_ids)).update(
        {'period_id': period.id}, synchronize_session='fetch'))

    (dbsession.query(FileMovement).filter(
        FileMovement.reco_id.in_(reco_ids)).update(
            {'period_id': period.id}, synchronize_session='fetch'))

    (dbsession.query(AccountEntry).filter(
        AccountEntry.reco_id.in_(reco_ids)).update(
            {'period_id': period.id}, synchronize_session='fetch'))

    # Record the pull in the owner's audit log.
    dbsession.add(
        OwnerLog(owner_id=owner_id,
                 personal_id=request.personal_id,
                 event_type='pull_recos',
                 content={
                     'period_id': period.id,
                     'file_id': period.file_id,
                     'reco_ids': reco_ids,
                     'day_periods': day_periods,
                 }))

    return len(reco_ids)
# Example #18
# 0
def pull_unreco(request, period, op):
    """Pull unreconciled items from other open periods into this period.

    :param request: the Pyramid request (provides ``dbsession``,
        ``owner``, and ``personal_id``).
    :param period: the destination Period; must belong to the owner.
    :param op: descriptor for the item type being pulled; provides
        ``table``, ``date_c``, ``id_c``, and ``plural``.
    :return: the number of items reassigned into ``period``.
    """
    dbsession = request.dbsession
    owner = request.owner
    owner_id = owner.id
    assert period.owner_id == owner_id

    # Candidate items: same owner and file, not yet reconciled, and
    # currently assigned to some other open period.
    item_filter = and_(
        op.table.owner_id == owner_id,
        op.table.file_id == period.file_id,
        op.table.period_id != period.id,
        # Explicit IS NULL test.  The previous form compared against a
        # bare module-level `null` sentinel (reco_id == null), which
        # relies on implicit coercion and breaks outright if `null` is
        # SQLAlchemy's null *function* rather than None.  .is_(None) is
        # the canonical idiom and renders "reco_id IS NULL" either way.
        op.table.reco_id.is_(None),
        ~Period.closed,
    )

    # List the distinct dates of all unreconciled items in other open
    # periods for the same file.
    day_rows = (dbsession.query(op.date_c).join(
        Period,
        Period.id == op.table.period_id).filter(item_filter).distinct().all())

    if not day_rows:
        # There are no items to pull in.
        return 0

    # Keep only the dates that fall inside this period's range.
    reassign_days = []
    period_list = [period]
    for (day, ) in day_rows:
        if get_period_for_day(period_list, day,
                              default_endless=False) is period:
            reassign_days.append(day)

    if not reassign_days:
        # None of the items found should be pulled in to this period.
        return 0

    # Map each reassignable day to this period.  (Days belonging to
    # other periods are simply absent from day_period_cte.)
    day_periods, day_period_cte, missing_period = make_day_period_cte(
        days=sorted(reassign_days),
        period_list=period_list,
        default_endless=False)

    # Subquery selecting the IDs of the items to reassign.
    ids_query = (select([op.id_c]).select_from(
        op.table.__table__.join(Period, Period.id == op.table.period_id).join(
            day_period_cte,
            day_period_cte.c.day == op.date_c)).where(item_filter))

    # Materialize the IDs now for the audit log; the bulk update below
    # re-evaluates the subquery server-side.
    item_ids = [item_id for (item_id, ) in dbsession.execute(ids_query)]

    # Reassign the items to this period.
    (dbsession.query(op.table).filter(
        op.id_c.in_(ids_query),
        op.table.file_id == period.file_id,
    ).update({'period_id': period.id}, synchronize_session='fetch'))

    # Record the pull in the owner's audit log.
    dbsession.add(
        OwnerLog(owner_id=owner_id,
                 personal_id=request.personal_id,
                 event_type='pull_unreco_%s' % op.plural,
                 content={
                     'period_id': period.id,
                     'file_id': period.file_id,
                     'item_ids': item_ids,
                     'day_periods': day_periods,
                 }))

    return len(item_ids)