def recheck_tender(request):
    """Ask the API to re-evaluate a tender's status and schedule the next recheck.

    Sends a minimal PATCH (just the tender id) so the API recomputes the
    tender's state, then reads ``next_check`` from the response and registers
    a one-shot ``push`` job on the scheduler for that moment (with random
    smoothing jitter).

    :param request: Pyramid request; tender id comes from the route matchdict,
        API credentials and the scheduler from the registry.
    :return: ISO-formatted datetime of the next scheduled check, or ``None``
        when no further check is needed.
    """
    tender_id = request.matchdict['tender_id']
    scheduler = request.registry.scheduler
    url = request.registry.api_url + 'tenders/' + tender_id
    api_token = request.registry.api_token
    recheck_url = request.registry.callback_url + 'recheck/' + tender_id
    request_id = request.environ.get('REQUEST_ID', '')
    next_check = None
    r = SESSION.patch(url,
                      data=dumps({'data': {'id': tender_id}}),
                      headers={'Content-Type': 'application/json',
                               'X-Client-Request-ID': request_id},
                      auth=(api_token, ''))
    if r.status_code != requests.codes.ok:
        LOGGER.error("Error {} on checking tender '{}': {}".format(
            r.status_code, url, r.text),
            extra=context_unpack(request,
                                 {'MESSAGE_ID': 'error_check_tender'},
                                 {'ERROR_STATUS': r.status_code}))
        # 403/404/410 are treated as permanent — no retry; anything else
        # (e.g. a transient 5xx) is retried in a minute.
        if r.status_code not in [requests.codes.forbidden,
                                 requests.codes.not_found,
                                 requests.codes.gone]:
            next_check = get_now() + timedelta(minutes=1)
    else:
        # Parse the response body once instead of calling r.json() repeatedly.
        response_json = r.json()
        if response_json and response_json['data'].get('next_check'):
            next_check = parse_date(response_json['data']['next_check'],
                                    TZ).astimezone(TZ)
    if next_check:
        check_args = dict(timezone=TZ,
                          id='recheck_{}'.format(tender_id),
                          name='Recheck {}'.format(tender_id),
                          misfire_grace_time=60 * 60,
                          replace_existing=True,
                          args=[recheck_url, None])
        # Never schedule in the past: an overdue check runs "now"; either
        # way a random smoothing offset spreads the load.
        base_run_date = max(next_check, get_now())
        scheduler.add_job(
            push, 'date',
            run_date=base_run_date + timedelta(
                seconds=randint(SMOOTHING_MIN, SMOOTHING_MAX)),
            **check_args)
    return next_check and next_check.isoformat()
def _period_needs_planning(period):
    """Return True when *period*'s shouldStartAfter is later than its startDate.

    A missing ``startDate`` falls back to the string ``'0001'`` (the distant
    past), so any ``shouldStartAfter`` forces (re)planning.

    :param period: an ``auctionPeriod`` mapping (possibly empty).
    """
    if 'shouldStartAfter' not in period:
        return False
    should_start = parse_date(period['shouldStartAfter'], TZ).astimezone(TZ)
    return should_start > parse_date(period.get('startDate', '0001'), TZ)


def process_listing(tenders, scheduler, callback_url, db, check=True):
    """Schedule recheck/resync jobs for every tender in an API feed page.

    For each tender: optionally verify its auction document, schedule a
    ``recheck`` job at the tender's ``next_check`` time (if not already
    scheduled for exactly that moment), and schedule a ``resync`` job when
    any auction period (tender-level or per-lot) needs (re)planning.

    :param tenders: iterable of tender dicts from a feed listing.
    :param scheduler: APScheduler instance to register jobs on.
    :param callback_url: base URL for the recheck/resync callbacks.
    :param db: database handle passed to ``check_auction``.
    :param check: when True, run ``check_auction`` for each tender.
    """
    run_date = get_now()
    for tender in tenders:
        if check:
            check_auction(db, tender)
        tid = tender['id']
        next_check = tender.get('next_check')
        if next_check:
            check_args = dict(timezone=TZ,
                              id='recheck_{}'.format(tid),
                              name='Recheck {}'.format(tid),
                              misfire_grace_time=60 * 60,
                              replace_existing=True,
                              args=[callback_url + 'recheck/' + tid, None])
            next_check = parse_date(next_check, TZ).astimezone(TZ)
            recheck_job = scheduler.get_job('recheck_{}'.format(tid))
            if next_check < run_date:
                # Overdue: run it now (plus smoothing jitter).
                scheduler.add_job(
                    push, 'date',
                    run_date=run_date + timedelta(
                        seconds=randint(SMOOTHING_MIN, SMOOTHING_MAX)),
                    **check_args)
            elif not recheck_job or recheck_job.next_run_time != next_check:
                # Only (re)schedule when the existing job does not already
                # fire exactly at next_check.
                scheduler.add_job(
                    push, 'date',
                    run_date=next_check + timedelta(
                        seconds=randint(SMOOTHING_MIN, SMOOTHING_MAX)),
                    **check_args)
        # A resync is needed when any lot's period — or, failing lots, the
        # tender's own period — must be (re)planned.
        needs_resync = any(
            _period_needs_planning(lot.get('auctionPeriod', {}))
            for lot in tender.get('lots', [])
        ) or _period_needs_planning(tender.get('auctionPeriod', {}))
        if needs_resync:
            resync_job = scheduler.get_job(tid)
            # Skip when a resync is already due within the next minute.
            if not resync_job or resync_job.next_run_time > run_date + timedelta(
                    minutes=1):
                scheduler.add_job(
                    push, 'date',
                    run_date=run_date + timedelta(
                        seconds=randint(SMOOTHING_MIN, SMOOTHING_MAX)),
                    timezone=TZ,
                    id=tid,
                    name='Resync {}'.format(tid),
                    misfire_grace_time=60 * 60,
                    args=[callback_url + 'resync/' + tid, None],
                    replace_existing=True)
def resync_tenders_back(request):
    """Walk the changes feed backwards (descending) and schedule jobs per page.

    Pages through the API's descending changes feed, feeding each page into
    ``process_listing`` (with auction checks disabled). On any break — 404,
    non-OK status, or an unexpected exception — the walk is re-queued on the
    scheduler one minute later, resuming from the last ``next_url``.

    Returns the next page URL ('' once the feed reports 404, i.e. the feed
    is exhausted / the cursor expired), or the current URL when the feed
    returns an empty data page (fully caught up).
    """
    next_url = request.params.get('url', '')
    if not next_url:
        # First invocation: start from the head of the descending feed.
        next_url = request.registry.api_url + 'tenders?mode=_all_&feed=changes&descending=1&opt_fields=status%2CauctionPeriod%2Clots%2Cnext_check'
    scheduler = request.registry.scheduler
    api_token = request.registry.api_token
    callback_url = request.registry.callback_url
    request_id = request.environ.get('REQUEST_ID', '')
    break_reason = 'unknown'
    LOGGER.info('Resync back started',
                extra=context_unpack(request,
                                     {'MESSAGE_ID': 'resync_back_started'}))
    while True:
        try:
            r = get_request(next_url,
                            auth=(api_token, ''),
                            headers={'X-Client-Request-ID': request_id})
            if r.status_code == requests.codes.not_found:
                # Feed cursor gone: drop the URL so the rescheduled job
                # restarts from the feed head.
                next_url = ''
                break_reason = 'not_found'
                break
            elif r.status_code != requests.codes.ok:
                break_reason = 'not_ok'
                break
            json = r.json()
            next_url = json['next_page']['uri']
            if not json['data']:
                # Empty page: fully caught up — stop WITHOUT rescheduling.
                # NOTE: 'stoped' typo is preserved — it is an emitted
                # MESSAGE_ID that log consumers may match on.
                LOGGER.info('Resync back stopped',
                            extra=context_unpack(
                                request,
                                {'MESSAGE_ID': 'resync_back_stoped'}))
                return next_url
            # check=False: listing pages skip the auction-document check.
            process_listing(json['data'], scheduler, callback_url,
                            request.registry.db, False)
            # Small pause to avoid hammering the API between pages.
            sleep(0.1)
        except Exception as e:
            # Broad catch is deliberate: any failure just breaks the walk;
            # the job is rescheduled below to retry from next_url.
            LOGGER.error('Error on resync back: {}'.format(repr(e)),
                         extra=context_unpack(
                             request,
                             {
                                 'MESSAGE_ID': 'error_resync_back',
                             }))
            break_reason = 'exception'
            break
    LOGGER.info('Resync back break, reason: {}'.format(break_reason),
                extra=context_unpack(
                    request,
                    {
                        'MESSAGE_ID': 'resync_back_break',
                        'BREAK_REASON': break_reason,
                    }))
    # Retry the walk in one minute, resuming from next_url ('' restarts).
    run_date = get_now() + timedelta(minutes=1)
    scheduler.add_job(push, 'date',
                      run_date=run_date,
                      timezone=TZ,
                      id='resync_back',
                      name='Resync back',
                      misfire_grace_time=60 * 60,
                      args=[callback_url + 'resync_back', {
                          'url': next_url
                      }],
                      replace_existing=True)
    return next_url
def resync_tender(request):
    """Fetch a tender, (re)plan its auction period(s), and schedule follow-ups.

    GETs the tender; when ``check_tender`` produces auction-period changes,
    PATCHes them back. On GET/PATCH failure (other than a permanent
    404/410 on GET) a resync retry is scheduled; when the PATCH succeeds
    and the response carries ``next_check``, a recheck job is scheduled.

    :param request: Pyramid request; tender id from the matchdict, API
        credentials / scheduler / db from the registry.
    :return: ISO-formatted datetime of the scheduled resync retry, or
        ``None`` when no retry is needed (also ``None`` on permanent 404/410).
    """
    tender_id = request.matchdict['tender_id']
    scheduler = request.registry.scheduler
    url = request.registry.api_url + 'tenders/' + tender_id
    api_token = request.registry.api_token
    resync_url = request.registry.callback_url + 'resync/' + tender_id
    recheck_url = request.registry.callback_url + 'recheck/' + tender_id
    db = request.registry.db
    request_id = request.environ.get('REQUEST_ID', '')
    next_check = None
    next_sync = None
    r = get_request(url,
                    auth=(api_token, ''),
                    headers={'X-Client-Request-ID': request_id})
    if r.status_code != requests.codes.ok:
        LOGGER.error("Error {} on getting tender '{}': {}".format(
            r.status_code, url, r.text),
            extra=context_unpack(request,
                                 {'MESSAGE_ID': 'error_get_tender'},
                                 {'ERROR_STATUS': r.status_code}))
        # 404/410 are permanent: the tender is gone, nothing to reschedule.
        if r.status_code in [requests.codes.not_found, requests.codes.gone]:
            return
        next_sync = get_now() + timedelta(
            seconds=randint(SMOOTHING_REMIN, SMOOTHING_MAX))
    else:
        tender = r.json()['data']
        changes = check_tender(request, tender, db)
        if changes:
            data = dumps({'data': changes})
            r = SESSION.patch(url,
                              data=data,
                              headers={'Content-Type': 'application/json',
                                       'X-Client-Request-ID': request_id},
                              auth=(api_token, ''))
            if r.status_code != requests.codes.ok:
                LOGGER.error(
                    "Error {} on updating tender '{}' with '{}': {}".format(
                        r.status_code, url, data, r.text),
                    extra=context_unpack(request,
                                         {'MESSAGE_ID': 'error_patch_tender'},
                                         {'ERROR_STATUS': r.status_code}))
                # PATCH failed: retry the resync after a smoothing delay.
                next_sync = get_now() + timedelta(
                    seconds=randint(SMOOTHING_REMIN, SMOOTHING_MAX))
            else:
                # Parse the PATCH response once (was parsed up to 3 times).
                patched = r.json()
                if patched and patched['data'].get('next_check'):
                    next_check = parse_date(patched['data']['next_check'],
                                            TZ).astimezone(TZ)
    if next_check:
        check_args = dict(timezone=TZ,
                          id='recheck_{}'.format(tender_id),
                          name='Recheck {}'.format(tender_id),
                          misfire_grace_time=60 * 60,
                          replace_existing=True,
                          args=[recheck_url, None])
        # Never schedule in the past: an overdue check runs "now"; either
        # way a random smoothing offset spreads the load.
        base_run_date = max(next_check, get_now())
        scheduler.add_job(
            push, 'date',
            run_date=base_run_date + timedelta(
                seconds=randint(SMOOTHING_MIN, SMOOTHING_MAX)),
            **check_args)
    if next_sync:
        scheduler.add_job(
            push, 'date',
            run_date=next_sync + timedelta(
                seconds=randint(SMOOTHING_MIN, SMOOTHING_MAX)),
            timezone=TZ,
            id=tender_id,
            name='Resync {}'.format(tender_id),
            misfire_grace_time=60 * 60,
            replace_existing=True,
            args=[resync_url, None])
    return next_sync and next_sync.isoformat()
def _plan_auction_period(tender, period, now, db, quick, lot_id=None):
    """Plan (or replan) one auction period, retrying on write conflicts.

    Retries ``planning_auction`` until it succeeds without a
    ``ResourceConflict``, randomizes the resulting start, and labels the
    outcome 'replanned' (period already had a startDate) or 'planned'.

    :param period: the ``auctionPeriod`` dict being (re)planned.
    :param now: lower bound for the planned start.
    :param lot_id: when given, planning is done for that lot.
    :return: tuple ``(start_iso, stream, skip_days, label)``.
    """
    shouldStartAfter = max(
        parse_date(period.get('shouldStartAfter'), TZ).astimezone(TZ), now)
    # lot_id is only passed through when planning a specific lot, keeping
    # the tender-level call signature identical to the original.
    plan_args = (tender, shouldStartAfter, db, quick)
    if lot_id:
        plan_args = plan_args + (lot_id,)
    while True:
        try:
            auctionPeriod, stream, skip_days = planning_auction(*plan_args)
            break
        except ResourceConflict:
            # Concurrent planner won the write; retry until we get a slot.
            continue
    start_iso = randomize(auctionPeriod).isoformat()
    label = 'replanned' if period.get('startDate') else 'planned'
    return start_iso, stream, skip_days, label


def check_tender(request, tender, db):
    """Compute auction-period (re)planning changes for a tender.

    Tender without lots: when ``shouldStartAfter`` is later than the current
    ``startDate`` (ISO-string comparison), plan a new start and return it.
    Tender with lots: plan each active lot whose period needs it; untouched
    lots contribute an empty dict, preserving positional alignment.

    :return: a changes dict suitable for PATCHing (``{'auctionPeriod': ...}``
        or ``{'lots': [...]}``), or ``None`` when nothing needs replanning.
    """
    now = get_now()
    # 'quick' submission detail only honoured in sandbox mode.
    quick = environ.get('SANDBOX_MODE', False) and 'quick' in tender.get(
        'submissionMethodDetails', '')
    if not tender.get('lots') and 'shouldStartAfter' in tender.get(
            'auctionPeriod',
            {}) and tender['auctionPeriod']['shouldStartAfter'] > tender[
                'auctionPeriod'].get('startDate', ''):
        period = tender.get('auctionPeriod')
        auctionPeriod, stream, skip_days, label = _plan_auction_period(
            tender, period, now, db, quick)
        LOGGER.info('{} auction for tender {} to {}. Stream {}.{}'.format(
            label.title(), tender['id'], auctionPeriod, stream,
            skipped_days(skip_days)),
            extra=context_unpack(
                request,
                {'MESSAGE_ID': '{}_auction_tender'.format(label)},
                {
                    'PLANNED_DATE': auctionPeriod,
                    'PLANNED_STREAM': stream,
                    'PLANNED_DAYS_SKIPPED': skip_days
                }))
        return {'auctionPeriod': {'startDate': auctionPeriod}}
    elif tender.get('lots'):
        lots = []
        for lot in tender.get('lots', []):
            # Skip inactive lots, lots without a shouldStartAfter, and lots
            # whose shouldStartAfter is strictly before the current start.
            if lot['status'] != 'active' or 'shouldStartAfter' not in lot.get(
                    'auctionPeriod',
                    {}) or lot['auctionPeriod']['shouldStartAfter'] < lot[
                        'auctionPeriod'].get('startDate', ''):
                lots.append({})
                continue
            period = lot.get('auctionPeriod')
            lot_id = lot['id']
            auctionPeriod, stream, skip_days, label = _plan_auction_period(
                tender, period, now, db, quick, lot_id)
            lots.append({'auctionPeriod': {'startDate': auctionPeriod}})
            LOGGER.info(
                '{} auction for lot {} of tender {} to {}. Stream {}.{}'.
                format(label.title(), lot_id, tender['id'], auctionPeriod,
                       stream, skipped_days(skip_days)),
                extra=context_unpack(
                    request,
                    {'MESSAGE_ID': '{}_auction_lot'.format(label)},
                    {
                        'PLANNED_DATE': auctionPeriod,
                        'PLANNED_STREAM': stream,
                        'PLANNED_DAYS_SKIPPED': skip_days,
                        'LOT_ID': lot_id
                    }))
        if any(lots):
            return {'lots': lots}
    return None