def reset_endpoints_task(org_uid):
    """
    Processes org reset task from the task queue.

    Clears endpoint state so the next sync fetches all the data, then creates
    a task on the update queue to kick off the sync cycle for the org.

    Args:
        org_uid(str): org identifier

    Returns:
        (str, int): http response (423 tells the queue to retry later)
    """
    org = Org.get_by_id(org_uid)
    changeset_in_flight = org.changeset_started_at and not org.changeset_completed_at

    # don't reset while a sync is running; 423 causes the task to be retried
    if changeset_in_flight or org.update_cycle_active:
        logging.info("org syncing at the moment, will try again later")
        return '', 423

    endpoint_indexes = request.form.getlist('endpoint_index')
    logging.info("resetting markers for org {} and endpoints {}".format(org_uid, endpoint_indexes))

    # TODO: this is a hack, this should be delegated to a qbo class, instantiated
    # via a factory from the org provider
    sync_data = QboSyncData.get_by_id(org_uid)

    if not sync_data:
        logging.warning("could not find sync data")
        return '', 204

    # rewind each requested endpoint marker so the next sync pulls everything
    for index in map(int, endpoint_indexes):
        sync_data.markers[index] = START_OF_TIME

    sync_data.put()
    sync_utils.init_update(org_uid)

    return '', 204
def test_init_update_existing_org(self, publish_mock):
    """
    Tests how a new changeset is initialised for an existing org (previously synced).

    Args:
        publish_mock(Mock): mock of the changeset publish function
    """
    some_date = datetime.utcnow()
    Org(
        id='test',
        changeset=10,
        changeset_started_at=some_date,
        changeset_completed_at=some_date,
        last_update_cycle_completed_at=some_date - timedelta(hours=1)
    ).put()

    sync_utils.init_update('test')
    org = Org.get_by_id('test')

    # changeset has been incremented
    self.assertEqual(org.changeset, 11)

    # and changeset timestamps are set
    self.assertIsNotNone(org.changeset_started_at)
    self.assertIsNone(org.changeset_completed_at)

    # and the update task has been created
    self.assertEqual(len(self.taskqueue.get_filtered_tasks()), 1)

    # and changeset status is published
    publish_mock.assert_called_once_with('test', 11, 'syncing')
def test_init_update_new_org(self, publish_mock):
    """
    Tests how a new changeset is initialised for a new org (never synced).

    Args:
        publish_mock(Mock): pubsub publish function mock
    """
    # fix: the docstring previously documented 'publish_status_mock', which does
    # not match the actual parameter name 'publish_mock'
    Org(id='test', changeset_started_at=None, changeset_completed_at=None).put()

    sync_utils.init_update('test')
    org = Org.get_by_id('test')

    # changeset has been incremented (first changeset for a new org is 0)
    self.assertEqual(org.changeset, 0)

    # and changeset timestamps are set
    self.assertIsNotNone(org.changeset_started_at)
    self.assertIsNone(org.changeset_completed_at)

    # and the update task has been created
    self.assertEqual(len(self.taskqueue.get_filtered_tasks()), 1)

    # and changeset status is published
    publish_mock.assert_called_once_with('test', 0, 'syncing')
def test_init_update_inactive_update_cycle(self, publish_mock):
    """
    Verifies that a new changeset is not created for an org with a sync in progress
    with an active update cycle (ie. has a task on adapter-update).

    Args:
        publish_mock(Mock): mock of the changeset publish function
    """
    started = datetime.utcnow()
    Org(
        id='test',
        changeset=10,
        changeset_started_at=started,
        changeset_completed_at=None,
        update_cycle_active=False
    ).put()

    sync_utils.init_update('test')
    org = Org.get_by_id('test')

    # changeset has not been changed
    self.assertEqual(org.changeset, 10)

    # and changeset timestamps have not been changed
    self.assertIsNotNone(org.changeset_started_at)
    self.assertIsNone(org.changeset_completed_at)

    # and a new update task has been created because the update_cycle_active was false
    self.assertEqual(len(self.taskqueue.get_filtered_tasks()), 1)

    # and changeset status is published
    publish_mock.assert_called_once_with('test', 10, 'syncing')
def sync():
    """
    Kicks off a sync of one org (when org_uid is posted) or of all orgs.
    """
    org_uid = request.form.get('org_uid')

    if org_uid:
        # single-org sync requested via the form
        sync_utils.init_update(org_uid)
        flash("Sync for {} kicked off".format(org_uid))
        return redirect(prefix('/'))

    # no org specified - kick off a sync for every org
    sync_utils.init_all_updates()
    flash("Sync for all orgs kicked off")
    return redirect(prefix('/commands'))
def oauth(org_uid=None):
    """
    Endpoint which handles the second step of the oAuth flow.

    This is where the data provider redirects the user to after they complete the
    auth flow. The payload contains tokens needed to start pulling data, and the
    state parameter identifies which org_uid the user is connecting (the state has
    been passed in the first step of the auth flow, ie. the connect function above).

    Returns:
        (str, int)|str: redirect back to the app if redirect url is supplied,
            otherwise just say okidoki
    """
    # TODO: Update redirect urls on qbo to include /qbo/
    # org_uid in the path means the xero v2 flow; qbo passes it via the state param
    provider = 'xerov2' if org_uid else 'qbo'
    org_uid = org_uid or request.args.get('state')

    logging.info("processing oauth callback for {}".format(org_uid))

    try:
        session = client_factory.get_token_session(provider, org_uid, request.args)
        session.get_and_save_token()
    except AuthCancelled as exc:
        logging.info("got an error - oauth flow cancelled")
        _abort_link(org_uid)
        return _respond(exc.org, {'error_code': 'cancelled'}, 'not okidoki')
    except MismatchingFileConnectionAttempt as exc:
        logging.info("got an error - mismatching file connection attempt")
        _abort_link(org_uid)
        return _respond(exc.org, {'error_code': 'source_mismatch'}, 'not okidoki')
    except FailedToGetIdentifier as exc:
        logging.info("got an error - failed to get org identifier from {}".format(provider))
        _abort_link(org_uid)
        return _respond(exc.org, {'error_code': 'failed_to_get_identifier'}, 'not okidoki')

    mark_as_connected(org_uid=org_uid, also_linked=True)

    try:
        api_session = client_factory.get_api_session(provider, org_uid)
        data_source_name = api_session.get_company_name()
    except FailedToGetCompanyName:
        # TODO: this should be sent to the client as an error code rather than an empty name
        data_source_name = None

    init_update(org_uid)

    return _respond(session.org, {'data_source_name': data_source_name}, 'okidoki')
def init_update(org_uid):
    """
    Endpoint that initiates data pull for a specific org.

    Under the covers it does bookkeeping for the org's changeset and invokes the
    update endpoint via task queues.

    Args:
        org_uid(str): org identifier

    Returns:
        (str, int): http response
    """
    logging.info("initializing update cycle for org {}".format(org_uid))

    # delegate the changeset bookkeeping and task creation to sync_utils
    sync_utils.init_update(org_uid)

    return '', 204
def basic_auth(provider, org_uid, username, password):
    """
    Handles basic username/password auth flow.

    Users credentials (username/password) are stored in the UserCredentials kind.
    TODO: This should be temporary! and only implemented in DEV until vault is integrated

    Args:
        provider(str): The provider
        org_uid(str): The org ID
        username(str): The username
        password(str): The password

    Returns:
        (str): Response text
    """
    # If authenticating for Zuora, get a session cookie and store in OrgCredentials
    if provider == 'zuora':
        # Multi-entity may be enabled, we need to specify it as a header when authenticating
        # TODO: Fix this to work with multiple entities once its figured out how it works.
        entity_id = None

    session = client_factory.get_token_session(provider, org_uid, username, password)

    try:
        session.get_and_save_token()
    except UnauthorizedApiCallException:
        # fix: the original message had no format placeholder, so .format(provider)
        # silently dropped the provider from the log line
        logging.info("got an error - Invalid Credentials for provider {}".format(provider))
        _abort_link(org_uid)
        return _respond(Org.get_by_id(org_uid), {'error_code': 'invalid_credentials'}, 'not okidoki')

    mark_as_connected(org_uid=org_uid, also_linked=True)

    try:
        data_source_name = client_factory.get_api_session(provider, org_uid).get_company_name()
    except FailedToGetCompanyName:
        # TODO: this should be sent to the client as an error code rather than an empty name
        data_source_name = None

    init_update(org_uid)

    return _respond(Org.get_by_id(org_uid), {'data_source_name': data_source_name}, 'okidoki')
def reconnect(org_uid):
    """
    Endpoint to facilitate long term org re-connection loop.

    Normal update process will mark an org as disconnected after getting 401s from
    the gl api after about 15 minutes, but sometimes a gl just returns 401 for a
    while. This long-term re-connect loop will make an api call to the gl every few
    hours, and update the status of the org to connected if the api call is
    successful. From that point on normal update cycle will resume for this org.

    Args:
        org_uid(str): org identifier

    Returns:
        (str, int): http response (204 resolves the task, 423 causes a retry)
    """
    org = Org.get_by_id(org_uid)

    # the user could have connected the org manually by now
    if org.status == CONNECTED:
        logging.info("org is connected, nothing to do, resolving this task")
        return '', 204

    # 42 attempts is about a week with the current queue config (4 hours between attempts)
    exec_count = int(request.headers.get('X-AppEngine-TaskExecutionCount'))
    if exec_count > 42:
        logging.info("reached maximum number of reconnect attempts, giving up")
        return '', 204

    logging.info("checking connection status (check number {})".format(exec_count))

    try:
        if client_factory.get_api_session(org.provider, org_uid).is_authenticated():
            logging.info("made a successful api call, marking the org as connected")
            sync_utils.mark_as_connected(org_uid)
            sync_utils.init_update(org_uid)
            return '', 204
    except DisconnectException:
        # fix: logging.exception was passed the exception as a stray %-format
        # argument with no placeholder, which triggers a logging formatting error;
        # logging.exception already attaches the traceback automatically
        logging.exception("failed reconnecting to client.")

    logging.info("could not make a successful api call, leaving org as disconnected, will try again")

    return '', 423
def test_init_update_in_progress_changeset(self):
    """
    Verifies that a new changeset is not created for an org with a sync in progress.
    """
    started = datetime.utcnow()
    Org(
        id='test',
        changeset=10,
        changeset_started_at=started,
        changeset_completed_at=None,
        update_cycle_active=True
    ).put()

    sync_utils.init_update('test')
    org = Org.get_by_id('test')

    # changeset has not been changed
    self.assertEqual(org.changeset, 10)

    # and changeset timestamps have not been changed
    self.assertIsNotNone(org.changeset_started_at)
    self.assertIsNone(org.changeset_completed_at)

    # and no new update task has been created
    self.assertEqual(len(self.taskqueue.get_filtered_tasks()), 0)