def test_actions_are_claimed(purge_accounts_and_actions, patched_worker):
    """Only actions on shards owned by this service instance are claimed.

    With cpu_id=1 of total_cpus=2, the service owns the odd-numbered shard
    keys, so the action scheduled on shard 1 is processed while the one on
    shard 0 is left untouched.
    """
    # Schedule one test action on each of two shards.
    with session_scope_by_shard_id(0) as db_session:
        account = add_generic_imap_account(db_session, email_address='{}@test.com'.format(0))
        schedule_test_action(db_session, account)
    with session_scope_by_shard_id(1) as db_session:
        account = add_generic_imap_account(db_session, email_address='{}@test.com'.format(1))
        schedule_test_action(db_session, account)

    # Service instance covering the second half of the shard space.
    service = SyncbackService(cpu_id=1, total_cpus=2)
    service.workers = set()
    service._process_log()
    # Wait for every spawned worker greenlet to finish before asserting.
    gevent.joinall(list(service.workers))

    # Shard 0 is not owned by this service: its action stays pending.
    with session_scope_by_shard_id(0) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status == 'pending' for a in q)
    # Shard 1 is owned: its action was claimed (no longer pending).
    with session_scope_by_shard_id(1) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status != 'pending' for a in q)
def test_actions_are_claimed(purge_accounts_and_actions, patched_task):
    """Only actions on shards owned by this process are claimed.

    process_number=1 of total_processes=2 owns the odd-numbered shard keys,
    so the action on shard 1 is processed while the one on shard 0 stays
    pending.
    """
    # Schedule one test action on each of two shards.
    with session_scope_by_shard_id(0) as db_session:
        account = add_generic_imap_account(
            db_session, email_address='{}@test.com'.format(0))
        schedule_test_action(db_session, account)
    with session_scope_by_shard_id(1) as db_session:
        account = add_generic_imap_account(
            db_session, email_address='{}@test.com'.format(1))
        schedule_test_action(db_session, account)

    service = SyncbackService(
        syncback_id=0, process_number=1, total_processes=2, num_workers=2)
    service._restart_workers()
    service._process_log()
    # Yield to the worker greenlets until the queued tasks drain.
    while not service.task_queue.empty():
        gevent.sleep(0)

    # Shard 0 is not owned by this process: its action stays pending.
    with session_scope_by_shard_id(0) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status == 'pending' for a in q)
    # Shard 1 is owned: its action was claimed (no longer pending).
    with session_scope_by_shard_id(1) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status != 'pending' for a in q)
def test_actions_are_claimed(purge_accounts_and_actions, patched_worker):
    """Only actions on shards owned by this service instance are claimed.

    With cpu_id=1 of total_cpus=2, the service owns the odd-numbered shard
    keys, so the action scheduled on shard 1 is processed while the one on
    shard 0 stays pending.
    """
    # Schedule one test action on each of two shards.
    with session_scope_by_shard_id(0) as db_session:
        account = add_generic_imap_account(
            db_session, email_address='{}@test.com'.format(0))
        schedule_test_action(db_session, account)
    with session_scope_by_shard_id(1) as db_session:
        account = add_generic_imap_account(
            db_session, email_address='{}@test.com'.format(1))
        schedule_test_action(db_session, account)

    # Service instance covering the second half of the shard space.
    service = SyncbackService(cpu_id=1, total_cpus=2)
    service.workers = set()
    service._process_log()
    # Wait for every spawned worker greenlet to finish before asserting.
    gevent.joinall(list(service.workers))

    # Shard 0 is not owned by this service: its action stays pending.
    with session_scope_by_shard_id(0) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status == 'pending' for a in q)
    # Shard 1 is owned: its action was claimed (no longer pending).
    with session_scope_by_shard_id(1) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status != 'pending' for a in q)
def test_folder_name_translation(empty_db, generic_account, imap_api_client,
                                 mock_imapclient, monkeypatch):
    """A '/' in a folder display name is translated to the account's IMAP
    separator, under the account's namespace prefix, on remote creation."""
    from inbox.transactions.actions import SyncbackService
    syncback = SyncbackService(syncback_id=0, process_number=0,
                               total_processes=1)

    # Fake server namespace: prefix 'INBOX.', hierarchy separator '.'.
    imap_namespaces = (((u'INBOX.', u'.'),),)
    mock_imapclient.create_folder = mock.Mock()
    mock_imapclient.namespace = mock.Mock(return_value=imap_namespaces)
    folder_list = [(('\\HasChildren',), '.', u'INBOX')]
    mock_imapclient.list_folders = mock.Mock(return_value=folder_list)
    mock_imapclient.has_capability = mock.Mock(return_value=True)

    # Persist the prefix/separator on the account, as folder sync would.
    folder_prefix, folder_separator = imap_namespaces[0][0]
    generic_account.folder_prefix = folder_prefix
    generic_account.folder_separator = folder_separator
    empty_db.session.commit()

    # Creating the folder via the API schedules a syncback action.
    folder_json = {'display_name': 'Taxes/Accounting'}
    imap_api_client.post_data('/folders', folder_json)

    syncback._process_log()
    # Wait for the worker greenlets to run the scheduled action.
    gevent.joinall(list(syncback.workers))
    # 'Taxes/Accounting' -> prefix + separator-joined path.
    mock_imapclient.create_folder.assert_called_with('INBOX.Taxes.Accounting')
def syncback_service():
    """Fixture: a running SyncbackService, stopped on teardown."""
    # aggressive=False used to avoid AttributeError in other tests, see
    # https://groups.google.com/forum/#!topic/gevent/IzWhGQHq7n0
    # TODO(emfree): It's totally whack that monkey-patching here would affect
    # other tests. Can we make this not happen?
    monkey.patch_all(aggressive=False)
    # Imported after patching so the service sees patched stdlib modules.
    from inbox.transactions.actions import SyncbackService
    # Zero intervals so the service polls and retries without delay in tests.
    s = SyncbackService(poll_interval=0, retry_interval=0)
    s.start()
    yield s
    # Teardown: stop the service's greenlets once the test is done.
    s.stop()
def syncback_service():
    """Fixture: a running SyncbackService; greenlets killed on teardown."""
    from inbox.transactions.actions import SyncbackService
    from gevent import monkey
    # aggressive=False used to avoid AttributeError in other tests, see
    # https://groups.google.com/forum/#!topic/gevent/IzWhGQHq7n0
    # TODO(emfree): It's totally whack that monkey-patching here would affect
    # other tests. Can we make this not happen?
    monkey.patch_all(aggressive=False)
    s = SyncbackService(poll_interval=1)
    s.start()
    # Yield control once so the service greenlet actually gets scheduled.
    gevent.sleep()
    yield s
    # Teardown: tear down all outstanding greenlets.
    kill_greenlets()
def syncback_service():
    """Fixture: a running SyncbackService, stopped and joined on teardown."""
    from inbox.transactions.actions import SyncbackService
    # Zero intervals so the service polls and retries without delay in tests.
    s = SyncbackService(poll_interval=0, retry_interval=0)
    s.start()
    yield s
    # Teardown: stop the service and wait for its greenlets to exit.
    s.stop()
    s.join()
def test_all_keys_are_assigned_exactly_once(patched_enginemanager):
    """Two services splitting the shard space cover every engine key once.

    With total_cpus=2, cpu 0 takes the even keys and cpu 1 the odd keys;
    together they cover every configured engine key with no overlap.
    """
    claimed = []
    for cpu_id, expected in ((0, [0, 2, 4]), (1, [1, 3, 5])):
        service = SyncbackService(cpu_id=cpu_id, total_cpus=2)
        assert service.keys == expected
        claimed.extend(service.keys)
    # All keys are assigned (therefore all accounts are assigned)
    assert set(engine_manager.engines.keys()) == set(claimed)
    # No key is assigned more than once (and therefore, no account)
    assert len(claimed) == len(set(claimed))
def mock_syncback_service():
    """Running SyncbackService with a mock queue."""
    from inbox.transactions.actions import SyncbackService
    from gevent import monkey
    # aggressive=False used to avoid AttributeError in other tests, see
    # https://groups.google.com/forum/#!topic/gevent/IzWhGQHq7n0
    # TODO(emfree): It's totally whack that monkey-patching here would affect
    # other tests. Can we make this not happen?
    monkey.patch_all(aggressive=False)
    s = SyncbackService(poll_interval=0)
    # Swap in a mock queue so tests can inspect what gets enqueued.
    s.queue = MockQueue()
    s.start()
    # Yield control once so the service greenlet actually gets scheduled.
    gevent.sleep()
    # Sanity check: nothing has been enqueued before the test runs.
    assert len(s.queue) == 0
    yield s
    # Teardown: tear down all outstanding greenlets.
    kill_greenlets()
def test_actions_for_invalid_accounts_are_skipped(purge_accounts_and_actions,
                                                  patched_task):
    """Actions for accounts marked invalid are never executed.

    Two accounts on the same shard each schedule an action; one account is
    then marked invalid. Only the valid account's action runs to completion;
    the invalid account's action remains pending.
    """
    with session_scope_by_shard_id(0) as db_session:
        # Account that will be invalidated below.
        account = add_generic_imap_account(db_session,
                                           email_address='*****@*****.**')
        schedule_test_action(db_session, account)
        namespace_id = account.namespace.id
        count = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == namespace_id).count()
        assert account.sync_state != 'invalid'

        # Control account that stays valid.
        another_account = add_generic_imap_account(
            db_session, email_address='*****@*****.**')
        schedule_test_action(db_session, another_account)
        another_namespace_id = another_account.namespace.id
        another_count = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == another_namespace_id).count()
        assert another_account.sync_state != 'invalid'

        # Invalidate the first account after its action is scheduled.
        account.mark_invalid()
        db_session.commit()

    service = SyncbackService(syncback_id=0, process_number=0,
                              total_processes=2, num_workers=2)
    service._process_log()
    # Yield to the worker greenlets until the queued tasks drain.
    while not service.task_queue.empty():
        gevent.sleep(0)

    with session_scope_by_shard_id(0) as db_session:
        # Invalid account: all of its actions are still pending (skipped).
        q = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == namespace_id,
            ActionLog.status == 'pending')
        assert q.count() == count
        # Valid account: all of its actions completed successfully.
        q = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == another_namespace_id)
        assert q.filter(ActionLog.status == 'pending').count() == 0
        assert q.filter(
            ActionLog.status == 'successful').count() == another_count
def test_actions_claimed_by_a_single_service(purge_accounts_and_actions,
                                             patched_worker):
    """Each shard's action is claimed by exactly one service instance.

    Two services split the shard space (cpu_id 0 and 1 of total_cpus=2);
    each must pick up only the action from its own shard.
    """
    # Schedule one action per shard, remembering each action log id.
    actionlogs = []
    for key in (0, 1):
        with session_scope_by_shard_id(key) as db_session:
            account = make_imap_account(db_session, '{}@test.com'.format(key))
            schedule_test_action(db_session, account)
            actionlogs += [db_session.query(ActionLog).one().id]

    services = []
    for cpu_id in (0, 1):
        service = SyncbackService(cpu_id=cpu_id, total_cpus=2)
        service.workers = set()
        service._process_log()
        services.append(service)

    for i, service in enumerate(services):
        # Exactly one worker was spawned, for this service's own shard.
        assert len(service.workers) == 1
        assert list(service.workers)[0].action_log_id == actionlogs[i]
        gevent.joinall(list(service.workers))
def test_actions_are_claimed(purge_accounts_and_actions, patched_task):
    """Only actions on shards owned by this process are claimed.

    process_number=1 of total_processes=2 owns the odd-numbered shard keys,
    so the action on shard 1 is processed while the one on shard 0 stays
    pending.
    """
    # Schedule one test action on each of two shards.
    with session_scope_by_shard_id(0) as db_session:
        account = add_generic_imap_account(
            db_session, email_address='{}@test.com'.format(0))
        schedule_test_action(db_session, account)
    with session_scope_by_shard_id(1) as db_session:
        account = add_generic_imap_account(
            db_session, email_address='{}@test.com'.format(1))
        schedule_test_action(db_session, account)

    service = SyncbackService(syncback_id=0, process_number=1,
                              total_processes=2, num_workers=2)
    service._restart_workers()
    service._process_log()
    # Yield to the worker greenlets until the queued tasks drain.
    while not service.task_queue.empty():
        gevent.sleep(0.1)

    # Shard 0 is not owned by this process: its action stays pending.
    with session_scope_by_shard_id(0) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status == 'pending' for a in q)
    # Shard 1 is owned: its action was claimed (no longer pending).
    with session_scope_by_shard_id(1) as db_session:
        q = db_session.query(ActionLog)
        assert q.count() == 1
        assert all(a.status != 'pending' for a in q)
def start(port, start_syncback, enable_tracer, enable_profiler):
    """Run the API server, optionally alongside a syncback service.

    :param port: TCP port for the API server; port+1 hosts the syncback
        profiling frontend when syncback is enabled.
    :param start_syncback: if true, also run a SyncbackService in-process.
    :param enable_tracer: passed through to the profiling frontend.
    :param enable_profiler: if true, force DEBUG_PROFILING_ON in config.
    """
    # We need to import this down here, because this in turn imports
    # ignition.engine, which has to happen *after* we read any config overrides
    # for the database parameters. Boo for imports with side-effects.
    from inbox.api.srv import app

    if start_syncback:
        # start actions service
        from inbox.transactions.actions import SyncbackService
        if enable_profiler:
            inbox_config["DEBUG_PROFILING_ON"] = True
        enable_profiler_api = inbox_config.get("DEBUG_PROFILING_ON")
        syncback = SyncbackService(0, 0, 1)
        # Profiling/tracing HTTP frontend on the next port up.
        profiling_frontend = SyncbackHTTPFrontend(
            int(port) + 1, enable_tracer, enable_profiler_api)
        profiling_frontend.start()
        syncback.start()

    nylas_logger = get_logger()
    http_server = WSGIServer(("", int(port)), app, log=nylas_logger,
                             handler_class=NylasWSGIHandler)
    nylas_logger.info("Starting API server", port=port)
    # Blocks until the server shuts down.
    http_server.serve_forever()

    if start_syncback:
        syncback.join()
def test_actions_for_invalid_accounts_are_skipped(purge_accounts_and_actions,
                                                  patched_worker):
    """Actions for accounts marked invalid are never executed.

    Two accounts on the same shard each schedule an action; one account is
    then marked invalid. Only the valid account's action runs to completion;
    the invalid account's action remains pending.
    """
    with session_scope_by_shard_id(0) as db_session:
        # Account that will be invalidated below.
        account = add_generic_imap_account(
            db_session, email_address='*****@*****.**')
        schedule_test_action(db_session, account)
        namespace_id = account.namespace.id
        count = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == namespace_id).count()
        assert account.sync_state != 'invalid'

        # Control account that stays valid.
        another_account = add_generic_imap_account(
            db_session, email_address='*****@*****.**')
        schedule_test_action(db_session, another_account)
        another_namespace_id = another_account.namespace.id
        another_count = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == another_namespace_id).count()
        assert another_account.sync_state != 'invalid'

        # Invalidate the first account after its action is scheduled.
        account.mark_invalid()
        db_session.commit()

    service = SyncbackService(
        syncback_id=0, process_number=0, total_processes=2)
    service._process_log()
    # Wait for the worker greenlets to finish, then clean them up.
    while len(service.workers) >= 1:
        gevent.sleep(0.1)
    gevent.killall(service.workers)

    with session_scope_by_shard_id(0) as db_session:
        # Invalid account: all of its actions are still pending (skipped).
        q = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == namespace_id,
            ActionLog.status == 'pending')
        assert q.count() == count
        # Valid account: all of its actions completed successfully.
        q = db_session.query(ActionLog).filter(
            ActionLog.namespace_id == another_namespace_id)
        assert q.filter(ActionLog.status == 'pending').count() == 0
        assert q.filter(ActionLog.status == 'successful').count() == another_count
def test_folder_name_translation(empty_db, generic_account, imap_api_client,
                                 mock_imapclient, monkeypatch):
    """A '/' in a folder display name is translated to the account's IMAP
    separator, under the account's namespace prefix, on remote creation."""
    from inbox.transactions.actions import SyncbackService
    syncback = SyncbackService(syncback_id=0, process_number=0,
                               total_processes=1, num_workers=2)

    # Fake server namespace: prefix 'INBOX.', hierarchy separator '.'.
    imap_namespaces = (((u'INBOX.', u'.'),),)
    mock_imapclient.create_folder = mock.Mock()
    mock_imapclient.namespace = mock.Mock(return_value=imap_namespaces)
    folder_list = [(('\\HasChildren',), '.', u'INBOX')]
    mock_imapclient.list_folders = mock.Mock(return_value=folder_list)
    mock_imapclient.has_capability = mock.Mock(return_value=True)

    # Persist the prefix/separator on the account, as folder sync would.
    folder_prefix, folder_separator = imap_namespaces[0][0]
    generic_account.folder_prefix = folder_prefix
    generic_account.folder_separator = folder_separator
    empty_db.session.commit()

    # Creating the folder via the API schedules a syncback action.
    folder_json = {'display_name': 'Taxes/Accounting'}
    imap_api_client.post_data('/folders', folder_json)

    # NOTE(review): the log is processed before workers are (re)started;
    # presumably tasks just wait in task_queue until workers consume them —
    # confirm against SyncbackService internals.
    syncback._process_log()
    syncback._restart_workers()
    # Yield to the worker greenlets until the queued tasks drain.
    while not syncback.task_queue.empty():
        gevent.sleep(0.1)
    # 'Taxes/Accounting' -> prefix + separator-joined path.
    mock_imapclient.create_folder.assert_called_with('INBOX.Taxes.Accounting')
def test_failed_event_creation(db, patched_syncback_task, default_account,
                               event):
    """When the syncback task fails, every action for the record is marked
    'failed' (patched_syncback_task makes the task raise)."""
    # Schedule a full lifecycle of actions for the same event record.
    schedule_action("create_event", event, default_account.namespace.id,
                    db.session)
    schedule_action("update_event", event, default_account.namespace.id,
                    db.session)
    schedule_action("update_event", event, default_account.namespace.id,
                    db.session)
    schedule_action("delete_event", event, default_account.namespace.id,
                    db.session)
    db.session.commit()

    NUM_WORKERS = 2
    service = SyncbackService(
        syncback_id=0,
        process_number=0,
        total_processes=NUM_WORKERS,
        num_workers=NUM_WORKERS,
    )
    service._restart_workers()
    service._process_log()

    # Yield to the worker greenlets until the queued tasks drain.
    while not service.task_queue.empty():
        gevent.sleep(0.1)

    # This has to be a separate while-loop because there's a brief moment where
    # the task queue is empty, but num_idle_workers hasn't been updated yet.
    # On slower systems, we might need to sleep a bit between the while-loops.
    while service.num_idle_workers != NUM_WORKERS:
        gevent.sleep(0.1)

    q = db.session.query(ActionLog).filter_by(record_id=event.id).all()
    assert all(a.status == "failed" for a in q)
def test_actions_claimed_by_a_single_service(purge_accounts_and_actions,
                                             patched_task):
    """Each shard's action is claimed by exactly one service process.

    Two processes split the shard space (process_number 0 and 1 of
    total_processes=2); each must queue only the task from its own shard.
    """
    # Schedule one action per shard, remembering each action log id.
    actionlogs = []
    for key in (0, 1):
        with session_scope_by_shard_id(key) as db_session:
            account = add_generic_imap_account(
                db_session, email_address='{}@test.com'.format(key))
            schedule_test_action(db_session, account)
            actionlogs += [db_session.query(ActionLog).one().id]

    services = []
    for process_number in (0, 1):
        service = SyncbackService(syncback_id=0,
                                  process_number=process_number,
                                  total_processes=2, num_workers=2)
        service._process_log()
        services.append(service)

    for i, service in enumerate(services):
        # Exactly one task was queued, for this process's own shard.
        assert service.task_queue.qsize() == 1
        assert service.task_queue.peek().action_log_ids() == [actionlogs[i]]
def test_actions_claimed_by_a_single_service(purge_accounts_and_actions,
                                             patched_task):
    """Each shard's action is claimed by exactly one service process.

    Two processes split the shard space (process_number 0 and 1 of
    total_processes=2); each must queue only the task from its own shard.
    """
    # Schedule one action per shard, remembering each action log id.
    actionlogs = []
    for key in (0, 1):
        with session_scope_by_shard_id(key) as db_session:
            account = add_generic_imap_account(
                db_session, email_address='{}@test.com'.format(key))
            schedule_test_action(db_session, account)
            actionlogs += [db_session.query(ActionLog).one().id]

    services = []
    for process_number in (0, 1):
        service = SyncbackService(
            syncback_id=0, process_number=process_number,
            total_processes=2, num_workers=2)
        service._process_log()
        services.append(service)

    for i, service in enumerate(services):
        # Exactly one task was queued, for this process's own shard.
        assert service.task_queue.qsize() == 1
        assert service.task_queue.peek().action_log_ids() == [actionlogs[i]]
def test_failed_event_creation(db, patched_syncback_task, default_account,
                               event):
    """When the syncback task fails, every action for the record is marked
    'failed' (patched_syncback_task makes the task raise)."""
    # Schedule a full lifecycle of actions for the same event record.
    schedule_action('create_event', event, default_account.namespace.id,
                    db.session)
    schedule_action('update_event', event, default_account.namespace.id,
                    db.session)
    schedule_action('update_event', event, default_account.namespace.id,
                    db.session)
    schedule_action('delete_event', event, default_account.namespace.id,
                    db.session)
    db.session.commit()

    NUM_WORKERS = 2
    service = SyncbackService(syncback_id=0, process_number=0,
                              total_processes=NUM_WORKERS,
                              num_workers=NUM_WORKERS)
    service._restart_workers()
    service._process_log()

    # Yield to the worker greenlets until the queued tasks drain.
    while not service.task_queue.empty():
        gevent.sleep(0.1)

    # This has to be a separate while-loop because there's a brief moment where
    # the task queue is empty, but num_idle_workers hasn't been updated yet.
    # On slower systems, we might need to sleep a bit between the while-loops.
    while service.num_idle_workers != NUM_WORKERS:
        gevent.sleep(0.1)

    q = db.session.query(ActionLog).filter_by(record_id=event.id).all()
    assert all(a.status == 'failed' for a in q)
def start():
    """Run a single syncback process until it exits.

    NOTE(review): syncback_id, process_num, total_processes, enable_tracer
    and enable_profiler are free names here — presumably bound in an
    enclosing scope (e.g. CLI option parsing); verify against the caller.
    """
    # Start the syncback service, and just hang out forever
    syncback = SyncbackService(syncback_id, process_num, total_processes)
    if enable_profiler:
        inbox_config["DEBUG_PROFILING_ON"] = True
    # Each process gets its own profiling frontend port.
    port = 16384 + process_num
    enable_profiler_api = inbox_config.get("DEBUG_PROFILING_ON")
    frontend = SyncbackHTTPFrontend(port, enable_tracer, enable_profiler_api)
    frontend.start()
    syncback.start()
    # Block until the service terminates.
    syncback.join()
def __init__(self, *args, **kwargs):
    """Track actions scheduled on this service, then defer to the real
    SyncbackService initializer."""
    # Must be set before the parent initializer runs, in case it schedules.
    self.scheduled_actions = list()
    SyncbackService.__init__(self, *args, **kwargs)