def test_celery_beat(init):
    """Scheduled tasks run properly on the celery worker + celery beat process."""
    ini_file = os.path.join(os.path.dirname(__file__), "task-test.ini")
    worker, beat = run_worker_and_beat(ini_file)
    try:
        # Reset test database
        redis = get_redis(init.config.registry)
        redis.delete("foo", "bar")

        foo = "no read"
        deadline = time.time() + 10
        while time.time() < deadline:
            redis = get_redis(init.config.registry)
            # scheduledtasks.ticker should beat every second and reset values in Redis
            foo = redis.get("foo")
            if foo:
                break
            time.sleep(0.5)

        # Both background processes must still be alive (Popen.returncode is
        # None while the process runs)
        if worker:
            assert worker.returncode is None
        if beat:
            assert beat.returncode is None

        assert foo == b"xoo"  # Set back by its original value by 1 second beat
    finally:
        # Best-effort cleanup; either process may have exited already
        for process in (worker, beat):
            if process:
                try:
                    process.terminate()
                except ProcessLookupError:
                    pass
def test_login_actions_get_email(dbsession, browser, web_server, test_request):
    """See that login actions are correctly executed when user hits example page and does GET action."""
    redis = get_redis(test_request)

    # Clear the marker flag so we can observe whether the login hook fired
    requireloginexamples._was_logged_in = None

    browser.visit(web_server + "/require_login_example_page")
    assert browser.is_element_present_by_css("#heading-require-login-example")

    # Trigger the GET example action; it should bounce us to the login screen
    browser.find_by_css("#btn-get-example").click()
    assert browser.is_element_present_by_css("#panel-magic-login")

    # Move to email sign in
    browser.find_by_css("#nav-sign-in-by-email").click()
    browser.fill("email", "*****@*****.**")
    browser.find_by_css("button[name='confirm']").click()
    assert browser.is_element_present_by_css("#panel-magic-login-email-sent")

    # Follow the magic link with the token we peek straight from Redis
    browser.visit("{}/verify-email-login/{}".format(web_server, peek_token(redis)["token"]))

    # User lands after post login actions
    assert not browser.is_element_present_by_css("#msg-you-are-logged-in")
    assert browser.is_element_present_by_css("#msg-example-get-view-success")
    assert requireloginexamples._was_logged_in is False
def set_verification_token(request, token_type, user_id, next_url=None, extras=None):
    """Create a login verification token and store its payload in Redis.

    :param request: Current HTTP request
    :param token_type: Kind of verification this token represents
    :param user_id: Email address the token is bound to
    :param next_url: Where to redirect the user after successful verification
    :param extras: Extra data carried through the login flow
    :return: Tuple of (token string, stored data dict)
    """
    redis = get_redis(request.registry)
    settings = request.registry.settings

    # Token lifetime, configurable via INI; defaults to one hour
    lifetime = int(settings.get("magiclink.email_token_expiration_seconds", 3600))

    token = rand_string(length=EMAIL_TOKEN_LENGTH)
    data = {
        "token_type": token_type,
        "expires": time.time() + lifetime,
        "token": token,
        "email": user_id,
        "next_url": next_url,
        "extras": extras,
    }
    # All outstanding tokens live in one Redis hash keyed by the token itself
    redis.hset(LOGIN_VERIFICATION_REDIS_HKEY, token, json.dumps(data))
    return token, data
def main(argv=sys.argv):
    """Open an interactive shell with the Websauna environment preloaded."""
    if len(argv) < 2:
        usage(argv)

    request = init_websauna(argv[1])

    # Convenience names made available inside the shell
    imported_objects = OrderedDict()
    imported_objects.update({
        "request": request,
        "dbsession": request.dbsession,
        "transaction": transaction,
        "redis": get_redis(request),
        "now": now,
        "datetime": datetime,
    })

    # Also expose every SQLAlchemy model class
    for name, cls in Base._decl_class_registry.items():
        if name != "_sa_module_registry":
            imported_objects[name] = cls

    print("")
    print("Following classes and objects are available:")
    for var, val in imported_objects.items():
        print("{:30}: {}".format(var, str(val).replace("\n", " ").replace("\r", " ")))
    print("")

    embed(user_ns=imported_objects)
def redis_test(request):
    """Smoke-test Redis round-trips for raw bytes and non-ASCII text."""
    connection = get_redis(request)

    # Bytes survive a write/read round trip unchanged
    connection.set("foo", b"bar")
    assert connection.get("foo") == b"bar"

    # Unicode text is stored UTF-8 encoded and decodes back intact
    connection.set("foo", "ÅÄÖ")
    assert connection.get("foo").decode("utf-8") == "ÅÄÖ"

    return HTTPOk()
def test_celery_beat(init, run_worker_and_beat):
    """Scheduled tasks run properly on the celery worker + celery beat process."""
    # Reset test database
    registry = init.config.registry
    get_redis(registry).delete("foo", "bar")

    foo = "no read"
    deadline = time.time() + 10
    # scheduledtasks.ticker should beat every second and reset values in Redis;
    # poll until the value appears or we hit the deadline
    while time.time() < deadline:
        foo = get_redis(registry).get("foo")
        if foo:
            break
        time.sleep(0.5)

    assert foo == b"xoo"  # Set back by its original value by 1 second beat
def update_networks(self: Task):
    """Update all incoming and outgoing events from a network through Celery.

    Offer an alternative for running standalone ethereum-service.
    """
    request = self.request.request
    redis = get_redis(request)

    # Get list of configured networks
    services = ServiceCore.parse_network_config(request)

    for network_name in services.keys():
        # Update each network separately and have a lock to ensure we don't
        # accidentally do two overlapping update runs
        # https://pypi.python.org/pypi/python-redis-lock
        lock = redis_lock.Lock(redis, "network-update-lock-{}".format(network_name))

        if not lock.acquire(blocking=False):
            # This network is still processing pending operations from the previous task run
            lock_acquired_at = redis.get("network-update-lock-started-{}".format(network_name))
            lock_acquired_by = redis.get("network-update-lock-started-by-{}".format(network_name))

            if lock_acquired_by:
                lock_acquired_by = lock_acquired_by.decode("utf-8")

            if lock_acquired_at:
                try:
                    # Redis returns bytes; convert to float before building a
                    # timestamp (passing bytes raises TypeError unconditionally)
                    friendly_at = datetime.datetime.utcfromtimestamp(float(lock_acquired_at))
                except (TypeError, ValueError, OverflowError, OSError):
                    friendly_at = 0

                diff = time.time() - float(lock_acquired_at)
                if diff > BAD_LOCK_TIMEOUT:
                    logger.warning("Failed to get wallet update lock on %s network when doing update_networks for %f seconds, originally acquired by %s at %s", network_name, diff, friendly_at, lock_acquired_by)
            continue

        lock.release()

        with lock:
            # Record who holds the lock and since when, for diagnosing stuck runs
            redis.set("network-update-lock-started-{}".format(network_name), time.time())
            redis.set("network-update-lock-started-by-{}".format(network_name), "process: {} thread:{}".format(os.getpid(), threading.current_thread()))

            logger.info("Updating network %s", network_name)
            start = time.time()
            one_shot = OneShot(request, network_name, services[network_name])
            one_shot.run_shot()
            logger.info("Updated network %s in %d seconds", network_name, time.time() - start)

            redis.delete("network-update-lock-started-{}".format(network_name))
            redis.delete("network-update-lock-started-by-{}".format(network_name))

            request.registry.notify(ServiceUpdated(request, network_name, time.time() - start))
def get(registry, key):
    """Get the current hits per rolling time window.

    Use ``key`` to store the current hit rate in Redis.

    :param registry: Pyramid registry e.g. request.registry
    :param key: Redis key name we use to keep counter
    :return: int, how many hits we have within the current rolling time window
    """
    # Delegate to the shared counter helper with default window/limit
    return _check(get_redis(registry), key)
def clear_throttle(request: Request, key_name: str):
    """Clear the throttling status.

    Example:

    .. code-block:: python

        clear_throttle(request, "new-phone-number")

    :param request: Current HTTP request
    :param key_name: Throttle identifier whose counter is dropped
    """
    connection = get_redis(request)
    # Throttle counters are namespaced with a "throttle_" prefix
    connection.delete("throttle_{}".format(key_name))
def check(registry, key, window=60, limit=10):
    """Do a rolling time window counter hit.

    Use ``key`` to store the current hit rate in Redis.

    :param registry: Pyramid registry e.g. request.registry
    :param key: Redis key name we use to keep counter
    :param window: Rolling time window in seconds. Default 60 seconds.
    :param limit: Allowed operations per time window. Default 10 hits.
    :return: True is the maximum limit has been reached for the current time window
    """
    # Delegate to the shared counter helper
    return _check(get_redis(registry), key, window, limit)
def test_run_scheduled(init):
    """Scheduled tasks run properly on the celery worker + celery beat process."""
    ini_file = os.path.join(os.path.dirname(__file__), "scheduler-test.ini")
    worker, beat = run_worker_and_beat(ini_file)
    try:
        # Reset test database
        registry = init.config.registry
        get_redis(registry).delete("foo", "bar")

        # scheduledtasks.ticker should beat every second and set foo in Redis.
        # Poll up to 10 seconds instead of a blind sleep so the test finishes
        # as soon as the value appears.
        foo = None
        deadline = time.time() + 10
        while time.time() < deadline:
            foo = get_redis(registry).get("foo")
            if foo:
                break
            time.sleep(0.5)

        # Both background processes must still be alive (returncode is None
        # while a Popen process runs)
        if worker:
            assert worker.returncode is None
        if beat:
            assert beat.returncode is None

        assert foo == b"foo"  # Set back by its original value by 1 second beat
    finally:
        # Best-effort cleanup; either process may have exited already
        for process in (worker, beat):
            if process:
                try:
                    process.terminate()
                except ProcessLookupError:
                    pass
def verify_email_login(request: Request, token: str):
    """Verify email login token.

    :param request: Current HTTP request
    :param token: Token string from the emailed magic link
    :return: HTTPFound redirect — to post-login page on success, login page on failure
    """
    redis = get_redis(request.registry)

    def fail(msg="Sign in link invalid. Please try again."):
        # Flash an error and bounce the user back to the login page
        messages.add(request, kind="error", msg=msg, msg_id="msg-bad-email-token")
        return HTTPFound(request.route_url("login"))

    # Hackety hacky by our Russian friends again?
    if len(token) != EMAIL_TOKEN_LENGTH:
        logger.warning("Bad token: %s", token)
        return fail()

    token_data = redis.hget(LOGIN_VERIFICATION_REDIS_HKEY, token)
    if not token_data:
        return fail()

    # Allow use the code only once, then erase
    redis.hdel(LOGIN_VERIFICATION_REDIS_HKEY, token)

    data = json.loads(token_data.decode("utf-8"))

    # Only verify email tokens in this view. Fail gracefully instead of
    # asserting: asserts are stripped under `python -O` and the token payload
    # is external data, not an internal invariant.
    if data["token_type"] != "email":
        logger.warning("Unexpected token type for email login: %s", data["token_type"])
        return fail()

    if time.time() > data["expires"]:
        return fail("Sign in link expired. Please try again.")

    email = data["email"]

    # Create new user or get existing user based on this email
    user = get_or_create_email_user(request, email)

    login_service = get_login_service(request)

    # next_url was saved to the Redis by the view that rendered login buttons
    next_url = data.get("next_url")

    # Restore extra passed parameters
    request.session["login_extras"] = data.get("extras", {})

    # Returns HTTPRedirect taking user to post-login page
    return login_service.authenticate_user(user, login_source="email", location=next_url)
def test_send_preview(mailgun, populated_mailing_list, domain, test_request):
    """Send out a news letter preview."""
    # Make sure we run on empty state
    get_redis(test_request).delete(NewsletterState.REDIS_NEWSLETTER_TIMESTAMP)

    send_time = datetime.datetime(1980, 1, 15)

    # This is triggered only on commit
    with transaction.manager:
        send_newsletter(test_request, "Test subject", testmode=True, now_=send_time)

    # The send timestamp recorded in Redis reflects the injected clock
    state = NewsletterState(test_request)
    assert state.get_last_send_timestamp().year == 1980
def test_email_login(web_server: str, browser: DriverAPI, dbsession: Session, test_request):
    """See that we can sign up / sign in through email login."""
    # Reset login key status before proceeding
    redis = get_redis(test_request)
    redis.delete("login_verification_token")

    def submit_email_and_verify():
        # Fill in the email form, then follow the magic link whose token we
        # peek straight from Redis
        browser.fill("email", "*****@*****.**")
        browser.find_by_css("button[name='confirm']").click()
        assert browser.is_element_present_by_css("#panel-magic-login-email-sent")
        browser.visit("{}/verify-email-login/{}".format(web_server, peek_token(redis)["token"]))
        assert browser.is_element_present_by_css("#msg-you-are-logged-in")

    # First pass: brand-new user signs up through the login page
    browser.visit(web_server + "/login")
    assert browser.is_element_present_by_css("#panel-magic-login")

    # Move to email sign in
    browser.find_by_css("#nav-sign-in-by-email").click()
    submit_email_and_verify()

    # Check we created a sane user
    with transaction.manager:
        u = dbsession.query(User).first()
        assert u.email == "*****@*****.**"
        assert u.first_login

    #
    # Do it again so we capture both new user and old user flows
    #
    browser.find_by_css("#nav-logout").click()
    browser.visit(web_server + "/login-email")
    submit_email_and_verify()

    # Check the user is still sane
    with transaction.manager:
        u = dbsession.query(User).first()
        assert u.email == "*****@*****.**"
        assert not u.first_login
def login(browser, test_request, web_server: str):
    """Walk the browser through the email magic-link login flow."""
    # Reset login key status before proceeding
    redis = get_redis(test_request)
    redis.delete("login_verification_token")

    browser.visit(web_server + "/login")
    assert browser.is_element_present_by_css("#panel-magic-login")

    # Move to email sign in
    browser.find_by_css("#nav-sign-in-by-email").click()
    browser.fill("email", "*****@*****.**")
    browser.find_by_css("button[name='confirm']").click()
    assert browser.is_element_present_by_css("#panel-magic-login-email-sent")

    # Follow the magic link with the token we peek straight from Redis
    browser.visit("{}/verify-email-login/{}".format(web_server, peek_token(redis)["token"]))
    assert browser.is_element_present_by_css("#msg-you-are-logged-in")
def main(argv: t.List[str] = sys.argv):
    """Execute the IPython shell prompt with Websauna configuration already initialised.

    :param argv: Command line arguments, second one needs to be the uri to a configuration file.
    :raises sys.SystemExit:
    """
    if len(argv) < 2:
        usage_message(argv, additional_params='[var=value]')

    request = init_websauna(get_config_uri(argv))

    # Convenience names made available inside the shell
    imported_objects = OrderedDict((
        ("request", request),
        ("dbsession", request.dbsession),
        ("transaction", transaction),
        ("redis", get_redis(request)),
        ("now", now),
        ("datetime", datetime),
    ))

    # Also expose every SQLAlchemy model class
    for name, cls in Base._decl_class_registry.items():
        if name != "_sa_module_registry":
            imported_objects[name] = cls

    feedback('', False)
    feedback('Following classes and objects are available:', False)
    for var, val in imported_objects.items():
        flattened = str(val).replace('\n', ' ').replace('\r', ' ')
        feedback("{key:30}: {value}".format(key=var, value=flattened))
    feedback('', False)

    embed(user_ns=imported_objects)
def main(argv=sys.argv):
    """Force-release stale network update locks held in Redis."""

    def usage(argv):
        cmd = os.path.basename(argv[0])
        # Only the config uri is consumed — the old text advertised a
        # <network name> argument that this command never reads
        print('usage: %s <config_uri>\n'
              '(example: "%s conf/production.ini")' % (cmd, cmd))
        sys.exit(1)

    if len(argv) < 2:
        usage(argv)

    config_uri = argv[1]

    # console_app sets up colored log output
    from websauna.system.devop.cmdline import init_websauna
    request = init_websauna(config_uri, sanity_check=True)

    # Get list of configured networks
    services = ServiceCore.parse_network_config(request)
    redis = get_redis(request)

    for network_name in services.keys():
        # Update each network separately and have a lock to ensure we don't
        # accidentally do two overlapping update runs
        # https://pypi.python.org/pypi/python-redis-lock
        lock = redis_lock.Lock(redis, "network-update-lock-{}".format(network_name))
        if not lock.acquire(blocking=False):
            # Lock is held (likely by a dead process) — force it open
            print("Lock {} is blocked, resetting".format(network_name))
            lock.reset()
        else:
            # We got the lock, so nothing was stuck; just hand it back
            lock.release()

    print("Unlock complete")
    sys.exit(0)
def update_networks(self: Task):
    """Update all incoming and outgoing events from a network through Celery.

    Offer an alternative for running standalone ethereum-service.
    """
    request = self.request.request
    redis = get_redis(request)

    # Get list of configured networks
    services = ServiceCore.parse_network_config(request)

    for network_name in services.keys():
        # Update each network separately and have a lock to ensure we don't
        # accidentally do two overlapping update runs
        # https://pypi.python.org/pypi/python-redis-lock
        lock = redis_lock.Lock(redis, "network-update-lock-{}".format(network_name))

        if not lock.acquire(blocking=False):
            # This network is still processing pending operations from the previous task run
            lock_acquired_at = redis.get("network-update-lock-started-{}".format(network_name))
            lock_acquired_by = redis.get("network-update-lock-started-by-{}".format(network_name))

            if lock_acquired_by:
                lock_acquired_by = lock_acquired_by.decode("utf-8")

            if lock_acquired_at:
                try:
                    # Redis returns bytes; convert to float before building a
                    # timestamp (passing bytes raises TypeError unconditionally)
                    friendly_at = datetime.datetime.utcfromtimestamp(float(lock_acquired_at))
                except (TypeError, ValueError, OverflowError, OSError):
                    friendly_at = 0

                diff = time.time() - float(lock_acquired_at)
                if diff > BAD_LOCK_TIMEOUT:
                    logger.warning(
                        "Failed to get wallet update lock on %s network when doing update_networks for %f seconds, originally acquired by %s at %s",
                        network_name,
                        diff,
                        friendly_at,
                        lock_acquired_by,
                    )
            continue

        lock.release()

        with lock:
            # Record who holds the lock and since when, for diagnosing stuck runs
            redis.set("network-update-lock-started-{}".format(network_name), time.time())
            redis.set(
                "network-update-lock-started-by-{}".format(network_name),
                "process: {} thread:{}".format(os.getpid(), threading.current_thread()),
            )

            logger.info("Updating network %s", network_name)
            start = time.time()
            one_shot = OneShot(request, network_name, services[network_name])
            one_shot.run_shot()
            logger.info("Updated network %s in %d seconds", network_name, time.time() - start)

            request.registry.notify(ServiceUpdated(request, network_name, time.time() - start))
def __init__(self, request: Request):
    """Bind this object to the current request and resolve a Redis client.

    :param request: Current HTTP request
    """
    self.request = request
    # Redis connection resolved once up front, reused by the instance
    self.redis = get_redis(self.request)
def increment_retry_attempt_count(request):
    """Bump the retry attempt counter, starting a fresh 2 second window on first attempt.

    Uses SET with ``nx=True`` so creating the counter is atomic: the original
    ``get`` + ``set`` pair raced — two concurrent callers could both see the
    key missing and each reset the count to 1.

    :param request: Current HTTP request
    """
    redis = get_redis(request)
    # nx=True: only set if the key does not exist yet (returns None when it
    # already exists). ex=2 gives the counter its 2 second lifetime.
    if not redis.set(RETRY_ATTEMPT_KEY, 1, ex=2, nx=True):
        # Key already present — count this attempt within the existing window
        redis.incr(RETRY_ATTEMPT_KEY)
def redis_test_write(self: WebsaunaTask):
    """Scheduled task: write a marker value to Redis so tests can detect the beat ran."""
    # NOTE(review): logged at error level, presumably so it is always visible
    # in worker output regardless of log configuration — confirm
    logger.error("Called by beat")
    request = self.get_request()
    connection = get_redis(request)
    # Tests poll "foo" and expect b"xoo" to prove this task executed
    connection.set("foo", "xoo")
def redis_test_write(self):
    """Celery task: write a marker value to Redis so tests can detect the task ran."""
    # The task object exposes the originating request at self.request.request
    request = self.request.request
    connection = get_redis(request)
    # Tests poll "foo" and expect b"foo" to prove this task executed
    connection.set("foo", "foo")
def get_retry_attempt_count(request):
    """Return how many retry attempts are recorded, 0 when the counter is absent.

    :param request: Current HTTP request
    """
    value = get_redis(request).get(RETRY_ATTEMPT_KEY)
    # Missing key yields None; treat it (and any empty value) as zero attempts
    return int(value or 0)
def redis_test_write(request):
    """Write a marker value to Redis so tests can detect this code path ran."""
    connection = get_redis(request.registry)
    # Tests poll "foo" and expect b"foo"
    connection.set("foo", "foo")
def reset_retry_attempt_count(request):
    """Drop the retry attempt counter so counting starts from scratch.

    :param request: Current HTTP request
    """
    # Deleting the key is equivalent to a zero count (see get_retry_attempt_count)
    get_redis(request).delete(RETRY_ATTEMPT_KEY)