def valid_password(a, password, compare_password=None):
    """Check `password` against account `a`'s stored hash.

    Returns the account object (truthy) on success, or False on failure.
    Supports three stored-hash formats: bcrypt ("$2a$..."), salted SHA-1
    (43-byte stored value), and legacy unsalted SHA-1.  When the stored
    hash is in an old format (or an outdated bcrypt work factor) and we
    were asked to compare against the account's own password, the stored
    hash is transparently upgraded to current-work-factor bcrypt.

    :param a: account object; must have `name` and `password` attributes
    :param str password: the plaintext candidate password
    :param compare_password: optional explicit hash to compare against;
        when None, `a.password` is used and upgrades are enabled
    """
    # bail out early if the account or password's invalid
    if not hasattr(a, 'name') or not hasattr(a, 'password') or not password:
        return False

    # only rewrite the stored hash when we're checking against the
    # account's own password (not some caller-supplied hash)
    convert_password = False
    if compare_password is None:
        convert_password = True
        compare_password = a.password

    # standardize on utf-8 encoding
    password = filters._force_utf8(password)

    if compare_password.startswith('$2a$'):
        # it's bcrypt.
        try:
            expected_hash = bcrypt.hashpw(password, compare_password)
        except ValueError:
            # password is invalid because it contains null characters
            return False
        if not constant_time_compare(compare_password, expected_hash):
            return False

        # if it's using the current work factor, we're done, but if it's not
        # we'll have to rehash.
        # the format is $2a$workfactor$salt+hash
        work_factor = int(compare_password.split("$")[2])
        if work_factor == g.bcrypt_work_factor:
            return a
        # otherwise fall through to the upgrade block below
    else:
        # alright, so it's not bcrypt. how old is it?
        # if the length of the stored hash is 43 bytes, the sha-1 hash has a salt
        # otherwise it's sha-1 with no salt.
        salt = ''
        if len(compare_password) == 43:
            salt = compare_password[:3]
        expected_hash = passhash(a.name, password, salt)
        if not constant_time_compare(compare_password, expected_hash):
            return False

    # since we got this far, it's a valid password but in an old format
    # let's upgrade it
    if convert_password:
        a.password = bcrypt_password(password)
        a._commit()
    return a
def get_authenticated_account(self):
    """Authenticate a request from its HTTP Basic credentials.

    Returns the matching Account, or None when the header is missing or
    the credentials don't check out.  When SSO trust is enabled
    (g.auth_trust_http_authorization) the password is not verified and
    unknown usernames are auto-registered.
    """
    from v1.models import Account, NotFound, register

    try:
        header = request.environ.get("HTTP_AUTHORIZATION")
        username, password = parse_http_basic(header)
    except RequirementException:
        return None

    try:
        account = Account._by_name(username)
    except NotFound:
        if not g.auth_trust_http_authorization:
            return None
        # note: we're explicitly allowing automatic re-registration of
        # _deleted accounts and login of _banned accounts here because
        # we're trusting you know what you're doing in an SSO situation
        account = register(username, password, request.ip)

    # if we're to trust the authorization headers, don't check passwords
    if g.auth_trust_http_authorization:
        return account

    # not all systems support bcrypt in the standard crypt
    if account.password.startswith("$2a$"):
        candidate = bcrypt.hashpw(password, account.password)
    else:
        candidate = crypt.crypt(password, account.password)

    if constant_time_compare(candidate, account.password):
        return account
    return None
def GET_mediaembed(self, link, credentials):
    """Render the media embed for `link`, with MAC auth for private subs."""
    # don't serve up untrusted content except on our
    # specifically untrusted domain
    if request.host != g.media_domain:
        abort(404)

    if link.subverbify_slow.type in Subverbify.private_types:
        # embeds of private content must carry a MAC proving we vended them
        mac = hmac.new(g.secrets["media_embed"], link._id36,
                       hashlib.sha1).hexdigest()
        if not constant_time_compare(credentials or "", mac):
            abort(404)

    media_object = link.secure_media_object if c.secure else link.media_object

    if not media_object:
        abort(404)
    elif isinstance(media_object, dict):
        # otherwise it's the new style, which is a dict(type=type, **args)
        content = get_media_embed(media_object).content

    c.allow_framing = True
    return MediaEmbedBody(body=content).render()
def valid_admin_cookie(cookie):
    """Validate an admin session cookie.

    Returns a (valid, first_login) tuple; first_login is None whenever
    the cookie is rejected.
    """
    invalid = (False, None)

    if g.read_only_mode:
        return invalid

    # cookie format: "<first_login>,<last_request>,<mac>"
    pieces = cookie.split(',')
    if len(pieces) != 3:
        return invalid
    first_login, last_request, _mac = pieces

    # both timestamps must parse, or the cookie is malformed
    try:
        login_at = datetime.strptime(first_login, COOKIE_TIMESTAMP_FORMAT)
        active_at = datetime.strptime(last_request, COOKIE_TIMESTAMP_FORMAT)
    except ValueError:
        return invalid

    # reject cookies past their absolute lifetime...
    if (datetime.utcnow() - login_at).total_seconds() > g.ADMIN_COOKIE_TTL:
        return invalid

    # ...or that have been idle too long
    if (datetime.utcnow() - active_at).total_seconds() > g.ADMIN_COOKIE_MAX_IDLE:
        return invalid

    # recompute the whole cookie and compare in constant time
    expected = c.user.make_admin_cookie(first_login, last_request)
    return (constant_time_compare(cookie, expected), first_login)
def parse_and_validate_reply_to_address(address):
    """Validate the address and parse out and return the message id.

    This is the reverse operation of `get_reply_to_address`.  Returns
    None for anything that isn't a correctly signed zendesk-reply
    address on our modmail domain.
    """
    local_part, at_sign, domain = address.partition("@")
    if not (at_sign and local_part) or domain != g.modmail_email_domain:
        return None

    prefix, plus, signed_part = local_part.partition("+")
    if not (plus and prefix) or prefix != "zendeskreply":
        return None

    # signed part is "<message_id36>-<mac>"
    pieces = signed_part.split("-")
    if len(pieces) != 2:
        return None
    message_id36, mac = pieces

    expected = hmac.new(g.secrets['modmail_email_secret'], message_id36,
                        hashlib.sha256).hexdigest()
    if not constant_time_compare(expected, mac):
        return None

    return message_id36
def valid_feed(name, feedhash, path):
    """Return the Account for a valid private-feed request, else None."""
    if not (name and feedhash and path):
        return None

    from v1.lib.template_helpers import add_sr
    full_path = add_sr(path)

    try:
        user = Account._by_name(name)
        # the user must have private feeds enabled and the hash must match
        if user.pref_private_feeds:
            if constant_time_compare(feedhash, make_feedhash(user, full_path)):
                return user
    except NotFound:
        pass
    return None
def _get_client_auth(self):
    """Authenticate the OAuth2 client named in the Authorization header.

    Returns the OAuth2Client on success; aborts the request with a 401
    challenge on any missing or mismatched credential.
    """
    header = request.headers.get("Authorization")
    try:
        client_id, client_secret = parse_http_basic(header)
        require(client_id)

        client = OAuth2Client.get_token(client_id)
        require(client)

        # confidential clients must also present the matching secret;
        # public clients authenticate by id alone
        if client.is_confidential():
            require(client_secret)
            require(constant_time_compare(client.secret, client_secret))

        return client
    except RequirementException:
        abort(401, headers=[("WWW-Authenticate", 'Basic realm="verbify"')])
def POST_revoke_token(self, token_id, token_hint):
    '''Revoke an OAuth2 access or refresh token.

    token_type_hint is optional, and hints to the server whether the
    passed token is a refresh or access token.

    A call to this endpoint is considered a success if the passed
    `token_id` is no longer valid. Thus, if an invalid `token_id` was
    passed in, a successful 204 response will be returned.

    See [RFC7009](http://tools.ietf.org/html/rfc7009)
    '''
    self.OPTIONS_revoke_token()

    # In success cases, this endpoint returns no data.
    response.status = 204
    if not token_id:
        return

    # the hint only changes lookup order; we always try both token types
    types = (OAuth2AccessToken, OAuth2RefreshToken)
    if token_hint == "refresh_token":
        types = reversed(types)

    for token_type in types:
        try:
            token = token_type._byID(token_id)
        except tdb_cassandra.NotFound:
            g.stats.simple_event(
                'oauth2.POST_revoke_token.cass_not_found.%s'
                % token_type.__name__)
            continue
        else:
            # found it; skip the for-else below
            break
    else:
        # No Token found. The given token ID is already gone
        # or never existed. Either way, from the client's perspective,
        # the passed in token is no longer valid.
        return

    # only the client a token was issued to may revoke it
    if constant_time_compare(token.client_id, c.oauth2_client._id):
        token.revoke()
    else:
        # RFC 7009 is not clear on how to handle this case.
        # Given that a malicious client could do much worse things
        # with a valid token then revoke it, returning an error
        # here is best as it may help certain clients debug issues
        response.status = 400
        g.stats.simple_event(
            'oauth2.errors.REVOKE_TOKEN_UNAUTHORIZED_CLIENT')
        return self.api_wrapper({"error": "unauthorized_client"})
def get_client_ip(self, environ):
    """Return the CDN-reported client IP if its verification tag is valid.

    Returns None when either CDN header is absent or the tag doesn't
    match, meaning the request can't be trusted to have come via the CDN.
    """
    try:
        ip = environ["HTTP_CF_CONNECTING_IP"]
        tag = environ["HTTP_CF_CIP_TAG"].lower()
    except KeyError:
        return None

    # the tag is sha1(ip + shared secret); recompute and compare in
    # constant time
    secret = g.secrets["cdn_ip_verification"]
    expected = hashlib.sha1(ip + secret).hexdigest()
    if constant_time_compare(expected, tag):
        return ip
    return None
def get_authenticated_account(self):
    """Authenticate a request from its session cookie.

    Returns the Account for a valid session cookie, or None when the
    cookie is missing, malformed, forged, or names a missing account.
    """
    from v1.models import Account, NotFound

    quoted_session_cookie = request.cookies.get(g.login_cookie)
    if not quoted_session_cookie:
        return None
    session_cookie = urllib.unquote(quoted_session_cookie)

    # cookie format: "<uid>,<timestamp>,<mac>"
    try:
        uid, timestr, _mac = session_cookie.split(",")
        uid = int(uid)
    except ValueError:
        # fix: was a bare `except:`, which also swallowed SystemExit,
        # KeyboardInterrupt, etc.; only a malformed cookie (wrong field
        # count or non-numeric uid) can raise here, and only ValueError
        return None

    try:
        account = Account._byID(uid, data=True)
    except NotFound:
        return None

    # recompute the cookie for this account and compare in constant time
    expected_cookie = account.make_cookie(timestr)
    if not constant_time_compare(session_cookie, expected_cookie):
        return None

    return account
def validate_mailgun_webhook(timestamp, token, signature):
    """Check whether this is a valid webhook sent by Mailgun.

    See https://documentation.mailgun.com/user_manual.html#securing-webhooks

    NOTE: A single Mailgun account is used for both outbound email
    (Mailgun HTTP API) and inbound email (Mailgun Routes +
    MailgunWebhookController). As a result the `mailgun_api_key` is used
    by both.
    """
    # the signature covers the timestamp concatenated with the token
    expected = hmac.new(
        g.secrets['mailgun_api_key'],
        ''.join((timestamp, token)),
        hashlib.sha256,
    ).hexdigest()

    if not constant_time_compare(expected, signature):
        g.stats.simple_event("mailgun.incoming.bad_signature")
        return False

    # reject replays of stale (or badly clock-skewed) webhooks
    skew = abs(int(timestamp) - time.time())
    if skew > MAX_TIMESTAMP_DEVIATION:
        g.stats.simple_event("mailgun.incoming.bad_timestamp")
        return False

    return True
def valid_otp_cookie(cookie):
    """Validate a remembered-OTP cookie; True only for a fresh, genuine one."""
    if g.read_only_mode:
        return False

    # cookie format: "<remembered_at>,<signature>"
    pieces = cookie.split(",")
    if len(pieces) != 2:
        return False
    remembered_at, _signature = pieces

    # an expired cookie is invalid no matter how it's signed
    try:
        remembered_when = datetime.strptime(remembered_at,
                                            COOKIE_TIMESTAMP_FORMAT)
    except ValueError:
        return False
    if (datetime.utcnow() - remembered_when).total_seconds() > g.OTP_COOKIE_TTL:
        return False

    # recompute the full cookie and compare in constant time
    expected = c.user.make_otp_cookie(remembered_at)
    return constant_time_compare(cookie, expected)
def POST_report_cache_poisoning(
    self,
    report_mac,
    poisoner_name,
    poisoner_id,
    poisoner_canary,
    victim_canary,
    render_time,
    route_name,
    url,
    source,
    cache_policy,
    resp_headers,
):
    """Report an instance of cache poisoning and its details"""
    self.OPTIONS_report_cache_poisoning()

    if c.errors:
        abort(400)

    # prevent simple CSRF by requiring a custom header
    if not request.headers.get('X-Loggit'):
        abort(403)

    # Eh? Why are you reporting this if the canaries are the same?
    if poisoner_canary == victim_canary:
        abort(400)

    # the report fields are signed server-side when the page is rendered;
    # reject reports whose MAC doesn't cover exactly these values
    expected_mac = make_poisoning_report_mac(
        poisoner_canary=poisoner_canary,
        poisoner_name=poisoner_name,
        poisoner_id=poisoner_id,
        cache_policy=cache_policy,
        source=source,
        route_name=route_name,
    )
    if not constant_time_compare(report_mac, expected_mac):
        abort(403)

    if resp_headers:
        try:
            resp_headers = json.loads(resp_headers)
            # Verify this is a JSON map of `header_name => [value, ...]`
            if not isinstance(resp_headers, dict):
                abort(400)
            for hdr_name, hdr_vals in resp_headers.iteritems():
                if not isinstance(hdr_name, basestring):
                    abort(400)
                if not all(isinstance(h, basestring) for h in hdr_vals):
                    abort(400)
        except ValueError:
            abort(400)

    if not resp_headers:
        resp_headers = {}

    poison_info = dict(
        poisoner_name=poisoner_name,
        poisoner_id=str(poisoner_id),
        # Convert the JS timestamp to a standard one
        # NOTE(review): multiplying by 1000 implies render_time arrives in
        # seconds and the event wants milliseconds — confirm against the
        # client-side reporter's units
        render_time=render_time * 1000,
        route_name=route_name,
        url=url,
        source=source,
        cache_policy=cache_policy,
        resp_headers=resp_headers,
    )

    # For immediate feedback when tracking the effects of caching changes
    g.stats.simple_event("cache.poisoning.%s.%s" % (source, cache_policy))
    # For longer-term diagnosing of caching issues
    g.events.cache_poisoning_event(poison_info, request=request, context=c)

    VRatelimit.ratelimit(rate_ip=True, prefix="rate_poison_", seconds=10)
    return self.api_wrapper({})
def valid_signature(payload, signature, field=None):
    """Checks if `signature` matches `payload`.

    `Signature` (at least as of version 1) must be of the form:

        {global_version}:{platform}:{version}:{signature}

    where:

    * global_version (currently hard-coded to be "1") can be used to
      change this header's underlying schema later if needs be. As such,
      can be treated as a protocol version.
    * platform is the client platform type (generally "ios" or "android")
    * version is the client's token version (can be updated and
      incremented per app build as needs be).
    * signature is the hmac of the request's POST body with the token
      derived from the above three parameters via `get_secret_token`

    :param str payload: the signed data
    :param str signature: the signature of the payload
    :param str field: error field to set (one of "ua", "body")
    :returns: object with signature validity and any errors
    :rtype: :py:class:`SigningResult`
    """
    result = SigningResult()

    # if the signature is unparseable, there's not much to do
    sig_match = SIG_HEADER_RE.match(signature or "")
    if not sig_match:
        result.add_error(ERRORS.INVALID_FORMAT, field=field)
        return result

    sig_header_dict = sig_match.groupdict()

    # we're matching \d so this shouldn't throw a TypeError
    result.global_version = int(sig_header_dict['global_version'])

    # incrementing this value is drastic. We can't validate a token protocol
    # we don't understand.
    if result.global_version > GLOBAL_TOKEN_VERSION:
        result.add_error(ERRORS.UNKOWN_GLOBAL_VERSION, field=field)
        return result

    # currently there's only one version, but here's where we'll eventually
    # patch in more.
    sig_match = SIG_CONTENT_V1_RE.match(sig_header_dict['payload'])
    if not sig_match:
        result.add_error(ERRORS.UNPARSEABLE, field=field)
        return result

    # slop the matched data over to the SigningResult
    sig_match_dict = sig_match.groupdict()
    result.platform = sig_match_dict['platform']
    result.version = int(sig_match_dict['version'])
    result.epoch = int(sig_match_dict['epoch'])
    result.mac = sig_match_dict['mac']

    # verify that the token provided hasn't been invalidated
    if is_invalid_token(result.platform, result.version):
        result.add_error(ERRORS.INVALIDATED_TOKEN, field=field)
        return result

    # check the epoch validity, but don't fail -- leave that up to the
    # validator!
    if not valid_epoch(result.platform, result.epoch):
        result.add_error(
            ERRORS.EXPIRED_TOKEN,
            field=field,
            details=result.epoch,
        )

    # get the expected secret used to verify this request.
    secret_token = get_secret_token(
        result.platform,
        result.version,
        global_version=result.global_version,
    )
    # compare the provided mac against our own hmac of the epoch-wrapped
    # payload, in constant time
    result.valid_hmac = constant_time_compare(
        result.mac,
        versioned_hmac(
            secret_token,
            epoch_wrap(result.epoch, payload),
            result.global_version,
        ),
    )
    if not result.valid_hmac:
        result.add_error(ERRORS.SIGNATURE_MISMATCH, field=field)

    return result
def is_valid_click_url(link, click_url, click_hash):
    """True when `click_hash` is the genuine HMAC for this link/url pair."""
    return constant_time_compare(click_hash,
                                 get_click_url_hmac(link, click_url))