def test_signature_wrong(self):
  blob = '123456789'
  key_name, sig = signature.sign_blob(blob)
  # Corrupt the first byte of the signature so verification must fail.
  sig = chr(ord(sig[0]) + 1) + sig[1:]
  cert = signature.get_x509_certificate_by_name(
      signature.get_own_public_certificates(), key_name)
  self.assertFalse(signature.check_signature(blob, cert, sig))
def test_check_signature_correct(self):
  blob = '123456789'
  key_name, sig = signature.sign_blob(blob)
  certs = signature.get_own_public_certificates()
  self.assertTrue(certs.check_signature(blob, key_name, sig))
  # Again, to hit a code path that uses the cached verifier.
  self.assertTrue(certs.check_signature(blob, key_name, sig))
def pack_auth_db():
  """Packs an entire AuthDB into a blob, signing it using app's private key.

  Returns:
    Tuple (blob, name of a key used to sign it, base64 encoded signature).
  """
  # Grab the snapshot.
  state, snapshot = replication.new_auth_db_snapshot()

  # Serialize to binary proto message.
  req = replication_pb2.ReplicationPushRequest()
  req.revision.primary_id = app_identity.get_application_id()
  req.revision.auth_db_rev = state.auth_db_rev
  req.revision.modified_ts = utils.datetime_to_timestamp(state.modified_ts)
  replication.auth_db_snapshot_to_proto(snapshot, req.auth_db)
  req.auth_code_version = version.__version__
  auth_db_blob = req.SerializeToString()

  # Sign it using primary's private keys. sign_blob is limited to 8KB only,
  # so hash the body first and sign the digest.
  key_name, sig = signature.sign_blob(hashlib.sha512(auth_db_blob).digest())
  sig = base64.b64encode(sig)

  logging.debug('AuthDB blob size is %d bytes', len(auth_db_blob))
  return auth_db_blob, key_name, sig
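# A minimal consumer-side sketch, not part of the snippets above: verifying
# the (blob, key_name, sig) triple produced by pack_auth_db. It reuses only
# the 'signature' API already exercised by the tests here; the function name
# 'verify_auth_db_blob' itself is illustrative.
def verify_auth_db_blob(auth_db_blob, key_name, sig_b64):
  """Checks that auth_db_blob was signed by the key named key_name.

  Mirrors pack_auth_db: the signature covers the SHA-512 digest of the blob
  (not the blob itself), so the digest must be recomputed before checking.
  """
  cert = signature.get_x509_certificate_by_name(
      signature.get_own_public_certificates(), key_name)
  digest = hashlib.sha512(auth_db_blob).digest()
  return signature.check_signature(digest, cert, base64.b64decode(sig_b64))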
def seal_token(subtoken):
  """Serializes and signs a subtoken, wrapping it into a DelegationToken."""
  serialized = subtoken.SerializeToString()
  # The second argument is the deadline (in seconds) for the signing RPC.
  signing_key_id, pkcs1_sha256_sig = signature.sign_blob(serialized, 0.5)
  return delegation_pb2.DelegationToken(
      serialized_subtoken=serialized,
      signer_id=model.get_service_self_identity().to_bytes(),
      signing_key_id=signing_key_id,
      pkcs1_sha256_sig=pkcs1_sha256_sig)
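# Illustrative counterpart to seal_token (an assumption, not the actual
# unseal implementation): checks the PKCS#1 SHA-256 signature over the
# serialized subtoken before trusting it. Assumes delegation_pb2 defines a
# Subtoken message matching what seal_token serialized.
def unseal_token(token):
  cert = signature.get_x509_certificate_by_name(
      signature.get_own_public_certificates(), token.signing_key_id)
  if not signature.check_signature(
      token.serialized_subtoken, cert, token.pkcs1_sha256_sig):
    raise ValueError('Invalid delegation token signature')
  subtoken = delegation_pb2.Subtoken()
  subtoken.ParseFromString(token.serialized_subtoken)
  return subtoken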
def sign_auth_db_blob(auth_db_blob):
  """Signs AuthDB blob with app's private key.

  Returns:
    Tuple (name of a key used, base64 encoded signature).
  """
  # sign_blob is limited to 8KB only, so hash the body first and sign the
  # digest.
  key_name, sig = signature.sign_blob(hashlib.sha512(auth_db_blob).digest())
  return key_name, base64.b64encode(sig)
def publish_authdb_change(state):
  """Publishes AuthDB change notification to the topic.

  Args:
    state: AuthReplicationState with version info.
  """
  if utils.is_local_dev_server():
    return
  msg = replication_pb2.ReplicationPushRequest()
  msg.revision.primary_id = app_identity.get_application_id()
  msg.revision.auth_db_rev = state.auth_db_rev
  msg.revision.modified_ts = utils.datetime_to_timestamp(state.modified_ts)
  blob = msg.SerializeToString()
  key_name, sig = signature.sign_blob(blob)
  pubsub.publish(topic_name(), blob, {
      'X-AuthDB-SigKey-v1': key_name,
      'X-AuthDB-SigVal-v1': base64.b64encode(sig),
  })
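# A sketch of the subscriber side (illustrative, assuming the attribute names
# set by publish_authdb_change above): verifies the signature delivered with
# a Pub/Sub notification before trusting its payload. Note that here the
# signature covers the raw blob directly, since the notification message is
# small enough to sign without hashing first.
def check_authdb_notification(blob, attributes):
  key_name = attributes['X-AuthDB-SigKey-v1']
  sig = base64.b64decode(attributes['X-AuthDB-SigVal-v1'])
  cert = signature.get_x509_certificate_by_name(
      signature.get_own_public_certificates(), key_name)
  return signature.check_signature(blob, cert, sig)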
def test_signature_correct(self):
  blob = '123456789'
  key_name, sig = signature.sign_blob(blob)
  cert = signature.get_x509_certificate_by_name(
      signature.get_own_public_certificates(), key_name)
  self.assertTrue(signature.check_signature(blob, cert, sig))
def update_replicas_task(auth_db_rev):
  """Packs AuthDB and pushes it to all out-of-date Replicas.

  Called via /internal/taskqueue/replication/<auth_db_rev> task (see
  backend/handlers.py) enqueued by 'trigger_replication'.

  Will check that AuthReplicationState.auth_db_rev is still equal to
  |auth_db_rev| before doing anything.

  Returns:
    True if all replicas are up-to-date now, False if task should be retried.
  """
  # Check that the task is not stale before doing any heavy lifting.
  replication_state = model.get_replication_state()
  if replication_state.auth_db_rev != auth_db_rev:
    logging.info(
        'Skipping stale task, current rev is %d, task was enqueued for rev %d',
        replication_state.auth_db_rev, auth_db_rev)
    return True

  # Pack the entire AuthDB into a blob to be stored in the datastore, Google
  # Storage and directly pushed to Replicas.
  replication_state, auth_db_blob = pack_auth_db()

  # Sign the blob, so even if it travels through an unprotected channel,
  # consumers can still verify that it was produced by us.
  key_name, sig = signature.sign_blob(hashlib.sha512(auth_db_blob).digest())
  sig_b64 = base64.b64encode(sig)

  # Put the blob into datastore. Also updates the pointer to the latest stored
  # blob. This is used by /auth_service/api/v1/authdb/revisions/... endpoint.
  store_auth_db_snapshot(replication_state, auth_db_blob)

  # Put the blob into Google Storage, if this feature is enabled.
  if gcs.is_upload_enabled():
    upload_to_gs(replication_state, auth_db_blob, key_name, sig)

  # Notify PubSub subscribers that a new snapshot is available.
  pubsub.publish_authdb_change(replication_state)

  # Grab the last known replicas state and push only to replicas that are
  # behind.
  stale_replicas = [
      entity
      for entity in AuthReplicaState.query(ancestor=replicas_root_key())
      if entity.auth_db_rev is None or entity.auth_db_rev < auth_db_rev
  ]
  if not stale_replicas:
    logging.info('All replicas are up-to-date.')
    return True

  # Push the blob to all out-of-date replicas, in parallel.
  push_started_ts = utils.utcnow()
  futures = {
      push_to_replica(
          replica.replica_url, auth_db_blob, key_name, sig_b64): replica
      for replica in stale_replicas
  }

  # Wait for all attempts to complete.
  retry = []
  while futures:
    completed = ndb.Future.wait_any(futures)
    replica = futures.pop(completed)

    exception = completed.get_exception()
    success = exception is None

    current_revision = None
    auth_code_version = None
    if success:
      current_revision, auth_code_version = completed.get_result()

    if not success:
      logging.error(
          'Error when pushing update to replica: %s (%s).\nReplica id is %s.',
          exception.__class__.__name__, exception, replica.key.id())
      # Give up only on explicit fatal error, retry on any other exception.
      if not isinstance(exception, FatalReplicaUpdateError):
        retry.append(replica)

    # Eagerly update known replica state in local DB as soon as a response is
    # received. That way if 'update_replicas_task' is killed midway, at least
    # the state of some replicas will be updated. Note that this transaction
    # is modifying a single entity group (replicas_root_key()) and thus can't
    # be called very often (due to 1 QPS limit on entity group updates).
    # If contention here becomes an issue, adding a simple time.sleep(X)
    # before the transaction is totally fine (since 'update_replicas_task' is
    # executed on the background task queue).
    try:
      if success:
        stored_rev = _update_state_on_success(
            key=replica.key,
            started_ts=push_started_ts,
            finished_ts=utils.utcnow(),
            current_revision=current_revision,
            auth_code_version=auth_code_version)
        logging.info(
            'Replica %s is updated to rev %d', replica.key.id(), stored_rev)
      else:
        stored_rev = _update_state_on_fail(
            key=replica.key,
            started_ts=push_started_ts,
            finished_ts=utils.utcnow(),
            old_auth_db_rev=replica.auth_db_rev,
            exc=exception)
      # If the current push failed, but some other concurrent push (if any)
      # succeeded (and so the replica is up-to-date), do not retry the
      # current push.
      if stored_rev is None or stored_rev > auth_db_rev:
        if replica in retry:
          retry.remove(replica)
    except (
        datastore_errors.InternalError,
        datastore_errors.Timeout,
        datastore_errors.TransactionFailedError) as exc:
      logging.exception(
          'Datastore error when updating replica state: %s.\n'
          'Replica id is %s.', exc.__class__.__name__, replica.key.id())
      # Should retry the task because of this.
      retry.append(replica)

  # Retry the task if at least one replica reported a retryable error.
  return not retry
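# Hypothetical sketch of the transactional helper referenced above
# (_update_state_on_success is not shown in these snippets). It illustrates
# the single-entity-group transaction pattern the comment describes: the
# stored revision only moves forward, so a concurrent push that already
# advanced the replica is never overwritten. Field names on AuthReplicaState
# other than auth_db_rev and replica_url, and the assumption that
# current_revision is a proto carrying auth_db_rev, are guesses for
# illustration only.
@ndb.transactional
def _update_state_on_success(
    key, started_ts, finished_ts, current_revision, auth_code_version):
  replica = key.get()
  if replica is None:
    return None  # the replica was removed while the push was in flight
  # Only move the stored revision forward; a concurrent push may have
  # already advanced it further.
  if (replica.auth_db_rev is None or
      replica.auth_db_rev < current_revision.auth_db_rev):
    replica.auth_db_rev = current_revision.auth_db_rev
    replica.push_started_ts = started_ts
    replica.push_finished_ts = finished_ts
    replica.auth_code_version = auth_code_version
    replica.put()
  return replica.auth_db_rev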