def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
    """
    Verifies the correctness of the merkle proof provided in the reply from
    the node. Returns True if verified to be correct, throws an exception
    otherwise.

    :param replies: One or more replies for which Merkle Proofs have to be
        verified
    :raises ProofError: The proof is invalid
    :return: True
    """
    sth = namedtuple("sth", ["tree_size", "sha256_root_hash"])
    verifier = MerkleVerifier()
    serializer = JsonSerializer()
    # Proof bookkeeping fields must not be part of the serialized leaf data
    # whose inclusion is being verified.
    ignored = (F.auditPath.name, F.seqNo.name, F.rootHash.name)
    for r in replies:
        result = r[f.RESULT.nm]
        seqNo = result[F.seqNo.name]
        rootHash = base64.b64decode(result[F.rootHash.name].encode())
        auditPath = [base64.b64decode(a.encode())
                     for a in result[F.auditPath.name]]
        # BUG FIX: `dict.iteritems()` was removed in Python 3 and would raise
        # AttributeError here; `dict.items()` is the correct call.
        filtered = {k: v for k, v in result.items() if k not in ignored}
        serialized = serializer.serialize(filtered)
        # seqNo is 1-based while leaf indices are 0-based, hence `seqNo - 1`.
        verifier.verify_leaf_inclusion(
            serialized, seqNo - 1, auditPath,
            sth(tree_size=seqNo, sha256_root_hash=rootHash))
    return True
def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
    """
    Verifies the correctness of the merkle proof provided in the reply from
    the node. Returns True if verified to be correct, throws an exception
    otherwise.

    :param replies: One or more replies for which Merkle Proofs have to be
        verified
    :raises ProofError: The proof is invalid
    :return: True
    """
    sth = namedtuple("sth", ["tree_size", "sha256_root_hash"])
    verifier = MerkleVerifier()
    serializer = JsonSerializer()
    for r in replies:
        result = r[f.RESULT.nm]
        seqNo = result[F.seqNo.name]
        rootHash = base64.b64decode(result[F.rootHash.name].encode())
        auditPath = [base64.b64decode(a.encode())
                     for a in result[F.auditPath.name]]
        # BUG FIX: Python 3 dicts have no `iteritems()` (it would raise
        # AttributeError); `items()` is the Py3 equivalent.
        # The proof bookkeeping fields are excluded from the hashed leaf data.
        filtered = ((k, v) for (k, v) in result.items()
                    if k not in [F.auditPath.name, F.seqNo.name,
                                 F.rootHash.name])
        serialized = serializer.serialize(dict(filtered))
        # seqNo is 1-based; the leaf index is 0-based.
        verifier.verify_leaf_inclusion(serialized, seqNo - 1, auditPath,
                                       sth(tree_size=seqNo,
                                           sha256_root_hash=rootHash))
    return True
def __init__(self, tree: MerkleTree, dataDir: str, serializer: MappingSerializer = None, fileName: str = None, ensureDurability: bool = True, transactionLogStore: FileStore = None, defaultFile=None): """ :param tree: an implementation of MerkleTree :param dataDir: the directory where the transaction log is stored :param serializer: an object that can serialize the data before hashing it and storing it in the MerkleTree :param fileName: the name of the transaction log file :param defaultFile: file or dir to use for initialization of transaction log store """ assert not transactionLogStore or not defaultFile self.defaultFile = defaultFile self.dataDir = dataDir self.tree = tree self.leafSerializer = serializer or \ JsonSerializer() # type: MappingSerializer self.hasher = TreeHasher() self._transactionLog = None # type: FileStore self._transactionLogName = fileName or "transactions" self.ensureDurability = ensureDurability self._customTransactionLogStore = transactionLogStore self.start() self.seqNo = 0 self.recoverTree()
async def submitPublicKeys(self, id: ID, pk: PublicKey, pkR: RevocationPublicKey = None, signatureType = 'CL') -> \ (PublicKey, RevocationPublicKey): data = {} if pk is not None: data[PRIMARY] = pk.toStrDict() if pkR is not None: data[REVOCATION] = pkR.toStrDict() op = { TXN_TYPE: CLAIM_DEF, REF: id.schemaId, DATA: JsonSerializer.dumps(data, toBytes=False), SIGNATURE_TYPE: signatureType } _, seqNo = await self._sendSubmitReq(op) if seqNo: pk = pk._replace(seqId=seqNo) if pkR is not None: pkR = pkR._replace(seqId=seqNo) return pk, pkR
    async def submitPublicKeys(self, id: ID, pk: PublicKey,
                               pkR: RevocationPublicKey = None,
                               signatureType = 'CL') -> \
            (PublicKey, RevocationPublicKey):
        """
        Publish the issuer's public keys as a CLAIM_DEF transaction and, on
        success, return copies of the keys stamped with the ledger seqNo.
        """
        data = {}
        if pk is not None:
            # NOTE(review): the primary key uses `to_str_dict()` while the
            # revocation key below uses `toStrDict()` — looks like a partially
            # applied rename; confirm both method names exist on their types.
            data[PRIMARY] = pk.to_str_dict()
        if pkR is not None:
            data[REVOCATION] = pkR.toStrDict()
        op = {
            TXN_TYPE: CLAIM_DEF,
            REF: id.schemaId,
            DATA: JsonSerializer.dumps(data, toBytes=False),
            SIGNATURE_TYPE: signatureType
        }
        _, seqNo = await self._sendSubmitReq(op)
        if seqNo:
            # Keys are immutable namedtuples; _replace yields stamped copies.
            pk = pk._replace(seqId=seqNo)
            if pkR is not None:
                pkR = pkR._replace(seqId=seqNo)
        return pk, pkR
async def submitSchema(self, schema: Schema) -> Schema: data = { NAME: schema.name, VERSION: schema.version, ATTR_NAMES: ",".join(schema.attrNames) } op = { TXN_TYPE: SCHEMA, DATA: JsonSerializer.dumps(data, toBytes=False) } _, seqNo = await self._sendSubmitReq(op) if seqNo: schema = schema._replace(issuerId=self.wallet.defaultId, seqId=seqNo) return schema
async def submitSchema(self, schema: Schema) -> Schema: data = { NAME: schema.name, VERSION: schema.version, ATTR_NAMES: schema.attrNames } op = { TXN_TYPE: SCHEMA, DATA: JsonSerializer.dumps(data, toBytes=False) } _, seqNo = await self._sendSubmitReq(op) if seqNo: schema = schema._replace(issuerId=self.wallet.defaultId, seqId=seqNo) return schema
return base64.b64encode(s).decode("utf-8") def b64d(s): return base64.b64decode(s) def lst2str(l): return ",".join(l) orderedFields = OrderedDict([("identifier", (str, str)), ("reqId", (str, int)), ("op", (str, str))]) ledgerSerializer = CompactSerializer(orderedFields) leafSerializer = JsonSerializer() # @pytest.yield_fixture(scope='function') # def tempdir(): # with TemporaryDirectory() as tdir: # yield tdir @pytest.fixture(scope="function") def ledger(tempdir): ledger = Ledger( CompactMerkleTree(hashStore=FileHashStore(dataDir=tempdir)), dataDir=tempdir, serializer=ledgerSerializer) ledger.reset() return ledger
class DomainRequestHandler(RequestHandler):
    """
    Validates and applies domain (identity) transactions — currently NYM —
    and maintains the corresponding key/value state.
    """

    # Serializer used for values written to the state store.
    stateSerializer = JsonSerializer()

    def __init__(self, ledger, state, reqProcessors):
        super().__init__(ledger, state)
        # Extra processors that may contribute fields to the final txn.
        self.reqProcessors = reqProcessors

    def validate(self, req: Request, config=None):
        """
        Raise UnauthorizedClientRequest if `req` is a NYM txn the sender is
        not allowed to perform (non-steward origin, or steward threshold
        exceeded when adding a new steward).
        """
        if req.operation.get(TXN_TYPE) == NYM:
            origin = req.identifier
            error = None
            if not self.isSteward(self.state, origin, isCommitted=False):
                error = "Only Steward is allowed to do these transactions"
            if req.operation.get(ROLE) == STEWARD:
                if self.stewardThresholdExceeded(config):
                    error = "New stewards cannot be added by other stewards " \
                            "as there are already {} stewards in the system".\
                        format(config.stewardThreshold)
            if error:
                raise UnauthorizedClientRequest(req.identifier, req.reqId,
                                                error)

    def _reqToTxn(self, req: Request):
        # Convert the request to a txn and let every registered processor
        # merge its own data into it.
        txn = reqToTxn(req)
        for processor in self.reqProcessors:
            res = processor.process(req)
            txn.update(res)
        return txn

    def apply(self, req: Request):
        """Append the txn for `req` to the ledger and update the state."""
        txn = self._reqToTxn(req)
        (start, end), _ = self.ledger.appendTxns(
            [self.transform_txn_for_ledger(txn)])
        self.updateState(txnsWithSeqNo(start, end, [txn]))
        return txn

    @staticmethod
    def transform_txn_for_ledger(txn):
        """
        Some transactions need to be updated before they can be stored in the
        ledger, eg. storing certain payload in another data store and only
        its hash in the ledger
        """
        return txn

    def updateState(self, txns, isCommitted=False):
        # `isCommitted` selects the committed vs uncommitted state view.
        for txn in txns:
            self._updateStateWithSingleTxn(txn, isCommitted=isCommitted)

    def _updateStateWithSingleTxn(self, txn, isCommitted=False):
        typ = txn.get(TXN_TYPE)
        if typ == NYM:
            nym = txn.get(TARGET_NYM)
            self.updateNym(nym, txn, isCommitted=isCommitted)
        else:
            logger.debug(
                'Cannot apply request of type {} to state'.format(typ))

    def countStewards(self) -> int:
        """
        Count the number of stewards added to the pool transaction store

        Note: This is inefficient, a production use case of this function
        should require an efficient storage mechanism
        """
        # THIS SHOULD NOT BE DONE FOR PRODUCTION
        return sum(1 for _, txn in self.ledger.getAllTxn()
                   if (txn[TXN_TYPE] == NYM) and
                   (txn.get(ROLE) == STEWARD))

    def stewardThresholdExceeded(self, config) -> bool:
        """We allow at most `stewardThreshold` number of stewards to be
        added by other stewards"""
        return self.countStewards() > config.stewardThreshold

    def updateNym(self, nym, txn, isCommitted=True):
        existingData = self.getNymDetails(self.state, nym,
                                          isCommitted=isCommitted)
        newData = {}
        if not existingData:
            # New nym being added to state, set the TrustAnchor
            newData[f.IDENTIFIER.nm] = txn[f.IDENTIFIER.nm]
            # New nym being added to state, set the role and verkey to None;
            # this makes the state data always have a value for `role` and
            # `verkey` since we allow clients to omit specifying `role` and
            # `verkey` in the request, considering a default value of None
            newData[ROLE] = None
            newData[VERKEY] = None
        if ROLE in txn:
            newData[ROLE] = txn[ROLE]
        if VERKEY in txn:
            newData[VERKEY] = txn[VERKEY]
        newData[F.seqNo.name] = txn.get(F.seqNo.name)
        existingData.update(newData)
        key = nym.encode()
        val = self.stateSerializer.serialize(existingData)
        self.state.set(key, val)
        return existingData

    def hasNym(self, nym, isCommitted: bool = True):
        # A nym exists iff its key holds non-empty data in state.
        key = nym.encode()
        data = self.state.get(key, isCommitted)
        return bool(data)

    @staticmethod
    def getSteward(state, nym, isCommitted: bool = True):
        # Return the nym's state data only if its role is STEWARD, else {}.
        nymData = DomainRequestHandler.getNymDetails(state, nym, isCommitted)
        if not nymData:
            return {}
        else:
            if nymData.get(ROLE) == STEWARD:
                return nymData
            else:
                return {}

    @staticmethod
    def isSteward(state, nym, isCommitted: bool = True):
        return bool(DomainRequestHandler.getSteward(state, nym, isCommitted))

    @staticmethod
    def getNymDetails(state, nym, isCommitted: bool = True):
        # State values are JSON-encoded bytes; an absent key maps to {}.
        key = nym.encode()
        data = state.get(key, isCommitted)
        return json.loads(data.decode()) if data else {}
reqOpKeys, GET_TXNS, LAST_TXN, TXNS, \ SCHEMA, GET_SCHEMA, openTxns, \ ISSUER_KEY, GET_ISSUER_KEY, REF, IDENTITY_TXN_TYPES, \ CONFIG_TXN_TYPES, POOL_UPGRADE, ACTION, START, CANCEL, SCHEDULE, \ NODE_UPGRADE, COMPLETE, FAIL, ENDPOINT from sovrin_common.txn_util import getTxnOrderedFields from sovrin_common.types import Request from sovrin_common.util import dateTimeEncoding from sovrin_node.persistence.secondary_storage import SecondaryStorage from sovrin_node.server.client_authn import TxnBasedAuthNr from sovrin_node.server.node_authn import NodeAuthNr from sovrin_node.server.pool_manager import HasPoolManager from sovrin_node.server.upgrader import Upgrader logger = getlogger() jsonSerz = JsonSerializer() class Node(PlenumNode, HasPoolManager): keygenScript = "init_sovrin_raet_keep" def __init__(self, name, nodeRegistry=None, clientAuthNr=None, ha=None, cliname=None, cliha=None, basedirpath=None, primaryDecider=None, pluginPaths: Iterable[str] = None,
def testJsonSerializer():
    """JsonSerializer round-trips dicts to canonical compact JSON
    (sorted keys, no whitespace) and back, for both bytes and str."""
    sz = JsonSerializer()
    m1 = {'integer': 36, 'name': 'Foo', 'surname': 'Bar', 'float': 14.8639,
          'index': 1, 'index_start_at': 56, 'email': '*****@*****.**',
          'fullname': 'Foo Bar', 'bool': False}
    m1s = '{"bool":false,"email":"*****@*****.**","float":14.8639,"fullname":"Foo Bar","index":1,"index_start_at":56,"integer":36,"name":"Foo","surname":"Bar"}'
    m2 = {'latitude': 31.351883, 'longitude': -97.466179,
          'tags': ['foo', 'bar', 'baz', 'alice', 'bob', 'carol', 'dave']}
    m2s = '{"latitude":31.351883,"longitude":-97.466179,"tags":["foo","bar","baz","alice","bob","carol","dave"]}'
    m3 = {'name': 'Alice Bob', 'website': 'example.com',
          'friends': [{'id': 0, 'name': 'Dave'},
                      {'id': 1, 'name': 'Carol'},
                      {'id': 2, 'name': 'Dave'}]}
    m3s = '{"friends":[{"id":0,"name":"Dave"},{"id":1,"name":"Carol"},{"id":2,"name":"Dave"}],"name":"Alice Bob","website":"example.com"}'
    for obj, encoded in ((m1, m1s), (m2, m2s), (m3, m3s)):
        assert sz.serialize(obj) == encoded.encode()
        assert sz.serialize(obj, toBytes=False) == encoded
        assert sz.deserialize(encoded) == obj
        assert sz.deserialize(encoded.encode()) == obj
def __init__(self, ledger: Ledger, state: State, domainState: State): super().__init__(ledger, state) self.domainState = domainState self.stateSerializer = JsonSerializer()
class PoolRequestHandler(RequestHandler):
    """
    Validates and applies pool (NODE) transactions and maintains the pool
    state keyed by node nym.
    """

    def __init__(self, ledger: Ledger, state: State, domainState: State):
        super().__init__(ledger, state)
        # Domain state is consulted for steward role checks.
        self.domainState = domainState
        self.stateSerializer = JsonSerializer()

    def validate(self, req: Request, config=None):
        """
        Raise UnauthorizedClientRequest if a NODE txn fails the add/update
        authorisation checks.
        """
        typ = req.operation.get(TXN_TYPE)
        error = None
        if typ == NODE:
            nodeNym = req.operation.get(TARGET_NYM)
            # Existing node data means this is an update, else an addition.
            if self.getNodeData(nodeNym, isCommitted=False):
                error = self.authErrorWhileUpdatingNode(req)
            else:
                error = self.authErrorWhileAddingNode(req)
        if error:
            raise UnauthorizedClientRequest(req.identifier, req.reqId,
                                            error)

    def apply(self, req: Request):
        """Append a NODE txn to the ledger and update the pool state."""
        typ = req.operation.get(TXN_TYPE)
        if typ == NODE:
            txn = reqToTxn(req)
            (start, end), _ = self.ledger.appendTxns([txn])
            self.updateState(txnsWithSeqNo(start, end, [txn]))
            return txn
        else:
            logger.debug(
                'Cannot apply request of type {} to state'.format(typ))
            return None

    def updateState(self, txns, isCommitted=False):
        for txn in txns:
            nodeNym = txn.get(TARGET_NYM)
            data = txn.get(DATA, {})
            existingData = self.getNodeData(nodeNym,
                                            isCommitted=isCommitted)
            # Node data did not exist in state, so this is a new node txn,
            # hence store the author of the txn (steward of node)
            if not existingData:
                existingData[f.IDENTIFIER.nm] = txn.get(f.IDENTIFIER.nm)
            existingData.update(data)
            self.updateNodeData(nodeNym, existingData)

    def authErrorWhileAddingNode(self, request):
        """Return an error string if `request` may not add a node, else
        None (falls through)."""
        origin = request.identifier
        operation = request.operation
        data = operation.get(DATA, {})
        error = self.dataErrorWhileValidating(data, skipKeys=False)
        if error:
            return error
        isSteward = self.isSteward(origin, isCommitted=False)
        if not isSteward:
            return "{} is not a steward so cannot add a new node".format(
                origin)
        if self.stewardHasNode(origin):
            return "{} already has a node".format(origin)
        if self.isNodeDataConflicting(operation.get(DATA, {})):
            return "existing data has conflicts with " \
                   "request data {}".format(operation.get(DATA))

    def authErrorWhileUpdatingNode(self, request):
        # Check if steward of the node is updating it and its data does not
        # conflict with any existing node's data
        origin = request.identifier
        operation = request.operation
        isSteward = self.isSteward(origin, isCommitted=False)
        if not isSteward:
            return "{} is not a steward so cannot update a node".format(
                origin)
        nodeNym = operation.get(TARGET_NYM)
        if not self.isStewardOfNode(origin, nodeNym, isCommitted=False):
            return "{} is not a steward of node {}".format(origin, nodeNym)
        data = operation.get(DATA, {})
        return self.dataErrorWhileValidatingUpdate(data, nodeNym)

    def getNodeData(self, nym, isCommitted: bool = True):
        # State values are JSON-encoded bytes; an absent key maps to {}.
        key = nym.encode()
        data = self.state.get(key, isCommitted)
        return json.loads(data.decode()) if data else {}

    def updateNodeData(self, nym, data):
        key = nym.encode()
        val = self.stateSerializer.serialize(data)
        self.state.set(key, val)

    def isSteward(self, nym, isCommitted: bool = True):
        return DomainRequestHandler.isSteward(self.domainState, nym,
                                              isCommitted)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the instance alive for the cache's lifetime; entries are also never
    # invalidated when node data changes — confirm this is intentional.
    @lru_cache(maxsize=64)
    def isStewardOfNode(self, stewardNym, nodeNym, isCommitted=True):
        nodeData = self.getNodeData(nodeNym, isCommitted=isCommitted)
        return nodeData and nodeData[f.IDENTIFIER.nm] == stewardNym

    def stewardHasNode(self, stewardNym) -> bool:
        # Cannot use lru_cache since a steward might have a node in future
        # and unfortunately lru_cache does not allow single entries to be
        # cleared
        # TODO: Modify lru_cache to clear certain entities
        for nodeNym, nodeData in self.state.as_dict.items():
            nodeData = json.loads(nodeData.decode())
            if nodeData.get(f.IDENTIFIER.nm) == stewardNym:
                return True
        return False

    @staticmethod
    def dataErrorWhileValidating(data, skipKeys):
        """Return an error string for structurally bad node data, else
        None (falls through)."""
        reqKeys = {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, ALIAS}
        if not skipKeys and not reqKeys.issubset(set(data.keys())):
            return 'Missing some of {}'.format(reqKeys)
        # Distinct fallbacks so two absent HAs do not compare equal.
        nip = data.get(NODE_IP, 'nip')
        np = data.get(NODE_PORT, 'np')
        cip = data.get(CLIENT_IP, 'cip')
        cp = data.get(CLIENT_PORT, 'cp')
        if (nip, np) == (cip, cp):
            return 'node and client ha cannot be same'

    def isNodeDataSame(self, nodeNym, newData, isCommitted=True):
        # Compare stored node data (minus the steward identifier) with
        # the requested data.
        nodeInfo = self.getNodeData(nodeNym, isCommitted=isCommitted)
        nodeInfo.pop(f.IDENTIFIER.nm, None)
        return nodeInfo == newData

    def isNodeDataConflicting(self, data, nodeNym=None):
        # Check if node's ALIAS or IPs or ports conflicts with other nodes,
        # also, the node is not allowed to change its alias.
        # Check ALIAS change
        nodeData = {}
        if nodeNym:
            nodeData = self.getNodeData(nodeNym, isCommitted=False)
            if nodeData.get(ALIAS) != data.get(ALIAS):
                return True
            else:
                # Preparing node data for check coming next
                nodeData.pop(f.IDENTIFIER.nm, None)
                nodeData.pop(SERVICES, None)
                nodeData.update(data)
        for otherNode, otherNodeData in self.state.as_dict.items():
            otherNode = otherNode.decode()
            otherNodeData = json.loads(otherNodeData.decode())
            otherNodeData.pop(f.IDENTIFIER.nm, None)
            otherNodeData.pop(SERVICES, None)
            if not nodeNym or otherNode != nodeNym:
                # The node's ip, port and alias should be unique
                bag = set()
                for d in (nodeData, otherNodeData):
                    bag.add(d.get(ALIAS))
                    bag.add((d.get(NODE_IP), d.get(NODE_PORT)))
                    bag.add((d.get(CLIENT_IP), d.get(CLIENT_PORT)))
                # Drop placeholders contributed by missing fields.
                list(
                    map(lambda x: bag.remove(x) if x in bag else None,
                        (None, (None, None))))
                if (not nodeData and len(bag) != 3) or \
                        (nodeData and len(bag) != 6):
                    return True

    def dataErrorWhileValidatingUpdate(self, data, nodeNym):
        """Return an error string if a node update is invalid, else
        None (falls through)."""
        error = self.dataErrorWhileValidating(data, skipKeys=True)
        if error:
            return error
        if self.isNodeDataSame(nodeNym, data, isCommitted=False):
            return "node already has the same data as requested"
        if self.isNodeDataConflicting(data, nodeNym):
            return "existing data has conflicts with " \
                   "request data {}".format(data)
def __init__(self, ledger: Ledger, state: State, domainState: State, idrCache: IdrCache): super().__init__(ledger, state, domainState) self.stateSerializer = JsonSerializer() self.idrCache = idrCache
def testJsonSerializer():
    """JsonSerializer emits canonical compact JSON (sorted keys, no
    whitespace) and deserializes it back, for both bytes and str."""
    sz = JsonSerializer()

    def check(obj, encoded):
        # Serialization: bytes by default, str with toBytes=False.
        assert sz.serialize(obj) == encoded.encode()
        assert sz.serialize(obj, toBytes=False) == encoded
        # Deserialization accepts both str and bytes.
        assert sz.deserialize(encoded) == obj
        assert sz.deserialize(encoded.encode()) == obj

    check(
        {'integer': 36, 'name': 'Foo', 'surname': 'Bar', 'float': 14.8639,
         'index': 1, 'index_start_at': 56, 'email': '*****@*****.**',
         'fullname': 'Foo Bar', 'bool': False},
        '{"bool":false,"email":"*****@*****.**","float":14.8639,"fullname":"Foo Bar","index":1,"index_start_at":56,"integer":36,"name":"Foo","surname":"Bar"}')
    check(
        {'latitude': 31.351883, 'longitude': -97.466179,
         'tags': ['foo', 'bar', 'baz', 'alice', 'bob', 'carol', 'dave']},
        '{"latitude":31.351883,"longitude":-97.466179,"tags":["foo","bar","baz","alice","bob","carol","dave"]}')
    check(
        {'name': 'Alice Bob', 'website': 'example.com',
         'friends': [{'id': 0, 'name': 'Dave'},
                     {'id': 1, 'name': 'Carol'},
                     {'id': 2, 'name': 'Dave'}]},
        '{"friends":[{"id":0,"name":"Dave"},{"id":1,"name":"Carol"},{"id":2,"name":"Dave"}],"name":"Alice Bob","website":"example.com"}')