def schedule_notification_for_broadcast_sync(notification_location: str) -> None:
    """Enqueue a notification for the broadcast processor to check (synchronous).

    Args:
        notification_location: the notification location string to enqueue
    """
    # A redis set is used, so scheduling the same location twice is a no-op
    redis.sadd_sync(NOTIFICATION_KEY, notification_location)
Example #2
def store_registered_transaction_type(
    transaction_type_model: transaction_type_model.TransactionTypeModel
) -> None:
    """Persist a new transaction type to the datastore and register it in redis."""
    _log.info("Uploading to datastore")
    storage_key = f"{FOLDER}/TYPES/{transaction_type_model.txn_type}"
    storage.put_object_as_json(storage_key, transaction_type_model.export_as_at_rest())
    # txn_type is expected to be set by the caller; cast only narrows the type
    txn_type = cast(str, transaction_type_model.txn_type)
    redis.sadd_sync(TYPE_LIST_KEY, txn_type)
    _log.info("Successfully uploaded new transaction type to datastore")
Example #3
def rehydrate_transaction_types() -> None:
    """Repopulate the redis transaction-type set from the datastore if it is empty.

    Returns immediately when redis already has members in the set. Otherwise
    lists objects under "TRANSACTION_TYPES", keeps only keys with exactly
    three path segments (folder/TYPES/<txn_type>), and adds the final
    <txn_type> segment of each to the redis set.
    """
    # Use the same key constant that store_registered_transaction_type writes
    # through, so the read and write paths cannot drift apart.
    # NOTE(review): assumes TYPE_LIST_KEY == "type_list_key" (the literal this
    # function previously used) — confirm the constant's value.
    existing_list = redis.smembers_sync(TYPE_LIST_KEY)
    if existing_list:  # non-empty set means redis is already hydrated
        _log.info("redis is already populated")
        return

    transaction_types = filter(
        lambda x: len(x.split("/")) == 3,
        storage.list_objects("TRANSACTION_TYPES"))
    txn_types = [object_key.split("/")[2] for object_key in transaction_types]
    _log.info("Inserting new list into redis")
    if txn_types:
        response_number = redis.sadd_sync(TYPE_LIST_KEY, *txn_types)
        if response_number > 0:
            _log.info("Succeeded in updating redis")
        _log.info(f"response number --> {response_number}")
    else:
        _log.info("No transaction types found to be updated...")
Example #4
def send_receipts(l5_block: "l5_block_model.L5BlockModel") -> None:
    """Send L5 receipts down to every L1 chain referenced by the block's L4 blocks.

    For each L4 block: fetches the claim from the originating chain, adds this
    L5's proof to the claim's validations, records the chain id, and attempts
    to finalize the claim with matchmaking (failures are queued in redis for
    retry). Afterwards, POSTs the L5 block receipt once to each distinct chain.

    Args:
        l5_block: the L5 block whose receipts should be broadcast downward
    """
    receipt_path = "/v1/receipt"
    get_claim_path = "/v1/claim"
    chain_id_set = set()
    _log.info(f"l5 block to loop {l5_block.__dict__}")
    _log.info(f"Sending receipts to {len(l5_block.l4_blocks)} lower nodes")
    for l4_block in l5_block.l4_blocks:
        try:
            block_dictionary = json.loads(l4_block)
            chain_id = block_dictionary["l1_dc_id"]
            block = block_dictionary["l1_block_id"]
            full_claim_path = f"{get_claim_path}/{block}"
            # Get the claim data for billing
            claim_url = f"{matchmaking.get_dragonchain_address(chain_id)}{full_claim_path}"
            headers, _ = authorization.generate_authenticated_request(
                "GET", chain_id, full_claim_path)
            _log.info(f"getting claim for {block} from {chain_id}")
            try:
                _log.info(f"----> {claim_url}")
                r = requests.get(claim_url, headers=headers, timeout=30)
                _log.info(f"<---- {r.status_code} {r.text}")
            except Exception:
                # BUG FIX: previously fell through and dereferenced `r`, which
                # is unbound (or stale from a prior iteration) when the request
                # raises; skip this l4 block instead.
                _log.exception("Failed to get claim!")
                continue
            if r.status_code != 200:
                _log.error(
                    f"Claim check failed! Rejecting block {block} from {chain_id}"
                )
                continue
            else:
                claim = r.json()
                # Add this L5's proof to the block
                _log.info(f"Claim received from l1 {claim}")
                _log.info(
                    f"data points blockid {l5_block.block_id}  signature {l5_block.proof}"
                )
                block_data = {}
                block_data["blockId"] = l5_block.block_id
                block_data["signature"] = l5_block.proof
                claim["validations"]["l5"][l5_block.dc_id] = block_data
                # Remember this chain so the receipt is POSTed to it below
                chain_id_set.add(chain_id)
                _log.info(f"Sending filled claim {claim}")
                try:
                    claim_check_id = f"{chain_id}-{block}"
                    matchmaking.resolve_claim_check(claim_check_id)
                except exceptions.MatchmakingRetryableError:  # any 500-level server errors
                    _log.exception(
                        f"Adding claim to failed queue.  Claim ID: {claim_check_id}"
                    )
                    redis.sadd_sync(
                        "mq:failed-claims",
                        claim_check_id)  # using a set avoids duplicates
                except Exception:
                    # Non-retryable failure: log and continue so receipts are
                    # still delivered to the lower-level nodes
                    _log.exception(
                        "Failure to finalize claim in matchmaking. Sending receipts to lower level nodes."
                    )
        except Exception as e:
            _log.exception(
                f"[BROADCAST] Error while trying to broadcast down for l4 block {l4_block}\n{e}\n!Will ignore this broadcast!"
            )

    payload = l5_block.export_as_at_rest()
    # POST the receipt once per distinct chain that produced a valid claim
    for chain_id in chain_id_set:
        try:
            headers, data = authorization.generate_authenticated_request(
                "POST", chain_id, receipt_path, payload)
            url = f"{matchmaking.get_dragonchain_address(chain_id)}{receipt_path}"
            _log.info(f"----> {url}")
            r = requests.post(url, data=data, headers=headers, timeout=30)
            _log.info(f"<---- {r.status_code} {r.text}")
            if r.status_code != 200:
                # TODO failed to enqueue block to specific l1, consider another call to matchmaking, etc
                _log.info(
                    f"[BROADCAST] WARNING: failed to transmit to {chain_id} with error {r.text}"
                )
            else:
                _log.info("[BROADCAST] Successful receipt sent down to L1")
        except Exception:
            # Best-effort delivery: a failure to one chain must not block others
            _log.error(
                f"[BROADCAST] ERROR: Couldn't broadcast receipt down to {chain_id}! Ignoring"
            )
Example #5
 def test_sadd(self):
     """sadd_sync forwards its arguments straight to the underlying redis client."""
     args = ("banana", "banana", "banana")
     redis.sadd_sync(*args)
     redis.redis_client.sadd.assert_called_once_with(*args)