Example #1
0
def main(args):
    """Entry point.

    :param args: Parsed CLI arguments.

    """
    # Resolve network - abort if unregistered.
    network_id = factory.create_network_id(args.network)
    network = cache.infra.get_network(network_id)
    if network is None:
        logger.log_warning(f"Network {args.network} is unregistered.")
        return

    # Pull node set - abort if empty.
    data = cache.infra.get_nodes(network_id)
    if not data:
        logger.log_warning(f"Network {args.network} has no nodes.")
        return

    # Build table columns & rows (nodes ordered by index).
    cols = ["ID", "Host:Port", "Type", "Status"]
    rows = [
        [
            node.index_label,
            f"{node.host}:{node.port}",
            node.typeof.name,
            node.status.name,
        ]
        for node in sorted(data, key=lambda node: node.index)
    ]

    # Assemble table, left-aligning the endpoint column.
    t = get_table(cols, rows)
    t.column_alignments['Host:Port'] = BeautifulTable.ALIGN_LEFT

    # Render to stdout.
    print(t)
    print(f"{network_id.name} node count = {len(data)}")
Example #2
0
def correlate_finalized_deploy(ctx: RunContext, deploy_hash: str):
    """Correlates a finalized deploy with a workload generator correlation handler.

    :param ctx: Run context information (network, run index, run type).
    :param deploy_hash: Hash of finalized deploy.

    """
    # Set handlers - escape if no verifier/incrementor pair is registered
    # for this run type.
    try:
        verifier, incrementor = HANDLERS[ctx.run_type]
    except KeyError:
        logger.log_warning(
            f"{ctx.run_type} has no registered step verifier/incrementor")
        return

    # Set step.
    step = cache.get_run_step(ctx)

    # Verify - escape when the step is not yet complete.
    if not verifier.verify(ctx, step):
        return

    # Mark current step as complete.
    step.status = RunStepStatus.COMPLETE
    step.timestamp_end = dt.now().timestamp()
    cache.set_run_step(step)

    # Increment - move the run on to its next step.
    incrementor.increment(ctx, step)
Example #3
0
def main(args: argparse.Namespace):
    """Entry point.

    :param args: Parsed CLI arguments.

    """
    # Importing the initialiser sets up upstream services / actors as a side effect.
    import stests.initialiser

    # Derive network / node identifiers from CLI args.
    network_id = factory.create_network_id(args.network_name)
    node_id = factory.create_node_id(network_id, args.node_index)

    # Assemble execution context.
    ctx = factory.create_run_info(
        args=Arguments.create(args),
        loop_count=args.loop_count,
        loop_interval=args.loop_interval,
        network_id=network_id,
        node_id=node_id,
        run_index=args.run_index,
        run_type=constants.TYPE,
        use_stored_contracts=True,
    )

    # A held run lock means this run is already executing - abort.
    if is_run_locked(ctx):
        logger.log_warning(
            f"{constants.TYPE} :: run {args.run_index} aborted as it is currently executing."
        )
        return

    # Dispatch the run to the orchestration actor.
    from stests.orchestration.actors import do_run
    do_run.send(ctx)
    logger.log(f"{constants.TYPE} :: run {args.run_index} started")
Example #4
0
def do_refund(
    ctx: ExecutionContext,
    cp1: Account,
    cp2: Account,
    amount: int = None,
    contract: ClientContract = None,
) -> typing.Optional[typing.Tuple]:
    """Executes a refund between 2 counter-parties & returns resulting deploy details.

    :param ctx: Execution context information.
    :param cp1: Account information of counter party 1.
    :param cp2: Account information of counter party 2.
    :param amount: Amount in motes to be refunded; defaults to cp1's balance
        minus the standard transaction fee.
    :param contract: Client contract to dispatch through, if any.

    :returns: 3-tuple of (node, deploy hash, refunded amount), or None when
        cp1 cannot cover the transaction fee.

    """
    # Default to refunding cp1's full balance net of the transaction fee.
    # Escape if cp1 has insufficient funds.
    amount = amount or (get_balance(ctx, cp1) - defaults.CLX_TX_FEE)
    if amount <= 0:
        logger.log_warning(
            "Counter party 1 does not have enough CLX to pay refund transaction fee."
        )
        return None

    # Dispatch a non-refundable transfer flagged as a refund.
    (node, dhash) = do_transfer(ctx,
                                cp1,
                                cp2,
                                amount,
                                contract,
                                is_refundable=False,
                                deploy_type=DeployType.REFUND)

    # NOTE(review): the original annotation claimed Tuple[Deploy, Transfer]
    # but a 3-tuple has always been returned; annotation corrected above.
    return (node, dhash, amount)
Example #5
0
def encode(data: typing.Any) -> typing.Any:
    """Encodes input data in readiness for downstream processing.

    """
    # Primitive values pass through untouched.
    if isinstance(data, PRIMITIVES):
        return data

    # Timestamps are flattened to POSIX floats.
    if isinstance(data, datetime.datetime):
        return data.timestamp()

    # Containers are encoded element-wise, preserving container type.
    if isinstance(data, dict):
        return {key: encode(value) for key, value in data.items()}
    if isinstance(data, tuple):
        return tuple(encode(item) for item in data)
    if isinstance(data, list):
        return [encode(item) for item in data]

    # Registered dataclasses & enums are matched by exact type
    # (deliberately not isinstance - subclasses are excluded).
    if type(data) in DCLASS_SET:
        return _encode_dclass(data, dataclasses.asdict(data))
    if type(data) in ENUM_TYPE_SET:
        return data.name

    # Fall back to the raw value, flagging the unhandled type.
    logger.log_warning(f"CORE :: Encoding an unrecognized data type: {data}")

    return data
Example #6
0
def on_step_deploy_finalized(ctx: ExecutionContext, dhash: str):
    """Processes a finalized deploy within the context of a step.

    :param ctx: Execution context information.
    :param dhash: Hash of a finalized deploy.

    """
    # Set step - escape if it cannot be resolved. (Bug fix: the original
    # logged the warning but fell through and dereferenced the None step.)
    step = Workflow.get_phase_step(ctx, ctx.phase_index, ctx.step_index)
    if step is None:
        logger.log_warning(
            f"WFLOW :: {ctx.run_type} :: {ctx.run_index_label} :: {ctx.phase_index_label} :: {ctx.step_index_label} -> invalid step"
        )
        return

    # Verify step deploy - a missing deploy verifier aborts processing.
    if not step.has_verifer_for_deploy:
        logger.log_warning(
            f"WFLOW :: {ctx.run_type} :: {ctx.run_index_label} :: {ctx.phase_index_label} :: {ctx.step_index_label} -> deploy verifier undefined"
        )
        return
    try:
        step.verify_deploy(dhash)
    except AssertionError:
        logger.log_warning(
            f"WFLOW :: {ctx.run_type} :: {ctx.run_index_label} :: {ctx.phase_index_label} :: {ctx.step_index_label} -> deploy verification failed"
        )
        return

    # Verify step - a missing step verifier is tolerated (logged only).
    if not step.has_verifer:
        logger.log_warning(
            f"WFLOW :: {ctx.run_type} :: {ctx.run_index_label} :: {ctx.phase_index_label} :: {ctx.step_index_label} -> step verifier undefined"
        )
    else:
        try:
            step.verify()
        except AssertionError:
            logger.log_warning(
                f"WFLOW :: {ctx.run_type} :: {ctx.run_index_label} :: {ctx.phase_index_label} :: {ctx.step_index_label} -> step verification failed"
            )
            return

    # Step verification succeeded therefore signal step end.
    do_step_end.send(ctx)
Example #7
0
def main(args):
    """Entry point.

    :param args: Parsed CLI arguments.

    """
    # Resolve network - abort if unregistered.
    network_id = factory.create_network_id(args.network)
    network = cache.infra.get_network(network_id)
    if network is None:
        logger.log_warning(f"Network {args.network} is unregistered.")
        return

    # Emit faucet key pair.
    pvk = network.faucet.private_key
    pbk = network.faucet.public_key
    logger.log(f"NETWORK: {network.name} -> faucet pvk {pvk}")
    logger.log(f"NETWORK: {network.name} -> faucet pbk {pbk}")
Example #8
0
def do_monitor_node(node_id: NodeIdentifier):
    """Launches node monitoring.

    :param node_id: Identifier of node to be monitored.

    """
    # Try each stream lock slot in turn until one is acquired.
    locked = False
    for slot in range(_STREAM_PER_NETWORK_COUNT):
        lock = StreamLock(network=node_id.network.name,
                          node_index=node_id.index,
                          lock_index=slot + 1)
        _, locked = cache.monitoring.set_stream_lock(lock)
        if locked:
            break

    # All slots taken -> sufficient streams are already being processed.
    if not locked:
        return

    # Callback: forward finalized blocks downstream.
    def _on_block_finalized(_, bhash):
        on_finalized_block.send(node_id, bhash)

    try:
        # Stream events and re-queue when actor timeout occurs.
        clx.stream_events(node_id, on_block_finalized=_on_block_finalized)
    except TimeLimitExceeded:
        # Actor timeout - by default this occurs every 600 seconds.
        do_monitor_network.send(node_id.network)
    except Shutdown:
        # Process shutdown - exit quietly.
        pass
    except Exception as err:
        # CLX exception, e.g. node down, comms channel issue ...etc.
        logger.log_warning(f"CHAIN :: stream event error :: {err}")
        do_monitor_network.send(node_id.network)
    finally:
        # Release the acquired stream lock.
        cache.monitoring.delete_stream_lock(lock)
Example #9
0
def on_finalized_deploy(network_id: NetworkIdentifier, block_hash: str,
                        deploy_hash: str, ts_finalized: int):
    """Event: raised whenever a deploy is finalized.

    :param network_id: Identifier of network upon which a block has been finalized.
    :param block_hash: Hash of finalized block.
    :param deploy_hash: Hash of finalized deploy.
    :param ts_finalized: Moment in time when finalization occurred.

    """
    # Set network deploy.
    deploy = factory.create_deploy(network_id, block_hash, deploy_hash,
                                   DeployStatus.FINALIZED)

    # Encache - skip duplicates.
    _, encached = cache.set_network_deploy(deploy)
    if not encached:
        return

    # Set deploy related entities - escape if none are found.
    entities = cache.get_run_deploy_entities(deploy_hash)
    if not entities:
        logger.log_warning(
            f"Could not find finalized run deploy information: {block_hash} : {deploy_hash}"
        )
        return

    # Update deploy related entities.
    for entity in entities:
        if isinstance(entity, Deploy):
            entity.block_hash = block_hash
            entity.status = DeployStatus.FINALIZED
            entity.ts_finalized = ts_finalized
            cache.set_run_deploy(entity)

        elif isinstance(entity, Transfer):
            entity.status = TransferStatus.COMPLETE
            cache.set_run_transfer(entity)

    # Signal downstream to workload generator. The original read the leaked
    # loop variable `entity` after the loop; use an explicit reference to the
    # last entity instead (entities is known non-empty here). Assumes all
    # entities share the same network / run / run_type - TODO confirm.
    last_entity = entities[-1]
    ctx = cache.get_run_context(last_entity.network, last_entity.run,
                                last_entity.run_type)
    if ctx:
        correlate_finalized_deploy.send(ctx, deploy_hash)