Example 1
def sandbox(
    _ctx,
    image: str,
    protocol: str,
    port: int,
    interval: float,
    blocks: int,
):
    protocol = {
        'edo': EDO,
        'florence': FLORENCE,
    }[protocol]

    SandboxedNodeTestCase.PROTOCOL = protocol
    SandboxedNodeTestCase.IMAGE = image
    SandboxedNodeTestCase.PORT = port
    SandboxedNodeTestCase.setUpClass()

    blocks_baked = 0
    while True:
        try:
            logger.info('Baking block %s...', blocks_baked)
            block_hash = SandboxedNodeTestCase.get_client().using(key='bootstrap1').bake_block().fill().work().sign().inject()
            logger.info('Baked block: %s', block_hash)
            blocks_baked += 1

            if blocks and blocks_baked == blocks:
                break

            time.sleep(interval)
        except KeyboardInterrupt:
            break
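A minimal direct-invocation sketch of the command above (it is normally wired to a CLI, so _ctx is ignored); the image tag and block count below are placeholder values.

sandbox(
    None,                                # _ctx is unused by the handler
    image='placeholder/sandboxed-node',  # hypothetical Docker image
    protocol='florence',                 # resolved to the FLORENCE constant
    port=8732,
    interval=1.0,
    blocks=5,                            # bake 5 blocks, then stop; 0 keeps baking until Ctrl+C
)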
Example 2
def fetch_contract_samples(max_count=100):
    contracts = iter_bcd_contracts(max_count=max_count)
    for contract in contracts:
        name = normalize_alias(contract.get('alias',
                                            '')) or contract['address']
        folder = join(dirname(dirname(__file__)), 'tests', 'contract_tests',
                      name)
        if exists(folder):
            continue
        makedirs(folder)
        script = fetch_script(contract['address'])
        write_test_data(folder, '__script__', script)
        entrypoints = fetch_entrypoints(contract['address'])
        write_test_data(folder, '__entrypoints__', entrypoints)
        for entrypoint in contract['entrypoints']:
            operation = fetch_bcd_operation(contract['address'], entrypoint)
            if operation:
                result = fetch_operation_result(operation['level'],
                                                operation['hash'],
                                                operation['counter'],
                                                operation['internal'],
                                                contract['address'])
                write_test_data(folder, entrypoint, result)
        logger.info(name)
Example 3
def storage(_ctx, action: str, path: Optional[str]) -> None:
    contract = get_contract(path)
    if action == 'schema':
        logger.info(generate_pydoc(type(contract.storage.data), title='storage'))
    elif action == 'default':
        logger.info(pformat(contract.storage.dummy()))
    else:
        raise Exception('Action must be either `schema` or `default`')
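A direct-call sketch of the storage command above; the contract path is a placeholder and is resolved by get_contract().

storage(None, action='schema', path='./contract.tz')    # print the generated storage schema
storage(None, action='default', path='./contract.tz')   # print a dummy (default) storage value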
Example 4
    def encode(self, py_obj) -> Dict[str, Any]:
        """ Encode transaction parameters from the given Python object

        :param py_obj: Python object
        :return: {entrypoint, value}
        """
        try:
            view_param_ty = MichelsonType.match(self.param_expr)
            view_param_expr = view_param_ty.from_python_object(py_obj).to_micheline_value()
            storage_expr = self.shell.blocks[self.context.block_id].context.contracts[self.address].storage()
            return {'entrypoint': 'default',
                    'value': {'prim': 'Pair', 'args': [view_param_expr, storage_expr]}}
        except MichelsonRuntimeError as e:
            logger.info(self.__doc__)
            raise ValueError(f'Unexpected arguments: {pformat(py_obj)}', *e.args) from e
Example 5
    def encode(self, py_obj, mode: Optional[str] = None) -> dict:
        """ Encode transaction parameters from the given Python object

        :param py_obj: Python object
        :param mode: whether to use `readable` or `optimized` (or `legacy_optimized`) encoding
        :return: {entrypoint, value}
        """
        try:
            param_ty = ParameterSection.match(self.context.parameter_expr)
            return param_ty.from_python_object({self.entrypoint: py_obj}) \
                .to_parameters(mode=mode or self.context.mode)
        except MichelsonRuntimeError as e:
            logger.info(self.__doc__)
            raise ValueError(f'Unexpected arguments: {pformat(py_obj)}',
                             *e.args) from e
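A usage sketch, assuming `contract` is a ContractInterface and `transfer` is one of its entrypoints (both hypothetical, as is the argument shape): encode call parameters without building or injecting an operation.

params = contract.transfer.encode({'to': alice, 'amount': 42}, mode='readable')  # alice: placeholder address
# params has the form described in the docstring above: {'entrypoint': 'transfer', 'value': <micheline expression>}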
Example 6
def micheline_to_michelson(data, inline=False, wrap=False) -> str:
    """ Converts micheline expression into formatted Michelson source.

    :param data: Micheline expression
    :param inline: produce a single line, suitable for tezos-client arguments (False by default)
    :param wrap: ensure the expression is wrapped in parentheses
    """
    try:
        res = format_node(data, inline=inline, is_root=True)
        if wrap and any(map(res.startswith,
                            ['Left', 'Right', 'Some', 'Pair'])):
            return f'({res})'
        else:
            return res
    except (KeyError, IndexError, TypeError) as e:
        logger.info(pformat(data, compact=True))
        raise MichelsonFormatterError(e.args)
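A quick sketch of the formatter defined above: a Micheline expression is rendered as Michelson source, and wrap=True adds parentheses because the result starts with `Pair`.

expr = {'prim': 'Pair', 'args': [{'int': '1'}, {'string': 'foo'}]}
micheline_to_michelson(expr)             # -> 'Pair 1 "foo"'
micheline_to_michelson(expr, wrap=True)  # -> '(Pair 1 "foo")'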
Example 7
    def metadata(self) -> Optional[ContractMetadata]:
        """ Get TZIP-016 contract metadata, if exists

        :rtype: ContractMetadata
        """
        metadata_url = self.metadata_url
        if metadata_url is None:
            return None

        logger.info('Trying to fetch contract metadata from `%s`',
                    metadata_url)
        parsed_url = urlparse(metadata_url)

        if parsed_url.scheme in ('http', 'https'):
            # NOTE: KT1B34qXVRfQrScEaqjjt6cJ5G8LtVFZ7fSc
            metadata = ContractMetadata.from_url(metadata_url, self.context)

        elif parsed_url.scheme == 'ipfs':
            # NOTE: KT1AFA2mwNUMNd4SsujE1YYp29vd8BZejyKW
            metadata = ContractMetadata.from_ipfs(parsed_url.netloc,
                                                  self.context)

        elif parsed_url.scheme == 'tezos-storage':
            parts = parsed_url.path.split('/')
            if len(parts) == 1:
                # NOTE: KT1JBThDEqyqrEHimhxoUBCSnsKAqFcuHMkP
                storage = self.storage
            elif len(parts) == 2:
                # NOTE: KT1REEb5VxWRjcHm5GzDMwErMmNFftsE5Gpf
                context = self._spawn_context(address=parsed_url.netloc)
                storage = ContractInterface.from_context(context).storage
            else:
                raise NotImplementedError('Unknown metadata URL scheme')
            metadata_json = json.loads(
                storage['metadata'][parts[-1]]().decode())
            metadata = ContractMetadata.from_json(metadata_json, self.context)

        elif parsed_url.scheme == 'sha256':
            raise NotImplementedError

        else:
            raise NotImplementedError('Unknown metadata URL scheme')

        return metadata
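A fetch sketch using one of the contract addresses mentioned in the NOTE comments above (IPFS-hosted metadata); how the attribute is ultimately exposed (property vs. method) depends on decorators not shown in this snippet.

from pytezos import pytezos

contract = pytezos.using(shell='mainnet').contract('KT1AFA2mwNUMNd4SsujE1YYp29vd8BZejyKW')
metadata = contract.metadata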
Example 8
    def wait_next_block(
        self,
        delay_sec=1,
        prev_hash=None,
        time_between_blocks: Optional[int] = None,
        max_iterations: Optional[int] = None,
    ):
        """Wait until next block is finalized.

        :param prev_hash: Current block hash (optional). If not set, current head is used.
        :param time_between_blocks: override the corresponding parameter from constants
        :param max_iterations: Manually set the number of iterations
        :param delay_sec: Sleep delay
        """
        if time_between_blocks is None:
            time_between_blocks = int(
                self.block.context.constants()["time_between_blocks"][0]  # type: ignore
            )

        if time_between_blocks > 0:
            if prev_hash is None:
                header = self.head.header()
                prev_hash = header['hash']
                prev_block_dt = datetime.strptime(header['timestamp'],
                                                  '%Y-%m-%dT%H:%M:%SZ')
                elapsed_sec = (datetime.utcnow() - prev_block_dt).seconds
                sleep_sec = 0 if elapsed_sec > time_between_blocks else time_between_blocks - elapsed_sec
            else:
                sleep_sec = time_between_blocks
            logger.info('Wait %s seconds until block %s is finalized',
                        sleep_sec, prev_hash)
            sleep(sleep_sec)

        if max_iterations is None:
            max_iterations = max(1, time_between_blocks)

        for _ in range(max_iterations):
            current_block_hash = self.head.hash()
            if current_block_hash == prev_hash:
                sleep(delay_sec)
            else:
                return current_block_hash
        raise StopIteration("Timeout")
Example 9
    def get_confirmations(self, opg_hash, kind, branch, head) -> int:
        """Returns the number of blocks applied after the operation was included in chain

        :param opg_hash: Operation group hash
        :param kind: Operation kind ('transaction', 'origination', etc)
        :param branch: Block ID one should stop the search at
        :param head: Block ID one should start the search from
        :return: Number of confirmations (0 if not found)
        """
        start = self.blocks[head].header()['level']
        stop = self.blocks[branch].header()['level']
        for level in range(start, stop, -1):
            vp = validation_passes[kind]
            hashes = self.blocks[level].operation_hashes[vp]()
            for idx, _hash in enumerate(hashes):
                if opg_hash == _hash:
                    _ = self.blocks[level].operations[vp, idx]()
                    logger.info('Operation %s was included in block %s', opg_hash, level)
                    return start - level + 1
        return 0
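A sketch with placeholder identifiers: count how many blocks have been applied since a transaction was included, scanning backwards from the head to the branch block.

from pytezos import pytezos

confirmations = pytezos.shell.get_confirmations(
    opg_hash=opg_hash,        # hash returned when the operation was injected (placeholder)
    kind='transaction',
    branch=branch_block_id,   # block the operation was branched on (placeholder)
    head=pytezos.shell.head.hash(),
)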
Example 10
def update_smartpy(ctx, tag):
    client = get_docker_client()
    logger.info('Will now pull latest SmartPy image, please stay put.')
    for line in client.api.pull(f'{SMARTPY_CLI_IMAGE}:{tag}',
                                stream=True,
                                decode=True):
        logger.info(line)
    logger.info('Pulled SmartPy CLI image successfully!')
Example 11
def update_ligo(
    _ctx,
    tag: str,
):
    client = get_docker_client()

    logger.info(f'Pulling ligolang/ligo{(":" + tag) if tag else ""}, please stay put.')
    for line in client.api.pull('ligolang/ligo', tag=tag, stream=True, decode=True):
        logger.info(line)
    logger.info('Pulled Ligo compiler image successfully!')
Example 12
    def wait_blocks(
        self,
        current_block_hash: str,
        max_blocks: int = 1,
        max_priority: int = 2,
        yield_current=False,
        time_between_blocks: Optional[int] = None,
        block_timeout: Optional[int] = None,
    ) -> Generator[str, None, None]:
        """Iterates over future blocks (waits and yields block hash)

        :param current_block_hash: hash of the current block (head)
        :param max_blocks: number of blocks to iterate (not including the current one)
        :param max_priority: wait for blocks with lower priority (increased timeout)
        :param yield_current: yield current block hash at the very beginning
        :param time_between_blocks: override protocol constant
        :param block_timeout: set block timeout (by default Pytezos will wait for a long time)
        :return: block hashes
        """
        prev_block_hash: Optional[str] = None

        if time_between_blocks is None:
            constants = self.blocks[current_block_hash].context.constants()
            time_between_blocks = int(
                constants.get('minimal_block_delay',
                              constants['time_between_blocks'][0]))

        if block_timeout is None:
            block_timeout = MAX_BLOCK_TIMEOUT

        if yield_current:
            yield current_block_hash

        for _ in range(max_blocks):
            header = self.blocks[current_block_hash].header()
            if prev_block_hash and prev_block_hash != header['predecessor']:
                raise StopIteration(
                    'Reorg detected, expected predecessor %s instead of %s',
                    prev_block_hash, header['predecessor'])

            prev_block_dt = datetime.strptime(header['timestamp'],
                                              '%Y-%m-%dT%H:%M:%SZ')
            elapsed_sec = (datetime.utcnow() - prev_block_dt).seconds
            sleep_sec = 1 if elapsed_sec > time_between_blocks else (
                time_between_blocks - elapsed_sec + 1)

            logger.info('Sleep %d seconds until block %s is superseded',
                        sleep_sec, current_block_hash)
            sleep(sleep_sec)

            next_block_hash: Optional[str] = None

            for delay in range(block_timeout):
                next_block_hash = self.head.hash()
                if current_block_hash == next_block_hash:
                    sleep(1)
                else:
                    logger.info('Found new block %s (%d sec delay)',
                                next_block_hash, delay)
                    break

            if current_block_hash != next_block_hash:
                assert next_block_hash
                yield next_block_hash
                prev_block_hash = current_block_hash
                current_block_hash = next_block_hash
            else:
                raise TimeoutError(
                    'Reached timeout (%d sec) while waiting for the next block',
                    block_timeout)
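An iteration sketch over the generator above: starting from the current head, yield it immediately and then the next three heads as they appear.

from pytezos import pytezos

head = pytezos.shell.head.hash()
for block_hash in pytezos.shell.wait_blocks(head, max_blocks=3, yield_current=True):
    print(block_hash)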
Example 13
    def wait_operations(
        self,
        opg_hashes: List[str],
        ttl: int,
        min_confirmations: int,
        current_block_hash: Optional[str] = None,
        time_between_blocks: Optional[int] = None,
        block_timeout: Optional[int] = None,
    ) -> List[dict]:
        """Wait for one or many operations gain enough confirmations

        :param opg_hashes: list of operation hashes
        :param ttl: max time-to-live value (in mempool)
        :param min_confirmations: minimum number of blocks after inclusion to wait for
        :param current_block_hash: current block hash (head)
        :param time_between_blocks: override protocol constant
        :param block_timeout: set block timeout (by default Pytezos will wait for a long time)
        :return: list of operation contents with metadata
        """

        confirmations = dict()
        pending = set(opg_hashes)
        operations = list()
        block_hash = current_block_hash

        if block_hash is None:
            block_hash = self.head.hash()

        for block_hash in self.wait_blocks(
                current_block_hash=block_hash,
                max_blocks=ttl,
                yield_current=True,
                time_between_blocks=time_between_blocks,
                block_timeout=block_timeout,
        ):
            if len(pending) > 0:
                mempool = set(
                    map(lambda x: x['hash'],
                        self.mempool.pending_operations.flatten()))
                for opg_hash in opg_hashes:
                    if opg_hash in pending:
                        if opg_hash in mempool:
                            logger.info('Operation %s is still in mempool',
                                        opg_hash)
                        else:
                            logger.info('Operation %s has left mempool',
                                        opg_hash)
                            pending.remove(opg_hash)

            if len(pending) < len(opg_hashes):
                included = self.blocks[block_hash].operation_hashes()
                for i, vp in enumerate(included):
                    for j, opg_hash in enumerate(vp):
                        if opg_hash in opg_hashes and opg_hash not in confirmations:
                            logger.info(
                                'Operation %s has been included to block %s',
                                opg_hash, block_hash)
                            confirmations[opg_hash] = 0  # initialize
                            operations.append(
                                self.blocks[block_hash].operations[i][j]())

            for opg_hash in confirmations:
                confirmations[opg_hash] += 1
                logger.info('Operation %s has %d/%d confirmations', opg_hash,
                            confirmations[opg_hash], min_confirmations)

            if len(operations) == len(opg_hashes):
                break

        if len(operations) < len(opg_hashes):
            raise StopIteration(
                'Only %d of %d operations were included, stopping',
                len(operations), len(opg_hashes))

        for _ in self.wait_blocks(block_hash,
                                  max_blocks=min_confirmations - 1,
                                  time_between_blocks=time_between_blocks):
            for opg_hash in opg_hashes:
                confirmations[opg_hash] += 1
                logger.info('Operation %s has %d/%d confirmations', opg_hash,
                            confirmations[opg_hash], min_confirmations)

        return operations
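A waiting sketch with placeholder hashes: watch the mempool and new blocks until both operations are included and have two confirmations, giving up if they stay pending for more than 10 blocks.

from pytezos import pytezos

results = pytezos.shell.wait_operations(
    opg_hashes=[opg_hash_1, opg_hash_2],  # hashes returned by earlier injections (placeholders)
    ttl=10,
    min_confirmations=2,
)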
Example 14
def parameter(_ctx, action: str, path: Optional[str]) -> None:
    contract = get_contract(path)
    if action == 'schema':
        logger.info(contract.parameter.__doc__)
    else:
        raise Exception('Action must be `schema`')
Example 15
def deploy(
    _ctx,
    path: str,
    storage: Optional[str],  # pylint: disable=redefined-outer-name
    network: str,
    key: Optional[str],
    github_repo_slug: Optional[str],
    github_oauth_token: Optional[str],
    dry_run: bool,
):
    ptz = pytezos.using(shell=network, key=key)
    logger.info('Deploying contract using %s in the %s',
                ptz.key.public_key_hash(), network)

    contract = get_contract(path)
    try:
        opg = ptz.origination(script=contract.script(
            initial_storage=storage)).autofill().sign()
        logger.info('Injecting origination operation:')
        logger.info(pformat(opg.json_payload()))

        if dry_run:
            logger.info(pformat(opg.preapply()))
            sys.exit(0)
        else:
            opg = opg.inject(_async=False)
    except RpcError as e:
        logger.critical(pformat(e))
        sys.exit(-1)
    else:
        originated_contracts = OperationResult.originated_contracts(opg)
        if len(originated_contracts) != 1:
            raise Exception(
                'Operation group must have exactly one originated contract')
        bcd_link = make_bcd_link(network, originated_contracts[0])
        logger.info('Contract was successfully deployed: %s', bcd_link)

        if github_repo_slug:
            deployment = create_deployment(
                github_repo_slug,
                github_oauth_token,
                environment=network,
            )
            logger.info(pformat(deployment))
            status = create_deployment_status(
                github_repo_slug,
                github_oauth_token,
                deployment_id=deployment['id'],
                state='success',
                environment=network,
                environment_url=bcd_link,
            )
            logger.info(status)
Example 16
def activate(_ctx, path: str, network: str) -> None:
    ptz = pytezos.using(key=path, shell=network)
    logger.info(
        'Activating %s in the %s',
        ptz.key.public_key_hash(),
        network,
    )

    if ptz.balance() == 0:
        try:
            opg = ptz.activate_account().autofill().sign()
            logger.info('Injecting activation operation:')
            logger.info(pformat(opg.json_payload()))
            opg.inject(_async=False)
        except RpcError as e:
            logger.critical(pformat(e))
            sys.exit(-1)
        else:
            logger.info('Activation succeeded! Claimed balance: %s ꜩ',
                        ptz.balance())
    else:
        logger.info('Already activated')

    try:
        opg = ptz.reveal().autofill().sign()
        logger.info('Injecting reveal operation:')
        logger.info(pformat(opg.json_payload()))
        opg.inject(_async=False)
    except RpcError as e:
        logger.critical(pformat(e))
        sys.exit(-1)
    else:
        logger.info('Your key %s is now active and revealed',
                    ptz.key.public_key_hash())
Example 17
    def wait(
        self,
        *operation_groups: OperationGroup,
        min_confirmations: int = 1,
        num_blocks_wait: int = 5,
        time_between_blocks: Optional[int] = None,
        prev_hash: Optional[str] = None,
    ) -> Tuple[OperationGroup, ...]:
        """Wait for multiple injected operations to get enough confirmations

        :param operation_groups: injected operation groups to wait for (positional arguments)
        :param min_confirmations: minimum number of confirmations to wait for before returning
        :param num_blocks_wait: number of blocks to wait for injection
        :param time_between_blocks: override the corresponding parameter from constants
        :param prev_hash: Current block hash (optional). If not set, current head is used.
        """
        logger.info('Waiting for %s confirmations in %s blocks', min_confirmations, num_blocks_wait)
        confirmations = {opg.opg_hash: 0 for opg in operation_groups}
        for _ in range(num_blocks_wait):
            logger.info('Waiting for the next block')
            prev_hash = self.shell.wait_next_block(time_between_blocks=time_between_blocks, prev_hash=prev_hash)
            block_operations = self.shell.blocks[prev_hash].operations.managers()

            for opg in operation_groups:
                if confirmations[opg.opg_hash] == 0:
                    res = next((item for item in block_operations if item['hash'] == opg.opg_hash), None)
                    if res is not None:
                        logger.info('Operation %s was included in block %s', opg.opg_hash, prev_hash)
                        confirmations[opg.opg_hash] = 1
                        if not OperationResult.is_applied(res):
                            raise RpcError.from_errors(OperationResult.errors(res)) from None
                else:
                    confirmations[opg.opg_hash] += 1
                    logger.info('Got %s/%s confirmations for %s', confirmations[opg.opg_hash], min_confirmations, opg.opg_hash)

            if any(value == 0 for value in confirmations.values()):
                pending_operations = self.shell.mempool.pending_operations.flatten()
                for opg in operation_groups:
                    if confirmations[opg.opg_hash] == 0:
                        res = next((item for item in pending_operations if item['hash'] == opg.opg_hash), None)
                        if res is not None:
                            logger.info('Operation %s is still in mempool', opg.opg_hash)
                            if not OperationResult.is_applied(res):
                                raise RpcError.from_errors(OperationResult.errors(res)) from None

            for opg in operation_groups:
                if confirmations[opg.opg_hash] == 0:
                    confirmations[opg.opg_hash] = self.shell.get_confirmations(
                        opg_hash=opg.opg_hash,
                        kind=opg.contents[0]['kind'],
                        branch=opg.branch,
                        head=prev_hash,
                    )
                    if confirmations[opg.opg_hash] == 0:
                        raise ValueError(f'Operation {opg.opg_hash} is not found') from None

            if all(value >= min_confirmations for value in confirmations.values()):
                return operation_groups

        required_confirmations = min_confirmations * len(operation_groups)
        gathered_confirmations = sum(confirmations.values())
        raise TimeoutError(f'Operations got {gathered_confirmations}/{required_confirmations} confirmations in {num_blocks_wait} blocks')
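A sketch of the client-level helper above, assuming opg_a and opg_b are OperationGroup objects that have already been injected (so they carry an opg_hash) and ptz is a configured client as in the deploy example.

confirmed = ptz.wait(opg_a, opg_b, min_confirmations=2, num_blocks_wait=10)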
Example 18
    def wait_blocks(
        self,
        current_block_hash: str,
        max_blocks: int = 1,
        max_priority: int = 2,
        yield_current=False,
        time_between_blocks: Optional[int] = None,
    ) -> Generator[str, None, None]:
        """Iterates over future blocks (waits and yields block hash)

        :param current_block_hash: hash of the current block (head)
        :param max_blocks: number of blocks to iterate (not including the current one)
        :param max_priority: wait for blocks with lower priority (increased timeout)
        :param yield_current: yield current block hash at the very beginning
        :param time_between_blocks: override protocol constant
        :return: block hashes
        """
        prev_block_hash: Optional[str] = None

        if time_between_blocks:
            block_delay, secondary_delay = time_between_blocks, 0
        else:
            tbb = self.blocks[current_block_hash].context.constants()["time_between_blocks"]
            block_delay, secondary_delay = int(tbb[0]), int(tbb[1])

        if yield_current:
            yield current_block_hash

        for _ in range(max_blocks):
            header = self.blocks[current_block_hash].header()
            if prev_block_hash and prev_block_hash != header['predecessor']:
                raise StopIteration(
                    'Reorg detected, expected predecessor %s instead of %s',
                    prev_block_hash, header['predecessor'])

            prev_block_dt = datetime.strptime(header['timestamp'],
                                              '%Y-%m-%dT%H:%M:%SZ')
            elapsed_sec = (datetime.utcnow() - prev_block_dt).seconds
            sleep_sec = 1 if elapsed_sec > block_delay else (block_delay -
                                                             elapsed_sec + 1)

            logger.info('Sleep %d seconds until block %s is superseded',
                        sleep_sec, current_block_hash)
            sleep(sleep_sec)

            next_block_hash: Optional[str] = None
            timeout = block_delay + secondary_delay * max_priority + 1
            logger.info('Waiting for a new block (%d sec timeout)', timeout)

            for delay in range(timeout):
                next_block_hash = self.head.hash()
                if current_block_hash == next_block_hash:
                    sleep(1)
                else:
                    logger.info('Found new block %s (%d sec delay)',
                                next_block_hash, delay)
                    break

            if current_block_hash != next_block_hash:
                assert next_block_hash
                yield next_block_hash
                prev_block_hash = current_block_hash
                current_block_hash = next_block_hash
            else:
                raise TimeoutError(
                    'Reached timeout (%d sec) while waiting for the next block',
                    timeout)
Example 19
    def inject(
        self,
        check_result: bool = True,
        num_blocks_wait: int = 5,
        time_between_blocks: Optional[int] = None,
        min_confirmations: int = 0,
        **kwargs
    ):
        """Inject the signed operation group.

        :param check_result: raise RpcError in case operation is applied but has runtime errors
        :param num_blocks_wait: number of blocks to wait for injection
        :param time_between_blocks: override the corresponding parameter from constants
        :param min_confirmations: number of confirmations to wait for before returning (0 returns right after injection)
        :returns: operation group with metadata (raw RPC response)
        """
        if kwargs.get('_async'):
            logger.warning('`_async` argument is deprecated, use `min_confirmations` instead')
            min_confirmations = 0 if kwargs['_async'] is True else 1

        self.context.reset()

        opg_hash = self.shell.injection.operation.post(
            operation=self.binary_payload(),
            _async=False,
        )

        if min_confirmations == 0:
            return {
                'chain_id': self.chain_id,
                'hash': opg_hash,
                **self.json_payload(),
            }

        logger.info('Waiting for %s confirmations in %s blocks', min_confirmations, num_blocks_wait)
        in_mempool = True
        confirmations = 0
        for _ in range(num_blocks_wait):
            logger.info('Waiting for the next block')
            self.shell.wait_next_block(time_between_blocks=time_between_blocks)

            if in_mempool:
                try:
                    pending_opg = self.shell.mempool.pending_operations[opg_hash]
                    if not OperationResult.is_applied(pending_opg):
                        raise RpcError.from_errors(OperationResult.errors(pending_opg))
                    logger.info('Operation %s is still in mempool', opg_hash)
                    continue
                except StopIteration:
                    in_mempool = False

            try:
                res = self.shell.blocks[-1:].find_operation(opg_hash)
            except StopIteration:
                logger.info('Operation %s not found in latest block', opg_hash)
                continue

            if check_result:
                if not OperationResult.is_applied(res):
                    raise RpcError.from_errors(OperationResult.errors(res))

            confirmations += 1
            logger.info('Got %s/%s confirmations', confirmations, min_confirmations)
            if confirmations == min_confirmations:
                return res

        raise TimeoutError(f'Operation {opg_hash} got {confirmations} confirmations in {num_blocks_wait} blocks')
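An end-to-end sketch tying the pieces together; the network alias, key path, and recipient address are placeholders.

from pytezos import pytezos

ptz = pytezos.using(shell='mainnet', key=key_path)                            # key_path: placeholder
opg = ptz.transaction(destination=recipient, amount=1000).autofill().sign()   # recipient: placeholder address
result = opg.inject(min_confirmations=1)   # waits until the operation is included in a block, raises on errors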