Example #1
    def __init__(self):
        node_config = {
            'archive': True,
            'tracked_shards': [0],
        }

        self._config = cluster.load_config()
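        # Lay out a 1-validator / 2-observer / 1-shard cluster; node_config above is
        # applied to every node, so each runs as an archival node tracking shard 0.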
        self._near_root, self._node_dirs = cluster.init_cluster(
            num_nodes=1,
            num_observers=2,
            num_shards=1,
            config=self._config,
            genesis_config_changes=[['epoch_length', EPOCH_LENGTH],
                                    ['block_producer_kickout_threshold', 80]],
            client_config_changes={
                0: node_config,
                1: node_config,
                2: node_config,
                3: node_config
            })
        self._nodes = [None] * len(self._node_dirs)
Example #2
import sys, time, base58

sys.path.append('lib')

from cluster import init_cluster, spin_up_node
from transaction import sign_staking_tx
from utils import TxContext

TIMEOUT = 300
TWENTY_FIVE = 25

config = {'local': True, 'near_root': '../target/debug/'}
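# 2 validators, 1 observer, 2 shards (argument names as in the keyword form used in
# Example #1); the observer at index 2 is configured to track both shards.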
near_root, node_dirs = init_cluster(
    2, 1, 2, config, [["max_inflation_rate", 0], ["epoch_length", 7],
                      ["validator_kickout_threshold", 80]],
    {2: {
        "tracked_shards": [0, 1]
    }})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
#node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr())
observer = spin_up_node(config, near_root, node_dirs[2], 2,
                        boot_node.node_key.pk, boot_node.addr())

ctx = TxContext([0, 0, 0], [boot_node, None, observer])
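# TxContext takes one slot per node (None for the node that is not running); its
# get_balances() helper is used below to derive the total supply.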

initial_balances = ctx.get_balances()
total_supply = sum(initial_balances)
Example #3
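# 2 validators, 3 observers, 2 shards; the genesis overrides raise both validators'
# stake (each validator account spans two "records" entries, see the comment below)
# and the per-node overrides make the observer nodes 2-4 archival.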
near_root, node_dirs = init_cluster(
    2,
    3,
    2,
    config,
    [
        ["min_gas_price", 0],
        ["max_inflation_rate", [0, 1]],
        ["epoch_length", EPOCH_LENGTH],
        ['num_block_producer_seats', 4],
        ["block_producer_kickout_threshold", 20],
        ["chunk_producer_kickout_threshold", 20],
        ["validators", 0, "amount", "110000000000000000000000000000000"],
        ["validators", 1, "amount", "110000000000000000000000000000000"],
        [
            "records", 0, "Account", "account", "locked",
            "110000000000000000000000000000000"
        ],
        # each validator account is two records, thus the index of a record for the second is 2, not 1
        [
            "records", 2, "Account", "account", "locked",
            "110000000000000000000000000000000"
        ],
        ["total_supply", "6120000000000000000000000000000000"]
    ],
    {
        4: {
            "tracked_shards": [0, 1],
            "archive": True
        },
        3: {
            "archive": True,
            "tracked_shards": [1],
            "network": {
                "ttl_account_id_router": {
                    "secs": 1,
                    "nanos": 0
                }
            }
        },
        2: {
            "archive": True,
            "tracked_shards": [0],
            "network": {
                "ttl_account_id_router": {
                    "secs": 1,
                    "nanos": 0
                }
            }
        }
    })
Example #4
import time

from cluster import init_cluster, spin_up_node, load_config
from transaction import sign_staking_tx
from utils import TxContext

TIMEOUT = 600
# the height we spin up the second node
TARGET_HEIGHT = 35

config = load_config()
# give more stake to the bootnode so that it can produce the blocks alone
near_root, node_dirs = init_cluster(
    4, 1, 4, config,
    [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 12],
     ["block_producer_kickout_threshold", 20], ["chunk_producer_kickout_threshold", 20]],
    {0: {"view_client_throttle_period": {"secs": 0, "nanos": 0}, "consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
     1: {"view_client_throttle_period": {"secs": 0, "nanos": 0}, "consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
     2: {"view_client_throttle_period": {"secs": 0, "nanos": 0}, "consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
     3: {"view_client_throttle_period": {"secs": 0, "nanos": 0}, "consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}}, 4: {
        "tracked_shards": [0, 1, 2, 3],
        "view_client_throttle_period": {"secs": 0, "nanos": 0}
    }})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
boot_node.stop_checking_store()
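# node_dirs[1] is skipped here; per the TARGET_HEIGHT comment above, that node is
# presumably the one spun up later, once the chain has advanced far enough.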
node3 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk,
                     boot_node.addr())
node4 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk,
                     boot_node.addr())
observer = spin_up_node(config, near_root, node_dirs[4], 4,
                        boot_node.node_key.pk, boot_node.addr())
Example #5
import sys, time

if len(sys.argv) < 3:
    print("python state_sync.py [notx, onetx, manytx] <launch_at_block>")
    exit(1)

mode = sys.argv[1]
assert mode in ['notx', 'onetx', 'manytx']

from cluster import init_cluster, spin_up_node, load_config
from utils import TxContext, LogTracker

START_AT_BLOCK = int(sys.argv[2])
TIMEOUT = 150 + START_AT_BLOCK * 10

config = load_config()
near_root, node_dirs = init_cluster(
    2, 1, 1, config,
    [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10],
     ["block_producer_kickout_threshold", 80]],
    {2: {"tracked_shards": [0]}})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr())

ctx = TxContext([0, 0], [boot_node, node1])

sent_txs = False

observed_height = 0
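# wait for the boot node to reach START_AT_BLOCK (the <launch_at_block> argument)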
while observed_height < START_AT_BLOCK:
    assert time.time() - started < TIMEOUT
    status = boot_node.get_status()
    new_height = status['sync_info']['latest_block_height']
    observed_height = new_height  # advance the observed height so the wait loop can terminate
Example #6
def doit(s, n, N, k, monkeys, timeout):
    global block_timeout, balances_timeout, tx_tolerance

    assert 2 <= n <= N

    config = {'local': True, 'near_root': '../target/debug/'}
    local_config_changes = {}

    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i] = {"tracked_shards": list(range(s))}

    near_root, node_dirs = init_cluster(
        N, s, k + 1, config,
        [["max_inflation_rate", 0], ["epoch_length", EPOCH_LENGTH],
         ["validator_kickout_threshold", 75]], local_config_changes)

    started = time.time()

    boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
    boot_node.mess_with = False
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        node = spin_up_node(config, near_root, node_dirs[i], i, boot_node.node_key.pk, boot_node.addr())
        nodes.append(node)
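        # validators with index in [n, N) start out stopped and are the ones the
        # monkeys are allowed to mess with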
        if i >= n and i < N:
            node.kill()
            node.mess_with = True
        else:
            node.mess_with = False

    monkey_names = [x.__name__ for x in monkeys]
    print(monkey_names)
    if 'monkey_local_network' in monkey_names or 'monkey_global_network' in monkey_names:
        print("There are monkeys messing up with network, initializing the infra")
        init_network_pillager()
        expect_network_issues()
        block_timeout += 10
        tx_tolerance += 0.3
    if 'monkey_node_restart' in monkey_names:
        expect_network_issues()
    if 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names:
        block_timeout += 10
        balances_timeout += 10
        tx_tolerance += 0.4

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(monkey)

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < timeout:
        check_errors()
        time.sleep(1)
    
    print("")
    print("==========================================")
    print("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    print("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (still_running)

    check_errors()
Example #7
def doit(s, n, N, k, monkeys):
    assert 2 <= n <= N

    config = {'local': True, 'near_root': '../target/debug/'}
    local_config_changes = {}

    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i] = {"tracked_shards": list(range(s))}

    near_root, node_dirs = init_cluster(
        N, s, k + 1, config,
        [["max_inflation_rate", 0], ["epoch_length", EPOCH_LENGTH],
         ["validator_kickout_threshold", 75]], local_config_changes)

    started = time.time()

    boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        if i < n or i >= N:
            node = spin_up_node(config, near_root, node_dirs[i], i,
                                boot_node.node_key.pk, boot_node.addr())
            nodes.append(node)
        else:
            nodes.append(None)

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]
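    # one shared (nonce, lock) pair per node, presumably so the worker processes
    # launched below can send transactions without reusing nonces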

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(globals()['monkey_%s' % monkey])

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < TIMEOUT:
        check_errors()
        time.sleep(1)

    print("")
    print("==========================================")
    print("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    print("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (
                still_running)

    check_errors()
Example #8
def doit(s, n, N, k, monkeys, timeout):
    global block_timeout, balances_timeout, tx_tolerance, epoch_length, wait_if_restart, wipe_data, restart_sync_timeout

    assert 2 <= n <= N

    config = load_config()
    local_config_changes = {}

    monkey_names = [x.__name__ for x in monkeys]
    proxy = None
    logging.info(monkey_names)

    for i in range(N + k + 1):
        local_config_changes[i] = {
            "consensus": {"block_header_fetch_horizon": BLOCK_HEADER_FETCH_HORIZON, "state_sync_timeout": {"secs": 5, "nanos": 0}},
            "view_client_throttle_period": {"secs": 0, "nanos": 0}
        }
    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i]["tracked_shards"] = list(range(s))
    if 'monkey_wipe_data' in monkey_names:
        # When node data can be wiped, the short epoch length means that while the node whose data
        # folder was deleted is syncing, the other nodes can get far enough ahead to GC the old data.
        # Keep one archival node to address that. The archival node is also needed because the balances
        # timeout is longer, so txs could otherwise be GCed on the observer node by the time their
        # status is checked.
        local_config_changes[N + k]['archive'] = True

    if 'monkey_local_network' in monkey_names or 'monkey_packets_drop' in monkey_names or 'monkey_node_restart' in monkey_names:
        expect_network_issues()
        block_timeout += 40

    if 'monkey_local_network' in monkey_names or 'monkey_packets_drop' in monkey_names:
        assert config['local'], 'Network stress operations only work on local nodes'
        drop_probability = 0.05 if 'monkey_packets_drop' in monkey_names else 0

        reject_list = RejectListProxy.create_reject_list(1)
        proxy = RejectListProxy(reject_list, drop_probability)
        tx_tolerance += 0.3

    if 'monkey_local_network' in monkey_names or 'monkey_packets_drop' in monkey_names:
        # add 15 seconds + 10 seconds for each unique network-related monkey
        balances_timeout += 15

        if 'monkey_local_network' in monkey_names:
            balances_timeout += 10

        if 'monkey_packets_drop' in monkey_names:
            wait_if_restart = True
            balances_timeout += 10

    if 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names:
        balances_timeout += 10
        tx_tolerance += 0.5

    if 'monkey_wipe_data' in monkey_names:
        assert 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names
        wipe_data = True
        balances_timeout += 25

        # if nodes can restart, we should give them way more time to sync.
        # if packets can also be dropped, each state-sync-related request or response lost adds 10 seconds
        # to the sync process.
        restart_sync_timeout = 45 if 'monkey_packets_drop' not in monkey_names else 90
        block_timeout += (10 if 'monkey_packets_drop' not in monkey_names else 40)

    # We need to make sure that the blocks that include txs are not garbage collected. From the first tx sent until
    # we check balances time equal to `balances_timeout * 2` passes, and the block production is capped at 1.7/s.
    # The GC keeps five epochs of blocks.
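    # e.g. with balances_timeout == 100 the window is 200s, i.e. at most 340 blocks,
    # which must fit into 5 epochs, so epoch_length must be at least
    # ceil(340 / 5) == 68; the rounded-up integer division below computes exactly that.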
    min_epoch_length = (int((balances_timeout * 2) * 1.7) + 4) // 5
    epoch_length = max(epoch_length, min_epoch_length)


    near_root, node_dirs = init_cluster(
        N, k + 1, s, config,
        [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
         ["epoch_length", epoch_length],
         ["block_producer_kickout_threshold", 10],
         ["chunk_producer_kickout_threshold", 10]], local_config_changes)

    started = time.time()

    boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None, proxy=proxy)
    boot_node.stop_checking_store()
    boot_node.mess_with = False
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        node = spin_up_node(config, near_root, node_dirs[i], i,
                            boot_node.node_key.pk, boot_node.addr(), proxy=proxy)
        node.stop_checking_store()
        nodes.append(node)
        if i >= n and i < N:
            node.kill()
            node.mess_with = True
        else:
            node.mess_with = False

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(monkey)

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < timeout:
        check_errors()
        time.sleep(1)

    logging.info("")
    logging.info("==========================================")
    logging.info("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    logging.info("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    proxies_stopped = False

    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        # If the test is running with proxies, `node_restart` and `node_set` can get
        # stuck because the proxies are now their child processes. We can't kill the
        # proxies right away, because that would interfere with block production and
        # might prevent other workers (e.g. block_tracker) from completing in a timely
        # manner. Thus, kill the proxies some time into the shutdown process.
        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN / 2 and not proxies_stopped:
            logging.info("Shutdown is %s seconds in, shutting down proxies if any" % (TIMEOUT_SHUTDOWN / 2))
            if boot_node.proxy is not None:
                boot_node.proxy.global_stopped.value = 1
                for p in boot_node.proxy.ps:
                    p.terminate()
            proxies_stopped = True


        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (
                still_running)

    check_errors()

    logging.info("Shut down complete, executing store validity checks")
    for node in nodes:
        node.is_check_store = True
        node.check_store()
Example #9
def doit(s, n, N, k, monkeys, timeout):
    global block_timeout, balances_timeout, tx_tolerance

    assert 2 <= n <= N

    config = load_config()
    local_config_changes = {}

    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i] = {"tracked_shards": list(range(s))}

    near_root, node_dirs = init_cluster(
        N, k + 1, s, config,
        [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
         ["epoch_length", EPOCH_LENGTH],
         ["block_producer_kickout_threshold", 10],
         ["chunk_producer_kickout_threshold", 10]], local_config_changes)

    monkey_names = [x.__name__ for x in monkeys]
    proxy = None
    logging.info(monkey_names)
    if 'monkey_local_network' in monkey_names or 'monkey_global_network' in monkey_names:
        assert config[
            'local'], 'Network stress operations only work on local nodes'
        reject_list = RejectListProxy.create_reject_list(1)
        proxy = RejectListProxy(reject_list)
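        # the proxy is passed to every spin_up_node call below; presumably it lets the
        # network monkeys reject or drop traffic between the local nodes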
        expect_network_issues()
        block_timeout += 40
        balances_timeout += 20
        tx_tolerance += 0.3
    if 'monkey_node_restart' in monkey_names:
        expect_network_issues()
    if 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names:
        block_timeout += 40
        balances_timeout += 10
        tx_tolerance += 0.5

    started = time.time()

    boot_node = spin_up_node(config,
                             near_root,
                             node_dirs[0],
                             0,
                             None,
                             None,
                             proxy=proxy)
    boot_node.stop_checking_store()
    boot_node.mess_with = False
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        node = spin_up_node(config,
                            near_root,
                            node_dirs[i],
                            i,
                            boot_node.node_key.pk,
                            boot_node.addr(),
                            proxy=proxy)
        node.stop_checking_store()
        nodes.append(node)
        if i >= n and i < N:
            node.kill()
            node.mess_with = True
        else:
            node.mess_with = False

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(monkey)

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < timeout:
        check_errors()
        time.sleep(1)

    logging.info("")
    logging.info("==========================================")
    logging.info("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    logging.info("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (
                still_running)

    check_errors()

    logging.info("Shut down complete, executing store validity checks")
    for node in nodes:
        node.is_check_store = True
        node.check_store()
Example #10
import sys, time

sys.path.append('lib')

from cluster import init_cluster, spin_up_node, load_config
from transaction import sign_staking_tx
from utils import TxContext

TIMEOUT = 600
# the height we spin up the second node
TARGET_HEIGHT = 35

config = load_config()
# give more stake to the bootnode so that it can produce the blocks alone
near_root, node_dirs = init_cluster(
    4, 1, 4,
    config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
             ["epoch_length", 12], ["block_producer_kickout_threshold", 20],
             ["chunk_producer_kickout_threshold", 20]],
    {4: {
        "tracked_shards": [0, 1, 2, 3]
    }})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
node3 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk,
                     boot_node.addr())
node4 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk,
                     boot_node.addr())
observer = spin_up_node(config, near_root, node_dirs[4], 4,
                        boot_node.node_key.pk, boot_node.addr())

ctx = TxContext([0, 0, 0, 0, 0], [boot_node, None, node3, node4, observer])
Example #11
from cluster import init_cluster, load_config

config = load_config()
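# The genesis overrides pin protocol_version 47 and include a simple nightshade shard
# layout (three boundary accounts, i.e. four shards), presumably for the test to
# reshard into later.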
near_root, node_dirs = init_cluster(
    2, 1, 1, config,
    [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10],
     ["protocol_version", 47],
     [
         "simple_nightshade_shard_layout", {
             "V1": {
                 "fixed_shards": [],
                 "boundary_accounts":
                     ["aurora", "aurora-0", "kkuuue2akv_1630967379.near"],
                 "shards_split_map": [[0, 1, 2, 3]],
                 "to_parent_shard_map": [0, 0, 0, 0],
                 "version": 1
             }
         }
     ], ["block_producer_kickout_threshold", 80]], {
         0: {
             "tracked_shards": [0]
         },
         1: {
             "tracked_shards": [0]
         },
         2: {
             "tracked_shards": [0],
             "consensus": {
                 "block_fetch_horizon": 20,
             },
         }
     })
Example #12
import time

from cluster import init_cluster, spin_up_node, load_config
from transaction import sign_staking_tx
from utils import TxContext

TIMEOUT = 600
TWENTY_FIVE = 25

config = load_config()
# give more stake to the bootnode so that it can produce the blocks alone
near_root, node_dirs = init_cluster(
    2, 1, 2, config,
    [["min_gas_price", 0], ["max_inflation_rate", 0], ["epoch_length", 7],
     ["block_producer_kickout_threshold", 80],
     ["validators", 0, "amount", "60000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "60000000000000000000000000000000"
     ]], {2: {
         "tracked_shards": [0, 1]
     }})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
#node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr())
observer = spin_up_node(config, near_root, node_dirs[2], 2,
                        boot_node.node_key.pk, boot_node.addr())

# It takes a while for test2 account to appear
ctx = TxContext([0, 0, 0], [boot_node, None, observer])
Example #13
import os
import subprocess
import sys

sys.path.append('lib')

from cluster import init_cluster, start_cluster, GCloudNode
import retry

args = (3, 1, 1, {
    'local': False,
    'near_root': '../target/debug/',
    'remote': {
        'instance_name': 'near-pytest',
    }
}, [], [])
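# 3 validators, 1 observer, 1 shard; 'local': False plus the remote instance_name
# means the nodes run on GCloud instances named near-pytest-* (cf. GCloudNode below).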

init_cluster(*args)
subprocess.run([
    os.path.join(os.path.dirname(os.path.realpath(__file__)),
                 "../../scripts/create_instance_pool.sh"),
    "near-pytest",
    "us-west2-a us-west2-b us-west2-c us-west2-a"
])
node_dirs = subprocess.check_output(
    "find ~/.near/* -maxdepth 0",
    shell=True).decode('utf-8').strip().split('\n')

g = GCloudNode('near-pytest-0', node_dirs[0])
assert g.machine_status() == 'RUNNING'
g.is_ready()
print(g.addr())

start_cluster(*args)

g.change_version('staging')
retry.retry(lambda: g.is_ready(), 1200)
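
All of the examples above follow the same basic shape: load (or hand-build) a config, call init_cluster to lay out the node directories plus genesis and per-node config overrides, then spin_up_node for each node that should actually run. The sketch below restates that pattern in minimal form; it is assembled from the calls shown above rather than taken from any single test, and the concrete genesis values and TARGET_HEIGHT are placeholders.

import time

from cluster import init_cluster, spin_up_node, load_config

TIMEOUT = 120        # placeholder timeout
TARGET_HEIGHT = 20   # placeholder height to wait for

config = load_config()
# 2 validators, 1 observer, 1 shard; the observer (node index 2) tracks shard 0
near_root, node_dirs = init_cluster(
    2, 1, 1, config,
    [["epoch_length", 10], ["block_producer_kickout_threshold", 80]],
    {2: {"tracked_shards": [0]}})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
node1 = spin_up_node(config, near_root, node_dirs[1], 1,
                     boot_node.node_key.pk, boot_node.addr())
observer = spin_up_node(config, near_root, node_dirs[2], 2,
                        boot_node.node_key.pk, boot_node.addr())

# wait for the chain to reach TARGET_HEIGHT, as in Example #5
height = 0
while height < TARGET_HEIGHT:
    assert time.time() - started < TIMEOUT
    height = boot_node.get_status()['sync_info']['latest_block_height']
    time.sleep(1)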