Example #1
File: stress.py Project: HugoByte/nearcore
# `staking2.py` tests some basic stake invariants
#  [v] `wipe_data`: only used in conjunction with `node_set` and `node_restart`. If present, nodes' data folders will be periodically cleaned on restart
# This test also completely disables rewards, which simplifies ensuring total supply invariance and balance invariances

import sys, time, base58, random, inspect, traceback, requests, logging
from multiprocessing import Process, Value, Lock

sys.path.append('lib')

from cluster import init_cluster, spin_up_node, load_config
from configured_logger import logger
from utils import TxContext, Unbuffered
from transaction import sign_payment_tx, sign_staking_tx
from proxy_instances import RejectListProxy

sys.stdout = Unbuffered(sys.stdout)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

TIMEOUT = 1500  # after how much time to shut down the test
TIMEOUT_SHUTDOWN = 120  # time to wait after the shutdown was initiated before failing the test due to process stalling
MAX_STAKE = int(1e32)

# How many times to try to send transactions to each validator.
# This is only applicable in scenarios where we expect failures in tx sends.
SEND_TX_ATTEMPTS = 10

# Block_header_fetch_horizon + state_fetch_horizon (which is equal to 5) needs to be shorter than the epoch length.
# Otherwise, say the epoch boundaries are H and H'. If the current height is H' + eps, and a node finished header sync at
# H' + eps - block_header_fetch_horizon, and then rolled state_fetch_horizon back, it will end up before H, and will
# try to state sync at the beginning of the epoch *two* epochs ago. No node will respond to such state requests.
BLOCK_HEADER_FETCH_HORIZON = 15
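
The comment above pins down an arithmetic constraint: the block header fetch horizon plus the state fetch horizon (stated to be 5) must stay below the epoch length, otherwise a syncing node can roll back past the previous epoch boundary. A quick sanity-check sketch of that constraint, where the epoch length is an assumed illustrative value rather than one taken from the test configuration:

# Sanity-check sketch for the constraint described above. STATE_FETCH_HORIZON
# is the value 5 quoted in the comment; EPOCH_LENGTH is assumed for illustration.
STATE_FETCH_HORIZON = 5
EPOCH_LENGTH = 25  # assumed value, not from the test
assert BLOCK_HEADER_FETCH_HORIZON + STATE_FETCH_HORIZON < EPOCH_LENGTH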
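
Both examples on this page route `sys.stdout` through `Unbuffered` so that every write is flushed immediately, which keeps log output ordered even if the process is killed or forks worker processes. The actual `utils.Unbuffered` implementation is not shown here; the sketch below is only an assumption of the usual wrapper pattern such a helper follows.

# Minimal sketch of a typical Unbuffered stream wrapper (assumed; the real
# utils.Unbuffered may differ in details).
import sys


class Unbuffered:

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        self.stream.write(data)
        self.stream.flush()  # flush after every write so nothing is lost on a crash

    def writelines(self, lines):
        self.stream.writelines(lines)
        self.stream.flush()

    def __getattr__(self, name):
        # Delegate everything else (encoding, isatty, close, ...) to the wrapped stream.
        return getattr(self.stream, name)


# Usage, matching the snippets on this page:
sys.stdout = Unbuffered(sys.stdout)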
Example #2
File: main.py Project: arpit9295/ce7455
parser.add_argument('--gradient-clip', default=5.0)
parser.add_argument('--use-gpu', default='true')
parser.add_argument('--plot-every', type=int, default=100)
parser.add_argument('--plot-file', default="losses.png")
parser.add_argument('--print-every', type=int, default=1000)

parameters = vars(parser.parse_args())

output_dir = parameters['output_dir']
use_char_embedding = parameters['use_char_embedding']

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

f = open(output_dir + '/out.txt', 'w')
sys.stdout = Unbuffered(
    f)  # Change the standard output to the file we created.

use_beam_search = parameters['use_beam_search'] != 'false'
use_attention = parameters['use_attention'] != 'false'
use_gpu = parameters['use_gpu'] != 'false'

if torch.cuda.is_available() and not use_gpu:
    print(
        "WARNING: You have a CUDA device, so you should probably run without --use-gpu=false"
    )

device = torch.device("cuda" if use_gpu else "cpu")
mapping_path = output_dir + '/' + parameters['mapping_file']

# if os.path.isfile(mapping_path):
#     with open(mapping_path, 'rb') as f:
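
Note that the flag handling in this example treats every value except the literal lowercase string 'false' as enabled, so `--use-gpu=False` or `--use-gpu=0` would still select the GPU. A small standalone illustration of that convention (hypothetical, not part of main.py):

# Hypothetical illustration of the string-flag convention used above:
# only the exact string 'false' disables a feature.
for raw in ('true', 'false', 'False', '0', ''):
    use_gpu = raw != 'false'
    print('--use-gpu=%r -> use_gpu=%s' % (raw, use_gpu))
# Every value except 'false' (including 'False' and '0') enables the GPU.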