def restore():
    # Reload the last-seen timestamps from backup.tsv ("<mac>\t<epoch>" per line).
    lock_dict_mac_last.acquire()
    try:
        with open('backup.tsv', mode='r') as fp:
            for line in fp:
                parts = line.split('\t')
                dict_mac_last[parts[0]] = Epoch(parts[1])
    except FileNotFoundError:
        print('backup.tsv not found.')
    finally:
        # Release the lock even if reading the backup fails unexpectedly.
        lock_dict_mac_last.release()

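# Hypothetical counterpart to restore() above (not part of the original
# snippet): a sketch of the writer that would produce backup.tsv in the
# same tab-separated "<mac>\t<epoch>" layout that restore() reads back.
# Assumes dict_mac_last maps MAC strings to Epoch values convertible with
# int(), and that lock_dict_mac_last guards the dict as above.
def backup():
    lock_dict_mac_last.acquire()
    try:
        with open('backup.tsv', mode='w') as fp:
            for mac, last_seen in dict_mac_last.items():
                fp.write('{:s}\t{:d}\n'.format(mac, int(last_seen)))
    finally:
        lock_dict_mac_last.release()
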
def thread_watchdog():
    print('watchdog is running...')
    # Scan the wireless interface and watch stdout for MAC addresses.
    process = subprocess.Popen(
        shlex.split('./airodump-ng --berlin 5 --update 1 wlan0'),
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    while process.poll() is None:
        line = process.stdout.readline().decode().rstrip()
        if regex_mac48.match(line):
            # Notify only for known users not seen within the last 30 minutes.
            if line in dict_mac_user and line in dict_mac_last and int(
                    Epoch()) - int(dict_mac_last[line]) > 1800:
                Slack.notice(
                    'TYPE_YOUR_ENDPOINT',
                    '{:s} may be in the project room.'.format(dict_mac_user[line]))
            lock_dict_mac_last.acquire()
            dict_mac_last[line] = Epoch()
            lock_dict_mac_last.release()

datadir = 'data/dataset'
batch_size = 1
subseq_length = 4
step_size = 1

# Separate the sequences for which there is ground truth into test
# and train according to the paper's partition.
train_seqs = ['00', '02', '08', '09']
test_seqs = ['03', '04', '05', '06', '07', '10']

# Create a data loader to get batches one epoch at a time
epoch_data_loader = Epoch(datadir=datadir,
                          flowdir=os.path.join(datadir, "flows"),
                          train_seq_nos=train_seqs,
                          test_seq_nos=test_seqs,
                          window_size=subseq_length,
                          step_size=step_size,
                          batch_size=batch_size)

# What is the shape of the input flow images?
flow_input_shape = epoch_data_loader.get_input_shape()
print("Input shape: {}".format(flow_input_shape))

# Test a batch
X, Y = epoch_data_loader.get_training_batch()
print("[Batch] X.shape = {}".format(X.shape))
print("[Batch] Y.shape = {}".format(Y.shape))

random_sample_index = np.random.randint(0, len(X))

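# Hypothetical continuation (not in the original snippet): inspect the
# randomly chosen subsequence from the batch above. Assumes X and Y are
# numpy arrays indexed by sample along their first axis.
print("[Sample {}] x.shape = {}".format(random_sample_index,
                                        X[random_sample_index].shape))
print("[Sample {}] y.shape = {}".format(random_sample_index,
                                        Y[random_sample_index].shape))
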
data_dir = os.path.join(testdir, '..', '..', 'data', 'dataset')
batch_size = 1
subseq_length = 50
step_size = 1

# Separate the sequences for which there is ground truth into test
# and train according to the paper's partition.
train_seqs = ['00', '02', '08', '09']
test_seqs = ['03', '04', '05', '06', '07', '10']

# Create a data loader to get batches one epoch at a time
epoch_data_loader = Epoch(datadir=data_dir,
                          flowdir=os.path.join(data_dir, "flows"),
                          train_seq_nos=train_seqs,
                          test_seq_nos=test_seqs,
                          window_size=subseq_length,
                          step_size=step_size,
                          batch_size=batch_size)

# For every sequence, load in the ground truth partitioned poses,
# reconstruct the partitions into one long sequence, and output
# to a file in test_results/sanity_check. This should be the
# identity function.
out_dir = os.path.join(testdir, '..', '..', 'test_results', 'sanity_check')
for seq_num in train_seqs + test_seqs:
    pose_labels = np.array(
        [y for x, y in epoch_data_loader.get_testing_samples(seq_num)])
    subseq_preds_to_full_pred(pose_labels,
                              os.path.join(out_dir, seq_num + '.csv'))

class Oracle:
    def __init__(self):
        self.pub_key = os.environ['AE_PUB_KEY']
        self.url = "ws://localhost:" + os.environ['AE_WEBSOCKET'] + "/websocket"
        self.websocket = None
        self.local_port = os.environ['AE_LOCAL_PORT']
        self.local_internal_port = os.environ['AE_LOCAL_INTERNAL_PORT']
        self.epoch = Epoch()

    def connect_websocket(self):
        # Open the websocket connection lazily and reuse it afterwards.
        if not self.websocket:
            self.websocket = create_connection(self.url)

    def register(self, query_format, response_format, query_fee, ttl, fee):
        self.connect_websocket()
        query = {
            "target": "oracle",
            "action": "register",
            "payload": {
                "type": "OracleRegisterTxObject",
                "vsn": 1,
                "account": self.pub_key,
                "query_format": query_format,
                "response_format": response_format,
                "query_fee": int(query_fee),
                "ttl": {
                    "type": "delta",
                    "value": int(ttl)
                },
                "fee": int(fee)
            }
        }
        j = json.dumps(query)
        print(j)
        self.epoch.update_top_block()
        self.websocket.send(j)
        response = json.loads(self.websocket.recv())
        if not response['payload']['result'] == "ok":
            raise RuntimeError(response)
        oracle_id = response['payload']['oracle_id']
        self.epoch.wait_for_block()
        return oracle_id

    def wait_for_block(self):
        self.epoch.update_top_block()
        self.epoch.wait_for_block()

    def subscribe(self, oracle_id, callback=None):
        self.connect_websocket()
        query = {
            "target": "oracle",
            "action": "subscribe",
            "payload": {
                "type": "query",
                "oracle_id": oracle_id
            }
        }
        j = json.dumps(query)
        self.websocket.send(j)
        # Wait for the subscription acknowledgement, skipping mined-block events.
        while True:
            response = json.loads(self.websocket.recv())
            print(response)
            if response['action'] == 'mined_block':
                continue
            if not response['payload']['result'] == 'ok':
                raise RuntimeError(response)
            id = response['payload']['subscribed_to']['oracle_id']
            break
        mining_events = 0
        while True:
            data = self.websocket.recv()
            j = json.loads(data)
            print(j)
            if j['action'] == 'mined_block':
                mining_events += 1
                continue
            if j['action'] == 'new_oracle_query':
                if callback:
                    callback(j)
                else:
                    print("Unhandled")
            if mining_events == 0:
                self.epoch.wait_for_block()

    def query(self, oracle_pubkey, query_fee, query_ttl, response_ttl, fee,
              query):
        self.connect_websocket()
        request = {
            "target": "oracle",
            "action": "query",
            "payload": {
                "type": "OracleQueryTxObject",
                "vsn": 1,
                "oracle_pubkey": oracle_pubkey,
                "query_fee": int(query_fee),
                "query_ttl": {
                    "type": "delta",
                    "value": int(query_ttl)
                },
                "response_ttl": {
                    "type": "delta",
                    "value": int(response_ttl)
                },
                "fee": int(fee),
                "query": query
            }
        }
        j = json.dumps(request)
        print(j)
        self.websocket.send(j)
        response = self.websocket.recv()
        print(response)
        response = json.loads(response)
        if response['payload']['result'] == "ok":
            return response['payload']['query_id']
        self.epoch.wait_for_block()
        return False

    def subscribe_query(self, query_id, callback=None):
        self.connect_websocket()
        request = {
            "target": "oracle",
            "action": "subscribe",
            "payload": {
                "type": "response",
                "query_id": query_id
            }
        }
        j = json.dumps(request)
        print(j)
        self.websocket.send(j)
        # check response, might have to consume a block mined message
        blocks_mined = 0
        while True:
            response = self.websocket.recv()
            response = json.loads(response)
            print(response)
            if response['action'] == 'mined_block':
                blocks_mined += 1
                continue
            if response['action'] == 'new_oracle_response':
                if callback:
                    callback(response['payload'])
                else:
                    print(response['payload'])
                break
            # Should we get here?
            if not response['payload']['result'] == 'ok':
                raise RuntimeError(response)

    def respond(self, query_id, fee, reply):
        self.connect_websocket()
        response = {
            "target": "oracle",
            "action": "response",
            "payload": {
                "type": "OracleResponseTxObject",
                "vsn": 1,
                "query_id": query_id,
                "fee": int(fee),
                "response": reply
            }
        }
        response = json.dumps(response)
        print(response)
        self.websocket.send(response)

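# Illustrative usage sketch (not from the original source): one way the
# Oracle class above might be driven. All fee/TTL values and the query
# string are placeholders, and oracle_pubkey is assumed to be the id
# returned by register(); adjust them to the actual node configuration.
if __name__ == '__main__':
    oracle = Oracle()
    oracle_id = oracle.register(query_format="string",
                                response_format="string",
                                query_fee=4, ttl=50, fee=5)
    query_id = oracle.query(oracle_pubkey=oracle_id, query_fee=4,
                            query_ttl=10, response_ttl=10, fee=6,
                            query="temperature in Berlin?")
    if query_id:
        # Print the oracle's answer payload when it arrives.
        oracle.subscribe_query(query_id, callback=print)
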
from network import Network
from timeService import TimeService as TS
from epoch import Epoch
import flow

networks = [Network(time) for time in TS.time_range]
epoch = Epoch(networks, "eposzek")
flow.simulate(epoch, True)
epoch.rate()

#!/usr/bin/python
"""
A class for aeternity oracle clients and servers.

Author: John Newby

(c) Ape Unit 2018
"""

from epoch import Epoch
import sys

epoch = Epoch()
epoch.wait_for_block()

def parse_output(output_file):
    """Returns an Epoch built from the devices parsed out of output_file."""

    def strip_header(lines):
        # Drop everything before the "Processor:" section.
        start = False
        ret = []
        for line in lines:
            if "Processor:" in line:
                start = True
            if start:
                ret.append(line)
        return ret

    def strip_space(lines):
        # Normalize indentation so device nesting can be read from leading spaces.
        ret = []
        last_line_star = False
        start_core = False
        for line in lines:
            if "Core:" in line:
                start_core = True
                if last_line_star:
                    # Fix spacing after ******
                    ret.append(" " + line)
                    last_line_star = False
            elif "*****" in line:
                last_line_star = True
            elif "Device Type=" in line or " Local Predictor:" in line:
                continue
            else:
                if last_line_star:
                    # Fix spacing after ******
                    ret.append(" " + line)
                    last_line_star = False
                elif start_core:
                    ret.append(line.replace(" ", "", 2))
                else:
                    ret.append(line)
        return ret

    def line_to_dict(line):
        # Note: helper is defined here but not used below.
        ret = {}
        temp = line.split(":")[0].split("=")
        ret["lspace"] = len(temp[0]) - len(temp[0].lstrip())
        return ret

    def split_list(lines):
        # Group lines into one sub-list per device, numbering the cores.
        core_id = 0
        ret = []
        sub = []
        for i in lines:
            if "Core:" in i:
                i = i.replace("Core:", "Core" + str(core_id) + ":")
                core_id += 1
            if i == "\n":
                ret.append(sub)
                sub = []
            else:
                sub.append(i.rstrip())
        return ret

    def to_devices(intermediate_dev_list):
        # Turn each grouped block into a Device, deriving depth from indentation.
        ret = []
        for dev in intermediate_dev_list:
            data = {}
            #print(dev)
            for attr in dev[1:]:
                data[attr.split("=")[0].strip()] = attr.split("=")[1].strip()
            ret.append(Device(dev[0].split(":")[0].strip(), data,
                              int(math.floor((len(dev[0]) - len(dev[0].lstrip())) / 2))))
            if ret[-1].depth == 4:
                ret[-1].depth = 3
            if ret[-1].depth == 5:
                ret[-1].depth = 3
            if ret[-1].depth == 6:
                ret[-1].depth = 4
        return ret

    with open(output_file, "r") as of:
        lines = of.readlines()
    lines = strip_header(lines)
    lines = strip_space(lines)
    temp = split_list(lines)
    dev_list = to_devices(temp)
    epoch = Epoch(dev_list)
    return epoch

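# Hypothetical usage (not part of the original snippet): build an Epoch
# from a saved report. "power_report.txt" is a placeholder file name; the
# file is expected to contain the "Processor:" / "Core:" sections that the
# helpers above look for.
if __name__ == '__main__':
    epoch = parse_output("power_report.txt")
    print(epoch)
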
def __init__(self, *arg, **karg):
    """Initializes as Epoch but with duration zero."""
    Epoch.__init__(self, *arg, **karg)
    self.duration = 0
    if 'time' in karg.keys():
        self.time = karg['time']

""" return loss return weighted_mse # Separate the sequences for which there is ground truth into test # and train according to the paper's partition. train_seqs = ['00', '02', '08', '09'] test_seqs = ['03', '04', '05', '06', '07', '10'] # Create a data loader to get batches one epoch at a time epoch_data_loader = Epoch(datadir=args['data_dir'], flowdir=os.path.join(args['data_dir'], "flows"), train_seq_nos=train_seqs, test_seq_nos=test_seqs, window_size=args['subseq_length'], step_size=args['step_size'], batch_size=args['batch_size']) # What is the shape of the input flow images? flow_input_shape = epoch_data_loader.get_input_shape() # Define Keras model architecture model = K.models.Sequential() # Reducing input dimensions via conv-pool layers model.add( TimeDistributed(Conv2D(10, (3, 3)), input_shape=(args["subseq_length"], *flow_input_shape))) model.add(Activation('relu'))