def start_timer(self):
    """Periodically send WLAN status and telemetry until self.exit is set."""
    if not self.exit:
        self.send(encode(common.ID_WLAN, self.wlan_status()))
        data = ["11.2", "%.2f" % self.get_range()]
        self.send(encode(common.ID_TELEMETRY, data))
        threading.Timer(1, self.start_timer).start()

def main(argv):
    argparser = argument_parser()
    args = argparser.parse_args(argv[1:])
    seq_len = args.max_seq_length    # abbreviation
    pretrained_model, tokenizer = load_pretrained(args)
    train_words, train_tags = read_conll(args.train_data)
    test_words, test_tags = read_conll(args.test_data)
    train_data = process_sentences(train_words, train_tags, tokenizer, seq_len)
    test_data = process_sentences(test_words, test_tags, tokenizer, seq_len)
    label_list = get_labels(train_data.labels)
    tag_map = {l: i for i, l in enumerate(label_list)}
    inv_tag_map = {v: k for k, v in tag_map.items()}
    init_prob, trans_prob = viterbi_probabilities(train_data.labels, tag_map)
    train_x = encode(train_data.combined_tokens, tokenizer, seq_len)
    test_x = encode(test_data.combined_tokens, tokenizer, seq_len)
    train_y, train_weights = label_encode(train_data.combined_labels, tag_map, seq_len)
    test_y, test_weights = label_encode(test_data.combined_labels, tag_map, seq_len)
    ner_model = create_ner_model(pretrained_model, len(tag_map))
    optimizer = create_optimizer(len(train_x[0]), args)
    ner_model.compile(optimizer, loss='sparse_categorical_crossentropy',
                      sample_weight_mode='temporal',
                      metrics=['sparse_categorical_accuracy'])
    ner_model.fit(train_x, train_y, sample_weight=train_weights,
                  epochs=args.num_train_epochs, batch_size=args.batch_size)
    if args.ner_model_dir is not None:
        label_list = [v for k, v in sorted(list(inv_tag_map.items()))]
        save_ner_model(ner_model, tokenizer, label_list, args)
        save_viterbi_probabilities(init_prob, trans_prob, inv_tag_map, args)
    probs = ner_model.predict(test_x, batch_size=args.batch_size)
    preds = np.argmax(probs, axis=-1)
    pred_tags = []
    for i, pred in enumerate(preds):
        pred_tags.append(
            [inv_tag_map[t] for t in pred[1:len(test_data.tokens[i]) + 1]])
    lines = write_result(args.output_file, test_data.words, test_data.lengths,
                         test_data.tokens, test_data.labels, pred_tags)
    c = conlleval.evaluate(lines)
    conlleval.report(c)
    return 0

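# label_encode is not shown in this section; the sketch below is an assumption
# about its shape, inferred only from how it is called above: each tag sequence
# is mapped to integer ids padded to seq_len, with a 0/1 weight mask returned
# so that sample_weight_mode='temporal' ignores padding positions.
import numpy as np

def label_encode(labels, tag_map, seq_len):
    y = np.zeros((len(labels), seq_len), dtype=int)
    weights = np.zeros((len(labels), seq_len))
    for i, sent in enumerate(labels):
        for j, tag in enumerate(sent[:seq_len]):
            y[i, j] = tag_map[tag]
            weights[i, j] = 1.0
    # sparse targets in Keras are often given a trailing unit axis
    return np.expand_dims(y, -1), weights
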
def traverse_map(self, map):
    """Drive the rover along the given sequence of moves, reporting progress."""
    pos = 0
    for direction in map:
        self.send(encode(common.ID_ROVER, direction))
        self.move(direction)
        time.sleep(1)
        self.send(encode(common.ID_MAP, pos))
        pos += 1
    self.send(encode(common.ID_MAP, common.MOVE_END))
    self.send(encode(common.ID_ROVER, common.MOVE_STOP))

def main(argv):
    argparser = argument_parser('predict')
    args = argparser.parse_args(argv[1:])
    ner_model, tokenizer, labels, config = load_ner_model(args.ner_model_dir)
    max_seq_len = config['max_seq_length']
    label_map = {t: i for i, t in enumerate(labels)}
    inv_label_map = {v: k for k, v in label_map.items()}
    test_words, dummy_labels = read_conll(args.test_data, mode='test')
    test_data = process_sentences(test_words, dummy_labels, tokenizer, max_seq_len)
    test_x = encode(test_data.combined_tokens, tokenizer, max_seq_len)
    probs = ner_model.predict(test_x, batch_size=args.batch_size)
    preds = np.argmax(probs, axis=-1)
    pred_labels = []
    for i, pred in enumerate(preds):
        pred_labels.append(
            [inv_label_map[t] for t in pred[1:len(test_data.tokens[i]) + 1]])
    lines = write_result(args.output_file, test_data.words, test_data.lengths,
                         test_data.tokens, test_data.labels, pred_labels,
                         mode='predict')
    return 0

def tag(self, text, tokenized=False):
    max_seq_len = self.config['max_seq_length']
    inv_label_map = {i: l for i, l in enumerate(self.labels)}
    if tokenized:
        words = text.split()    # whitespace tokenization
    else:
        words = tokenize(text)    # approximate BasicTokenizer
    dummy = ['O'] * len(words)
    data = process_sentences([words], [dummy], self.tokenizer, max_seq_len)
    x = encode(data.combined_tokens, self.tokenizer, max_seq_len)
    if self.session is None or self.graph is None:
        probs = self.model.predict(x, batch_size=8)    # assume single-threaded
    else:
        with self.session.as_default():
            with self.graph.as_default():
                probs = self.model.predict(x, batch_size=8)
    preds = np.argmax(probs, axis=-1)
    pred_labels = []
    for i, pred in enumerate(preds):
        pred_labels.append(
            [inv_label_map[t] for t in pred[1:len(data.tokens[i]) + 1]])
    lines = write_result(
        'output.tsv', data.words, data.lengths, data.tokens, data.labels,
        pred_labels, mode='predict'
    )
    return ''.join(lines)

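# Hypothetical usage of tag(); NerTagger and the model path are placeholder
# names, not from the original code. The return value is the TSV produced by
# write_result: one "token<TAB>predicted-label" line per input token.
tagger = NerTagger('path/to/ner_model_dir')
print(tagger.tag('Alice flew to Helsinki'))
print(tagger.tag('Alice flew to Helsinki', tokenized=True))
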
def send(self):
    msg_no = 0
    for msg_code, userName, pwd, heartBeatInt in common.data_set:
        print("userName type=", type(userName.encode()))
        data = [userName.encode(), pwd.encode(), heartBeatInt.encode()]
        msg = common.encode(msg_code, msg_no, data)
        print('sendMsg[%s]' % msg)
        msg_no += 1
        self.socket.send(msg)

def on_play(self, widget):
    """Send the drawn map to the rover and lock manual controls while it runs."""
    if widget.get_active():
        map = self.map.get_map()
        self.coms.send(encode(common.ID_MAP, map))
        self.control.set_sensitive(False)
        self.pad.set_sensitive(False)
    else:
        self.control.set_sensitive(True)
        self.pad.set_sensitive(True)

def _generate_payload_each(self, crash_uid):
    msg = self._generate_whisper()
    raw_msg = b'\x01\x00\x00\x00\x88\x00\x00\x00\x17\x00\x05\x18\x15'
    raw_msg += len(msg).to_bytes(2, "big") + msg.encode()
    raw_msg += b'\x10\xd6\x01\x0c[' + crash_uid.to_bytes(
        4, byteorder='big'
    ) + b'\x0c;' + self.account.uid_hex + b'\x0c~\x00\x00\x00\x00'
    raw_msg = len(raw_msg).to_bytes(4, "big") + raw_msg
    return encode(raw_msg)

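# Several snippets here prepend a payload's length as a big-endian integer
# before sending. A hypothetical helper (not in the original code) that names
# that length-prefix framing pattern:
def frame(payload: bytes, width: int = 4) -> bytes:
    """Prefix payload with its length as a width-byte big-endian integer."""
    return len(payload).to_bytes(width, "big") + payload
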
def on_collision(self, gpio_id, value):
    """Encode which bumper switches are pressed and report the collision."""
    result = value
    if gpio_id == self.SW1_PIN and RPIO.input(self.SW2_PIN):
        result += 2
    elif gpio_id == self.SW2_PIN:
        result *= 2
        if RPIO.input(self.SW1_PIN):
            result += 1
    self.send(encode(common.ID_BUMPER, result))

def update_user_map(name, emoti_user_id):
    with open(USER_MAP_FILE, "r") as json_file:
        data = json.load(json_file)
    name_encode = encode(name)
    if not data.get(name_encode):
        data[name_encode] = emoti_user_id
        USER_NAME_MAP[name] = emoti_user_id
        print('[ADD-USER]: %s %s' % (name, emoti_user_id))
        with open(USER_MAP_FILE, "w") as json_file:
            json_file.write(json.dumps(data))

def get_uid(self, username):
    msg = (b'\x01\x00\x00\x00\xe7\x00\x00\x00\x9a\x00\x01\x18\r'
           + len(username).to_bytes(2, "big") + username.encode())
    msg = len(msg).to_bytes(4, "big") + msg
    try:
        self.second_sock.sendall(encode(msg))
    except OSError:    # narrowed from a bare except; socket errors derive from OSError
        return
    timeout = time.time() + 20
    while time.time() < timeout:
        for searched_user, uid in self.users_searched.items():
            if username.lower() == searched_user.lower():
                del self.users_searched[searched_user]
                return searched_user, uid
        time.sleep(0.1)

def on_control(self, widget, event):
    """Translate keyboard events on the control widget into rover commands."""
    if widget.get_active():
        if event.type == Gdk.EventType.FOCUS_CHANGE:
            widget.set_active(False)
        elif event.type == Gdk.EventType.KEY_PRESS:
            key = event.string
            if key in self.CONTROL_KEYS:
                i = self.CONTROL_KEYS[key]
                self.coms.send(encode(common.ID_ROVER, i))
                self.rover_img.set_from_pixbuf(ROVER_PIXBUFS[i])
        elif event.type == Gdk.EventType.KEY_RELEASE:
            key = event.string
            if key in self.CONTROL_KEYS:
                self.rover_img.set_from_pixbuf(ROVER_PIXBUFS[0])

def main(argv):
    argparser = argument_parser('predict')
    args = argparser.parse_args(argv[1:])
    ner_model, tokenizer, labels, config = load_ner_model(args.ner_model_dir)
    max_seq_len = config['max_seq_length']
    label_map = {t: i for i, t in enumerate(labels)}
    inv_label_map = {v: k for k, v in label_map.items()}
    if args.viterbi:
        try:
            init_prob, trans_prob = load_viterbi_probabilities(
                args.ner_model_dir, label_map)
        except Exception as e:
            error('failed to load viterbi probabilities: {}'.format(e))
            init_prob, trans_prob, args.viterbi = None, None, False
    test_words, dummy_labels = read_conll(args.test_data, mode='test')
    test_data = process_sentences(test_words, dummy_labels, tokenizer, max_seq_len)
    test_x = encode(test_data.combined_tokens, tokenizer, max_seq_len)
    probs = ner_model.predict(test_x, batch_size=args.batch_size)
    pred_labels = []
    if not args.viterbi:
        preds = np.argmax(probs, axis=-1)
        for i, pred in enumerate(preds):
            pred_labels.append(
                [inv_label_map[t] for t in pred[1:len(test_data.tokens[i]) + 1]])
    else:
        for i, prob in enumerate(probs):
            cond_prob = prob[1:len(test_data.tokens[i]) + 1]
            path = viterbi_path(init_prob, trans_prob, cond_prob)
            # use t, not i, to avoid shadowing the outer loop index
            pred_labels.append([inv_label_map[t] for t in path])
    write_result(args.output_file, test_data.words, test_data.lengths,
                 test_data.tokens, test_data.labels, pred_labels, mode='predict')
    return 0

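# viterbi_path is not shown in this section; below is a minimal log-space
# Viterbi decoder consistent with how it is called above (init_prob: (num_tags,),
# trans_prob: (num_tags, num_tags) with [i, j] = P(j | i), cond_prob:
# (seq_len, num_tags)). A sketch under those assumptions, not necessarily the
# original implementation.
import numpy as np

def viterbi_path(init_prob, trans_prob, cond_prob):
    eps = 1e-12    # avoid log(0)
    log_init = np.log(np.asarray(init_prob) + eps)
    log_trans = np.log(np.asarray(trans_prob) + eps)
    log_cond = np.log(np.asarray(cond_prob) + eps)
    seq_len, num_tags = log_cond.shape
    score = np.zeros((seq_len, num_tags))
    back = np.zeros((seq_len, num_tags), dtype=int)
    score[0] = log_init + log_cond[0]
    for t in range(1, seq_len):
        # step[i, j]: best score ending in tag j at t, coming from tag i
        step = score[t - 1][:, None] + log_trans + log_cond[t][None, :]
        back[t] = np.argmax(step, axis=0)
        score[t] = np.max(step, axis=0)
    path = [int(np.argmax(score[-1]))]
    for t in range(seq_len - 1, 0, -1):
        path.append(int(back[t, path[-1]]))
    return path[::-1]
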
def recvThreadFun():
    print("start recvThread")
    while continue_flag:
        for sock in recvSockSet:
            data = ''
            try:
                data = recv_msg_queues[sock].get_nowait()
            except queue.Empty:
                continue
            if data == '':
                continue
            # ret, msg_len, msg_code, msg_no, result, userName, pwd, heartBeatInt
            data_set = common.decode(data)
            ret = data_set[0]
            msg_len = data_set[1]
            msg_code = data_set[2]
            msg_no = data_set[3]
            print("recvThread msg_code=%s" % msg_code)
            for case in switch(msg_code):
                if case('S101'):    # login request
                    result = data_set[4]
                    userName = data_set[5]
                    pwd = data_set[6]
                    heartBeatInt = data_set[7]
                    if ret == 0:
                        print("RecvMsg[%s,%i,%s,%s,%s,%s]" %
                              (msg_code, msg_no, result, userName, pwd,
                               heartBeatInt))
                        flag = SUCCESS if result == 1 else FAILED
                        utcStamp = time.time()
                        retData = (str(result).encode(), str(utcStamp).encode(),
                                   flag.encode())
                        #send_msg_queues[sock].put(retData)
                        msg = common.encode('A101', msg_no, retData)
                        send_msg_queues[sock].put(msg)
                        break
                        #sock.send(msg)
                    else:
                        print("Error: unpack failed")
                if case('S201'):    # used to test serialization
                    result = data_set[4]
                    if ret == 0:
                        print(result)
                        rebuild = json.loads(
                            result,
                            object_hook=lambda d: common.Student(
                                d['name'], d['age'], d['score']))
                        print(rebuild)
                if case('S301'):
                    pass

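# switch/case above are not Python builtins; the snippet most likely relies on
# the classic iterator-based switch recipe sketched below (an assumption, since
# the original class is not included here):
class switch:
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        # yield a single match method, then stop: the for loop runs once
        yield self.match

    def match(self, *args):
        # no args means the default case; fall-through once a case has matched
        if self.fall or not args:
            return True
        if self.value in args:
            self.fall = True
            return True
        return False
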
def get_prof(self, uid):
    uid_hex = uid.to_bytes(4, "big")
    msg = (b"\x01\x00\x00\x00X\x00\x08\x00\x96\x00\x03\x10\x08\x00\x0c["
           + uid_hex + b"\x0c\x14\x00\x00\x00\x08")
    msg = len(msg).to_bytes(4, "big") + msg
    try:
        self.second_sock.sendall(encode(msg))
    except OSError:    # narrowed from a bare except
        print("error sending get_prof")
        return
    timeout = time.time() + 20
    while time.time() < timeout:
        for user_prof in self.uids_searched:
            if user_prof.uid == uid:
                self.uids_searched.remove(user_prof)
                return user_prof
        time.sleep(1)

def run(self):
    while True:
        if not self._outgoing.empty():
            msg = self._outgoing.get()
            if msg['type'] not in ['ping', 'start']:
                logging.debug(repr(msg))
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                self._sock.connect((msg['host'], msg['port']))
                self._sock.sendall(encode(msg))
            except ConnectionRefusedError:
                if msg['type'] == 'ping':
                    #logging.debug('failure: {}'.format(msg['to_id']))
                    failure_msg = dict(type="fail", id=msg['to_id'])
                    self._incoming.put(failure_msg)
            finally:
                self._sock.close()

def get_location(self, uid):
    uid_hex = uid.to_bytes(4, "big")
    # TODO change to locate msg
    msg = b'\x00\x00\x00\x11\x01\x00\x00\x00\xd8\x00\x00\x00u\x00\x01\x0c[' + uid_hex
    try:
        self.second_sock.sendall(encode(msg))
    except OSError:    # narrowed from a bare except
        print("error sending get_location")
        return
    timeout = time.time() + 10
    while time.time() < timeout:
        for loc_dict in self.users_locations:
            if loc_dict['located_uid'] == uid:
                self.users_locations.remove(loc_dict)
                return loc_dict
        time.sleep(1)

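# get_uid, get_prof and get_location all repeat the same poll-until-timeout
# loop; a hypothetical helper (not in the original code) that factors it out:
import time

def poll_until(check, timeout_s, interval=0.1):
    """Call check() until it returns a non-None result or timeout_s elapses."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        result = check()
        if result is not None:
            return result
        time.sleep(interval)
    return None
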
def run(self):
    while True:
        # receive messages and put them into the queue
        conn, addr = self._sock.accept()
        conn.settimeout(1)
        data = conn.recv(1024)
        msg = decode(data)
        if not msg:
            # check before indexing msg, so empty payloads don't crash;
            # close the connection instead of leaking it
            conn.close()
            continue
        if msg['type'] not in ['ping', 'start']:
            logging.debug(repr(msg))
        if msg['type'] == 'ping':
            # respond with pong to heartbeat messages; no need to queue it
            conn.sendall(encode('{"type": "pong"}'))
        else:
            incoming_queue.put(msg)
        conn.close()

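# In the network-reconfiguration snippets, encode/decode turn dict messages
# into bytes and back (note encode is also handed a pre-built JSON string for
# the pong reply). A plausible minimal pair assuming JSON over UTF-8; the
# original helpers are not shown here:
import json

def encode(msg):
    if isinstance(msg, str):
        return msg.encode('utf-8')
    return json.dumps(msg).encode('utf-8')

def decode(data):
    if not data:
        return None
    return json.loads(data.decode('utf-8'))
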
    return parser


if __name__ == '__main__':
    parser = opt_parser()
    opt = parser.parse_args()
    config = load_config(opt.net_config)
    #pprint(config)

    # Start server here
    server = Server(host=opt.host, port=opt.port, size=len(config))
    server.start()
    time.sleep(0.1)
    fetch_msg = encode(dict(type="extract", host=opt.host, port=opt.port))
    for node in config.values():
        send_msg(fetch_msg, host=node['host'], port=node['port'])
        #print(node)
    time.sleep(0.1)

    # wait for server here
    server.join()
    id_table = dict()
    while not server.responses.empty():
        resp = server.responses.get()
        print(repr(resp))
        id_table[resp['id']] = resp
    graph = as_nx(id_table)

def run(self):
    while True:
        if not incoming_queue.empty():
            msg = incoming_queue.get()
            if msg['type'] == 'start':
                time.sleep(2.0)
                self._heartbeat()
            elif msg['type'] == 'reconfig':
                sender_id = int(msg['id'])
                frag_id = int(msg['frag_id'])
                if self.status == 'idle':
                    self.status = 'wait'
                    failed_id = int(msg['failed_node'])
                    self.coord_so_far = sender_id
                    self.port_to_coord = self.port_to(sender_id)
                    can_communicate = list(
                        set(self._ports.values()).difference(
                            [sender_id, failed_id]))
                    if len(can_communicate) == 0:
                        self._con.send(sender_id, dict(type='no_contention'))
                    else:
                        for node_id in can_communicate:
                            self.recd_reply[node_id] = None
                            self._con.send(
                                node_id,
                                dict(type='reconfig',
                                     node_list=msg['node_list'] + [self.id],
                                     frag_id=msg['frag_id'],
                                     failed_node=failed_id))
                else:
                    e = self.port_to(sender_id)
                    # Message is a copy of a message received earlier
                    if (frag_id == self.coord_so_far) and (
                            e not in self.get_port()):
                        logging.debug('Message was received earlier')
                        self._con.send(self._ports[e],
                                       dict(type='no_contention'))
                        continue
                    # Detected loop
                    if self.id in msg['node_list']:
                        logging.debug('Loop is detected')
                        self._con.send(self._ports[e],
                                       dict(type='no_contention'))
                        continue
                    # Resolve contention
                    if (self.coord_so_far > frag_id) or (
                            (self.coord_so_far == frag_id) and
                            (self.id > sender_id)):
                        logging.debug('Sending out stop message')
                        self._con.send(
                            sender_id,
                            dict(type='stop', frag_id=self.coord_so_far))
                    else:
                        self.coord_so_far = frag_id
                        if self.port_to_coord is not None:
                            logging.debug('Sending out stop message')
                            self._con.send(
                                self._ports[self.port_to_coord],
                                dict(type='stop', frag_id=frag_id))
                        self.port_to_coord = self.port_to(sender_id)
            elif msg['type'] == 'no_contention':
                sender_id = int(msg['id'])
                if sender_id in self.recd_reply:
                    self.recd_reply[sender_id] = 'no_contention'
                if not there_is_a_none(self.recd_reply):
                    self._on_everybody_responded()
            elif msg['type'] == 'accepted':
                sender_id = int(msg['id'])
                if sender_id in self.recd_reply:
                    self.recd_reply[sender_id] = 'accepted'
                if not there_is_a_none(self.recd_reply):
                    self._on_everybody_responded()
            elif msg['type'] == 'stop':
                frag_id = msg['frag_id']
                from_id = msg['id']
                p = self.port_to(from_id)
                if frag_id > self.coord_so_far:
                    self.coord_so_far = frag_id
                    if self.port_to_coord is not None:
                        self._con.send(self._ports[self.port_to_coord],
                                       dict(type='stop', frag_id=frag_id))
                    self.port_to_coord = p
                if frag_id == self.coord_so_far:
                    if self.port_to_coord not in self.get_port():
                        if self.port_to_coord is not None:
                            self._con.send(self._ports[self.port_to_coord],
                                           dict(type='no_contention'))
                            self.recd_reply[self.port_to_coord] = 'no_contention'
                            if not there_is_a_none(self.recd_reply):
                                self._on_everybody_responded()
                    else:
                        self._con.send(self._ports[self.port_to_coord],
                                       dict(type='stop', frag_id=frag_id))
                        self.port_to_coord = p
                if frag_id < self.coord_so_far:
                    self._con.send(
                        self._ports[p],
                        dict(type='stop', frag_id=self.coord_so_far))
            elif msg['type'] == 'fail':
                failed_node = int(msg['id'])
                self.remove_edge(failed_node)
                self.status = 'wait'
                self.coord_so_far = self.id
                self.port_to_coord = None
                for node_id in set(self._ports.values()).difference(
                        [failed_node]):
                    self.recd_reply[node_id] = None
                    self._con.send(
                        node_id,
                        dict(type='reconfig',
                             node_list=[self.id],
                             frag_id=self.id,
                             failed_node=failed_node))
            elif msg['type'] == 'extract':
                host = msg['host']
                port = msg['port']
                graph_msg = encode(
                    dict(id=self.id,
                         links=list(self._ports.values()),
                         edges=[self._ports[port_id]
                                for port_id in self._edges]))
                send_msg(msg=graph_msg, host=host, port=port)

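# there_is_a_none is used above to decide whether every neighbour has replied;
# a plausible one-liner matching that use (the original is not shown):
def there_is_a_none(replies):
    """True while at least one neighbour has not yet responded."""
    return any(v is None for v in replies.values())
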
def sendJson(self, msg_code, data):
    msg_no = 0
    msg = common.encode(msg_code, msg_no, data)
    self.socket.send(msg)

def main(argv):
    argparser = argument_parser()
    args = argparser.parse_args(argv[1:])
    seq_len = args.max_seq_length    # abbreviation
    pretrained_model, tokenizer = load_pretrained(args)
    train_words, train_tags = read_conll(args.train_data)
    test_words, test_tags = read_conll(args.test_data)
    print(args.no_context)
    if args.no_context:
        train_data = process_no_context(train_words, train_tags, tokenizer, seq_len)
        test_data = process_no_context(test_words, test_tags, tokenizer, seq_len)
    elif args.documentwise:
        tr_docs, tr_doc_tags, tr_line_ids = split_to_documents(train_words, train_tags)
        te_docs, te_doc_tags, te_line_ids = split_to_documents(test_words, test_tags)
        train_data = process_docs(tr_docs, tr_doc_tags, tr_line_ids, tokenizer, seq_len)
        test_data = process_docs(te_docs, te_doc_tags, te_line_ids, tokenizer, seq_len)
    else:
        train_data = process_sentences(train_words, train_tags, tokenizer,
                                       seq_len, args.predict_position)
        test_data = process_sentences(test_words, test_tags, tokenizer,
                                      seq_len, args.predict_position)
    label_list = get_labels(train_data.labels)
    tag_map = {l: i for i, l in enumerate(label_list)}
    inv_tag_map = {v: k for k, v in tag_map.items()}
    train_x = encode(train_data.combined_tokens, tokenizer, seq_len)
    test_x = encode(test_data.combined_tokens, tokenizer, seq_len)
    train_y, train_weights = label_encode(train_data.combined_labels, tag_map, seq_len)
    test_y, test_weights = label_encode(test_data.combined_labels, tag_map, seq_len)
    if args.use_ner_model and (args.ner_model_dir is not None):
        ner_model, tokenizer, labels, config = load_ner_model(args.ner_model_dir)
    else:
        optimizer = create_optimizer(len(train_x[0]), args)
        model = create_ner_model(pretrained_model, len(tag_map))
        if args.num_gpus > 1:
            ner_model = multi_gpu_model(model, args.num_gpus)
        else:
            ner_model = model
        ner_model.compile(
            optimizer,
            loss='sparse_categorical_crossentropy',
            sample_weight_mode='temporal',
            metrics=['sparse_categorical_accuracy']
        )
        ner_model.fit(
            train_x,
            train_y,
            sample_weight=train_weights,
            epochs=args.num_train_epochs,
            batch_size=args.batch_size
        )
        if args.ner_model_dir is not None:
            label_list = [v for k, v in sorted(list(inv_tag_map.items()))]
            save_ner_model(ner_model, tokenizer, label_list, args)
    probs = ner_model.predict(test_x, batch_size=args.batch_size)
    preds = np.argmax(probs, axis=-1)
    results = []
    m_names = []
    if args.no_context:
        pr_ensemble, pr_test_first = get_predictions(preds, test_data.tokens,
                                                     test_data.sentence_numbers)
        output_file = "output/{}-NC.tsv".format(args.output_file)
        m_names.append('NC')
        ensemble = []
        for i, pred in enumerate(pr_test_first):
            ensemble.append([inv_tag_map[t] for t in pred])
        lines_ensemble, sentences_ensemble = write_result(
            output_file, test_data.words, test_data.lengths,
            test_data.tokens, test_data.labels, ensemble
        )
        c = conlleval.evaluate(lines_ensemble)
        conlleval.report(c)
        results.append([conlleval.metrics(c)[0].prec,
                        conlleval.metrics(c)[0].rec,
                        conlleval.metrics(c)[0].fscore])
    else:
        # First tag, then vote
        pr_ensemble, pr_test_first = get_predictions(
            preds, test_data.tokens, test_data.sentence_numbers)
        # Accumulate probabilities, then vote
        prob_ensemble, prob_test_first = get_predictions2(
            probs, test_data.tokens, test_data.sentence_numbers)
        ens = [pr_ensemble, prob_ensemble, pr_test_first, prob_test_first]
        if args.documentwise:
            # D-CMV: documentwise CMV
            # D-CMVP: documentwise CMV, probs summed, argmax after that
            # D-F: documentwise First
            # D-FP: documentwise First, probs summed
            method_names = ['D-CMV', 'D-CMVP', 'D-F', 'D-FP']
        else:
            method_names = ['CMV', 'CMVP', 'F', 'FP']
        for i, ensem in enumerate(ens):
            ensemble = []
            for j, pred in enumerate(ensem):
                ensemble.append([inv_tag_map[t] for t in pred])
            output_file = "output/{}-{}.tsv".format(args.output_file,
                                                    method_names[i])
            lines_ensemble, sentences_ensemble = write_result(
                output_file, test_data.words, test_data.lengths,
                test_data.tokens, test_data.labels, ensemble)
            print("Model trained: ", args.ner_model_dir)
            print("Seq-len: ", args.max_seq_length)
            print("Learning rate: ", args.learning_rate)
            print("Batch Size: ", args.batch_size)
            print("Epochs: ", args.num_train_epochs)
            print("Training data: ", args.train_data)
            print("Testing data: ", args.test_data)
            print("")
            print("Results with {}".format(method_names[i]))
            c = conlleval.evaluate(lines_ensemble)
            print("")
            conlleval.report(c)
            results.append([conlleval.metrics(c)[0].prec,
                            conlleval.metrics(c)[0].rec,
                            conlleval.metrics(c)[0].fscore])
        m_names.extend(method_names)
    if args.sentence_in_context:
        starting_pos = np.arange(0, seq_len + 1, 32)
        starting_pos[0] = 1
        m_names.extend(starting_pos)
        for start_p in starting_pos:
            tt_lines, tt_tags, line_nos, line_starts = combine_sentences2(
                test_data.tokens, test_data.labels, seq_len - 1, start_p - 1)
            tt_x = encode(tt_lines, tokenizer, seq_len)
            tt_y, train_weights = label_encode(tt_tags, tag_map, seq_len)
            probs = ner_model.predict(tt_x, batch_size=args.batch_size)
            preds = np.argmax(probs, axis=-1)
            pred_tags = []
            for i, pred in enumerate(preds):
                idx = line_nos[i].index(i)
                pred_tags.append(
                    [inv_tag_map[t]
                     for t in pred[line_starts[i][idx] + 1:
                                   line_starts[i][idx] + len(test_data.tokens[i]) + 1]])
            output_file = "output/{}-{}.tsv".format(args.output_file, start_p)
            lines_first, sentences_first = write_result(
                output_file, test_data.words, test_data.lengths,
                test_data.tokens, test_data.labels, pred_tags
            )
            print("")
            print("Results with prediction starting position ", start_p)
            c = conlleval.evaluate(lines_first)
            conlleval.report(c)
            results.append([conlleval.metrics(c)[0].prec,
                            conlleval.metrics(c)[0].rec,
                            conlleval.metrics(c)[0].fscore])
    result_file = "./results/results-{}.csv".format(args.output_file)
    with open(result_file, 'w+') as f:
        for i, line in enumerate(results):
            params = "{},{},{},{},{},{},{},{},{}".format(
                args.output_file, args.max_seq_length, args.bert_config_file,
                args.num_train_epochs, args.learning_rate, args.batch_size,
                args.predict_position, args.train_data, args.test_data)
            f.write(params)
            f.write(",{}".format(m_names[i]))
            for item in line:
                f.write(",{}".format(item))
            f.write('\n')
    for i in results:
        print(i)
    return 0

def run(self):
    while True:
        if not incoming_queue.empty():
            msg = incoming_queue.get()
            logging.debug(
                'Processing message: {}\nCurrent recvd: {}\nCurrent status: {}\n'
                'Coord so far: {}\nPort to coord: {}'.format(
                    repr(msg), self.recd_reply, self.status,
                    self.coord_so_far, self.port_to_coord))
            logging.debug('{}'.format(self.recd_reply))    # was ''.format(...), which always logged an empty string
            if msg['type'] == 'start':
                print('Activating in a second')
                time.sleep(2.0)
                self._heartbeat()
            elif msg['type'] == 'reconfig':
                self.recd_reply[self.port_to(int(msg['id']))] = 'no_contention'
                self._on_reconfig(msg['node_list'], int(msg['frag_id']),
                                  int(msg['id']))
            elif msg['type'] == 'no_contention':
                if self.status == 'wait':
                    self.recd_reply[self.port_to(int(msg['id']))] = 'no_contention'
                    if len(self.recd_reply) == len(self._ports):
                        self._on_everybody_responded()
            elif msg['type'] == 'accept':
                if self.status == 'wait':
                    self.recd_reply[self.port_to(int(msg['id']))] = 'accepted'
                    if len(self.recd_reply) == len(self._ports):
                        self._on_everybody_responded()
            elif msg['type'] == 'stop':
                if self.status == 'wait':
                    self._on_stop(int(msg['frag_id']), int(msg['id']))
            elif msg['type'] == 'fail':
                # set status of the node
                # TODO: This might be an issue
                self.status = 'wait'
                self.coord_so_far = self.id
                self.port_to_coord = None    # What to put here???
                # remove the failed edge from our MST
                self.remove_edge(int(msg['id']))
                # send reconfiguration request through all the ports
                for dest_id in set(self._ports.values()).difference(
                        [int(msg['id'])]):
                    self._con.send(id=dest_id,
                                   msg=dict(type='reconfig',
                                            node_list=[self.id],
                                            frag_id=self.id))
            elif msg['type'] == 'extract':
                host = msg['host']
                port = msg['port']
                graph_msg = encode(
                    dict(id=self.id,
                         links=list(self._ports.values()),
                         edges=[self._ports[port_id]
                                for port_id in self._edges]))
                send_msg(msg=graph_msg, host=host, port=port)

def _initialise(self):
    policy_sock = create_socket()
    policy_sock.sendall(b'<policy-file-request/>\x00')
    policy_sock.close()
    second_sock = create_socket()
    msgs = [
        encode(
            b'\x00\x00\x00\x89\x01\x00\x00\x00\x02\x00\x00\x00=\x00\x0c\x1a\xe1\x00\x12game2.ourworld.com\x0cB\x00\x00\x00\x05\x18E\x00\x04info\x18\xc6\x00\x04home\x0e\xe2\x00\x00$^\x0fk\x00\x00\x00\x006\x8c\x15f9a\x00\x00\x01q\x10N\x01\x18Z\x00 '
            + self.account.auth_id.encode()
            + b'\x18<\x00\x04info\x18]\x00\x03xyz\x0c>\xff\xff\xff\xff'),
        b"<m>AAAAEQEAAAADAAAA6AABGhEAAmVu</m>\x00",
        b"<m>AAAAEQEAAAAEAAgA9wABDBQAAAAI</m>\x00",
        b"<m>AAAADQEAAAAFAAAA4AAAAAA=</m>\x00",
        encode(
            b'\x00\x00\x00\x17\x01\x00\x00\x00\x06\x00\x08\x00U\x00\x02\x0c\x14\x00\x00\x00\x08\x0c['
            + self.account.uid_hex),
        b'<m>AAAAFwEAAAAHAAABJgACDgAAAAABDNIAAAAC</m>\x00'
    ]
    for msg in msgs:
        second_sock.sendall(msg)
    throw_sock = create_socket()
    # throw_sock.sendall(encode(b'\x00\x00\x00\xbb\x01\x00\x00\x00\t\x00\x00\x00=\xff\xf5\x1a\xe1\x00\x12game2.ourworld.com\x0cB\x00\x00\x00\x04\x0e\xe2\x00\x00$^\x18<\x00\x04town\x18E\x00\x1aarcadia-boardwalk-exterior\x18\xc6\x00\x04home\x18Z\x00 '+self.account.auth_id.encode()+b'\x0fk\x00\x00\x00\x006\x8c\x15f9a\x00\x00\x01q\x18\xcd\x00\x1aarcadia-boardwalk-exterior\x0c\x1e\x00\x00\x00\x00\x00\x01\x00C\x00\x00\x00\x00'))
    # (message, socket number): 1 = throw_sock, 2 = second_sock
    msgs = [
        (b"<m>AAAAEQEAAAAKAAwBGwABDBQAAAAM</m>\x00", 2),
        (b"<m>AAAAMwEAAAALAAAABgADDEAAAAI2GDwADGNvbm5lY3QgdGltZRgVAA5Xb3JsZDogQ29ubmVjdA==</m>\x00", 1),
        (b"<m>AAAAIwEAAAANAAgAD//+DBQAAAAIDGEAAAACAAEAZAABDD4ALcyf</m>\x00", 2),
        (b"<m>AAAAFwEAAAAMAAAAegADCBgAAAgWAPsIFwN6</m>\x00", 1),
        (b"<m>AAAADQEAAAAOAAAAugAAAAA=</m>\x00", 2),
        (b"<m>AAAADQEAAAAPAAAAlwAAAAA=</m>\x00", 1),
        (b"<m>AAAADQEAAAAQAAAA9wAAAAA=</m>\x00", 1),
        (b"<m>AAAAHQEAAAARAAAAUgACGGEACGxvY2F0aW9uDFsAAXcB</m>\x00", 1),
        (encode(
            b'\x00\x00\x00\x1d\x01\x00\x00\x00\x13\x00\x00\x00R\x00\x02\x18a\x00\x08location\x0c['
            + self.account.uid_hex), 1),
        (b"<m>AAAAEQEAAAAWAAgABQABDBQAAAAI</m>\x00", 2),
        (encode(
            b'\x00\x00\x00\x1e\x01\x00\x00\x00\x14\x00\x00\x00\x04\x00\x03\x0c['
            + self.account.uid_hex + b'\x18\x15\x00\x06helper\x10v\x00'), 1),
        (b'<m>AAAAIwEAAAAXAAgAD//+DBQAAAAIDGEAAAACAAEAZAABDD4AHvYq</m>\x00', 2),
        (b"<m>AAAADQEAAAAVAAAAnwAAAAA=</m>\x00", 1),
        (b"<m>AAAAEQEAAAAfAAAAoQABDD4AAAGI</m>\x00", 1),
        (b"<m>AAAADQEAAAAZAAABCQAAAAA=</m>\x00", 2),
        (b"<m>AAAAFwEAAAAaAAABJwACDDwAAAABDEQAAAAJ</m>\x00", 2),
        (b"<m>AAAADQEAAAAbAAABGwAAAAA=</m>\x00", 2),
        (b"<m>AAAADQEAAAAcAAABDQAAAAA=</m>\x00", 2),
        (encode(
            b'\x00\x00\x00\x11\x01\x00\x00\x00\x1d\x00\x00\x00\xfa\x00\x01\x0c['
            + self.account.uid_hex), 2)
    ]
    for msg, sock_num in msgs:
        if sock_num == 1:
            throw_sock.sendall(msg)
        else:
            second_sock.sendall(msg)
    # msg = encode(b'\x00\x00\x00m\x01\x00\x00\x00\xf2\x00\x00\x00>\xff\xf8\x1a\xe1\x00\x12game2.ourworld.com\x0cB\x00\x00\x00\x04\x18E\x00\rcondos/condo1\x18\xc6\x00\x04home\x0e\xe2\x00\x00$^\x18<\x00\x04town\x18\xcd\x00\rcondos/condo1\x0c\x1e'+self.account.uid_hex+b'\x00\x01\x00C\x00\x00\x00\x00')
    # with the assignment above commented out, this re-sends the last message
    # from the loop twice
    throw_sock.sendall(msg)
    throw_sock.sendall(msg)
    time.sleep(1)
    self.throw_sock = throw_sock
    self.second_sock = second_sock

#!/usr/bin/env python3
import argparse
from pprint import pprint

from common import load_config
from common import encode
from common import send_msg


def opt_parser():
    parser = argparse.ArgumentParser(
        description='Network reconfiguration node')
    parser.add_argument('--net_config',
                        default='config/sample_graph3.json',
                        type=str)
    return parser


if __name__ == '__main__':
    parser = opt_parser()
    opt = parser.parse_args()
    config = load_config(opt.net_config)
    pprint(config)
    start_msg = encode(dict(type="start"))
    for node in config.values():
        send_msg(start_msg, host=node['host'], port=node['port'])
        print(node)

def on_lights(self, widget):
    """Toggle the rover's lights on or off."""
    if widget.get_active():
        self.coms.send(encode(common.ID_LIGHTS, 1))
    else:
        self.coms.send(encode(common.ID_LIGHTS, 0))