def send(self, message):
    """Send *message* over this object's socket.

    `message` must be a bytes object; it is forwarded verbatim to
    util.send_msg on self.sock.

    NOTE(review): the historical docstring mentioned a 'recipients'
    filter, but the visible implementation has none — it is a plain
    single-socket send.
    """
    util.send_msg(self.sock, message)
def notifyUser():
    """Flask endpoint: text the recipient about a donation.

    Reads `amount`, `category`, `pin`, and `phone` from the posted form
    (multi-value dict; first value of each field is used) and sends a
    notification SMS via send_msg.
    """
    form = request.form.to_dict(flat=False)
    amount = form['amount'][0]
    category = form['category'][0]
    pin = str(form['pin'][0])
    text = ("You've been donated: $" + amount +
            " for: " + category +
            " pay with pin: " + pin)
    send_msg(form['phone'][0], text)
    return 'okay'
def scheduled_gossip(self):
    """Push our current membership view to one random neighbor, then
    re-arm the gossip timer so this repeats every c.GOSSIP seconds.
    """
    global gossip_timer
    # Snapshot neighbor choice and membership under the lock.
    with n_lock:
        neighbor = sample(n.nodes, 1)[0]
        membership = list(n.clients + n.nodes)
    if membership:
        u.send_msg(neighbor[0], 23456, M.GossipMsg(membership))
    # Schedule the next round regardless of whether anything was sent.
    gossip_timer = threading.Timer(c.GOSSIP, self.scheduled_gossip)
    gossip_timer.start()
def create_and_send_query_msg(s_node, d_node, sn_long, sn_lat, dn_long, dn_lat, target_r, forbidden_r, q_id):
    """Build a query request and send it to the source node, printing the
    outcome of the (blocking, 7s-timeout) round trip.

    NOTE(review): `time_to_live` is not a parameter here — presumably a
    module-level constant; confirm it is defined at module scope.
    """
    if target_r is None:
        print('TargetRegionNotFound')
        return
    query_request = msg.RequestQueryMsg(q_id, sn_lat, sn_long, dn_lat, dn_long,
                                        time_to_live, forbidden_r, target_r,
                                        s_node, d_node)
    raw = utils.send_msg(s_node, c.PORT, query_request,
                         w_marshalling=False, reply=True, timeout=7)
    if raw is None:
        print('Cannot send to nor receive from %s,%d' % (s_node, q_id))
        return
    reply = msg.Msg.fromBytes(raw)
    print(reply)
    if reply.param["succeeded"]:
        print('Success,%s,%s' % (reply.param["query"], reply.param["result"]))
    else:
        print('Failure,%s,%s' % (reply.param["query"], reply.param["result"]))
def handle_client(self):
    """Relay loop: read framed messages from this connection and fan each
    one out to every connected client, until recv_msg yields None.

    NOTE(review): a None from util.recv_msg is treated as "peer hung up" —
    confirm that is recv_msg's disconnect contract.
    """
    self.data = b''
    # Fixes: `while flag == True` boolean-flag loop replaced with
    # `while True`/`break`; `!= None` replaced with `is not None`;
    # loop variable no longer shadows any outer single-letter name.
    while True:
        self.data = util.recv_msg(self.request)
        logging.debug("{}:{} wrote: {}".format(self.client_address[0],
                                               self.client_address[1],
                                               self.data))
        try:
            if self.data is not None:
                for client in self.clients:
                    util.send_msg(client, self.data)
                # update log
            else:
                break  # peer closed the connection
        except AttributeError as e:
            logging.warning("Exception in Handler: {}".format(e))
def joinPeerHandler(self, msg):
    """Bootstrap this node into the overlay.

    On the first join message, repeatedly asks every configured bootstrap
    node for a neighbor list until at least 2 neighbors are known, then
    starts the periodic gossip / refresh timers and marks the node
    initialized. Subsequent calls are no-ops once `initialized` is set.

    NOTE(review): reads the module globals `initialized`, `my_latlon_map`,
    and `my_hostname`; their initialization is outside this view.
    """
    global initialized, gossip_timer, refresh_neighbors_timer, request_neighborlist_timer
    if not initialized:
        mylat = my_latlon_map["lat"]
        mylon = my_latlon_map["lon"]
        # now request list of neighbors from all bootstrapping nodes
        req = M.NeighborReqMsg(mylat, mylon)
        while True:
            # do repeatedly until initialization is complete
            # not all nodes will have enough neighbors in the first step
            # repeated execution ensures that the current node has enough neighbors
            for bootnode in c.bootstrap_nodes:
                # this node itself might be a bootstrap node.
                # In that case, we just move on to the next node
                if bootnode == my_hostname:
                    continue
                # else request neighbor list from that node (blocking, reply expected)
                nlist_msg = u.send_msg(bootnode, 23456, req, False, True)
                # NOTE(review): `nlist_msg and nlist_msg != None` is redundant —
                # the first truthiness test already excludes None.
                if nlist_msg and nlist_msg != None:
                    logging.info(
                        "Received response from bootstrapping node %s", bootnode)
                    # response received is actually a gossip message;
                    # feed it through the regular gossip path to absorb the peers
                    x = M.Msg.fromBytes(nlist_msg)
                    self.gossipHandler(
                        x, bootnode, socket.gethostbyname(socket.gethostname()))
                # if sufficient neighbors have not been obtained, keep repeating
                if len(n.nodes) < 2:
                    continue
                # Enough neighbors: start the periodic maintenance timers once.
                gossip_timer = threading.Timer(c.GOSSIP, self.scheduled_gossip)
                gossip_timer.start()
                refresh_neighbors_timer = threading.Timer(
                    c.REFRESH_INTERVAL, self.refresh_neighbors_of_neighbors)
                refresh_neighbors_timer.start()
                request_neighborlist_timer = threading.Timer(
                    c.NLIST_REQ_INTERVAL, self.refresh_nn_list)
                request_neighborlist_timer.start()
                initialized = True
                break
            if initialized:
                break
            # pause before retrying the whole bootstrap round
            time.sleep(10)
def calculate_latency(node):
    """Measure round-trip latency to *node* using a nonce'd ping.

    Sends a Ping carrying a random 16-bit nonce and returns the measured
    latency, or None when no reply arrives or the reply's nonce does not
    match (stale/foreign response).
    """
    nonce = randint(0, 65535)
    ping = msg.Ping(nonce)
    (response, latency) = utils.send_msg(node, c.PORT, ping, False, True, True, 1)
    # Fix: the original passed `response` to msg.Msg.fromBytes unconditionally,
    # crashing when the 1-second ping timed out; treat a missing reply as failure.
    if response is None:
        return None
    pr_nonce = msg.Msg.fromBytes(response).param["nonce"]
    logging.debug('Checking if nonces are matching (%d == %d)', nonce, pr_nonce)
    return latency if nonce == pr_nonce else None
def get_neighbors_neighbortables(self, nlist):
    """Ask each neighbor in *nlist* for its own neighbor table and cache
    the answers in n.nn_list, keyed by the neighbor's address (entry [0]).
    Neighbors that do not answer are skipped silently.
    """
    request = M.NeighborReqMsg(-2000, 0)
    for entry in nlist:
        raw = u.send_msg(entry[0], 23456, request, False, True)
        if not raw:
            continue
        # DEPENDS ON UTILS
        neighbors = M.Msg.fromBytes(raw).param["neighbors"]
        with n_lock:
            n.nn_list[entry[0]] = neighbors
# --- Interactive login client (Python 2: uses raw_input) ---
# Parses -a/--address and -p/--port, connects over TCP, then runs a
# login / account-creation menu against the server's framed protocol.
# NOTE(review): this chunk appears truncated — the inner credential-retry
# loop ends mid-flight; the remainder is outside this view.
parser = optparse.OptionParser()
parser.add_option("-a", "--address", action="store", dest="address", default="localhost")
parser.add_option("-p", "--port", action="store", dest="port", type=int, default=1337)
opts = parser.parse_args(sys.argv[1:])[0]
destination = (opts.address, opts.port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)  # bound the connect attempt
s.connect(destination)
s.settimeout(None)  # back to blocking mode for the interactive session
while True:
    option = raw_input("Menu de connexion \n1. Se connecter \n2. Creer un compte \n")
    while option != "1" and option != "2":
        option = raw_input("Veuillez saisir une option valide:\n")
    util.send_msg(s, option)
    if option == "1":
        # Login flow: send id + password, then read the server's verdicts
        # ("1" = accepted; "0" appears to mean hard failure on the id).
        id = raw_input("Veuillez saisir votre identifiant:\n")
        passw = getpass.getpass("Veuillez saisir votre mot de passe:\n")
        util.send_msg(s, id)
        util.send_msg(s, passw)
        idAnswer = util.recv_msg(s)
        if idAnswer != "0":
            passwAnsw = util.recv_msg(s)
            while idAnswer != "1" or passwAnsw != "1":
                if idAnswer != "1":
                    id = raw_input("Veuillez saisir un identifiant valide:\n")
                    passw = getpass.getpass("Veuillez saisir votre mot de passe:\n")
                elif passwAnsw == "-1":
                    print("Desole, un probleme est survenu.")
def notifyVerify(r):
    """Text the user named in the first record of JSON payload *r*,
    asking them to verify their purchase with an image."""
    fields = json.loads(r)[0]['fields']
    send_msg(
        fields['username'],
        "Please image verify by texting us an image of your purchase and 'verify'")
def main(filename=None, ports=[5000]): if not filename: print 'Using default parameters' params = { 'max_generations': 200, 'max_not_improved': 20, 'size': 100, 'crossover': 0.3, 'mutation': 0.05, 'elitism': 0.2, 'imigration': 0.2, 'tour_size': 8, 'local': None } else: print 'Loading parameters from %s\n' % (filename) params = util.parse_json(filename) # Creating initial population pop = Population(size=params['size'], crossover=params['crossover'], mutation=params['mutation'], elitism=params['elitism'], imigration=params['imigration'], tour_size=params['tour_size'], local=params['local']) sockets = [] for port in ports: sock = util.open_socket(int(port)) sockets.append(sock) Chromo.sockets = sockets Chromo.cross_op = params['cross_op'] # Generations without improvements no_improv = 0 for i in range(params['max_generations']): print 'Generation %d' % (i + 1) print 'Calculating fitness' for sock in Chromo.sockets: util.send_msg(sock, 'NEWGEN\n') best = pop.evaluate() for sock in Chromo.sockets: util.send_msg(sock, 'ENDGEN\n') print 'main:current_best ', best #pop.plot_evolution() if not pop.improved: no_improvements += 1 print "Didn't improve:", no_improvements if no_improvements == params['max_not_improved']: print 'Reached limit for not improved generations' break else: no_improvements = 0 print 'Evolving' pop.evolve() print print '\nBest solution:' #pop.show_first() print 'Fitness: ', best[0] print 'Genes: ', Chromo.to_str(best[1]), '\'' # Closing connection for sock in Chromo.sockets: util.send_msg(sock, 'ENDGA\n') sock.close()
def send(self, msg):
    """Encode *msg* (a str) as UTF-8 and send it over this handler's
    request socket via util.send_msg."""
    encoded = bytes(msg, 'utf-8')
    util.send_msg(self.request, encoded)
def query_request_handler(self, q):
    """Handle a client's query request at the source node (non-forking).

    Blocks until the node is initialized, dedupes the query id, answers
    directly when this node (or a known neighbor) lies inside the target
    region, otherwise forwards the query to the best safe next hop and
    waits on a per-query queue for the outcome (or a timeout), finally
    replying to the client over self.request.

    Fixes relative to the original: the result wait read `q.get(...)` /
    cleared `q.queue` (q is the incoming message, not a queue) — both now
    use the per-query `syncQ`; undefined names `query_id`, `succeeded`,
    `path`, and `myip` replaced with `query`, the response's succeeded
    flag, `new_path`, and the socket's local address; `%d` log formats for
    the string query id corrected to `%s`; bare `except` narrowed to
    queue.Empty.
    """
    # client query requests are routed here
    global queryq, past_queries, active_queries
    # requests cannot be handled if node has not been initialized
    while not initialized:
        logging.info("Node is not initialized yet")
        time.sleep(2)
    myip = self.request.getsockname()[0]
    # Unpack the query message.
    query = q.param["query"]
    shost = q.param["shost"]
    slat = q.param["slat"]
    slon = q.param["slon"]
    dhost = q.param["dhost"]
    dlat = q.param["dlat"]
    dlon = q.param["dlon"]
    ttl = q.param["ttl"]
    # Regions arrive hex-encoded and pickled; b_loads deserializes them.
    forbidden_region = b_loads(bytes.fromhex(q.param["fr"]))
    target_region = b_loads(bytes.fromhex(q.param["tr"]))
    with qid_lock:
        if query in past_queries:
            # this query has been performed before, disregard it
            logging.info(
                'QUERYID %s;SRC %s;DST %s;STRATEGY non-forking; Query Discarded',
                query, shost, dhost)
            return
    if not c.TESTING:
        with daemon_lock:
            past_queries.add(query)
    # at this point the path is just being created, so only add self
    new_path = [(my_hostname, my_latlon_map["lon"], my_latlon_map["lat"])]
    # We are not using query forking, so strategy is fixed
    logging.info(
        'QUERYID %s;SRC %s;DST %s;STRATEGY non-forking ;At source.',
        query, shost, dhost)
    # If current host is within the target region, our search for an alibi
    # relay was successful and we can respond accordingly
    if Point(my_latlon_map["lon"], my_latlon_map["lat"]).intersects(target_region):
        logging.info('QUERYID %s;SRC %s;DST %s;%s within relayzone',
                     query, shost, dhost, my_hostname)
        response = M.ResponseQueryMsg(query, True, my_hostname, '[]')
        # would update query overheads at this point
        self.request.sendall(response.asBytes().encode('utf-8'))
        return
    # Otherwise we should find a viable next hop
    target_node = f.nodelist_within_region(query, my_hostname,
                                           my_latlon_map["lon"],
                                           my_latlon_map["lat"],
                                           n.nodes, n.nn_list,
                                           forbidden_region, target_region)
    if target_node is not None:
        # found some neighboring node within the target regions
        response = M.ResponseQueryMsg(query, True, target_node, '[]')
        self.request.sendall(response.asBytes().encode('utf-8'))
        return
    # Now we must choose a safe next hop.
    viable_nodes = f.get_next_hop(query, my_hostname, slon, slat, dlon, dlat,
                                  forbidden_region, target_region,
                                  my_latlon_map["lon"], my_latlon_map["lat"],
                                  n.nodes, n.nn_list)
    # are there any safe nodes available?
    if len(viable_nodes) == 0:
        # no safe nodes - return with failure
        response = M.ResponseQueryMsg(query, False,
                                      my_hostname + ": No safe next hop", '[]')
        # update query overhead here
        self.request.sendall(response.asBytes().encode('utf-8'))
        return
    # Not using query forking, so only pick best node for next hop
    next_hop = viable_nodes[0]
    if not next_hop:
        # no safe nodes - return with failure
        response = M.ResponseQueryMsg(query, False,
                                      my_hostname + ": No safe next hop",
                                      new_path)
        # update query overhead here
        self.request.sendall(response.asBytes().encode('utf-8'))
        return
    # Per-query result queue (producer-consumer with the response handler);
    # mark the query as active until a response is received.
    syncQ = queue.Queue()
    with queryq_lock:
        queryq[query] = syncQ  # created a new queue for this query
    with active_lock:
        active_queries.add(query)
    # attempt to forward the query
    logging.info(
        'QUERYID %s;SRC %s;DST %s;Trying to forward from source %s to %s',
        query, shost, dhost, myip, next_hop)
    to_send = M.QueryMsg(query, slat, slon, dlat, dlon, ttl,
                         forbidden_region, target_region, new_path,
                         shost, dhost)
    u.send_msg(next_hop, 23456, to_send)
    # IF THE QUERY COMPLETES SUCCESSFULLY, WE EXPECT THE RESULT TO ARRIVE
    # IN THE QUEUE.
    while True:
        try:
            # perform a blocking read with a timeout.
            response = syncQ.get(True, c.TIMEOUT)
        except queue.Empty:
            # the read failed: inform client that the query timed out
            logging.info('QUERYID %s;SRC %s;DST %s;Query timed out',
                         query, shost, dhost)
            msg = M.ResponseQueryMsg(query, False,
                                     "Query timed out at source", '[]')
            break
        if response:
            # NOTE(review): response messages expose `.params` here while
            # request messages use `.param` — kept as `.params` to match the
            # original; confirm against the message class.
            msg = M.ResponseQueryMsg(query, response.params["succeeded"],
                                     response.params["result"], new_path)
            if response.params["succeeded"]:
                logging.info(
                    'SUCCESS:: QUERYID %s;SRC %s;DST %s;Got a query response (Relay - %s)|%s',
                    query, my_hostname, dhost,
                    response.params["result"], new_path)
                break
            else:
                logging.info('FAILURE:: QUERYID %s;SRC %s;DST %s',
                             query, my_hostname, dhost)
                break
    # remove the query from the active list and empty out its sync queue
    with active_lock:
        active_queries.remove(query)
    with queryq_lock:
        syncQ.queue.clear()
    # update query overhead here
    # send a response to the client
    self.request.sendall(msg.asBytes().encode("utf-8"))
def query_handler(self, q):
    """Handle a forwarded query at an intermediate node (non-forking).

    Dedupes the query, appends this node to the path, enforces TTL,
    answers the source directly when this node (or a known neighbor) is
    inside the target region, otherwise forwards to the best safe next
    hop — retrying with the failed neighbor removed when the forward
    fails.

    Fixes relative to the original: `new_path = path.append(...)` was
    always None (list.append returns None) — now built without mutating
    the incoming list; undefined `tr` replaced with `target_region`; the
    two "no safe next hop" branches now `return` after responding instead
    of falling through; the forwarded message no longer rebinds `query`
    (so the failure log prints the query id, not the message object); the
    retry call referenced undefined `data` via a nonexistent
    `handle_query` — it now re-enters this handler with the original
    message.
    """
    global past_queries
    myip, myport = self.request.getsockname()
    query = q.param["query"]
    shost = q.param["shost"]
    slat = q.param["slat"]
    slon = q.param["slon"]
    dhost = q.param["dhost"]
    dlat = q.param["dlat"]
    dlon = q.param["dlon"]
    ttl = q.param["ttl"]
    forbidden_region = q.param["fr"]
    target_region = q.param["tr"]
    path = q.param["path"]
    with qid_lock:
        if query in past_queries:
            # this query has been performed before, disregard it
            return
    # Add self to the path taken by the packet so far (without mutating the
    # incoming message's list, so a retry re-derives the same path).
    new_path = path + [(my_hostname, my_latlon_map["lon"], my_latlon_map["lat"])]
    # We are not using query forking, so strategy is fixed
    logging.info(
        'QUERYID %s;SRC %s;DST %s;STRATEGY non-forking ;Hop Counts %d;ttl %d;Query Received',
        query, shost, dhost, len(new_path), ttl)
    # Check if the query has expired TTL
    if len(new_path) > ttl:
        # We are not using query forking, otherwise we would check strategy
        # here; not storing query overhead at this point either.
        # send a response to requester and exit
        response = M.ResponseQueryMsg(query, False,
                                      my_hostname + ": TTL expired", new_path)
        u.send_msg(shost, 23456, response)
        return
    logging.info(
        'QUERYID %s;SRC %s;DST %s;STRATEGY non-forking ;Processing Query',
        query, shost, dhost)
    # If current host is within the target region, our search for an alibi
    # relay was successful and we can respond accordingly
    if Point(my_latlon_map["lon"], my_latlon_map["lat"]).intersects(target_region):
        logging.info('QUERYID %s;SRC %s;DST %s;%s within relayzone',
                     query, shost, dhost, my_hostname)
        response = M.ResponseQueryMsg(query, True, my_hostname, new_path)
        # would update query overheads at this point
        u.send_msg(shost, 23456, response)
        return
    # Otherwise we should find a viable next hop
    target_node = f.nodelist_within_region(query, my_hostname,
                                           my_latlon_map["lon"],
                                           my_latlon_map["lat"],
                                           n.nodes, n.nn_list,
                                           forbidden_region, target_region)
    if target_node is not None:
        # found some neighboring node within the target regions
        response = M.ResponseQueryMsg(query, True, target_node, new_path)
        u.send_msg(shost, 23456, response)
        return
    # get the next hop to visit
    previous_node = self.request.getpeername()[0]
    # Now we must choose a safe next hop.
    viable_nodes = f.get_next_hop(query, my_hostname, slon, slat, dlon, dlat,
                                  forbidden_region, target_region,
                                  my_latlon_map["lon"], my_latlon_map["lat"],
                                  n.nodes, n.nn_list,
                                  previous_node, new_path, 1)
    # are there any safe nodes available?
    if len(viable_nodes) == 0:
        # no safe nodes - return with failure
        response = M.ResponseQueryMsg(query, False,
                                      my_hostname + ": No safe next hop",
                                      new_path)
        # update query overhead here
        u.send_msg(shost, 23456, response)
        return
    # Not using query forking, so only pick best node for next hop
    next_hop = viable_nodes[0]
    if not next_hop:
        # no safe nodes - return with failure
        response = M.ResponseQueryMsg(query, False,
                                      my_hostname + ": No safe next hop",
                                      new_path)
        # update query overhead here
        u.send_msg(shost, 23456, response)
        return
    # attempt to forward the query
    logging.info(
        'QUERYID %s;SRC %s;DST %s;Trying to forward from %s to %s',
        query, shost, dhost, myip, next_hop)
    forward_msg = M.QueryMsg(query, slat, slon, dlat, dlon, ttl,
                             forbidden_region, target_region, new_path,
                             shost, dhost)
    # update query overhead here
    if not u.send_msg(next_hop, 23456, forward_msg):
        logging.info(
            'QUERYID %s;SRC %s;DST %s;Query forwarding to %s failed',
            query, shost, dhost, next_hop)
        with n_lock:
            n.remove_item_from_nlist(next_hop)
            n.update_neighbor_list()
        # Retry with the dead neighbor removed; terminates once no viable
        # next hop remains (the empty-viable_nodes branch above responds).
        self.query_handler(q)
# --- Daemon startup tail (continuation: `file_gps_coords` is parsed above
# this view) ---
my_latlon_map["lat"] = file_gps_coords[1]
n.set_gps(file_gps_coords)
# set logging parameters
#logging_map={'hostname':host_name}
logging.Formatter.converter = time.gmtime  # log timestamps in UTC
#logging.basicConfig(filename="alibi_daemon.log", format='%(asctime)s|%(levelname)s|%(message)s', level=c.LOGGING_LEVEL)
logging.basicConfig(format='%(levelname)s|%(message)s', level=logging.DEBUG)
HOST, PORT = 'localhost', c.PORT
server = ThreadedTCPServer(('', PORT), ThreadedTCPRequestHandler)
# run server
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
# Send ourselves a join message to kick off the bootstrap handler.
join_msg = M.JoinMsg()
u.send_msg(HOST, PORT, join_msg, False)
# NOTE(review): serve_forever never returns, so this join blocks
# indefinitely and the cleanup below only runs after an interrupt — confirm.
server_thread.join()
gossip_timer.cancel()
# NOTE(review): the bootstrap handler elsewhere names these timers
# `refresh_neighbors_timer` / `request_neighborlist_timer`; the names
# below (`refresh_timer` / `nlist_req_timer`) would be undefined unless
# set elsewhere — confirm.
refresh_timer.cancel()
nlist_req_timer.cancel()
server.shutdown()
sys.exit(0)