def talker():
    """Poll every configured RFID reader over HTTP and republish hits on ROS.

    Reads host list, port, loop rate and socket timeout from getArgs(),
    opens one http connection per host, then loops until ROS shutdown:
    each non-empty response line is parsed as an integer UID and published
    as an RFID_data message stamped with the current ROS time.
    """
    hosts, port, hz, timeout = getArgs()

    # ROS plumbing: publisher first, then node registration, then loop rate.
    pub = rospy.Publisher(TOPIC_NAME, RFID_data, queue_size=1)
    rospy.init_node(NODE_NAME, anonymous=True)
    loop_rate = rospy.Rate(hz)

    # One persistent HTTP connection per reader host.
    connections = [http(host, port, timeout) for host in hosts]

    rospy.loginfo('Working...')

    # For each connection read the incoming line; if it is not empty,
    # build and publish a message. Errors on one reader must not stop
    # the others, so each read is wrapped individually.
    while not rospy.is_shutdown():
        for conn in connections:
            try:
                raw = conn.getResponseLine()
                if not http.isLineNotEmpty(raw):
                    continue
                msg = RFID_data()
                msg.header.stamp = rospy.Time.now()
                msg.uid = int(raw)
                msg.port = conn.host
                pub.publish(msg)
            except Exception as e:
                rospy.logerr("Exception when getting line from %s", conn.host)
                rospy.logerr(str(e))
        loop_rate.sleep()
def app(env, response):
    """Top-level WSGI entry point: dispatch on PATH_INFO.

    Routes /documents, /clusters and /hosts to their handlers; "/" renders
    a small status homepage. Anything else gets the fallback response.
    """
    path = env["PATH_INFO"]

    if path.startswith("/documents"):
        return handle_documents(env, response, distribution, options.storage)

    if path == "/":
        # Plain-HTML status page: greeting, cluster map and known hosts.
        sections = [
            welcome,
            "Cluster distribution",
            str(distribution.distribution),
            "Host pool",
            str(host_pool),
        ]
        return http(response, "<br><br>".join(sections))

    if path.startswith("/clusters"):
        return handle_clusters(env, response, distribution, options.storage)

    if path.startswith("/hosts"):
        hosts_response = handle_hosts(env, response, host_pool)
        # The pool may have changed; recompute the cluster distribution
        # before handing the response back.
        new_distribution()
        return hosts_response

    return http(response, "app:no routes")
def handle_documents(env, response, distribution, storage):
    """GET/POST handler for single documents, sharded by index prefix."""
    # Pull the request parameters (each value arrives as a one-element list).
    params = parse_request(env)
    doc_index = params.get('index', [None])[0]
    doc_body = params.get('document', [None])[0]

    if doc_index:
        # take the 2 first hexa value to determine
        # in which cluster the data should be.
        # It works with 256 cluster only
        cluster_id = int(doc_index[0:2], 16)
        owner = distribution.get_host_from_cluster(cluster_id)
        if owner != distribution.current_host:
            return http(response, "This document is not on this host but on host (%s)" % owner)

    verb = env['REQUEST_METHOD']

    if verb == 'GET':
        if not doc_index:
            return http(response, "Must provide index (%s)" % doc_index, code=404)
        try:
            found = read_document(storage, doc_index)
        except IOError:
            return http(response, "Document not found.", code=404)
        return http(response, found)

    if verb == 'POST':
        if not doc_body:
            return http(response, "Must provide a json document.")
        if not doc_index:
            # No index supplied: derive one from the document itself.
            doc_index = create_index(doc_body)
        try:
            store_document(storage, doc_index, doc_body)
        except json.DecodeError:
            return http(response, "Json is invalid.")
        return http(response, doc_index)

    return http(response, "handle_documents:no routes")
def handle_hosts(env, response, host_pool):
    """Handle all the requests on this host.

    GET returns the current pool as JSON. POST routes:
      /hosts/register            - a new host asks to enter the pool
      /hosts/migration_finished  - a migrating host reports completion
      /hosts/join                - this (lone) host is told to join a pool

    host_pool is mutated IN PLACE throughout so every reference to the
    shared list observes the updates.
    """
    method = env['REQUEST_METHOD']
    if method == 'GET':
        return http(response, json.encode(host_pool))

    # post method signify a new host as to integrated to the pool
    # or a request to join a pool of hosts

    # handling a new server that want to enter the pool
    if env['PATH_INFO'].startswith('/hosts/register'):
        content = parse_request(env)
        new_host_address = content.get('address', [None])[0]
        # already registered, is some server already migrating? We need to stop here.
        for host in host_pool:
            if host['address'] == new_host_address:
                return http(response, "This server is already in the pool.")
            if host['state'] == 'migrating':
                return http(response, "There is already a migrating server in the pool.")
        new_host = {
            'address': new_host_address,
            'index': len(host_pool),
            'state': 'migrating',
        }
        # if we were alone, we are now pooling.
        for host in host_pool:
            if host['state'] == 'alone':
                host['state'] = 'pooling'
        # register the new host
        host_pool.append(new_host)
        return http(response, WELCOME_POOL)

    if env['PATH_INFO'].startswith('/hosts/migration_finished'):
        content = parse_request(env)
        new_host_address = content.get('address', [None])[0]
        for host in host_pool:
            if host['state'] == 'migrating':
                # BUG FIX: the original did `raise "Wrong!"`; raising a string
                # is a TypeError since Python 2.6. Raise a real exception.
                if host['address'] != new_host_address:
                    raise ValueError("Wrong!")
                host['state'] = 'pooling'
        return http(response, WELCOME_POOL)

    # handling the client that ask to join a pool
    if env['PATH_INFO'].startswith('/hosts/join'):
        if len(host_pool) > 1:
            return http(response, "Already in a pool.")
        content = parse_request(env)
        pool_address = content.get('pool_address', [None])[0]
        my_address = host_pool[0]['address']
        if pool_address == my_address:
            return http(response, "That's silly.")

        # Fetch the remote pool description.
        req = get_pool_request(pool_address)
        try:
            data = urllib2.urlopen(req)
        except urllib2.URLError:
            return http(response, "Cannot get pool description.")
        new_pool = json.decode(data.read())

        # empty local host_pool in place, then adopt the remote pool
        del host_pool[:]
        host_pool.extend(new_pool)

        # notifiy all the other hosts that there is a new host in town
        for host in new_pool:
            # avoid deadly infinite loop
            if my_address != host['address']:
                req = register_pool_request(my_address, host['address'])
                data = urllib2.urlopen(req)
                # NOTE(review): assert is stripped under -O; kept for behavior
                # compatibility, but a real check would be safer.
                assert(data.read() == WELCOME_POOL)

        # launch the cluster migration
        migrate_clusters(my_address, host_pool)

        # notify all the hosts that the data migration was sucessful
        for host in new_pool:
            if my_address != host['address']:
                req = migration_finished_request(my_address, host['address'])
                data = urllib2.urlopen(req)
                assert(data.read() == WELCOME_POOL)

        # request everything again: the pool states changed during migration
        req = get_pool_request(pool_address)
        try:
            data = urllib2.urlopen(req)
        except urllib2.URLError:
            return http(response, "Cannot get pool description.")
        new_pool = json.decode(data.read())

        # empty local host_pool and adopt the refreshed description
        del host_pool[:]
        assert(host_pool == [])
        host_pool.extend(new_pool)
        return http(response, json.encode(host_pool))

    return http(response, "handle_hosts:no routes")


def register_pool_request(address, pool_address):
    """Build the POST request registering *address* on host *pool_address*."""
    register_url = 'http://' + pool_address + '/hosts/register'
    values = {'address': address}
    data = urllib.urlencode(values)
    return urllib2.Request(register_url, data)


def join_pool_request(address, pool_address):
    """Build the POST request asking *address* to join the pool at *pool_address*."""
    register_url = 'http://' + address + '/hosts/join'
    values = {'pool_address': pool_address}
    data = urllib.urlencode(values)
    # BUG FIX: the original computed the payload but fell off the end,
    # returning None to callers; return the request like
    # register_pool_request does. (Confirm against the original repo —
    # this line may have been lost when the file was reflowed.)
    return urllib2.Request(register_url, data)
def handle_clusters(env, response, distribution, storage):
    """Return the JSON-encoded cluster listing for the optional 'index' parameter."""
    params = parse_request(env)
    requested_index = params.get('index', [None])[0]
    clusters = list_cluster(storage, requested_index)
    return http(response, json.encode(clusters))