def build(self, id='-1', state_interval=-1, componentsMapped=False):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)
  queueResult = self.actionQueue.result()

  nodeStatus = {"status": "HEALTHY", "cause": "NONE"}
  nodeStatus["alerts"] = []

  heartbeat = {'responseId': int(id),
               'timestamp': timestamp,
               'hostname': hostname.hostname(self.config),
               'nodeStatus': nodeStatus}

  commandsInProgress = False
  if not self.actionQueue.commandQueue.empty():
    commandsInProgress = True

  if len(queueResult) != 0:
    heartbeat['reports'] = queueResult['reports']
    heartbeat['componentStatus'] = queueResult['componentStatus']
    if len(heartbeat['reports']) > 0:
      # There may be IN_PROGRESS tasks
      commandsInProgress = True

  # For first request/heartbeat assume no components are mapped
  if int(id) == 0:
    componentsMapped = False

  logger.info("Building Heartbeat: {responseId = %s, timestamp = %s, "
              "commandsInProgress = %s, componentsMapped = %s}",
              str(id), str(timestamp), repr(commandsInProgress),
              repr(componentsMapped))
  if logger.isEnabledFor(logging.DEBUG):
    logger.debug("Heartbeat: %s", pformat(heartbeat))

  hostInfo = HostInfo(self.config)
  if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
    nodeInfo = {}
    # for now, just do the same work as registration
    # this must be the last step before returning heartbeat
    hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
    heartbeat['agentEnv'] = nodeInfo
    mounts = Hardware.osdisks()
    heartbeat['mounts'] = mounts

    if logger.isEnabledFor(logging.DEBUG):
      logger.debug("agentEnv: %s", str(nodeInfo))
      logger.debug("mounts: %s", str(mounts))

  nodeStatus["alerts"] = hostInfo.createAlerts(nodeStatus["alerts"])

  if self.collector is not None:
    heartbeat['alerts'] = self.collector.alerts()

  return heartbeat
def build(self, id='-1', state_interval=-1):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)
  queueResult = self.actionQueue.result()

  nodeStatus = {"status": "HEALTHY", "cause": "NONE"}

  heartbeat = {'responseId': int(id),
               'timestamp': timestamp,
               'hostname': hostname.hostname(),
               'nodeStatus': nodeStatus}

  if len(queueResult) != 0:
    heartbeat['reports'] = queueResult['reports']
    heartbeat['componentStatus'] = queueResult['componentStatus']

  logger.info("Sending heartbeat with response id: " + str(id) +
              " and timestamp: " + str(timestamp))
  logger.debug("Heartbeat : " + pformat(heartbeat))

  if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
    hostInfo = HostInfo()
    nodeInfo = {}
    # for now, just do the same work as registration
    hostInfo.register(nodeInfo)
    heartbeat['agentEnv'] = nodeInfo
    logger.debug("agentEnv : " + str(nodeInfo))

  return heartbeat
def build(self, id='-1', state_interval=-1, componentsMapped=False):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)
  queueResult = self.actionQueue.result()

  nodeStatus = {"status": "HEALTHY", "cause": "NONE"}
  nodeStatus["alerts"] = []

  heartbeat = {'responseId': int(id),
               'timestamp': timestamp,
               'hostname': hostname.hostname(self.config),
               'nodeStatus': nodeStatus}

  commandsInProgress = False
  if not self.actionQueue.commandQueue.empty():
    commandsInProgress = True

  if len(queueResult) != 0:
    heartbeat['reports'] = queueResult['reports']
    heartbeat['componentStatus'] = queueResult['componentStatus']
    if len(heartbeat['reports']) > 0:
      # There may be IN_PROGRESS tasks
      commandsInProgress = True

  # For first request/heartbeat assume no components are mapped
  if int(id) == 0:
    componentsMapped = False

  logger.info("Building Heartbeat: {responseId = %s, timestamp = %s, "
              "commandsInProgress = %s, componentsMapped = %s}",
              str(id), str(timestamp), repr(commandsInProgress),
              repr(componentsMapped))
  if logger.isEnabledFor(logging.DEBUG):
    logger.debug("Heartbeat: %s", pformat(heartbeat))

  hostInfo = HostInfo(self.config)
  if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
    nodeInfo = {}
    # for now, just do the same work as registration
    # this must be the last step before returning heartbeat
    hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
    heartbeat['agentEnv'] = nodeInfo
    mounts = Hardware.osdisks()
    heartbeat['mounts'] = mounts

    if logger.isEnabledFor(logging.DEBUG):
      logger.debug("agentEnv: %s", str(nodeInfo))
      logger.debug("mounts: %s", str(mounts))

  nodeStatus["alerts"] = hostInfo.createAlerts(nodeStatus["alerts"])

  return heartbeat
def build(self, id='-1', state_interval=-1, componentsMapped=False):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)
  queueResult = self.actionQueue.result()

  nodeStatus = {"status": "HEALTHY", "cause": "NONE"}

  heartbeat = {'responseId': int(id),
               'timestamp': timestamp,
               'hostname': hostname.hostname(),
               'nodeStatus': nodeStatus}

  commandsInProgress = False
  if not self.actionQueue.commandQueue.empty():
    commandsInProgress = True

  if len(queueResult) != 0:
    heartbeat['reports'] = queueResult['reports']
    heartbeat['componentStatus'] = queueResult['componentStatus']
    if len(heartbeat['reports']) > 0:
      # There may be IN_PROGRESS tasks
      commandsInProgress = True

  # For first request/heartbeat assume no components are mapped
  if int(id) == 0:
    componentsMapped = False

  logger.info("Sending heartbeat with response id: " + str(id) +
              " and timestamp: " + str(timestamp) +
              ". Command(s) in progress: " + repr(commandsInProgress) +
              ". Components mapped: " + repr(componentsMapped))
  logger.debug("Heartbeat : " + pformat(heartbeat))

  if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
    hostInfo = HostInfo(self.config)
    nodeInfo = {}
    # for now, just do the same work as registration
    # this must be the last step before returning heartbeat
    hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
    heartbeat['agentEnv'] = nodeInfo
    logger.debug("agentEnv : " + str(nodeInfo))
    mounts = Hardware.osdisks()
    heartbeat['mounts'] = mounts
    logger.debug("mounts : " + str(mounts))

  return heartbeat
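# ---------------------------------------------------------------------------
# Illustrative only (not taken from the snippets above): a minimal sketch of
# how a builder exposing the build() methods shown here might be driven from
# an agent loop. The heartbeat_builder object, the send() callable, and the
# period are assumptions made for this example.
import time

def heartbeat_loop(heartbeat_builder, send, state_interval=6, period=10):
  response_id = 0
  while True:
    payload = heartbeat_builder.build(id=str(response_id),
                                      state_interval=state_interval)
    # send() is assumed to deliver the payload to the server (e.g. an HTTP
    # POST) and return its JSON response as a dict.
    response = send(payload)
    response_id = int(response.get('responseId', response_id + 1))
    time.sleep(period)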
def build(self, id='-1'):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)

  hostInfo = HostInfo()
  agentEnv = {}
  hostInfo.register(agentEnv)

  version = self.read_agent_version()

  register = {'responseId': int(id),
              'timestamp': timestamp,
              'hostname': hostname.hostname(),
              'publicHostname': hostname.public_hostname(),
              'hardwareProfile': self.hardware.get(),
              'agentEnv': agentEnv,
              'agentVersion': version}
  return register
def build(self, version, id='-1'):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)

  hostInfo = HostInfo(self.config)
  agentEnv = {}
  hostInfo.register(agentEnv, False, False)

  current_ping_port = self.config.get('agent', 'current_ping_port')

  register = {'responseId': int(id),
              'timestamp': timestamp,
              'hostname': hostname.hostname(self.config),
              'currentPingPort': int(current_ping_port),
              'publicHostname': hostname.public_hostname(self.config),
              'hardwareProfile': self.hardware.get(),
              'agentEnv': agentEnv,
              'agentVersion': version,
              'prefix': self.config.get('agent', 'prefix')}
  return register
def build(self, id='-1'):
  global clusterId, clusterDefinitionRevision, firstContact
  timestamp = int(time.time() * 1000)

  hostInfo = HostInfo(self.config)
  agentEnv = {}
  hostInfo.register(agentEnv, False, False)

  version = self.agentInfo.version()
  current_ping_port = self.config.get('agent', 'current_ping_port')

  register = {'responseId': int(id),
              'timestamp': timestamp,
              'hostname': hostname.hostname(),
              'currentPingPort': int(current_ping_port),
              'publicHostname': hostname.public_hostname(),
              'hardwareProfile': self.hardware.get(),
              'agentEnv': agentEnv,
              'agentVersion': version}
  return register
def create():
    hostInfo = HostInfo(flask_request.json)
    hostInfo.save()
    return 'success'
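# A short usage sketch for the create() view above, assuming it is registered
# as a POST route on a Flask app; the '/hosts' path, the server address, and
# the example payload fields are hypothetical, not from the original source.
import requests

payload = {'hostname': 'node-1', 'ip': '10.0.0.5'}  # example HostInfo fields
resp = requests.post('http://127.0.0.1:5000/hosts', json=payload)
print(resp.text)  # the view returns the string 'success'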
class GitFS(GitFSBase, Operations): """A simple filesystem using Git and FUSE. """ def __init__(self, origin, branch="master", path=".", mount_point="."): super(GitFS, self).__init__() self.origin = origin self.branch = branch self.root = os.path.realpath(path) self.mount_point = mount_point self.halt = False self.rwlock = Lock() self.need_sync_time = None # Can't use the default rlock here since we want to acquire/release from different threads self.sync_c = Condition(Lock()) self.timer_c = Condition(Lock()) self.id = None self.timer = None self.handlers = { "ping": self._handlePing, "lock": self._handleLock, "unlock": self._handleUnlock, "info": self._handleInfo, "getConfig": self._getConfig, } self.lock_timer = None self.lock_lock = Condition() self.locks = {} self.lock_expire_time = time() self.control_dir = self.getControlDirectory() try: os.makedirs(self.control_dir) except OSError: pass self.info_dir = self.getInfoDirectory(self.root) try: os.makedirs(self.info_dir) except OSError: pass self.control_socket_path = self.getControlSocketPath(self.getID(), server=True) self.lockGitFSDir() try: try: client = GitFSClient.getClientByPath(self.mount_point, False, False) raise FuseOSError(EBUSY) except GitFSError: pass try: os.remove(self.control_socket_path) except OSError: pass self.control_server = None self.control_server = ThreadingUnixStreamServer( self.control_socket_path, type( "GitFSRequestHandler", (PacketizeMixIn, BaseRequestHandler, object), dict( fs=self, dictFromString=self.parseDict, stringFromDict=self.marshalDict, handleDict=lambda s, d: s.fs._handleRequest(s, d), ), ), ) self.control_server.daemon_threads = True # setup the threads last so that they don't prevent an exit. self.control_thread = Thread(target=self.control_server.serve_forever, args=()) self.control_thread.start() finally: self.unlockGitFSDir() mt = self.getMTab() mt[mount_point] = self.getID() self.updateMTab(mt) self.repo = GitRepo(path, origin, branch, sync=True) self.sync_thread = Thread(target=self._sync, args=()) self.sync_thread.start() def getID(self): if self.id is None: self.id = mUUID.getUUIDFromFile(self.getUUIDFile(self.root), create=True).toString() return self.id def _lockWithTimeOut(self, name, t): if t <= 0: return self.lock_lock.acquire() expt = t + time() self.locks[name] = expt if self.lock_expire_time - expt < 0: self.lock_expire_time = expt if self.lock_timer != None: self.lock_timer.cancel() else: logging.debug("Aquiring fresh lock") self.sync_c.acquire() self.lock_timer = Timer(t, self._lockTimerExpire, args=()) self.lock_timer.start() self.lock_lock.release() def _lockTimerExpire(self): logging.debug("_lockTimeExpire") self.lock_lock.acquire() self.__lockTimerExpire() self.lock_lock.release() def __lockTimerExpire(self): logging.debug("__lockTimeExpire") now = time() t = now for key in self.locks: if t - self.locks[key] < 0: t = self.locks[key] if now - self.locks[key] > 300: del self.locks[key] if t - now > 0: self.lock_expire_time = t if self.lock_timer != None: self.lock_timer.cancel() else: logging.debug("***** ERROR ***** __lockTimerExpire doesn't have lock. 
acquiring") self.sync_c.acquire() self.lock_timer = Timer(t - now, self._lockTimerExpire, args=()) self.lock_timer.start() logging.debug("extending lock.") else: if self.lock_timer != None: logging.debug("releasing lock.") self.lock_timer.cancel() self.lock_timer = None self.sync_c.release() def _unlock(self, name): if name not in self.locks: return self.lock_lock.acquire() t = self.locks[name] del self.locks[name] if t >= self.lock_expire_time or len(keys(self.locks)) == 0: self.__lockTimerExpire() self.lock_lock.release() def _handleRequest(self, request, d): if d["action"] in self.handlers: mf = self.handlers[d["action"]] return mf(d, request) logging.debug("No request in packet: %s" % d) self._respond(request, {"status": "Unknown Command"}) return None def _respond(self, request, responseDict): request.sendDict(responseDict) def _handlePing(self, reqDict, request): self._respond(request, {"status": "ok", "message": "pong"}) def _handleLock(self, reqDict, request): self._lockWithTimeOut("%s" % request.request.fileno(), 60) self._respond(request, {"status": "ok", "name": "%s" % request.request.fileno()}) def _handleUnlock(self, reqDict, request): self._unlock("%s" % request.request.fileno()) self._respond(request, {"status": "ok"}) def _handleInfo(self, reqDict, request): self._respond( request, { "status": "ok", "origin": self.repo.origin, "branch": self.repo.branch, "root": self.root, "path": self.mount_point, }, ) def _getConfig(self, reqDict, request): key = reqDict["key"] resp = self.getConfigForInstance(key) self._respond(request, {"status": "ok", key: resp}) def _sync(self): while True: self.sync_c.acquire() if not self.halt: # wait till a sync request comes self.sync_c.wait() self.timer_c.acquire() if self.timer != None: self.timer.cancel() self.timer = None self.timer_c.release() try: if not self.repo.synchronize(): # sync failed, we need to try again. self.needSync() self.need_sync_time = None except Exception as e: logging.debug("synchronize threw exception %s" % e) self.sync_c.release() # can't release this until sync is complete because we can't change files while we sync. 
else: self.repo.forcePush() self.repo.push() self.sync_c.release() break def getHostInfo(): if self.hostinfo is None: self.hostinfo = HostInfo() self.hostinfo.update() return self.hostinfo def matchesInstance(self, key): """ returns a number determinging how well the past in key matches the current instance.""" if key == "default": return 0.1 # Very weak match hostinfo = self.getHostInfo() try: ip = socket.inet_pton(key) return hostinfo.matchAddress(ip) except SocketError: pass return hostinfo.matchHostName(key) def getConfigForInstanceSingleFile(self, key, name): logging.debug("getConfigForInstanceSingleFile(%s, %s)" % (key, name)) c = self.getConfig(name) if c is None: return None match = 0 if key not in c: return None v = c[key] logging.debug("getConfigForInstanceSingleFile(%s, %s) value=%s" % (key, name, v)) if isinstance(v, dict): rv = None for (key, value) in v.iteritems(): tm = self.matchesInstance(key) if tm > match: match = tm rv = value v = rv if v is None: return None if isinstance(v, list): v = random.choose(v) return (match, v) def getConfigForInstance(self, key): match = 0 value = None logging.debug("getConfigForInstance(%s)" % key) if key in self.config_file_priorities: filenames = self.config_file_priorities[key] else: filenames = self.config_file_priorities["default"] for name in filenames: a = self.getConfigForInstanceSingleFile(key, name) if a is None: continue (m, v) = a if m >= match: match = m value = v return value def forceSync(self): logging.debug("forceSync()") self.sync_c.acquire() self.sync_c.notifyAll() self.sync_c.release() def needSync(self): logging.debug("needSync()") self.timer_c.acquire() if self.need_sync_time is None: self.need_sync_time = time() if self.timer != None: if time() - self.need_sync_time > 5 * 60: return self.timer.cancel() # don't do anything until there is a pause. self.timer = Timer(10, self.forceSync, args=()) self.timer.start() self.timer_c.release() def shutdown(self): # stop sync thread self.sync_c.acquire() self.halt = True self.sync_c.notifyAll() self.sync_c.release() self.repo.shutDown() def destroy(self, path): self.shutdown() if self.control_server != None: self.control_server.shutdown() try: os.remove(self.control_socket_path) except OSError: pass def __call__(self, op, path, *args): try: logging.debug("calling %s on %s" % (op, path)) path = self.escapePath(path) r = super(GitFS, self).__call__(op, self.root + path, *args) pr = "%s" % r pr = pr[:10] logging.debug("returning %s for %s" % (pr, op)) return r except Exception as e: logging.debug("Unhandled exception %s" % e) raise e def access(self, path, mode): if not os.access(path, mode): raise FuseOSError(EACCES) def chmod(self, path, mode): self.needSync() return os.chmod(path, mode) def chown(self, path, uid, gid): self.needSync() return super(GitFS, self).chown(path, uid, gid) def create(self, path, mode): # XXXXX Fixme what should the flags be? 
return os.open(path, os.O_RDWR | os.O_CREAT, mode) def flush(self, path, fh): self.sync_c.acquire() self.sync_c.notifyAll() self.sync_c.release() return os.fsync(fh) def fsync(self, path, datasync, fh): self.needSync() return os.fsync(fh) def fsyncdir(self, path, datasync, fh): return self.fsync(path, datasync, fh) def getattr(self, path, fh=None): st = os.lstat(path) return dict( (key, getattr(st, key)) for key in ("st_atime", "st_ctime", "st_gid", "st_mode", "st_mtime", "st_nlink", "st_size", "st_uid") ) getxattr = None def link(self, target, source): source = self.escapePath(source) self.needSync() return os.link(source, target) listxattr = None mknod = os.mknod def mkdir(self, path, mode): return os.mkdir(path, mode) def open(self, path, fip): f = os.open(path, fip) logging.debug("open(%s, %s): %d" % (path, fip, f)) return f def read(self, path, size, offset, fh): with self.rwlock: os.lseek(fh, offset, 0) return os.read(fh, size) def readdir(self, path, fh): files = os.listdir(path) uefiles = [] for file in files: if self.isValidPath(file): uefiles = uefiles + [self.unescapePath(file)] return [".", ".."] + uefiles readlink = os.readlink def release(self, path, fh): return os.close(fh) def rename(self, old, new): self.needSync() return os.rename(old, self.root + self.escapePath(new)) def rmdir(self, path): self.needSync() return os.rmdir(path) def statfs(self, path): stv = os.statvfs(path) return dict( (key, getattr(stv, key)) for key in ( "f_bavail", "f_bfree", "f_blocks", "f_bsize", "f_favail", "f_ffree", "f_files", "f_flag", "f_frsize", "f_namemax", ) ) def symlink(self, target, source): self.needSync() return os.symlink(source, target) def truncate(self, path, length, fh=None): self.needSync() with open(path, "r+") as f: f.truncate(length) def unlink(self, path): self.needSync() return os.unlink(path) utimens = os.utime def write(self, path, data, offset, fh): self.needSync() with self.rwlock: os.lseek(fh, offset, 0) return os.write(fh, data)
def connectToServer(self): """ This function initiaties the connection request and sends it to the serverPC to be verified as a host. The function should then recieve an Okay message, and connect to all hosts sent in the message payload. Once the host is connected it will begin the thread listening to the server and work threads """ client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client_sock.bind((self.ip, self.port)) client_sock.connect((self.serverPC_ip, 9090)) #create CREQ message message = Message('CREQ', self.ip, '\0') client_sock.sendall(message.generateByteMessage()) print('Sent CREQ message to serverPC at ' + self.serverPC_ip) #recieve response as string response_message = self.parseMessage(client_sock) if (response_message.type == 'OKAY'): print('OKAY message received') #payload contains host area, and list of connected hosts payload_array = response_message.payload.split(',') i = 1 lost_payload = '' while i < len(payload_array): indicator = self.setupHostConnection(payload_array[i]) if (indicator == False): lost_payload += payload_array[i] + "," else: self.host_ips.append(payload_array[i]) i += 1 self.x_min = self.curr_x_max self.x_max = self.x_min + self.x_scalar self.host_info = HostInfo(self.x_min, self.x_max) #this message contains the hosts that have disconnected from the network and notifes the serverPC about the change del_message = Message('LHST', self.ip, lost_payload) print('sent LHST to serverPC with payload: ' + lost_payload) client_sock.sendall(del_message.generateByteMessage()) if self.curr_x_min_ip == '': #there was no other host, you are your own neighbor self.r_neighbor = self.ip self.host_info.r_neighbor_ip = self.ip self.l_neighbor = self.ip self.host_info.l_neighbor_ip = self.ip else: #else, there is another host - check if there are any other hosts self.r_neighbor = self.curr_x_min_ip self.host_info.r_neighbor_ip = self.curr_x_min_ip if self.l_neighbor == '': #there isnt a left neighbor - left neighbor is right neighbor self.l_neighbor = self.r_neighbor self.host_info.l_neighbor_ip = self.r_neighbor else: #there is a left neighbor - set host info left neighbor self.host_info.l_neighbor_ip = self.l_neighbor # need to start the listening thread and the work thread now host_start_thread = Thread(target=lambda: self.startHostInfo()) host_start_thread.start() listening_thread = Thread(target=lambda: self.listeningPort()) listening_thread.daemon = True listening_thread.start() work_thread = Thread(target=lambda: self.processWork()) work_thread.daemon = True work_thread.start() else: print('Invalid message type received from ' + message.origin)
def index():
    host = HostInfo.getLast()
    return render_template('index.html', name="Raphael", infos=host.getInfo())
def __init__(self): """ Constructor """ # Read the configuration parameters from tulsi.conf self.timestr = time.strftime("%Y:%m:%d-%H:%M:%S") logging.basicConfig(filename="/var/log/tulsi.log", level=logging.DEBUG) try: self.conf = ConfigParser.ConfigParser() self.conf.read("/etc/tulsi/tulsi.conf") self.udp_ip = self.conf.get("tulsi", "host") self.udp_port = int(self.conf.get("tulsi", "port")) # printing the host and port of tulsi logging.info("%s The IP of the host: %s" % (self.timestr, self.udp_ip)) logging.info("%s The Port number of the host :%s" % (self.timestr, self.udp_port)) except: # Error message of tulsi not working logging.error("The tulsi configuration file is not found") # Creating objects of MessageEncode and HostInfo msg_encode = MessageEncode() host_info = HostInfo() # Initializing empty lists self.drives = [] self.service = [] self.ip_array = [] self.ring_ip = [] self.ring_conf_ip = [] self.ring_drives = [] self.ip_set_array = [] self.my_ring_conf = dict() # Read the ring Configuration file self.gz_file = GzipFile("/etc/swift/container.ring.gz", "rb") if hasattr(self.gz_file, "_checkReadable"): self.gz_file = BufferedReader(self.gz_file) magic = self.gz_file.read(4) if magic == "R1NG": version, = struct.unpack("!H", self.gz_file.read(2)) if version == 1: self.ring_data = self.read_ring_file(self.gz_file) else: logging.error("%s Unknown ring format version %d" % (self.timestr, version)) raise Exception("Unknown ring format version %d" % version) # While loop to continuously check the status of swift services and # drives and send information to tulsi client while True: self.ip_array = host_info.read_ip() self.service = host_info.read_services() self.drives = host_info.read_drives(self.drives) self.message = msg_encode.create_message( self.my_ring_conf, self.ring_conf_ip, self.ip_array, self.service, self.drives ) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Internet # UDP sock.sendto(self.message, (self.udp_ip, self.udp_port)) time.sleep(5) self.ip_array = [] self.service = [] self.drives = []
class Host: def __init__(self, ip, port, server_ip): self.ip = ip #Hosts ip self.port = port # Host port self.serverPC_ip = server_ip #ServerPC ip the host is connecting to self.x_scalar = 50 #for incrementing the x_min to an x_max value self.curr_x_max = 0 self.curr_x_min = 50 self.curr_x_min_ip = '' self.left_neighbor = '' self.right_neighbor = '' self.l_neighbor = '' #Area of the shared area between the host and the left neighbor self.r_neighbor = '' #Area shared between the host and the right neighbor self.host_ips = [] #The list of all connected hosts on the network self.connections = [] #A list of connection objects self.work_queue = [ ] #The queue of instructions for the host to execute self.all_alphas = [] #List of all host alphas self.running = True #Running and updated are semaphores responsible for flagging when the program should execute self.updated = False self.updates_received = [ ] # this will be for keeping track of what host we've received an HUPD from self.run() def parseMessage(self, sock): '''ParseMessage is responsible for recieving messages from sockets and and returning them as a string''' try: msg = b'' while True: byte = sock.recv(1) if len(byte) == 0: raise ConnectionError('Socket is closed - 1') if byte == b'\n': break msg += byte datatype = msg.decode() msg = b'' while True: byte = sock.recv(1) if len(byte) == 0: raise ConnectionError('Socket is closed - 2') if byte == b'\n': break msg += byte origin = msg.decode() msg = b'' while True: byte = sock.recv(1) if len(byte) == 0: raise ConnectionError('Socket is closed - 3') if byte == b'\n': break msg += byte payload = msg.decode() return Message(datatype, origin, payload) except ConnectionError as err: print("Error: {0} ".format(err)) return None def parseMessageHost(self, conn): '''ParseMessageHost is responsible for recieving messages from host-related sockets and and returning them as a Message object''' msg = b'' while True: byte = sock.recv(1) if len(byte) == 0: print('Host ' + conn.ip + ' was lost') range = conn.max_x - conn.min_x if self.l_neighbor == conn.ip: self.x_min -= int((range / 2.0) + .5) self.host_info.x_min -= int((self.x_scalar / 2.0) + .5) self.host_info.merge_left_backups() if self.r_neighbor == conn.ip: self.x_max += int((range / 2.0) + .5) self.host_info.x_max += int((self.x_scalar / 2.0) + .5) self.host_info.merge_right_backups() conn.close() if len(self.connections) == 1: self.host_info.alone = True self.host_ips.remove(conn.ip) self.connections.remove(conn) return if byte == b'\n': break msg += byte datatype = msg.decode() msg = b'' while True: byte = sock.recv(1) if len(byte) == 0: print('Host ' + conn.ip + ' was lost') range = conn.max_x - conn.min_x if self.l_neighbor == conn.ip: self.x_min -= int((range / 2.0) + .5) self.host_info.x_min -= int((self.x_scalar / 2.0) + .5) self.host_info.merge_left_backups() if self.r_neighbor == conn.ip: self.x_max += int((range / 2.0) + .5) self.host_info.x_max += int((self.x_scalar / 2.0) + .5) self.host_info.merge_right_backup() conn.close() if len(self.connections) == 1: self.host_info.alone = True self.host_ips.remove(conn.ip) self.connections.remove(conn) return if byte == b'\n': break msg += byte origin = msg.decode() msg = b'' while True: byte = sock.recv(1) if len(byte) == 0: print('Host ' + conn.ip + ' was lost') range = conn.max_x - conn.min_x if self.l_neighbor == conn.ip: self.x_min -= int((range / 2.0) + .5) self.host_info.x_min -= int((self.x_scalar / 2.0) + .5) self.host_info.merge_left_backups() if self.r_neighbor == conn.ip: 
self.x_max += int((range / 2.0) + .5) self.host_info.x_max += int((self.x_scalar / 2.0) + .5) self.host_info.merge_right_backup() conn.close() if len(self.connections) == 1: self.host_info.alone = True self.host_ips.remove(conn.ip) self.connections.remove(conn) return if byte == b'\n': break msg += byte payload = msg.decode() return Message(datatype, origin, payload) def connectToServer(self): """ This function initiaties the connection request and sends it to the serverPC to be verified as a host. The function should then recieve an Okay message, and connect to all hosts sent in the message payload. Once the host is connected it will begin the thread listening to the server and work threads """ client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client_sock.bind((self.ip, self.port)) client_sock.connect((self.serverPC_ip, 9090)) #create CREQ message message = Message('CREQ', self.ip, '\0') client_sock.sendall(message.generateByteMessage()) print('Sent CREQ message to serverPC at ' + self.serverPC_ip) #recieve response as string response_message = self.parseMessage(client_sock) if (response_message.type == 'OKAY'): print('OKAY message received') #payload contains host area, and list of connected hosts payload_array = response_message.payload.split(',') i = 1 lost_payload = '' while i < len(payload_array): indicator = self.setupHostConnection(payload_array[i]) if (indicator == False): lost_payload += payload_array[i] + "," else: self.host_ips.append(payload_array[i]) i += 1 self.x_min = self.curr_x_max self.x_max = self.x_min + self.x_scalar self.host_info = HostInfo(self.x_min, self.x_max) #this message contains the hosts that have disconnected from the network and notifes the serverPC about the change del_message = Message('LHST', self.ip, lost_payload) print('sent LHST to serverPC with payload: ' + lost_payload) client_sock.sendall(del_message.generateByteMessage()) if self.curr_x_min_ip == '': #there was no other host, you are your own neighbor self.r_neighbor = self.ip self.host_info.r_neighbor_ip = self.ip self.l_neighbor = self.ip self.host_info.l_neighbor_ip = self.ip else: #else, there is another host - check if there are any other hosts self.r_neighbor = self.curr_x_min_ip self.host_info.r_neighbor_ip = self.curr_x_min_ip if self.l_neighbor == '': #there isnt a left neighbor - left neighbor is right neighbor self.l_neighbor = self.r_neighbor self.host_info.l_neighbor_ip = self.r_neighbor else: #there is a left neighbor - set host info left neighbor self.host_info.l_neighbor_ip = self.l_neighbor # need to start the listening thread and the work thread now host_start_thread = Thread(target=lambda: self.startHostInfo()) host_start_thread.start() listening_thread = Thread(target=lambda: self.listeningPort()) listening_thread.daemon = True listening_thread.start() work_thread = Thread(target=lambda: self.processWork()) work_thread.daemon = True work_thread.start() else: print('Invalid message type received from ' + message.origin) def startHostInfo(self): self.host_info.run() def setupHostConnection(self, host_ip): """ This function is responsible for connecting to all hosts on the network. 
The host will also setup it's left and right neighbor and create the listening threads for host connections """ if host_ip != self.ip and host_ip != '': host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) indicator = host_socket.connect_ex((host_ip, 9090)) if indicator != 0: return False else: new_host_msg = Message("NHST", self.ip, '\0') host_socket.sendall(new_host_msg.generateByteMessage()) print('NHST message sent to Host at ' + host_ip) area_message = self.parseMessage(host_socket) if (area_message.type == 'AREA'): print('AREA message received from ' + area_message.origin) payload_array = area_message.payload.split(':') curr_host_ip = area_message.origin host_min_x = payload_array[0] host_max_x = payload_array[1] if host_max_x > self.curr_x_max: self.curr_x_max = host_max_x if self.min_x == host_max_x: self.l_neighbor = curr_host_ip if host_min_x <= self.curr_x_min: self.curr_x_min = host_min_x self.curr_x_min_ip = curr_host_ip new_thread = Thread( target=lambda: self.listenToHost(host_socket)) new_thread.daemon = True new_thread.start() new_connection = Connection(host_ip, host_socket, new_thread, host_min_x, host_max_x) self.connections.append(new_connection) return True else: print('Invalid message type received from ' + area_message.origin + ' - Host corrupt') return False return True def listeningPort(self): """ The listening port uses a binded socket to accept new incoming host connections. Once accepted the function will add the NHST instruction to the work queue """ listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listening_socket.bind((self.ip, 9090)) listening_socket.listen(1) while self.running == True: new_conn_sock, (new_conn_ip, new_conn_port) = listening_socket.accept() message = self.parseMessage(new_conn_sock) if (message.type == 'NHST'): print('Got NHST message from ' + message.origin) new_instruction = Instruction('NHST') new_instruction.message = message new_instruction.sock = new_conn_sock self.work_queue.append(new_instruction) else: print('Invalid Message Type received from ' + message.origin) new_conn_sock.close() return def processWork(self): """ processWork goes through the intruction objects in the work_queue processWork prioritizes certain instructions in order to implement implicit """ while self.running == True: if len(self.work_queue) == 0: self.work_queue = [ Instruction('Do Math'), Instruction('Send HUPD'), Instruction('Receive All HUPDs') ] else: instruction = self.work_queue.pop(0) print('Instruction is : ' + instruction.type) if instruction.type == 'Do Math': #start calculations self.updated = False #print('Doing Math') # run calculations elif instruction.type == 'Send HUPD': #echo host update to all other hosts on the network all_l, all_r = self.host_info.get_our_backup() all_l_alphas, all_r_alphas = self.host_info.get_our_alpha_backup( ) min_max = str(self.x_min) + ':' + str(self.x_max) self.host_info.create_left_halo() self.host_info.create_right_halo() payload = self.host_info.numpy_array_to_string( self.host_info.my_aboids ) + '\0' + self.host_info.numpy_array_to_string( self.host_info.l_halo ) + '\0' + self.host_info.numpy_array_to_string( self.host_info.r_halo ) + '\0' + all_l + '\0' + all_r + '\0' + all_l_alphas + '\0' + all_r_alphas + '\0' + min_max + '\0' our_update = Message("HUPD", self.ip, payload) q = input('got to Send HUPD 1') #if there are no connections, send to myself if len(self.connections) == 0: self.updateSelf(our_update) else: for connection in self.connections: connection.sock.sendall( 
our_update.generateByteMessage()) self.host_info.update_my_aboids() print('Sent Out HUPD') elif instruction.type == 'Receive All HUPDs': # make sure to receive all HUPDs from listening threads if len(self.connections) > 0: while len(self.updates_received) != len( self.connections): msg = 'wait' # only set to true once all updates have been received self.updated = True self.updates_received = [] # Once all updates are recieved update ABoid locations self.host_info.merge_halos() self.host_info.update_all_aboids(self.all_alphas) self.all_alphas = [] elif instruction.type == 'NHST': #New host tring to connect to network new_host_ip = instruction.message.origin payload_array = instruction.message.payload.split(':') new_host_min_x = payload_array[0] new_host_max_x = payload_array[1] #check if the new host is a neighbor if self.x_max == new_host_min_x: self.r_neighbor = new_host_ip self.host_info.r_neighbor_ip = new_host_ip if self.x_min == 0: self.l_neighbor = new_host_ip self.host_info.l_neighbor_ip = new_host_ip self.host_ips.append(new_host_ip) #Start the thread that is listening to the socket connected to the new host new_thread = Thread( target=lambda: self.listenToHost(instruction.sock)) new_thread.daemon = True new_thread.start() new_connection = Connection(new_host_ip, instruction.sock, new_thread, new_host_min_x, new_host_max_x) self.connections.append(new_connection) host_area = self.x_min + ':' + self.x_max #send current host area to the newly connected host area_message = Message('AREA', self.ip, host_area) instruction.sock.sendall( area_message.generateByteMessage()) print('Sent AREA message to ' + new_host_ip) elif instruction.type == 'LHST': #Host has disconnected to the network for host_ip in self.host_ips: if host_ip == instruction.message.origin: #remove host from list of connected ips self.host_ips.remove(host_ip) for connection in self.connections: #remove the connection object from list of known connections if connection.ip == instruction.message.origin: #close the hosts socket and thread if len(self.connections) == 1: self.host_info.alone = True connection.close() self.connections.remove(connection) else: print('Invalid Instruction - skipping...') return def updateSelf(self, message): q = input("press enter") self.updated = False self.host_info.alone = True host_ip = message.origin payload = message.payload.split('\0') host_alphas = payload[0] print('host_alphas = ' + host_alphas) num_alphas = len(host_alphas.split(',')) for i in range(num_alphas): self.all_alphas.append(host_alphas.split(',')[i - 1]) host_l_halo = payload[1] host_r_halo = payload[2] host_all_l = payload[3] host_all_r = payload[4] l_alpha_backup = payload[5] r_alpha_backup = payload[6] host_min = payload[7].split(':')[0] host_max = payload[7].split(':')[1] #if the hosts left neighbor then store the halo region data and create back ups of left neighbor data self.host_info.n_l_halo = self.host_info.string_to_numpy_array( host_r_halo) self.host_info.l_backup = host_all_r self.host_info.l_backup_alphas = r_alpha_backup #if the hosts right neighbor then store the halo region data and create back ups of right neighbor data self.host_info.n_r_halo = self.host_info.string_to_numpy_array( host_l_halo) self.host_info.r_backup = host_all_l self.host_info.r_backup_alphas = l_alpha_backup self.updated = True def listenToHost(self, host_sock): """ listenToHost recieves messages from other hosts If a host update is received the host will check if it is a neighbor, and then update the neighbor halo region and backups If a 
lost host message is recieved it will create the instruction type LHST and added to the queue """ while self.running == True: #turn message into string host_connection = None for conn in self.connections: if conn.sock == host_sock: host_connection = conn message = self.parseMessageHost(host_connection) if message.type == 'HUPD': #host update message payload print('Got HUPD from ' + message.origin) self.updated = False host_ip = message.origin payload = message.payload.split('\0') host_alphas = payload[0] num_alphas = count(host_alphas.split(',')) for i in range(num_alphas): self.all_alphas.append(host_alphas.split(',')[i - 1]) host_l_halo = payload[1] host_r_halo = payload[2] host_all_l = payload[3] host_all_r = payload[4] l_alpha_backup = payload[5] r_alpha_backup = payload[6] host_min = payload[7].split(':')[0] host_max = payload[7].split(':')[1] if self.l_neighbor == host_ip: #if the hosts left neighbor then store the halo region data and create back ups of left neighbor data self.host_info.n_l_halo = self.host_info.string_to_numpy_array( host_r_halo) self.host_info.l_backup = host_all_r self.host_info.l_backup_alphas = r_alpha_backup if self.r_neighbor == host_ip: #if the hosts right neighbor then store the halo region data and create back ups of right neighbor data self.host_info.n_r_halo = self.host_info.string_to_numpy_array( host_l_halo) self.host_info.r_backup = host_all_l self.host_info.r_backup_alphas = l_alpha_backup #may need to parse the different Alpha coordinates before appending to all_alphas self.all_alphas.append(host_alphas) self.updates_received.append(host_ip) while (self.updated != True): wait = 'wait' elif message.type == 'LHST': #LHST message recieved meaning that the host must close that connected socket print('Got LHST from ' + message.origin) new_instruction = Instruction('LHST') new_instruction.message = message self.work_queue.append(new_instruction) else: print('Invalid message type received from ' + message.origin) return def run(self): main_thread = Thread(target=lambda: self.connectToServer()) main_thread.daemon = True main_thread.start() while (self.running == True): user_input = input('Enter "quit" to end program: ') if user_input == 'quit': print('Quitting...') self.running = False
def getHostInfo(self):
    # Lazily create and populate the cached HostInfo on first use.
    if self.hostinfo is None:
        self.hostinfo = HostInfo()
        self.hostinfo.update()
    return self.hostinfo
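# A self-contained sketch of the same lazy-initialization pattern used by
# getHostInfo() above; the stub HostInfo and Agent classes are illustrative
# stand-ins, not taken from the original source.
class HostInfo:
    def update(self):
        self.data = {"collected": True}  # stand-in for real host probing

class Agent:
    def __init__(self):
        self.hostinfo = None

    def getHostInfo(self):
        # Create and populate the cached HostInfo only on first use.
        if self.hostinfo is None:
            self.hostinfo = HostInfo()
            self.hostinfo.update()
        return self.hostinfo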
class GitFS(GitFSBase, Operations): """A simple filesystem using Git and FUSE. """ def __init__(self, origin, branch='master', path='.', mount_point='.'): super(GitFS, self).__init__() self.origin = origin self.branch = branch self.root = os.path.realpath(path) self.mount_point = mount_point self.halt = False self.rwlock = Lock() self.need_sync_time = None # Can't use the default rlock here since we want to acquire/release from different threads self.sync_c = Condition(Lock()) self.timer_c = Condition(Lock()) self.id = None self.timer = None self.handlers = { 'ping': self._handlePing, 'lock': self._handleLock, 'unlock': self._handleUnlock, 'info': self._handleInfo, 'getConfig': self._getConfig } self.lock_timer = None self.lock_lock = Condition() self.locks = {} self.lock_expire_time = time() self.control_dir = self.getControlDirectory() try: os.makedirs(self.control_dir) except OSError: pass self.info_dir = self.getInfoDirectory(self.root) try: os.makedirs(self.info_dir) except OSError: pass self.control_socket_path = self.getControlSocketPath(self.getID(), server=True) self.lockGitFSDir() try: try: client = GitFSClient.getClientByPath(self.mount_point, False, False) raise FuseOSError(EBUSY) except GitFSError: pass try: os.remove(self.control_socket_path) except OSError: pass self.control_server = None self.control_server = ThreadingUnixStreamServer( self.control_socket_path, type( "GitFSRequestHandler", (PacketizeMixIn, BaseRequestHandler, object), dict(fs=self, dictFromString=self.parseDict, stringFromDict=self.marshalDict, handleDict=lambda s, d: s.fs._handleRequest(s, d)))) self.control_server.daemon_threads = True # setup the threads last so that they don't prevent an exit. self.control_thread = Thread( target=self.control_server.serve_forever, args=()) self.control_thread.start() finally: self.unlockGitFSDir() mt = self.getMTab() mt[mount_point] = self.getID() self.updateMTab(mt) self.repo = GitRepo(path, origin, branch, sync=True) self.sync_thread = Thread(target=self._sync, args=()) self.sync_thread.start() def getID(self): if self.id is None: self.id = mUUID.getUUIDFromFile(self.getUUIDFile(self.root), create=True).toString() return self.id def _lockWithTimeOut(self, name, t): if t <= 0: return self.lock_lock.acquire() expt = t + time() self.locks[name] = expt if self.lock_expire_time - expt < 0: self.lock_expire_time = expt if self.lock_timer != None: self.lock_timer.cancel() else: logging.debug("Aquiring fresh lock") self.sync_c.acquire() self.lock_timer = Timer(t, self._lockTimerExpire, args=()) self.lock_timer.start() self.lock_lock.release() def _lockTimerExpire(self): logging.debug('_lockTimeExpire') self.lock_lock.acquire() self.__lockTimerExpire() self.lock_lock.release() def __lockTimerExpire(self): logging.debug('__lockTimeExpire') now = time() t = now for key in self.locks: if t - self.locks[key] < 0: t = self.locks[key] if now - self.locks[key] > 300: del self.locks[key] if t - now > 0: self.lock_expire_time = t if self.lock_timer != None: self.lock_timer.cancel() else: logging.debug( "***** ERROR ***** __lockTimerExpire doesn't have lock. 
acquiring" ) self.sync_c.acquire() self.lock_timer = Timer(t - now, self._lockTimerExpire, args=()) self.lock_timer.start() logging.debug("extending lock.") else: if self.lock_timer != None: logging.debug("releasing lock.") self.lock_timer.cancel() self.lock_timer = None self.sync_c.release() def _unlock(self, name): if name not in self.locks: return self.lock_lock.acquire() t = self.locks[name] del self.locks[name] if t >= self.lock_expire_time or len(keys(self.locks)) == 0: self.__lockTimerExpire() self.lock_lock.release() def _handleRequest(self, request, d): if d['action'] in self.handlers: mf = self.handlers[d['action']] return mf(d, request) logging.debug("No request in packet: %s" % d) self._respond(request, {'status': 'Unknown Command'}) return None def _respond(self, request, responseDict): request.sendDict(responseDict) def _handlePing(self, reqDict, request): self._respond(request, {'status': 'ok', 'message': 'pong'}) def _handleLock(self, reqDict, request): self._lockWithTimeOut('%s' % request.request.fileno(), 60) self._respond(request, { 'status': 'ok', 'name': '%s' % request.request.fileno() }) def _handleUnlock(self, reqDict, request): self._unlock('%s' % request.request.fileno()) self._respond(request, {'status': 'ok'}) def _handleInfo(self, reqDict, request): self._respond( request, { 'status': 'ok', 'origin': self.repo.origin, 'branch': self.repo.branch, 'root': self.root, 'path': self.mount_point }) def _getConfig(self, reqDict, request): key = reqDict['key'] resp = self.getConfigForInstance(key) self._respond(request, {'status': 'ok', key: resp}) def _sync(self): while True: self.sync_c.acquire() if not self.halt: # wait till a sync request comes self.sync_c.wait() self.timer_c.acquire() if self.timer != None: self.timer.cancel() self.timer = None self.timer_c.release() try: if not self.repo.synchronize(): #sync failed, we need to try again. self.needSync() self.need_sync_time = None except Exception as e: logging.debug("synchronize threw exception %s" % e) self.sync_c.release( ) # can't release this until sync is complete because we can't change files while we sync. 
else: self.repo.forcePush() self.repo.push() self.sync_c.release() break def getHostInfo(): if self.hostinfo is None: self.hostinfo = HostInfo() self.hostinfo.update() return self.hostinfo def matchesInstance(self, key): """ returns a number determinging how well the past in key matches the current instance.""" if key == 'default': return .1 #Very weak match hostinfo = self.getHostInfo() try: ip = socket.inet_pton(key) return hostinfo.matchAddress(ip) except SocketError: pass return hostinfo.matchHostName(key) def getConfigForInstanceSingleFile(self, key, name): logging.debug('getConfigForInstanceSingleFile(%s, %s)' % (key, name)) c = self.getConfig(name) if c is None: return None match = 0 if key not in c: return None v = c[key] logging.debug('getConfigForInstanceSingleFile(%s, %s) value=%s' % (key, name, v)) if isinstance(v, dict): rv = None for (key, value) in v.iteritems(): tm = self.matchesInstance(key) if tm > match: match = tm rv = value v = rv if v is None: return None if isinstance(v, list): v = random.choose(v) return (match, v) def getConfigForInstance(self, key): match = 0 value = None logging.debug('getConfigForInstance(%s)' % key) if key in self.config_file_priorities: filenames = self.config_file_priorities[key] else: filenames = self.config_file_priorities['default'] for name in filenames: a = self.getConfigForInstanceSingleFile(key, name) if a is None: continue (m, v) = a if (m >= match): match = m value = v return value def forceSync(self): logging.debug('forceSync()') self.sync_c.acquire() self.sync_c.notifyAll() self.sync_c.release() def needSync(self): logging.debug('needSync()') self.timer_c.acquire() if self.need_sync_time is None: self.need_sync_time = time() if self.timer != None: if time() - self.need_sync_time > 5 * 60: return self.timer.cancel() # don't do anything until there is a pause. self.timer = Timer(10, self.forceSync, args=()) self.timer.start() self.timer_c.release() def shutdown(self): # stop sync thread self.sync_c.acquire() self.halt = True self.sync_c.notifyAll() self.sync_c.release() self.repo.shutDown() def destroy(self, path): self.shutdown() if self.control_server != None: self.control_server.shutdown() try: os.remove(self.control_socket_path) except OSError: pass def __call__(self, op, path, *args): try: logging.debug("calling %s on %s" % (op, path)) path = self.escapePath(path) r = super(GitFS, self).__call__(op, self.root + path, *args) pr = "%s" % r pr = pr[:10] logging.debug("returning %s for %s" % (pr, op)) return r except Exception as e: logging.debug("Unhandled exception %s" % e) raise e def access(self, path, mode): if not os.access(path, mode): raise FuseOSError(EACCES) def chmod(self, path, mode): self.needSync() return os.chmod(path, mode) def chown(self, path, uid, gid): self.needSync() return super(GitFS, self).chown(path, uid, gid) def create(self, path, mode): # XXXXX Fixme what should the flags be? 
return os.open(path, os.O_RDWR | os.O_CREAT, mode) def flush(self, path, fh): self.sync_c.acquire() self.sync_c.notifyAll() self.sync_c.release() return os.fsync(fh) def fsync(self, path, datasync, fh): self.needSync() return os.fsync(fh) def fsyncdir(self, path, datasync, fh): return self.fsync(path, datasync, fh) def getattr(self, path, fh=None): st = os.lstat(path) return dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid')) getxattr = None def link(self, target, source): source = self.escapePath(source) self.needSync() return os.link(source, target) listxattr = None mknod = os.mknod def mkdir(self, path, mode): return os.mkdir(path, mode) def open(self, path, fip): f = os.open(path, fip) logging.debug("open(%s, %s): %d" % (path, fip, f)) return f def read(self, path, size, offset, fh): with self.rwlock: os.lseek(fh, offset, 0) return os.read(fh, size) def readdir(self, path, fh): files = os.listdir(path) uefiles = [] for file in files: if self.isValidPath(file): uefiles = uefiles + [self.unescapePath(file)] return ['.', '..'] + uefiles readlink = os.readlink def release(self, path, fh): return os.close(fh) def rename(self, old, new): self.needSync() return os.rename(old, self.root + self.escapePath(new)) def rmdir(self, path): self.needSync() return os.rmdir(path) def statfs(self, path): stv = os.statvfs(path) return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')) def symlink(self, target, source): self.needSync() return os.symlink(source, target) def truncate(self, path, length, fh=None): self.needSync() with open(path, 'r+') as f: f.truncate(length) def unlink(self, path): self.needSync() return os.unlink(path) utimens = os.utime def write(self, path, data, offset, fh): self.needSync() with self.rwlock: os.lseek(fh, offset, 0) return os.write(fh, data)
def __init__(self): """ Constructor """ # Read the configuration parameters from tulsi.conf self.timestr = time.strftime("%Y:%m:%d-%H:%M:%S") self.conf = ConfigParser.ConfigParser() try: self.conf.read('/etc/tulsi/tulsi.conf') self.udp_ip = self.conf.get('tulsi', 'host') self.udp_port = int(self.conf.get('tulsi', 'port')) # printing the host and port of tulsi logging.info('%s The IP of the host: %s' % (self.timestr, self.udp_ip)) logging.info('%s The Port number of the host :%s' % (self.timestr, self.udp_port)) except: # Error message of tulsi not working logging.error('The tulsi configuration file is not found') # Creating objects of MessageEncode and HostInfo msg_encode = MessageEncode() host_info = HostInfo() # Initializing empty lists self.drives = [] self.service = [] self.ip_array = [] self.ring_ip = [] self.ring_conf_ip = [] self.ring_drives = [] self.ip_set_array = [] self.my_ring_conf = dict() # Read the ring Configuration file self.gz_file = GzipFile("/etc/swift/container.ring.gz", 'rb') if hasattr(self.gz_file, '_checkReadable'): self.gz_file = BufferedReader(self.gz_file) magic = self.gz_file.read(4) if magic == 'R1NG': version, = struct.unpack('!H', self.gz_file.read(2)) if version == 1: #self.ring_data = self.read_ring_file(self.gz_file) self.read_ring_file(self.gz_file) else: logging.error('%s Unknown ring format version %d' % (self.timestr, version)) raise Exception('Unknown ring format version %d' % version) # While loop to continuously check the status of swift services and # drives and send information to tulsi client while True: self.ip_array = host_info.read_ip() self.service = host_info.read_services() self.drives = host_info.read_drives(self.drives) self.message = msg_encode.create_message(self.my_ring_conf, self.ring_conf_ip, self.ip_array, self.service, self.drives) sock = socket.socket( socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.sendto(self.message, (self.udp_ip, self.udp_port)) time.sleep(5) self.ip_array = [] self.service = [] self.drives = []
def main():
    host = HostInfo()
    # Every 5 minutes
    # List the PC's characteristics
    r = requests.post('http://127.0.0.1:5000/api', json=host.getInfo())
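# Hedged sketch of a receiver matching the POST in main() above; the '/api'
# route, the Flask app object, and what is done with the payload are
# assumptions, not taken from the original snippets.
from flask import Flask, request

app = Flask(__name__)

@app.route('/api', methods=['POST'])
def api():
    info = request.json or {}
    # A real handler (cf. the create() snippet earlier) might wrap this in
    # HostInfo(info) and call save(); here the payload is just acknowledged.
    return 'received %d fields' % len(info)

# app.run() would serve this on 127.0.0.1:5000, the address used by main().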