def run(self, serverState, request, response):
    """Handle a trust request for a remote server.

    If no node with the given hostname/FQDN is known yet, ask the remote
    server to add this node and record the outgoing connect request;
    otherwise report that the host is already trusted.
    """
    conf = ServerConf()
    host = request.getParam('host')
    client_secure_port = request.getParam('client_secure_port')
    result = dict()
    # refuse if a server with this hostname or fqdn is already trusted
    connectedNodes = conf.getNodes()
    if connectedNodes.hostnameOrFQDNExists(host):
        errorMessage = "%s is already trusted" % host
        response.add(errorMessage, status="ERROR")
        log.info(errorMessage)
        return
    serv = RawServerMessage(host, client_secure_port)
    resp = ProcessedResponse(serv.sendAddNodeRequest(host))
    if not resp.isOK():
        response.add("Remote server said: %s" % resp.getMessage(),
                     status="ERROR")
        return
    result = resp.getData()
    # remember that we sent a connect request so the answer can be matched
    nodeConnectRequest = NodeConnectRequest(result['serverId'],
                                            int(client_secure_port),
                                            None,
                                            None,
                                            result['fqdn'],
                                            host)
    conf.addSentNodeConnectRequest(nodeConnectRequest)
    result['nodeConnectRequest'] = nodeConnectRequest
    log.info("Added node %s" % host)
    response.add('', result)
def getCommandOutputData(cmdID, workerServer):
    """Pull the output asset of a command from a worker server.

    Returns the raw tar response on success, or None when the server did
    not return a tar archive (the error is logged instead).
    """
    log.log(cpc.util.log.TRACE,
            "Trying to pull command output from %s" % workerServer)
    s2smsg = ServerMessage(workerServer)
    rundata_response = s2smsg.pullAssetRequest(cmdID, Asset.cmdOutput())
    if rundata_response.getType() != "application/x-tar":
        # not the expected tar archive: log, and inspect a JSON error
        # payload if that is what we got instead
        log.error("Incorrect response type: %s, should be %s" %
                  (rundata_response.getType(), 'application/x-tar'))
        if rundata_response.getType() == "text/json":
            errormsg = rundata_response.message.read(len(rundata_response.
                                                        message))
            presp = ProcessedResponse(rundata_response)
            if not presp.isOK():
                log.error('Response from worker server not OK: %s' % errormsg)
    else:
        # got the tar archive: tell the worker server it can drop the asset
        s2smsg.clearAssetRequest(cmdID)
        log.log(cpc.util.log.TRACE,
                "Successfully pulled command output data from %s." %
                workerServer)
        return rundata_response
    # runfile = rundata_response.getRawData()
    # this doesnt work because the mmap closes as it is returned
    return None
def testNetworkRoute(self): self.create5NodeNetwork() #send message from node 0 to 2 http conf = ConnectionBundle(confdir=self.serverConfs[0], reload=True) client = ClientMessage() response = ProcessedResponse( client.pingServer(self.hostname, self.node2HttpsPort)) print response.pprint() #send message from node 0 to 4 https response = ProcessedResponse( client.pingServer(self.hostname, self.node4HttpsPort)) print response.pprint() #send message from node 0 to 4 http #this throws an exception since we should not route with http response = ProcessedResponse( client.pingServer(self.hostname, self.node4Port)) print response.pprint()
def _sendPing(self, first, last):
    """Do the actual sending"""
    changed = self.cmdsChanged
    self.cmdsChanged = False
    # NOTE(review): the original formatting was lost; the condition
    # variable is assumed to guard only the snapshot of the running
    # workloads while the heartbeat XML is built -- confirm lock scope.
    with self.runCondVar:
        # first write the items to xml
        cmds = self.worker._getWorkloads()
        co = StringIO()
        co.write('<heartbeat worker_id="%s">' % self.workerID)
        for item in cmds:
            if item.running:
                item.hbi.writeXML(co)
                for subwl in item.joinedTo:
                    subwl.hbi.writeXML(co)
        co.write("</heartbeat>")
    clnt = WorkerMessage()
    resp = clnt.workerHeartbeatRequest(self.workerID, self.workerDir,
                                       first, last, changed, co.getvalue())
    presp = ProcessedResponse(resp)
    # build a human-readable tag describing what kind of ping this was
    if last:
        timestr = " last"
    else:
        timestr = ""
    if first:
        timestr += " first"
    if changed:
        timestr += " update"
    log.debug("Sent%s heartbeat signal. Result was %s" %
              (timestr, presp.getStatus()))
    if presp.getStatus() != "OK":
        # if the response was not OK, the upstream server thinks we're
        # dead and has signaled that to the originating server. We
        # should just die now.
        faulty = presp.getData()
        log.info("Error from heartbeat request. Stopping %s" % str(faulty))
        #log.error("Got error from heartbeat request. Stopping worker.")
        if (type(faulty) == type(dict()) and 'faulty' in faulty):
            # kill only the workloads the server flagged as faulty
            for faultyItem in faulty['faulty']:
                self.worker.killWorkload(faultyItem)
        else:
            pass
            #sys.exit(1)
    respData = presp.getData()
    if type(respData) == type(dict()):
        # newer-style reply: heartbeat interval plus a random file to
        # (re)create for authentication of future heartbeats
        rettime = int(respData['heartbeat-time'])
        self.randomFile = respData['random-file']
        self._createRandomFile()
    else:
        # older-style reply: just the heartbeat interval
        rettime = int(respData)
    #rettime=int(presp.getData())
    log.debug("Waiting %s seconds for next ping" % (rettime))
    return rettime
def testStart2Servers(self):
    """Start two cpc servers, connect and trust them, then verify that
    each one appears in the other's node list and that the reported
    network topology contains both servers.
    """
    numServers = 2
    self.createConfFolders(numServers)
    hostname = gethostname()
    node0HttpsPort = 13807
    node1HttpsPort = 13808
    node0HttpPort = 14807
    node1HttpPort = 14808
    # NOTE: calling cpc.server.server.forkAndRun(cf, debug) directly here
    # messes things up when shutting down (the process won't stop), so the
    # servers are started via a subprocess instead.
    for i in range(numServers):
        args = ['../../../../cpc-server', '-c', self.serverConfs[i],
                'start']
        subprocess.call(args)
    time.sleep(2)
    # connect node 0 to node 1 over node 1's plain http port
    # BUG FIX: the command name was misspelled 'connnect-server'
    args = ['../../../../cpc-server', '-c', self.serverConfs[0],
            'connect-server', hostname, str(node1HttpPort)]
    subprocess.call(args)
    # node 1 trusts node 0 via node 0's https port
    args = ['../../../../cpc-server', '-c', self.serverConfs[1],
            'trust', hostname, str(node0HttpsPort)]
    subprocess.call(args)
    # verify existence of the nodes in each conf file
    conf1 = ServerConf(confdir=self.serverConfs[0], reload=True)
    node0Nodes = conf1.getNodes()
    self.assertTrue(node0Nodes.exists(hostname, node1HttpsPort))
    conf2 = ServerConf(confdir=self.serverConfs[1], reload=True)
    node1Nodes = conf2.getNodes()
    self.assertTrue(node1Nodes.exists(hostname, node0HttpsPort))
    # do a network topology call
    conf = ConnectionBundle(confdir=self.serverConfs[0], reload=True)
    client = ClientMessage()
    topology = ProcessedResponse(client.networkTopology()).getData()
    self.assertEquals(topology.size(), 2)
def testNetworkTopology(self):
    """Build the 5-node test network and verify the reported topology.

    Expected links: 0-1, 0-2, 1-2, 2-3, 2-4 (node 2 is the hub).
    """
    self.create5NodeNetwork()
    conf = ConnectionBundle(confdir=self.serverConfs[0], reload=True)
    client = ClientMessage()
    topology = ProcessedResponse(client.networkTopology()).getData()
    self.assertEquals(topology.size(), 5)
    #verify that the topology is correct
    #node 0 should have 2 connections one to node 1 and one to node 2
    node0 = "%s:%s" % (self.hostname, str(self.node0HttpsPort))
    node = topology.get(node0)
    self.assertEquals(node.nodes.size(), 2)
    self.assertTrue(node.nodes.exists(self.hostname, self.node1HttpsPort))
    self.assertTrue(node.nodes.exists(self.hostname, self.node2HttpsPort))
    # node 1 should have 2 connections (node 0 and node 2); only the link
    # to node 2 is checked explicitly here
    node1 = "%s:%s" % (self.hostname, str(self.node1HttpsPort))
    node = topology.get(node1)
    self.assertEquals(node.nodes.size(), 2)
    self.assertTrue(node.nodes.exists(self.hostname, self.node2HttpsPort))
    #node2 should have 4 connections one to each other node
    node2 = "%s:%s" % (self.hostname, str(self.node2HttpsPort))
    node = topology.get(node2)
    self.assertEquals(node.nodes.size(), 4)
    self.assertTrue(node.nodes.exists(self.hostname, self.node0HttpsPort))
    self.assertTrue(node.nodes.exists(self.hostname, self.node1HttpsPort))
    self.assertTrue(node.nodes.exists(self.hostname, self.node3HttpsPort))
    self.assertTrue(node.nodes.exists(self.hostname, self.node4HttpsPort))
    #node 3 should have 1 connection to node 2
    node3 = "%s:%s" % (self.hostname, str(self.node3HttpsPort))
    node = topology.get(node3)
    self.assertEquals(node.nodes.size(), 1)
    self.assertTrue(node.nodes.exists(self.hostname, self.node2HttpsPort))
    #node 4 should have 1 connection to node 2
    node4 = "%s:%s" % (self.hostname, str(self.node4HttpsPort))
    node = topology.get(node4)
    self.assertEquals(node.nodes.size(), 1)
    self.assertTrue(node.nodes.exists(self.hostname, self.node2HttpsPort))
def testStartServer(self):
    """Start a single cpc server in a subprocess and ping it."""
    self.createConfFolder(0)
    # starting via cpc.server.server.forkAndRun(cf, debug) directly would
    # mess up shutdown (the process won't stop), hence the subprocess
    cmdLine = ['../../../../cpc', '-c', self.serverConfs[0], 'start']
    subprocess.call(cmdLine)
    time.sleep(2)
    conf = ServerConf(self.serverConfs[0], True)
    # send a ping request and print the processed response to verify
    # the server is up
    client = ClientMessage()
    ProcessedResponse(client.pingServer()).pprint()
    client.closeClient()
def requestNetworkTopology(topology, serverState=None):
    """ Asks each neigbouring node for their network topology

        inputs:
            topology:Nodes  The list of the topology generated so far
            serverState:ServerState  if provided worker states are fetched.
                since this method is called by getNetworkTopology() which
                in turn is called from places where we do not pass (and
                don't want) the serverState we provide this option.
                Also it is not needed as the calling server always knows
                the most up to date state of its own workers.
    """
    conf = ServerConf()
    selfNode = Node.getSelfNode(conf)
    selfNode.setNodes(conf.getNodes())
    topology.addNode(selfNode)
    if serverState:
        selfNode.workerStates = WorkerStateHandler.getConnectedWorkers(
            serverState.getWorkerStates())
    for neighbour in selfNode.getNodes().nodes.itervalues():
        # skip nodes we already know about
        if topology.exists(neighbour.getId()):
            continue
        # only ask nodes that are currently connected
        if not neighbour.isConnected():
            continue
        try:
            clnt = DirectServerMessage(neighbour, conf=conf)
            # send along the current topology
            rawresp = clnt.networkTopology(topology)
            processedResponse = ProcessedResponse(rawresp)
            topology = processedResponse.getData()
        except ServerConnectionError as e:
            # we cannot connect to the node, and its marked as
            # unreachable; we must still add it to the topology
            log.error("node %s unreachable when asking for network "
                      "topology: error was %s" % (neighbour.getId(),
                                                  e.__str__()))
            topology.addNode(neighbour)
            #todo notify in topology that this node is not connected?
    return topology