def _work_done(res):
    """Pop the finished queue entry and schedule the next one, if any."""
    log.msg("Completed a piece of work")
    del self.queue[0]
    if self.queue:
        log.msg("Preparing next piece of work")
        # run the next item on a fresh reactor turn
        eventually(self._process)
    return res
def buildFinished(self, text, results):
    """This method must be called when the last Step has completed. It marks
    the Build as complete and returns the Builder to the 'idle' state.

    It takes two arguments which describe the overall build status: text,
    results. 'results' is one of SUCCESS, WARNINGS, or FAILURE.

    If 'results' is SUCCESS or WARNINGS, we will permit any dependant builds
    to start. If it is 'FAILURE', those builds will be abandoned."""
    self.finished = True
    if self.remote:
        # stop watching for the slave connection dropping
        self.remote.dontNotifyOnDisconnect(self.lostRemote)
    self.results = results
    log.msg(" %s: build finished" % self)
    # publish the outcome through the status object
    self.build_status.setText(text)
    self.build_status.setResults(results)
    self.build_status.buildFinished()
    if self.progress and results == SUCCESS:
        # XXX: also test a 'timing consistent' flag?
        log.msg(" setting expectations for next time")
        self.builder.setExpectations(self.progress)
    # release locks on the next reactor turn, after this callback chain
    reactor.callLater(0, self.releaseLocks)
    # fire the Deferred handed to whoever started the build; clear it so
    # it cannot fire twice
    self.deferred.callback(self)
    self.deferred = None
def bind_event(self, name, callback, plugin):
    """Register *callback* belonging to *plugin* against the named event."""
    log.msg(
        "Binding event %r with callback %r from %r" % (
            name, callback, plugin
        )
    )
    event = self.events[name]
    event.bind(callback, plugin)
def main(reactor):
    """Schedule a few simulated requests plus one deferred expensive
    computation, then run the expensive generator synchronously twice
    per input, and gather all the deferreds."""
    # simulated incoming requests, fired at staggered times
    d5 = defer.Deferred().addCallback(log.msg)
    reactor.callLater(0.3, d5.callback, "########## simulated request 1 ############")
    d6 = defer.Deferred().addCallback(log.msg)
    reactor.callLater(0.5, d6.callback, "########## sim request 2 ############")
    d7 = defer.Deferred().addCallback(log.msg)
    reactor.callLater(0.7, d7.callback, "########## simulated request 3 ############")
    # simulate an external event triggering an expensive computation while
    # other expensive computations are happening.
    d8 = defer.Deferred()
    d8.addCallback(do_some_expensive_things)
    reactor.callLater(0.1, d8.callback, 201)
    for number in [54.0, 42, 10, 34]:
        for label in ("first", "second"):
            total = reduce(add, list(expensive(number)), 0)
            log.msg("{} for {}: {}".format(label, number, total))
    return defer.gatherResults([d5, d6, d7, d8]).addCallback(log.msg)
def stepDone(self, result, step):
    """This method is called when the BuildStep completes. It is passed a
    status object from the BuildStep and is responsible for merging the
    Step's results into those of the overall Build."""
    terminate = False
    text = None
    # steps may return a bare result code or a (result, text) tuple
    if type(result) == types.TupleType:
        result, text = result
    assert type(result) == type(SUCCESS)
    log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
    # NOTE(review): per-step codes accumulate in self.results (plural);
    # the overall verdict lives in self.result (singular)
    self.results.append(result)
    if text:
        self.text.extend(text)
    # losing the slave connection always ends the build
    if not self.remote:
        terminate = True
    if result == FAILURE:
        if step.warnOnFailure:
            # never downgrade an overall FAILURE to WARNINGS
            if self.result != FAILURE:
                self.result = WARNINGS
        if step.flunkOnFailure:
            self.result = FAILURE
        if step.haltOnFailure:
            terminate = True
    elif result == WARNINGS:
        if step.warnOnWarnings:
            if self.result != FAILURE:
                self.result = WARNINGS
        if step.flunkOnWarnings:
            self.result = FAILURE
    elif result == EXCEPTION:
        # an exception in a step always terminates the build
        self.result = EXCEPTION
        terminate = True
    return terminate
def startService(self):
    """Start one project poller per process slot and announce startup."""
    for slot in range(self.max_proc):
        self._wait_for_project(slot)
    log.msg(format='Scrapyd %(version)s started: max_proc=%(max_proc)r, runner=%(runner)r',
            version=__version__,
            max_proc=self.max_proc,
            runner=self.runner,
            system='Launcher')
    instance_id = str(self.config.get('SCRAPYD_INSTANCE'))
    log.msg('SCRAPYD_INSTANCE_ID: {}'.format(instance_id))
def run(self):
    """Connect the bot to its IRC network and block in the reactor loop."""
    log.msg("Running bot as %r" % self.name)
    self.irc = BottuClientFactory(self)
    self.join_passive_channel.ready()
    reactor.connectTCP(self.network, self.port, self.irc)
    reactor.run()  # blocks until the reactor is stopped
    log.msg("Stopping bot")
def performAction(self, req):
    """Cancel one pending build request (by numeric id) or all of them
    (id == "all"), subject to the 'cancelPendingBuild' authz check.

    Returns (via the Deferred) the redirect path for the builder page,
    or the authz-failure page when the action is denied.
    """
    # BUG FIX: the original used a bare `except:` around the id parsing
    # and only assigned cancel_all inside the try block; narrow the catch
    # to the conversion errors int() can raise and pre-initialize the flag.
    cancel_all = False
    try:
        request_id = req.args.get("id", [None])[0]
        if request_id == "all":
            cancel_all = True
        else:
            request_id = int(request_id)
    except (ValueError, TypeError):
        # missing or malformed id: nothing to cancel
        request_id = None
    authz = self.getAuthz(req)
    if request_id:
        c = interfaces.IControl(self.getBuildmaster(req))
        builder_control = c.getBuilder(self.builder_status.getName())
        brcontrols = yield builder_control.getPendingBuildRequestControls()
        for build_req in brcontrols:
            if cancel_all or (build_req.brid == request_id):
                log.msg("Cancelling %s" % build_req)
                res = yield authz.actionAllowed('cancelPendingBuild',
                                                req, build_req)
                if res:
                    build_req.cancel()
                else:
                    defer.returnValue(path_to_authzfail(req))
                    return
                if not cancel_all:
                    break
    defer.returnValue(path_to_builder(req, self.builder_status))
def startBuild(self): scheds = self.master.db.schedulers # if onlyIfChanged is True, then we will skip this build if no # important changes have occurred since the last invocation if self.onlyIfChanged: classifications = \ yield scheds.getChangeClassifications(self.objectid) # see if we have any important changes for imp in classifications.itervalues(): if imp: break else: log.msg(("Nightly Scheduler <%s>: skipping build " + "- No important changes on configured branch") % self.name) return changeids = sorted(classifications.keys()) yield self.addBuildsetForChanges(reason=self.reason, changeids=changeids) max_changeid = changeids[-1] # (changeids are sorted) yield scheds.flushChangeClassifications(self.objectid, less_than=max_changeid+1) else: # start a build of the latest revision, whatever that is yield self.addBuildsetForLatest(reason=self.reason, branch=self.branch)
def requestRemoteForwarding(self, remotePort, hostport):
    """Ask the SSH server to forward its *remotePort* to *hostport* here."""
    payload = forwarding.packGlobal_tcpip_forward(('0.0.0.0', remotePort))
    d = self.sendGlobalRequest('tcpip-forward', payload, wantReply=1)
    log.msg('requesting remote forwarding %s:%s' % (remotePort, hostport))
    d.addCallback(self._cbRemoteForwarding, remotePort, hostport)
    d.addErrback(self._ebRemoteForwarding, remotePort, hostport)
def _enterRawMode():
    """Put the terminal attached to stdin into raw mode.

    The previous settings are saved in _savedRawMode so they can be
    restored later; a second call while already raw is a no-op.
    """
    global _inRawMode, _savedRawMode
    if _inRawMode:
        return
    fd = sys.stdin.fileno()
    try:
        old = tty.tcgetattr(fd)
        new = old[:]
    # BUG FIX: was a bare `except:`, which hid real programming errors;
    # tcgetattr raises termios.error (re-exported as tty.error) when
    # stdin is not a terminal.
    except tty.error:
        log.msg('not a typewriter!')
    else:
        # iflag  (fixed typo: was "iflage")
        new[0] = new[0] | tty.IGNPAR
        new[0] = new[0] & ~(tty.ISTRIP | tty.INLCR | tty.IGNCR |
                            tty.ICRNL | tty.IXON | tty.IXANY | tty.IXOFF)
        if hasattr(tty, 'IUCLC'):
            new[0] = new[0] & ~tty.IUCLC
        # lflag  (removed a duplicated tty.ECHO term -- no behavior change)
        new[3] = new[3] & ~(tty.ISIG | tty.ICANON | tty.ECHO |
                            tty.ECHOE | tty.ECHOK | tty.ECHONL)
        if hasattr(tty, 'IEXTEN'):
            new[3] = new[3] & ~tty.IEXTEN
        # oflag
        new[1] = new[1] & ~tty.OPOST
        # read() returns as soon as 1 byte is available, with no timeout
        new[6][tty.VMIN] = 1
        new[6][tty.VTIME] = 0
        _savedRawMode = old
        tty.tcsetattr(fd, tty.TCSANOW, new)
        #tty.setraw(fd)
        _inRawMode = 1
def doCopy(self, res):
    # now copy tree to workdir
    fromdir = os.path.join(self.builder.basedir, self.srcdir)
    todir = os.path.join(self.builder.basedir, self.workdir)
    if runtime.platformType != "posix":
        # non-posix: no 'cp' binary; use shutil.copytree in a worker
        # thread so the long copy does not block the reactor
        d = threads.deferToThread(shutil.copytree, fromdir, todir)
        def cb(_):
            return 0  # rc=0
        def eb(f):
            self.sendStatus(
                {'header': 'exception from copytree\n' + f.getTraceback()})
            return -1  # rc=-1
        d.addCallbacks(cb, eb)
        return d
    if not os.path.exists(os.path.dirname(todir)):
        os.makedirs(os.path.dirname(todir))
    if os.path.exists(todir):
        # I don't think this happens, but just in case..
        log.msg(
            "cp target '%s' already exists -- cp will not do what you think!"
            % todir)
    # posix: shell out to cp, preserving symlinks (-P) and modes (-p)
    command = ['cp', '-R', '-P', '-p', fromdir, todir]
    c = runprocess.RunProcess(self.builder, command, self.builder.basedir,
                              sendRC=False, timeout=self.timeout,
                              maxTime=self.maxTime,
                              logEnviron=self.logEnviron, usePTY=False)
    self.command = c
    d = c.start()
    d.addCallback(self._abandonOnFailure)
    return d
def onConnect():
    # Runs once the SSH connection is established: sets up keepalives,
    # local/remote port forwarding, the interactive session channel, and
    # optionally daemonizes the process.
    # if keyAgent and options['agent']:
    #     cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal, conn)
    #     cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
    if hasattr(conn.transport, 'sendIgnore'):
        _KeepAlive(conn)
    if options.localForwards:
        for localPort, hostport in options.localForwards:
            s = reactor.listenTCP(localPort,
                                  forwarding.SSHListenForwardingFactory(
                                      conn, hostport,
                                      SSHListenClientForwardingChannel))
            conn.localForwards.append(s)
    if options.remoteForwards:
        for remotePort, hostport in options.remoteForwards:
            log.msg('asking for remote forwarding for %s:%s' %
                    (remotePort, hostport))
            conn.requestRemoteForwarding(remotePort, hostport)
        reactor.addSystemEventTrigger('before', 'shutdown', beforeShutdown)
    if not options['noshell'] or options['agent']:
        conn.openChannel(SSHSession())
    if options['fork']:
        # daemonize: parent exits, child detaches from the controlling
        # terminal and closes stdin/stdout/stderr
        if os.fork():
            os._exit(0)
        os.setsid()
        for i in range(3):
            try:
                os.close(i)
            except OSError as e:
                import errno
                if e.errno != errno.EBADF:
                    raise
def monitorDrop(self, cmd):
    """Handle a 'drop monitor' command: remove the named monitor."""
    name = cmd.keywords['name'].values[0]
    log.msg('Dropping monitor %s' % name)
    try:
        monitor.drop(name)
    except monitor.MonitorError as err:
        # report the failure back to the client rather than crashing
        self.sendLine(str(err))
def run(self):
    # Drive the whole memory-check: set up nodes, run the test, tear
    # down, then re-raise any recorded failure once the reactor stops.
    framelog = os.path.join(self.basedir, "driver.log")
    log.startLogging(open(framelog, "a"), setStdout=False)
    log.msg("CHECK_MEMORY(mode=%s) STARTING" % self.mode)
    #logfile = open(os.path.join(self.testdir, "log"), "w")
    #flo = log.FileLogObserver(logfile)
    #log.startLoggingWithObserver(flo.emit, setStdout=False)
    d = fireEventually()
    d.addCallback(lambda res: self.setUp())
    d.addCallback(lambda res: self.record_initial_memusage())
    d.addCallback(lambda res: self.make_nodes())
    d.addCallback(lambda res: self.wait_for_client_connected())
    d.addCallback(lambda res: self.do_test())
    d.addBoth(self.tearDown)
    def _err(err):
        # remember the failure so it can be re-raised after reactor.run()
        self.failed = err
        log.err(err)
        print err
    d.addErrback(_err)
    def _done(res):
        reactor.stop()
        return res
    d.addBoth(_done)
    reactor.run()
    if self.failed:
        # raiseException doesn't work for CopiedFailures
        self.failed.raiseException()
def get_hmac(key, message):
    """
    Return the HMAC digest of *message* under *key*.

    NOTE(review): the old docstring said "Encrypt", but HMAC authenticates
    rather than encrypts.  Also, the log line below records the raw key,
    which leaks secret material into the logs -- consider removing it.
    """
    result = hmac.new(key, message, sha).digest()
    log.msg('[hmac] given %r for %r, made %r' % (key, message, result))
    return result
def getAndCheckProperties(req):
    """
    Fetch custom build properties from the HTTP request of a "Force build"
    or "Resubmit build" HTML form.  Check the names for valid strings, and
    return None if a problem is found.  Return a new Properties object
    containing each property found in req.
    """
    master = req.site.buildbot_service.master
    name_ok = master.config.validation['property_name']
    value_ok = master.config.validation['property_value']
    properties = Properties()
    i = 1
    while True:
        # form fields are numbered property1name/property1value, ...
        name = req.args.get("property%dname" % i, [""])[0]
        value = req.args.get("property%dvalue" % i, [""])[0]
        if not name:
            break
        if not (name_ok.match(name) and value_ok.match(value)):
            log.msg("bad property name='%s', value='%s'" % (name, value))
            return None
        properties.setProperty(name, value, "Force Build Form")
        i += 1
    return properties
def rebuildBuild(self, bs, reason="<rebuild, no reason given>",
                 extraProperties=None):
    """Submit a new buildset that re-runs the finished build *bs*.

    Runtime-set properties are not carried over; *extraProperties*, when
    supplied, are merged on top.  Returns (bsid, brids) via the Deferred,
    or None when the build has no sourcestamps to rebuild from.
    """
    if not bs.isFinished():
        return
    # Make a copy of the properties so as not to modify the original build.
    properties = Properties()
    # Don't include runtime-set properties in a rebuild request
    properties.updateFromPropertiesNoRuntime(bs.getProperties())
    # BUG FIX: the original tested `extraProperties is None`, so the extra
    # properties were merged only when they were absent (passing None to
    # updateFromProperties); merge them when they are actually supplied.
    if extraProperties is not None:
        properties.updateFromProperties(extraProperties)
    properties_dict = dict((k, (v, s)) for (k, v, s) in properties.asList())
    ssList = bs.getSourceStamps(absolute=True)
    if ssList:
        sourcestampsetid = yield ssList[0].getSourceStampSetId(
            self.control.master)
        dl = []
        for ss in ssList[1:]:
            # add deferred to the list
            dl.append(ss.addSourceStampToDatabase(self.control.master,
                                                  sourcestampsetid))
        yield defer.gatherResults(dl)
        bsid, brids = yield self.control.master.addBuildset(
            builderNames=[self.original.name],
            sourcestampsetid=sourcestampsetid,
            reason=reason,
            properties=properties_dict)
        defer.returnValue((bsid, brids))
    else:
        log.msg('Cannot start rebuild, rebuild has no sourcestamps for a new build')
        defer.returnValue(None)
def generate(self, request, node):
    # Parse self.data as XML; on any parse failure fall back to
    # rendering it as a plain text node.
    if self.data:
        try:
            child = microdom.parseString(self.data)
        except Exception, e:
            # not valid XML -- log and degrade gracefully
            log.msg("Error parsing return value, probably invalid xml:", e)
            child = request.d.createTextNode(self.data)
def stepDone(self, results, step):
    """This method is called when the BuildStep completes. It is passed a
    status object from the BuildStep and is responsible for merging the
    Step's results into those of the overall Build."""
    terminate = False
    text = None
    # steps may return a bare result code or a (results, text) tuple
    if isinstance(results, tuple):
        results, text = results
    assert isinstance(results, type(SUCCESS)), "got %r" % (results,)
    summary = yield step.getBuildResultSummary()
    if 'build' in summary:
        # a 'build' entry in the summary overrides any step-supplied text
        text = [summary['build']]
    log.msg(" step '%s' complete: %s (%s)" %
            (step.name, statusToString(results), text))
    if text:
        self.text.extend(text)
        self.master.data.updates.setBuildStateString(
            self.buildid, bytes2unicode(" ".join(self.text)))
    self.results, terminate = computeResultAndTermination(step, results,
                                                          self.results)
    if not self.conn:
        # force the results to retry if the connection was lost
        self.results = RETRY
        terminate = True
    defer.returnValue(terminate)
def detached(self, slave):
    """This is called when the connection to the bot is lost."""
    # find the SlaveBuilder wrapping this slave, if we know about it
    sb = None
    for candidate in self.attaching_slaves + self.slaves:
        if candidate.slave == slave:
            sb = candidate
            break
    if sb is None:
        log.msg("WEIRD: Builder.detached(%s) (%s)"
                " not in attaching_slaves(%s)"
                " or slaves(%s)" % (slave, slave.slavename,
                                    self.attaching_slaves,
                                    self.slaves))
        return
    if sb.state == BUILDING:
        # the Build's .lostRemote method (invoked by a notifyOnDisconnect
        # handler) will cause the Build to be stopped, probably right
        # after the notifyOnDisconnect that invoked us finishes running.
        pass
    if sb in self.attaching_slaves:
        self.attaching_slaves.remove(sb)
    if sb in self.slaves:
        self.slaves.remove(sb)
    self.builder_status.addPointEvent(['disconnect', slave.slavename])
    sb.detached()  # inform the SlaveBuilder that their slave went away
    self.updateBigStatus()
def test_stop(self):
    """
    Stop will stop a running process.
    """
    runner = Runner()
    # I'm getting AF_UNIX path too long errors using self.mktemp()
    base = FilePath(tempfile.mkdtemp())
    log.msg('tmpdir: %r' % base.path)
    root = base.child('root')
    src = base.child('src')
    dst = base.child('dst')
    # start a proxy from src to dst, rooted at root
    _ = yield runner.start(root.path, 'unix:'+src.path, 'unix:'+dst.path)
    pidfile = root.child('grace.pid')
    pid = pidfile.getContent()
    # make sure the child process dies even if the test fails
    self.addCleanup(self.kill, pid)
    _ = yield runner.stop(root.path)
    # tail the log until you see Server Shut Down
    # XXX stop should maybe do the same... so that it doesn't return until
    # the process has actually stopped.
    logfile = root.child('grace.log')
    self.assertTrue(logfile.exists())
    _ = yield self.tailUntil(logfile.path, 'Server Shut Down.')
    self.assertFalse(pidfile.exists(),
                     "pidfile should be gone: %r" % pidfile.path)
def start_instance_result(result):
    """Relay a failed preparation to anyone awaiting substantiation."""
    # If we don't report success, then preparation failed.
    if not result:
        log.msg("Worker '%s' does not want to substantiate at this time"
                % (self.name,))
        self._substantiation_notifier.notify(False)
    return result
def _mail_missing_message(self, subject, text):
    """Build and send a 'worker missing' email.

    Reuses the first configured MailNotifier (for its fromaddr and
    relayhost) when one exists; otherwise falls back to a default
    notifier talking SMTP to localhost.  Returns the Deferred from
    sendMessage (useful for testing).
    """
    # FIXME: This should be handled properly via the event api
    # we should send a missing message on the mq, and let any reporter
    # handle that
    buildmaster = self.botmaster.master
    notifier = None
    for svc in buildmaster.services:
        if isinstance(svc, MailNotifier):
            notifier = svc
            break
    if notifier is None:
        # if not, they get a default MailNotifier, which always uses SMTP
        # to localhost and uses a dummy fromaddr of "buildbot".
        log.msg("worker-missing msg using default MailNotifier")
        notifier = MailNotifier("buildbot")
    # now construct the mail
    m = Message()
    m.set_payload(text)
    m['Date'] = formatdate(localtime=True)
    m['Subject'] = subject
    m['From'] = notifier.fromaddr
    recipients = self.notify_on_missing
    m['To'] = ", ".join(recipients)
    # return the Deferred for testing purposes
    return notifier.sendMessage(m, recipients)
def shutdown(self):
    """Shutdown the worker"""
    if self.conn:
        yield self.conn.remoteShutdown()
    else:
        # nothing to do without a connection
        log.msg("no remote; worker is already shut down")
def process(self):
    # Entry point for an incoming proxied request: log it, resolve the
    # target host/port from the Host header, and replay the request
    # through a ProxyClientFactory.
    log.msg("PROCESS: %s" % id(self))
    log.msg("URI:%s PATH %s" % (self.uri, self.path + str(self.args)))
    log.msg(
        "Request:\n\t%s" % "\n\t".join(
            ("%s\t%s" % (x[0], ";".join(x[1]))
             for x in self.requestHeaders.getAllRawHeaders()))
    )
    session = Session(self)
    session.preRequest()
    host = self.getHeader("host")
    if not host:
        # a proxy cannot route without a Host header
        log.err("No host header given")
        self.setResponseCode(400)
        self.finish()
        return
    port = 80
    if ":" in host:
        host, port = host.split(":")
        port = int(port)
    self.setHost(host, port)
    # NOTE(review): the URI and headers are logged a second time below,
    # duplicating the logging above -- possibly accidental.
    log.msg("URI:%s PATH %s" % (self.uri, self.path + str(self.args)))
    log.msg(
        "Request:\n\t%s" % "\n\t".join(
            ("%s\t%s" % (x[0], ";".join(x[1]))
             for x in self.requestHeaders.getAllRawHeaders()))
    )
    self.content.seek(0, 0)
    postData = self.content.read()
    factory = ProxyClientFactory(self.method, self.uri, postData,
                                 self.requestHeaders.getAllRawHeaders(),
                                 session)
    self.reactor.connectTCP(host, port, factory)
def releaseLocks(self):
    """
    I am called to release any locks after a build has finished
    """
    log.msg("releaseLocks(%s): %s" % (self, self.locks))
    for held_lock, access in self.locks:
        held_lock.release(self, access)
def _soft_disconnect(self, fast=False):
    # a negative build_wait_timeout means the worker should never be shut
    # down, so just disconnect.
    if self.build_wait_timeout < 0:
        yield AbstractWorker.disconnect(self)
        return
    if self.missing_timer:
        self.missing_timer.cancel()
        self.missing_timer = None
    # if a substantiation is in flight, let it finish before tearing
    # down, to avoid leaving the worker in an inconsistent state
    if self._substantiation_notifier:
        log.msg("Weird: Got request to stop before started. Allowing "
                "worker to start cleanly to avoid inconsistent state")
        yield self._substantiation_notifier.wait()
        self.substantiation_build = None
        log.msg("Substantiation complete, immediately terminating.")
    if self.conn is not None:
        # disconnect and insubstantiate in parallel; either failure
        # aborts the pair
        yield defer.DeferredList([
            AbstractWorker.disconnect(self),
            self.insubstantiate(fast)
        ], consumeErrors=True, fireOnOneErrback=True)
    else:
        yield AbstractWorker.disconnect(self)
        yield self.stop_instance(fast)
def _readAndWrite(self, source, condition):
    # note: gtk-1.2's gtk_input_add presents an API in terms of gdk
    # constants like INPUT_READ and INPUT_WRITE. Internally, it will add
    # POLL_HUP and POLL_ERR to the poll() events, but if they happen it
    # will turn them back into INPUT_READ and INPUT_WRITE. gdkevents.c
    # maps IN/HUP/ERR to INPUT_READ, and OUT/ERR to INPUT_WRITE. This
    # means there is no immediate way to detect a disconnected socket.
    # The g_io_add_watch() API is more suited to this task. I don't think
    # pygtk exposes it, though.
    why = None
    didRead = None
    try:
        if condition & gtk.GDK.INPUT_READ:
            why = source.doRead()
            didRead = source.doRead
        if not why and condition & gtk.GDK.INPUT_WRITE:
            # if doRead caused connectionLost, don't call doWrite
            # if doRead is doWrite, don't call it again.
            if not source.disconnected and source.doWrite != didRead:
                why = source.doWrite()
                didRead = source.doWrite  # if failed it was in write
    except:
        # the broad catch is deliberate reactor glue: any exception from
        # the selectable becomes a reason to disconnect it
        why = sys.exc_info()[1]
        log.msg('Error In %s' % source)
        log.deferr()
    if why:
        self._disconnectSelectable(source, why, didRead == source.doRead)
def test_switch(self):
    """
    Switch should work
    """
    runner = Runner()
    # I'm getting AF_UNIX path too long errors using self.mktemp()
    base = FilePath(tempfile.mkdtemp())
    log.msg('tmpdir: %r' % base.path)
    root = base.child('root')
    src = base.child('src')
    dst = base.child('dst')
    # start a proxy from src to dst, rooted at root
    _ = yield runner.start(root.path, 'unix:'+src.path, 'unix:'+dst.path)
    pidfile = root.child('grace.pid')
    pid = pidfile.getContent()
    # make sure the child process dies even if the test fails
    self.addCleanup(self.kill, pid)
    # repoint src at a new destination, then confirm via ls
    r = yield runner.switch(root.path, 'unix:'+src.path, 'unix:/foo')
    r = yield runner.ls(root.path)
    self.assertEqual(r, [
        {
            'src': 'unix:'+src.path,
            'dst': 'unix:/foo',
            'conns': 0,
            'active': True,
        }
    ], "Should have switched")
def unset(x):
    """Clear the pending-call marker once the debounced call finishes."""
    log.msg('debounced function complete')
    self.callDeferred = None
    return x
def config(self, config, dbconfig=None, memconfig=None, masterconf=None):
    '''Configure the server from the given config dicts.'''
    netport = config.get('netport')  # client connection port
    webport = config.get('webport')  # HTTP port
    rootport = config.get('rootport')  # root-node port
    remoteportlist = config.get('remoteport', [])  # remote node configs
    servername = config.get('name')  # server name
    logpath = config.get('log')  # log file path
    hasdb = config.get('db')  # database connection flag
    hasmem = config.get('mem')  # memcached connection flag
    app = config.get('app')  # entry module name
    cpuid = config.get('cpu')  # CPU affinity mask
    mreload = config.get('reload')  # module name to (re)load
    self.servername = servername
    if masterconf:
        # connect back to the master node
        masterport = masterconf.get('rootport')
        self.master_remote = RemoteObject(servername)
        addr = ('localhost', masterport)
        self.master_remote.connect(addr)
        GlobalObject().masterremote = self.master_remote
    if netport:
        # client-facing protocol listener
        self.netfactory = LiberateFactory()
        netservice = services.CommandService("netservice")
        self.netfactory.addServiceChannel(netservice)
        reactor.listenTCP(netport, self.netfactory)
    if webport:
        # HTTP virtual host
        self.webroot = vhost.NameVirtualHost()
        GlobalObject().webroot = self.webroot
        reactor.listenTCP(webport, DelaySite(self.webroot))
    if rootport:
        # listener for child nodes
        self.root = PBRoot()
        rootservice = services.Service("rootservice")
        self.root.addServiceChannel(rootservice)
        reactor.listenTCP(rootport, BilateralFactory(self.root))
    for cnf in remoteportlist:
        # connect to each configured remote root node
        rname = cnf.get('rootname')
        rport = cnf.get('rootport')
        self.remote[rname] = RemoteObject(servername)
        addr = ('localhost', rport)
        self.remote[rname].connect(addr)
    if hasdb and dbconfig:
        log.msg(str(dbconfig))
        dbpool.initPool(**dbconfig)
    if hasmem and memconfig:
        urls = memconfig.get('urls')
        hostname = str(memconfig.get('hostname'))
        mclient.connect(urls, hostname)
    if logpath:
        log.addObserver(loogoo(logpath))  # file log handling
    log.startLogging(sys.stdout)
    if cpuid:
        affinity.set_process_affinity_mask(os.getpid(), cpuid)
    GlobalObject().config(netfactory=self.netfactory, root=self.root,
                          remote=self.remote)
    if app:
        # defer importing the application entry module until the
        # reactor is running
        reactor.callLater(0.1, __import__, app)
    if mreload:
        GlobalObject().reloadmodule = __import__(mreload)
    import admin
def serverStop():
    """Run any registered stop handler and schedule the reactor to stop."""
    log.msg('stop')
    handler = GlobalObject().stophandler
    if handler:
        handler()
    reactor.callLater(0.5, reactor.stop)
    return True
def start(self):
    '''Start the server (blocks in the reactor loop).'''
    log.msg('%s start...' % self.servername)
    log.msg('%s pid: %s' % (self.servername, os.getpid()))
    reactor.run()
def handlePagedPush(self, serial, key):
    """Push a fully reassembled serialized object to the service and
    retire the collector that assembled it."""
    log.msg("I got a serial! " + repr(serial) + key)
    deferred = self.service.pushSerialized(**{key: serial})
    deferred.addErrback(packageFailure)
    self.removeCollector(key)
    return deferred
def _referenceSent(self, registrationDict):
    """Record the id assigned in the registration reply and return it."""
    assigned_id = registrationDict['id']
    self.service.id = assigned_id
    log.msg("got ID: %r" % assigned_id)
    return assigned_id
def writeError(msg, errcode=404, jsonrpccode=None):
    """Emit *msg* as a JSON error body with the given HTTP status code."""
    if self.debug:
        log.msg("REST error: %s" % (msg,))
    request.setResponseCode(errcode)
    request.setHeader('content-type', 'text/plain; charset=utf-8')
    body = json.dumps(dict(error=msg))
    request.write(body)
def check(matched):
    """Resolve to the username on a password match; otherwise fail with
    UnauthorizedLogin."""
    if matched:
        return creds.username
    log.msg("invalid login from user '%s'" % creds.username)
    return failure.Failure(error.UnauthorizedLogin())
def newConnection(self, conn, workerName):
    # Register a new worker connection, arbitrating against any existing
    # connection for the same worker name: ping the old connection and
    # accept the new one only when the old one is dead.
    if workerName in self.connections:
        log.msg("Got duplication connection from '%s'"
                " starting arbitration procedure" % workerName)
        old_conn = self.connections[workerName]
        try:
            yield misc.cancelAfter(
                self.PING_TIMEOUT,
                old_conn.remotePrint("master got a duplicate connection"))
            # if we get here then old connection is still alive, and new
            # should be rejected
            raise RuntimeError("rejecting duplicate worker")
        except defer.CancelledError:
            # the ping timed out: the old connection is dead, drop it
            old_conn.loseConnection()
            log.msg(
                "Connected worker '%s' ping timed out after %d seconds"
                % (workerName, self.PING_TIMEOUT))
        except RuntimeError:
            # re-raise our own rejection from above
            raise
        except Exception as e:
            # the ping failed outright; assume the old connection is gone
            old_conn.loseConnection()
            log.msg("Got error while trying to ping connected worker %s:"
                    "%s" % (workerName, e))
        log.msg("Old connection for '%s' was lost, accepting new"
                % workerName)
    try:
        yield conn.remotePrint(message="attached")
        info = yield conn.remoteGetWorkerInfo()
        log.msg("Got workerinfo from '%s'" % workerName)
    except Exception as e:
        log.msg("Failed to communicate with worker '%s'\n"
                "%s" % (workerName, e))
        raise
    conn.info = info
    self.connections[workerName] = conn
    def remove():
        del self.connections[workerName]
    conn.notifyOnDisconnect(remove)
    # accept the connection
    defer.returnValue(True)
WebDAV-aware static resources. """ __all__ = ["DAVFile"] from twisted.python import log from ipython1.external.twisted.web2.static import File from ipython1.external.twisted.web2.dav import davxml from ipython1.external.twisted.web2.dav.idav import IDAVResource from ipython1.external.twisted.web2.dav.resource import DAVResource from ipython1.external.twisted.web2.dav.util import bindMethods try: from ipython1.external.twisted.web2.dav.xattrprops import xattrPropertyStore as DeadPropertyStore except ImportError: log.msg("No dead property store available; using nonePropertyStore.") log.msg("Setting of dead properties will not be allowed.") from ipython1.external.twisted.web2.dav.noneprops import NonePropertyStore as DeadPropertyStore class DAVFile (DAVResource, File): """ WebDAV-accessible File resource. Extends ipython1.external.twisted.web2.static.File to handle WebDAV methods. """ def __init__(self, path, defaultType="text/plain", indexNames=None): """ @param path: the path of the file backing this resource. @param defaultType: the default mime type (as a string) for this
def process(self, request, **kwargs):
    """Log the processed keyword results and request a re-render."""
    log.msg("Processing results: ", kwargs)
    return RESTART_RENDERING
def unregister(self, username):
    """Forget a previously registered username."""
    if debug:
        log.msg("unregistering username '%s' on pb port %s"
                % (username, self.portstr))
    del self.users[username]
def startedConnecting(self, connector):
    # Log each connection attempt, let the reconnecting base class do
    # its bookkeeping, and remember the connector for later shutdown.
    log.msg("Connecting to %s:%s" % (self.buildmaster_host, self.port))
    ReconnectingPBClientFactory.startedConnecting(self, connector)
    self.connector = connector
def _errback(why):
    """Connection loss during shutdown is expected; log anything else
    as a real error."""
    if why.check(pb.PBConnectionLost):
        log.msg("Lost connection to %s" % name)
    else:
        log.err("Unexpected error when trying to shutdown %s" % name)
def processFailed(err):
    """Log any unexpected failure from command processing."""
    #if err.check(FTPCmdError):
    #    self.sendLine(err.value.response())
    #else:
    log.msg("Unexpected FTP error")
    log.err(err)
def __init__(self, *args, **kwargs):
    """Deprecated constructor kept for compatibility; use
    twisted.web.woven.controller.Controller instead."""
    log.msg(
        "DeprecationWarning: DOMController is deprecated; it has been renamed twisted.web.woven.controller.Controller.\n"
    )
    controller.Controller.__init__(self, *args, **kwargs)
    Resource.__init__(self)
def __init__(self, data=None, with_acls=None):
    # Here comes all of the bare minimum set of attributes a NetDevice
    # object needs for basic functionality within the existing suite.

    # Hostname
    self.nodeName = None
    self.ipv4 = None
    self.nodePort = None
    self.nodeProtocol = None

    # Hardware Info
    self.deviceType = None
    self.operatingSystem = None
    self.make = None
    self.manufacturer = settings.FALLBACK_MANUFACTURER
    self.vendor = None
    self.model = None
    self.serialNumber = None

    # Administrivia
    self.adminStatus = settings.DEFAULT_ADMIN_STATUS
    self.assetID = None
    self.budgetCode = None
    self.budgetName = None
    self.enablePW = None
    self.owningTeam = None
    self.owner = None
    self.onCallName = None
    self.operationStatus = None
    self.lastUpdate = None
    self.lifecycleStatus = None
    self.projectName = None
    self.deviceTags = None

    # Location
    self.site = None
    self.location = None
    self.room = None
    self.coordinate = None

    # If `data` has been passed, use it to update our attributes
    if data is not None:
        self._populate_data(data)

    # Set node remote port based on "hostname:port" as nodeName
    self._set_node_port()

    # Cleanup the attributes (strip whitespace, lowercase values, etc.)
    self._cleanup_attributes()

    # Map the manufacturer name to a Vendor object that has extra sauce
    if self.manufacturer is not None:
        self.vendor = vendor_factory(self.manufacturer)

    # Use the vendor to populate the deviceType if it's not set already
    if self.deviceType is None:
        self._populate_deviceType()

    # ACLs (defaults to empty sets)
    self.explicit_acls = self.implicit_acls = self.acls = self.bulk_acls = set(
    )
    if with_acls:
        log.msg('[%s] Populating ACLs' % self.nodeName)
        self._populate_acls(aclsdb=with_acls)

    # Bind the correct execute/connect methods based on deviceType
    self._bind_dynamic_methods()

    # Set the correct command(s) to run on startup based on deviceType
    self.startup_commands = self._set_startup_commands()

    # Assign the configuration commit commands (e.g. 'write memory')
    self.commit_commands = self._set_commit_commands()

    # Determine whether we require an async pty SSH channel
    self.requires_async_pty = self._set_requires_async_pty()

    # Set the correct line-ending per vendor
    self.delimiter = self._set_delimiter()

    # Set initial endpoint state
    self.factories = {}
    self._connected = False
    self._endpoint = None
def _handleSIGHUP(self, *args):
    """SIGHUP handler: begin an orderly shutdown."""
    log.msg("Initiating shutdown because we got SIGHUP")
    return self.gracefulShutdown()
from twisted.internet import reactor from twisted.internet import defer from trigger.conf import settings from trigger.utils import network, parse_node_port from trigger.utils.url import parse_url from trigger import changemgmt, exceptions, rancid from crochet import setup, run_in_reactor, wait_for from . import loader try: from trigger.acl.db import AclsDB except ImportError: log.msg("ACLs database could not be loaded; Loading without ACL support") settings.WITH_ACLS = False # Constants JUNIPER_COMMIT = ET.Element('commit-configuration') JUNIPER_COMMIT_FULL = copy.copy(JUNIPER_COMMIT) ET.SubElement(JUNIPER_COMMIT_FULL, 'full') # Exports __all__ = ['device_match', 'NetDevice', 'NetDevices', 'Vendor'] # Functions def _munge_source_data(data_source=settings.NETDEVICES_SOURCE): """ Read the source data in the specified format, parse it, and return a
def evaluateCommand(_):
    """Turn a non-zero exit code into a step failure, unless the caller
    opted out via abandonOnFailure; always pass the rc through."""
    if abandonOnFailure and cmd.rc != 0:
        log.msg("Source step failed while running command %s" % cmd)
        raise buildstep.BuildStepFailed()
    return cmd.rc
def buildMessage(self, buf, mask=True):
    # Build a WebSocket (RFC 6455) binary data frame around *buf*,
    # optionally client-masked with a random 4-byte key.
    import sys
    log.msg("protoc buildMessage ", mask, sys.getdefaultencoding())
    c_buf = buf
    msg = ""
    if mask:
        # random 4-byte masking key
        key = "".join(
            [str(chr(random.randrange(1, 255))) for i in xrange(4)])
    # first byte
    o = (1 << 7) + 2  # FIN=1, opcode=0x2 (binary frame)
    log.msg('first byte ', o)
    msg += str(chr(o))
    # second byte
    buf_len = len(buf)
    if buf_len < 126:
        # payload fits in the 7-bit length field
        log.msg("protoc bm 1 ", mask)
        o = buf_len
        if mask:
            msg += str(chr(o + (1 << 7)))  # set the MASK bit
        else:
            msg += str(chr(o))
        log.msg("protoc bm 1 add buff ", mask, msg, buf)
        if mask:
            msg += key
            msg += self.encodeMessage(buf, key)
        else:
            msg += buf
        return msg
    elif buf_len <= ((1 << 16) - 1):
        # 16-bit extended payload length (marker byte 126)
        log.msg("protoc bm 2 ", mask)
        if mask:
            msg += str(chr(126 + (1 << 7)))
        else:
            msg += str(chr(126))
        for i in range(1, 3):
            # big-endian length bytes
            o = (buf_len >> (16 - (8 * i))) & (2**8 - 1)
            msg += str(chr(o))
        log.msg("protoc bm 2 add buff", mask)
        if mask:
            msg += key
            msg += self.encodeMessage(buf, key)
        else:
            msg += buf
        return msg
    elif buf_len <= ((1 << 64) - 1):
        # 64-bit extended payload length (marker byte 127)
        log.msg("protoc bm 3 ", mask)
        if mask:
            msg += str(chr(127 + (1 << 7)))
        else:
            msg += str(chr(127))
        for i in range(1, 9):
            # big-endian length bytes
            o = (buf_len >> (64 - (8 * i))) & (2**8 - 1)
            msg += str(chr(o))
        log.msg("protoc bm 3 add buff", mask)
        if mask:
            msg += key
            msg += self.encodeMessage(buf, key)
        else:
            msg += buf
        return msg
    log.msg("protoc BuildMessage end ", len(msg))
    return msg
def reload(self, **kwargs):
    """Reload NetDevices metadata by rebuilding the singleton."""
    log.msg('Reloading NetDevices.')
    cls = self.__class__
    cls._Singleton = cls._actual(**kwargs)
def log(self, msg):
    """Send *msg* to the twisted log, prefixed with this object."""
    log.msg("%s: %s" % (self, msg))
def connectionLost(self, reason):
    '''Handle teardown of a dropped client connection.'''
    log.msg('Client %d login out.' % (self.transport.sessionno))
    self.factory.doConnectionLost(self)
    # drop the connection-manager entry keyed by session id
    self.factory.connmanager.dropConnectionByID(self.transport.sessionno)
def builderRemoved(self, builderName):
    """Record the removal of a builder in the log."""
    log.msg('[Contact] Builder %s removed' % (builderName))
def dataHandleCoroutine(self):
    """Coroutine fed raw socket data: performs the WebSocket handshake,
    then deframes messages and dispatches complete protocol packets."""
    length = self.factory.dataprotocl.getHeadlength()  # protocol header length
    while True:
        data = yield
        self.buff += data
        log.msg('protoc dataHandleCoroutine get data ', data.__len__(),
                self.buff.__len__(), self._b_ready)
        if not self._b_ready:
            # still in the handshake phase: wait for the full handshake
            # request, reply, and mark the channel ready
            if not self.check_handshake_key(self.buff):
                continue
            hc, use_len = self.get_handshake_key(self.buff)
            self.buff = self.buff[use_len:]
            self.transport.write(hc)
            self._b_ready = True
        # frame phase: need the full frame length before parsing
        buff_len = self.get_buff_len(self.buff)
        if buff_len == -1:
            continue
        opcode = self.get_opcode(self.buff)
        use_len, c_buff = self.parse_buff(self.buff, buff_len)
        log.msg('protoc dataHandleCoroutine ', buff_len, use_len,
                self.buff.__len__(), c_buff.__len__(), opcode)
        self.buff = self.buff[use_len:]
        if opcode == 0x8:
            # close frame from the peer
            log.msg('protoc quit ', c_buff)
            continue
        # a frame may carry several protocol packets; peel them off
        while c_buff.__len__() >= length:
            unpackdata = self.factory.dataprotocl.unpack(c_buff[:length])
            if not unpackdata.get('result'):
                log.msg('illegal data package --1')
                self.transport.loseConnection()
                break
            command = unpackdata.get('command')
            rlength = unpackdata.get('length')
            request = c_buff[length:length + rlength]
            if request.__len__() < rlength:
                # body not fully received yet
                log.msg('some data lose %d %d %s',
                        request.__len__(), rlength, command)
                break
            c_buff = c_buff[length + rlength:]
            d = self.factory.doDataReceived(self, command, request)
            log.msg('protoc doDataReceived ', command,
                    c_buff.__len__(), rlength, d)
            if not d:
                continue
            d.addCallback(self.safeToWriteData, command)
            d.addErrback(DefferedErrorHandle)
def closed(self):
    """
    Called when the channel is closed: both our side and the remote
    side have closed it.
    """
    log.msg('closed')
def command_FORCE(self, args, who):
    # Handle the IRC "force build" command: parse and validate the
    # options, then submit a BuildRequest on the named builder.
    errReply = "try 'force build [--branch=BRANCH] [--revision=REVISION] [--props=PROP1=VAL1,PROP2=VAL2...] <WHICH> <REASON>'"
    args = self.splitArgs(args)
    if not args:
        raise UsageError(errReply)
    what = args.pop(0)
    if what != "build":
        raise UsageError(errReply)
    opts = ForceOptions()
    opts.parseOptions(args)
    which = opts['builder']
    branch = opts['branch']
    revision = opts['revision']
    reason = opts['reason']
    props = opts['props']
    if which is None:
        raise UsageError("you must provide a Builder, " + errReply)
    # keep weird stuff out of the branch, revision, and properties args.
    branch_validate = self.master.config.validation['branch']
    revision_validate = self.master.config.validation['revision']
    pname_validate = self.master.config.validation['property_name']
    pval_validate = self.master.config.validation['property_value']
    if branch and not branch_validate.match(branch):
        log.msg("bad branch '%s'" % branch)
        self.send("sorry, bad branch '%s'" % branch)
        return
    if revision and not revision_validate.match(revision):
        log.msg("bad revision '%s'" % revision)
        self.send("sorry, bad revision '%s'" % revision)
        return
    properties = Properties()
    if props:
        # split props into name:value dict
        pdict = {}
        propertylist = props.split(",")
        for i in range(0, len(propertylist)):
            splitproperty = propertylist[i].split("=", 1)
            pdict[splitproperty[0]] = splitproperty[1]
        # set properties
        for prop in pdict:
            pname = prop
            pvalue = pdict[prop]
            if not pname_validate.match(pname) \
                    or not pval_validate.match(pvalue):
                log.msg("bad property name='%s', value='%s'" %
                        (pname, pvalue))
                self.send("sorry, bad property name='%s', value='%s'" %
                          (pname, pvalue))
                return
            properties.setProperty(pname, pvalue, "Force Build IRC")
    bc = self.getControl(which)
    reason = "forced: by %s: %s" % (self.describeUser(who), reason)
    ss = SourceStamp(branch=branch, revision=revision)
    d = bc.submitBuildRequest(ss, reason, props=properties.asDict())
    def subscribe(buildreq):
        # report build progress back to the IRC channel
        ireq = IrcBuildRequest(self, self.useRevisions)
        buildreq.subscribe(ireq.started)
    d.addCallback(subscribe)
    d.addErrback(log.err, "while forcing a build")
def closeReceived(self):
    """
    Called when the other side has closed the channel; close our side
    in response.
    """
    log.msg('remote close')
    self.loseConnection()
def clientConnectionFailed(self, connector, reason):
    """Suppress reconnect attempts while shutting down; otherwise defer
    to the throttled base-class behavior."""
    if self.shuttingDown:
        log.msg("not scheduling reconnection attempt")
        return
    ThrottledClientFactory.clientConnectionFailed(self, connector, reason)