Example #1
class Processor():
    """
    Whenever mixtapeReceived is called, deferToThread is scheduled to be run as
    soon as a "slot" for being run is available. There is currently 1 slot
    deferToThread runs process_mixtape in another thread, and releases the
    slot when its that process is done
    """
    def __init__(self):
        self.sem = DeferredSemaphore(1) #do one thing at a time

    def mixtapeReceived(self, mixtape):
        debug("Adding %s to be processed" % mixtape)
        self.sem.run(deferToThread, process.process_mixtape, *mixtape)
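A minimal, runnable sketch of the pattern this docstring describes (the blocking process_mixtape below is a stand-in, since the real one is not shown; it works on current Twisted, Python 2 or 3):

import time

from twisted.internet import reactor
from twisted.internet.defer import DeferredSemaphore, gatherResults
from twisted.internet.threads import deferToThread


def process_mixtape(name):
    # stand-in for the real blocking work
    time.sleep(0.1)
    return "processed %s" % name

sem = DeferredSemaphore(1)  # one slot: mixtapes are handled one at a time

def mixtape_received(name):
    # run() queues the call; deferToThread only starts once the slot is free,
    # and the slot is released when the threaded call finishes
    return sem.run(deferToThread, process_mixtape, name)

def done(results):
    print(results)
    reactor.stop()

if __name__ == '__main__':
    gatherResults([mixtape_received(n) for n in ("a", "b", "c")]).addCallback(done)
    reactor.run()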
Example #2
    def collect(self, config):
        log.debug('Starting Delivery orders collect')
        # TODO : cleanup job collect

        ip_address = config.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", config.id)
            returnValue(None)

        applicationList = []
        deferreds = []
        sem = DeferredSemaphore(1)
        for datasource in config.datasources:
            applicationNameID = datasource.params['applicationNameID']
            if applicationNameID in applicationList:
                continue
            applicationList.append(applicationNameID)
            serviceURL = datasource.params['serviceURL']
            url = self.urls[datasource.datasource].format(serviceURL)
            # TODO : move headers to Config properties
            d = sem.run(getPage, url,
                        headers={
                            "Accept": "application/json",
                            "User-Agent": "Mozilla/3.0Gold",
                            "iv-groups": datasource.zIVGroups,
                            "iv-user": datasource.zIVUser,
                        },
                        )
            tag = '{}_{}'.format(datasource.datasource, applicationNameID)      # order_app_delivery_service_3db30547
            d.addCallback(self.add_tag, tag)
            deferreds.append(d)
        return DeferredList(deferreds)
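add_tag is not shown in this snippet; a plausible implementation (an assumption, not part of the original code) simply pairs each page body with its tag so the DeferredList results stay attributable to the datasource that produced them:

    def add_tag(self, result, tag):
        # assumed helper: addCallback passes the getPage body first, then the
        # extra 'tag' argument given to addCallback above
        return tag, result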
Example #3
	def recordVideoProcess(self, resW, resH, totalTimeSec, framerate, serverIP, piName, recordTimesList, file):
		semi = DeferredSemaphore(1)

		jobs = []
		for runs in range(len(recordTimesList)/2):
			print  "recordTimes recordVideoProcess:", recordTimesList
			self.writeFile("recordTimes recordVideoProcess:")
			try:
				startAtTime = self.calculateTimeDifference(recordTimesList.pop(0), recordTimesList.pop(0))
				jobs.append(semi.run(tv.takeVideo, int(resW), int(resH), int(totalTimeSec),\
						int(framerate), startAtTime, serverIP, piName, file))
			except:
				self.writeFile("That time was not valid. Calling next time.")
				self.writeFile("len recordTimesList: " + str(len(recordTimesList)))
				if len(recordTimesList)%2>0:
					self.writeFile("odd number")
					recordTimesList.pop(0)
					self.writeFile("new len: " + str(len(recordTimesList)))
					reactor.callLater(0.5, self.transport.write, "TIMEINPUTERROR {0}\n".format(piName))
				continue
			
		jobs = DeferredList(jobs)

		print  "Results: ", jobs.addCallback(self.getResults, piName)
		# self.writeFile("Results: ", jobs.addCallback(self.getResults, piName))
		jobs.addCallback(lambda _: reactor.callLater(5, reactor.stop))
Example #4
    def collect(self, config):
        log.debug('Starting Delivery health collect')
        # Runs once at Application level and once more at components level

        ip_address = config.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", config.id)
            returnValue(None)

        # Gather the info about applications
        applicationList = []
        deferreds = []
        sem = DeferredSemaphore(1)
        for datasource in config.datasources:
            applicationComponentID = datasource.params['applicationComponentID']
            if applicationComponentID in applicationList:
                continue
            applicationList.append(applicationComponentID)
            applicationNameID = datasource.params['applicationNameID']
            serviceURL = datasource.params['serviceURL']
            url = self.urls[datasource.datasource].format(serviceURL)
            d = sem.run(getPage, url,
                        headers={
                            "Accept": "application/json",
                            "User-Agent": "Mozilla/3.0Gold",
                            "iv-groups": datasource.zIVGroups,
                            "iv-user": datasource.zIVUser,
                        },
                        )
            tag = '{}_{}'.format(datasource.datasource, applicationNameID)
            d.addCallback(self.add_tag, tag)
            deferreds.append(d)
        return DeferredList(deferreds)
Example #5
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    d = DeferredList(
        [sem.run(trigger_convergence, authenticator, region, group,
                 no_error_group)
         for group in groups],
        fireOnOneCallback=False,
        fireOnOneErrback=False,
        consumeErrors=True)
    d.addCallback(
        lambda results: [(g["tenantId"], g["groupId"], f.value)
                         for g, (s, f) in zip(groups, results) if not s])
    return d
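A standalone toy (not Otter code) that makes the effect of concurrency_limit visible: no more than three of the queued calls are ever in flight at once, yet gatherResults still collects every result.

from twisted.internet import reactor, task
from twisted.internet.defer import DeferredSemaphore, gatherResults

in_flight = [0]

def toy_task(n):
    # pretend network call: finishes 0.2 seconds after it is allowed to start
    in_flight[0] += 1
    print("start %d, in flight: %d" % (n, in_flight[0]))
    d = task.deferLater(reactor, 0.2, lambda: n * n)

    def finished(result):
        in_flight[0] -= 1
        return result

    return d.addCallback(finished)

def report(results):
    print("results: %r" % (results,))

sem = DeferredSemaphore(3)   # at most 3 toy tasks run concurrently
d = gatherResults([sem.run(toy_task, n) for n in range(10)],
                  consumeErrors=True)
d.addCallback(report)
d.addBoth(lambda _: reactor.stop())
reactor.run()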
Example #6
@inlineCallbacks   # required: the body uses yield/returnValue
def parallel_map(iterable, fn, *args, **kwargs):
    deferreds = []
    parallelism_limiter = DeferredSemaphore(MAX_PARALLELISM)
    for item in iterable:
        d = parallelism_limiter.run(fn, item, *args, **kwargs)
        deferreds.append(d)
    results = yield gatherResults(deferreds)
    returnValue(results)
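Usage then looks like this (a sketch; fetch_url is a hypothetical Deferred-returning function standing in for real I/O):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def fetch_all(urls):
    # at most MAX_PARALLELISM fetches are in flight at any time
    pages = yield parallel_map(urls, fetch_url)
    returnValue(dict(zip(urls, pages)))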
Example #8
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit):
    """
    Trigger convergence on given groups
    """
    sem = DeferredSemaphore(concurrency_limit)
    return gatherResults(
        [sem.run(trigger_convergence, authenticator, region, group)
         for group in groups],
        consumeErrors=True)
Example #9
	def runFiles():
	    semi = DeferredSemaphore(1)

	    jobs = []
	    for runs in range(5):
	        jobs.append(semi.run(collectFiles))

	    jobs = DeferredList(jobs)
	    def cbFinished(ignored):
	        print 'Finishing job'
	    jobs.addCallback(cbFinished)
	    return jobs
Example #10
def query_all_nodes(nodes, max_concurrency=5, clock=reactor):
    """Queries the given nodes for their power state.

    Nodes' states are reported back to the region.

    :return: A deferred, which fires once all nodes have been queried,
        successfully or not.
    """
    semaphore = DeferredSemaphore(tokens=max_concurrency)
    queries = (semaphore.run(query_node, node, clock) for node in nodes
               if node["power_type"] in PowerDriverRegistry)
    return DeferredList(queries, consumeErrors=True)
Example #11
    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Issue a new request.
        @param method: The request method to send.
        @type method: C{str}
        @param uri: The request URI to send.
        @type uri: C{str}
        @param scheme: A string like C{'http'} or C{'https'} (the only two
            supported values) to use to determine how to establish the
            connection.
 
        @param host: A C{str} giving the hostname which will be connected to in
            order to issue a request.

        @param port: An C{int} giving the port number the connection will be on.

        @param path: A C{str} giving the path portion of the request URL.
        @param headers: The request headers to send.  If no I{Host} header is
            included, one will be added based on the request URI.
        @type headers: L{Headers}
        @param bodyProducer: An object which will produce the request body or,
            if the request body is to be empty, L{None}.
        @type bodyProducer: L{IBodyProducer} provider
        @return: A L{Deferred} which fires with the result of the request (a
            L{Response} instance), or fails if there is a problem setting up a
            connection over which to issue the request.  It may also fail with
            L{SchemeNotSupported} if the scheme of the given URI is not
            supported.
        @rtype: L{Deferred}
        """
        scheme, host, port, path = _parse(uri)
        if headers is None:
            headers = Headers()
        if not headers.hasHeader('host'):
            # This is a lot of copying.  It might be nice if there were a bit
            # less.
            headers = Headers(dict(headers.getAllRawHeaders()))
            headers.addRawHeader(
                'host', self._computeHostValue(scheme, host, port))
        if self.persistent:
            sem = self._semaphores.get((scheme, host, port))
            if sem is None:
                sem = DeferredSemaphore(self.maxConnectionsPerHostName)
                self._semaphores[scheme, host, port] = sem
            return sem.run(self._request, method, scheme, host, port, path,
                           headers, bodyProducer)
        else:
            return self._request(
                method, scheme, host, port, path, headers, bodyProducer)
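The notable detail above is the per-(scheme, host, port) semaphore dictionary: each host gets its own connection cap while different hosts stay independent. Stripped to its core (a sketch, not the agent code):

from twisted.internet.defer import DeferredSemaphore

class PerKeyLimiter(object):
    """Hand out one DeferredSemaphore per key, creating them lazily."""

    def __init__(self, limit):
        self._limit = limit
        self._semaphores = {}

    def run(self, key, f, *args, **kwargs):
        # the first caller for a key creates its semaphore; later callers share it
        sem = self._semaphores.get(key)
        if sem is None:
            sem = DeferredSemaphore(self._limit)
            self._semaphores[key] = sem
        return sem.run(f, *args, **kwargs)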
Example #12
	def collectFiles():
	    semaphore = DeferredSemaphore(1)
	    files = glob.glob('*.py')
	    dl = list()

	    for item in range(len(files)):
	        #Queues list of things to be sent and runs it
	        dl.append(semaphore.run(sendFiles, files[item]))

	    # convert to a DeferredList, which allows adding a completion callback
	    dl = DeferredList(dl)
	    def cbFinished(ignored):
	        print 'Finishing job'
	    dl.addCallback(cbFinished)
	    return dl
Example #13
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(5)
    print "Loading IDs"
    ids = getBeermeIds()
    ids = ids[:100]
    print "Done Loading %s IDs" % str(len(ids))
    jobs = []
    for id in ids:
        jobs.append(sem.run(beerme_request, id, agent))
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)

    print "Starting reactor..."
    reactor.run()
Example #14
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(5)
    print "Loading IDs"
    ids = getBeermeIds()
    ids = ids[:100]
    print "Done Loading %s IDs" % str(len(ids))
    jobs = []
    for id in ids:
        jobs.append(sem.run(beerme_request,id,agent))
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)

    print "Starting reactor..."
    reactor.run()
Example #15
    def start(self):
        """ get each page """
        deferreds = []
        sem = DeferredSemaphore(self.MAX_RUN)
        
        for key in self.book.keys():

            sleep(self.RATE_LIMIT)
            d = sem.run(getPage, self.book[key])
            d.addCallback(self.pageCallback, key)
            d.addErrback(self.errorHandler, key)
            deferreds.append(d)

        dl = DeferredList(deferreds)
        dl.addCallback(self.listCallback)
        dl.addCallback(self.finish)
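One caveat in this example: sleep() here is the blocking time.sleep, so the whole reactor stalls between page requests. A non-blocking sketch of the same idea, staggering the requests with twisted.internet.task.deferLater instead (same names as above; needs from twisted.internet import reactor, task):

    def start(self):
        """ get each page, spacing requests RATE_LIMIT seconds apart """
        deferreds = []
        sem = DeferredSemaphore(self.MAX_RUN)

        for i, key in enumerate(self.book.keys()):
            # deferLater schedules sem.run(...) without blocking the reactor
            d = task.deferLater(reactor, i * self.RATE_LIMIT,
                                sem.run, getPage, self.book[key])
            d.addCallback(self.pageCallback, key)
            d.addErrback(self.errorHandler, key)
            deferreds.append(d)

        dl = DeferredList(deferreds)
        dl.addCallback(self.listCallback)
        dl.addCallback(self.finish)
        return dl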
Example #16
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(10)
    print "Loading breweries..."
    mongo = MongoClient().entities.breweries
    breweries = loadBreweries(mongo)
    print "Done loading breweries."
    jobs = []
    for brewery in breweries:
        jobs.append(sem.run(socialRequest, brewery, agent, mongo))
    #    if len(jobs) % 50 == 0:
    #        print "Brewery Jobs started: %d" % len(jobs)
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)
    print "Let the Reactor BEGIN!"
    reactor.run()
Example #17
def main():
    agent = Agent(reactor)
    sem = DeferredSemaphore(10)
    print "Loading breweries..."
    mongo = MongoClient().entities.breweries
    breweries = loadBreweries(mongo)
    print "Done loading breweries."
    jobs = []
    for brewery in breweries:
        jobs.append(sem.run(socialRequest,brewery,agent,mongo))
    #    if len(jobs) % 50 == 0:
    #        print "Brewery Jobs started: %d" % len(jobs) 
    d = gatherResults(jobs)
    d.addBoth(cbShutdown)
    print "Let the Reactor BEGIN!"
    reactor.run()
Example #18
def trigger_convergence_groups(authenticator, region, groups, concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    return gatherResults(
        [sem.run(trigger_convergence, authenticator, region, group, no_error_group) for group in groups],
        consumeErrors=True,
    ).addCallback(lambda _: None)
Example #19
    def _cbReqPhotoPage(self, photo_list):
        def photoConnectLost(message, url):
            log.err(message)
            raise Exception("can't access {0!s}".format(url))

        dl = []
        sem = DeferredSemaphore(20)
        for i in photo_list:
            d = sem.run(self._agent.request, b'GET', bytes(i.get('url'), 'ascii'))
            d.addCallback(self._cbGetPhotoDlLink, *[i.get('url')])
            d.addErrback(photoConnectLost, *[i.get('url')])
            d.addCallback(self._cbDownloadPhoto)
            d.addErrback(log.err)
            dl.append(d)
        deferreds = DeferredList(dl, consumeErrors=True)

        return deferreds
Example #20
def trigger_convergence_groups(authenticator, region, groups,
                               concurrency_limit, no_error_group):
    """
    Trigger convergence on given groups

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param list groups: List of group dicts
    :param int concurrency_limit: Concurrency limit
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Deferred fired with None
    """
    sem = DeferredSemaphore(concurrency_limit)
    return gatherResults([
        sem.run(trigger_convergence, authenticator, region, group,
                no_error_group) for group in groups
    ],
                         consumeErrors=True).addCallback(lambda _: None)
Example #21
@inlineCallbacks   # required: the body uses yield/returnValue
def run(packages, modules, other_args, reactor, limit, excludes):
    sem = DeferredSemaphore(limit)
    proc_argss = get_cafe_args(packages, modules, excludes)
    deferreds = [
        sem.run(getProcessOutputAndValue, 'cafe-runner',
                other_args + proc_args, env=os.environ, reactor=reactor)
        for proc_args in proc_argss]
    results = yield gatherResults(deferreds, consumeErrors=True)

    failed = False
    for proc_args, (stdout, stderr, code) in zip(proc_argss, results):
        if code == 0:
            continue
        failed = True
        print('Error when running ', ' '.join(proc_args))
        print('Stdout\n', stdout, 'Stderr\n', stderr)

    if failed:
        raise SystemExit('Some tests failed')
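A sketch of how such a function is usually driven (the argument values below are placeholders, since the snippet does not show its command-line wiring): task.react passes in the reactor and stops it once the returned Deferred fires.

from twisted.internet import task

def main(reactor, *argv):
    # placeholder arguments for illustration only
    return run(packages=['mypackage'], modules=[], other_args=[],
               reactor=reactor, limit=4, excludes=[])

if __name__ == '__main__':
    task.react(main, [])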
Example #22
    def runDeferredCommand(self):
        self.startTime = time.time()
        self.log.info('%s using BBCApplicationSNMP' % self.device.id)
        self.stats['totalRequests'] = len(self.device.modelledOids)
        semaf = DeferredSemaphore(self.workers)
        jobs = []

        for oids in self.device.modelledOids:
            self.stats['collectedAppOidCounter'] += oids.count('.75025.')
            df = semaf.run(utils.getProcessOutputAndValue, '/usr/bin/snmpget',
                           self.prepareSnmpCmdArgList() + oids.split())
            df.addErrback(self.handleError)
            jobs.append(df)

        df = gatherResults(jobs)
        df.addErrback(self.handleError)
        df.addCallback(self.parseOutput)

        return df
Example #23
    def moveFiles(self, virtSrcBase, virtDestBase, relFiles):
        self.debug("MOVING: %r, %r, %r", virtSrcBase, virtDestBase, relFiles)
        if not self._local:
            raise TranscoderError("Component not properly setup yet")

        def move_failed(failure, src, dest):
            msg = ("Fail to move file '%s' to '%s': %s"
                   % (src, dest, log.getFailureMessage(failure)))
            self.warning("%s", msg)
            raise TranscoderError(msg, cause=failure)

        def move_file(src, dest, attr=None):
            self.debug("Moving file '%s' to '%s'", src, dest)
            dest_dir = os.path.dirname(dest)
            safe_mkdirs(dest_dir, "input file destination", attr)
            d = deferToThread(shutil.move, src, dest)
            d.addErrback(move_failed, src, dest)
            return d

        def move_files_failed(results):
            first_failure = None
            for ok, result in results:
                if not ok:
                    if not first_failure:
                        first_failure = result
            return first_failure

        sem = DeferredSemaphore(1)
        move_tasks = []

        for file in relFiles:
            source_path = virtSrcBase.append(file).localize(self._local)
            dest_path = virtDestBase.append(file).localize(self._local)
            source_path = os.path.realpath(source_path)
            dest_path = os.path.realpath(dest_path)
            d = sem.run(move_file, source_path, dest_path, self._pathAttr)
            move_tasks.append(d)

        dl = DeferredList(move_tasks, consumeErrors=True)
        dl.addCallback(move_files_failed)
        return dl
Example #24
    @inlineCallbacks
    def collect(self, device, log):
        log.debug('{}: Modeling collect'.format(device.id))

        port = getattr(device, 'zSpringBootPort', None)
        uri = getattr(device, 'zSpringBootURI', None)
        ivGroups = getattr(device, 'zIVGroups', None)
        ivUser = getattr(device, 'zIVUser', None)

        ip_address = device.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", device.id)
            returnValue(None)

        deferreds = []
        sem = DeferredSemaphore(1)

        # TODO: remove loop
        for query in self.queries:
            url = query[1].format(ip_address, port, uri)
            log.debug('SBA collect url: {}'.format(url))
            d = sem.run(
                getPage,
                url,
                headers={
                    "Accept": "application/json",
                    "User-Agent": "Mozilla/3.0Gold",
                    "iv-groups": ivGroups,
                    "iv-user": ivUser,
                },
            )
            d.addCallback(self.add_tag, '{}'.format(query[0]))
            deferreds.append(d)

        results = yield DeferredList(deferreds, consumeErrors=True)
        for success, result in results:
            if not success:
                log.error('{}: {}'.format(device.id, result.getErrorMessage()))

        returnValue(results)
Example #25
@inlineCallbacks   # required: the body uses yield/returnValue
def run(packages, modules, other_args, reactor, limit, excludes):
    sem = DeferredSemaphore(limit)
    proc_argss = get_cafe_args(packages, modules, excludes)
    deferreds = [
        sem.run(getProcessOutputAndValue,
                'cafe-runner',
                other_args + proc_args,
                env=os.environ,
                reactor=reactor) for proc_args in proc_argss
    ]
    results = yield gatherResults(deferreds, consumeErrors=True)

    failed = False
    for proc_args, (stdout, stderr, code) in zip(proc_argss, results):
        if code == 0:
            continue
        failed = True
        print('Error when running ', ' '.join(proc_args))
        print('Stdout\n', stdout, 'Stderr\n', stderr)

    if failed:
        raise SystemExit('Some tests failed')
Example #26
	def runFiles(self):
	    semi = DeferredSemaphore(1)

	    jobs = []
	    recordTimes = "01/24/17 12:00:00 01/24/17 12:15:00 01/24/17 12:30:00"
	    recordTimesList = [data for data in recordTimes.split()]
	    for runs in range(len(recordTimesList)/2):
	    	print "recordTimes:", recordTimesList
	    	# recordTimeStartTime = recordTimesList.pop(0) + " " + recordTimesList.pop(0)
	    	# print "start time: ", recordTimeStartTime

	    	startAtTime = self.calculateTimeDifference(recordTimesList.pop(0), recordTimesList.pop(0))
	    #     jobs.append(semi.run(tv.takeVideo, int(resW), int(resH), int(totalTimeSec),\
					# int(framerate), startAtTime, serverIP, piName))
	        jobs.append(semi.run(self.sendFiles, startAtTime))

	    jobs = DeferredList(jobs)
	    def cbFinished(ignored):
	        print 'Finishing job'
	        # reactor.callLater(0.5, self.transport.write, 'finished')
	    jobs.addCallback(cbFinished)
	    return jobs
Example #27
    def collect(self, config):
        log.debug('Starting SBA JVM collect')
        # TODO : cleanup job collect

        ip_address = config.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", config.id)
            returnValue(None)

        ds0 = config.datasources[0]
        applicationList = []
        deferreds = []
        sem = DeferredSemaphore(1)
        for metric, props in self.metrics.iteritems():
            applicationNameID = ds0.params['applicationNameID']        # app_delivery_service_8df95ae5
            # if applicationNameID in applicationList:
            #     continue
            # applicationList.append(applicationNameID)
            serviceURL = ds0.params['serviceURL']

            # url = self.urls[datasource.datasource].format(serviceURL)
            endpoint = props['endpoint']
            url = '{}/management/metrics/{}'.format(serviceURL, endpoint)
            log.debug('AAA url: {}'.format(url))

            d = sem.run(getPage, url,
                        headers={
                            "Accept": "application/json",
                            "User-Agent": "Mozilla/3.0Gold",
                            "iv-groups": ds0.zIVGroups,
                            "iv-user": ds0.zIVUser,
                            },
                        )
            tag = 'jvm_{}_{}'.format(metric, applicationNameID)      # order_delivery_service_3db30547
            d.addCallback(self.add_tag, tag)
            deferreds.append(d)
        return DeferredList(deferreds)
Example #28
    @inlineCallbacks
    def run_deploy(self, hosts, components, commands):
        try:
            self.transport.initialize()
        except TransportError as e:
            raise DeployError("could not initialize transport: %s" % e)

        def signal_handler(sig, _):
            reason = SIGNAL_MESSAGES[sig]
            reactor.callFromThread(self.abort, reason)
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGHUP, signal_handler)

        yield self.event_bus.trigger("deploy.begin")

        try:
            if components:
                yield self.event_bus.trigger("build.begin")

                try:
                    # synchronize the code host with upstreams
                    # this will return a build token and build host for each
                    # component
                    sync_command = ["synchronize"] + components
                    (sync,) = yield self.process_host(
                        self.code_host, [sync_command])

                    # this is where we build up the final deploy command
                    # resulting from all our syncing and building
                    deploy_command = ["deploy"]

                    # collect the results of the sync per-buildhost
                    by_buildhost = collections.defaultdict(list)
                    for component, sync_info in sync.iteritems():
                        component_ref = component + "@" + sync_info["token"]

                        build_host = sync_info.get("buildhost", None)
                        if build_host:
                            by_buildhost[build_host].append(component_ref)
                        else:
                            # no build host means we just pass the sync token
                            # straight through as a deploy token
                            deploy_command.append(component_ref)

                    # ask each build host to build our components and return
                    # a deploy token
                    for build_host, build_refs in by_buildhost.iteritems():
                        build_command = ["build"] + build_refs
                        (tokens,) = yield self.process_host(
                            build_host, [build_command])

                        for ref in build_refs:
                            component, at, sync_token = ref.partition("@")
                            assert at == "@"
                            try:
                                deploy_ref = component + "@" + tokens[ref]
                            except KeyError:
                                raise ComponentNotBuiltError(component)
                            deploy_command.append(deploy_ref)
                except Exception:
                    traceback.print_exc()
                    raise DeployError("unexpected error in sync/build")
                else:
                    # inject our built-up deploy command at the beginning of
                    # the command list for each host
                    commands = [deploy_command] + commands

                yield self.event_bus.trigger("build.end")

            parallelism_limiter = DeferredSemaphore(tokens=self.parallel)
            host_deploys = []
            first_host = True
            for host in hosts:
                if not first_host:
                    for i in xrange(self.sleeptime, 0, -1):
                        yield self.event_bus.trigger(
                            "deploy.sleep", host=host, count=i)
                        yield sleep(1)
                else:
                    first_host = False

                deferred = parallelism_limiter.run(
                    self.process_host, host, commands)
                deferred.addErrback(self.on_host_error)
                host_deploys.append(deferred)

                yield self.event_bus.trigger(
                    "deploy.enqueue", deploys=host_deploys)
            yield DeferredList(host_deploys)
        except (DeployError, AbortDeploy, TransportError) as e:
            yield self.abort(str(e))
        else:
            yield self.event_bus.trigger("deploy.end")
Example #29
class View(TabManager,iterator):
    def __init__(self,controller):
        self.name,self.version="rattlekekz-qt",20100806  # these variables are queried by the View
        self.controller=controller
        self.revision=rev
        self.alert=app.alert
        TabManager.__init__(self)
        self.spaces=re.compile(r"  {1,}")
        self.urls=re.compile(r"(?=\b)((?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~/|/)(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|edu|pro|asia|cat|coop|int|tel|post|xxx|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|/)+|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?)(?=\b)",re.I)
        self.blubb=lambda x:chr(ord(x)-43)
        self.plugins={}
        self._setup()
        self.addTab("$login",rattlekekzLoginTab)
        self.changeTab("$login")
        self.main.show()
        self.smilie_data=self.readSmilies()
        self.loading_data=open(sys.prefix+os.sep+'share'+os.sep+'emoticons'+os.sep+'rattlekekz'+os.sep+'loading.png').read()
        self.loading_image=QtGui.QImage()
        self.loading_image.loadFromData(self.loading_data,"PNG")
        self.images={}
        self.pendingImages=[]
        self.smilies={"s6":":-)",
                 "s4":":-(",
                 "s1":":-/",
                 "s8":"X-O",
                 "s7":"(-:",
                 "s9":"?-|",
                 "s10":"X-|",
                 "s11":"8-)",
                 "s2":":-D",
                 "s3":":-P",
                 "s5":";-)",
                 "sxmas":"o:)",
                 "s12":":-E",
                 "s13":":-G"}
        self.colors={"red":"FF0000",
                     "blue":"0000FF",
                     "green":"008000",
                     "gray":"696969",
                     "cyan":"00FFFF",
                     "magenta":"FF00FF",
                     "orange":"FFA500",
                     "pink":"FFC0CB",
                     "yellow":"FFFF00",
                     "normal":"000000",
                     "normalaway":"696969",
                     "greenaway":"4D804D",
                     "blueaway":"5050E6",
                     "orangeaway":"E5B151",
                     "redaway":"E65151"}
        self.imageLock = DeferredSemaphore(1)

    def _setup(self):
        self.main=rattlekekzMainWidget()
        self.main.setWindowTitle(self.name)
        self.main.setMenuBar(rattlekekzMenuBar())
        self.main.setStatusBar(rattlekekzStatusBar())
        self.menu=self.main.menuBar()
        self.status=self.main.statusBar()
        self.main.setCentralWidget(QtGui.QTabWidget())
        self.tabs=self.main.centralWidget()
        self.tabs.setMovable(True)
        self.tabs.setTabsClosable(True)
        #self.tabs.setMinimumSize(400,550)
        self.main.connect(self.tabs,QtCore.SIGNAL("tabCloseRequested(int)"),self.closeTab)
        self.main.connect(self.main,QtCore.SIGNAL("closed()"),self.quit)
        self.main.connect(self.menu,QtCore.SIGNAL("quit()"),self.quit)
        self.main.connect(self.menu,QtCore.SIGNAL("config()"),self.openConfig)
        self.main.connect(self.tabs,QtCore.SIGNAL("currentChanged(int)"),self.activateTab)
        self.main.connect(self.main,QtCore.SIGNAL("gotFocus()"),self.changeFocus)

    def readSmilies(self):
        data=[]
        for i in glob(sys.prefix+os.sep+'share'+os.sep+'emoticons'+os.sep+'rattlekekz'+os.sep+'*.png'):
            data.append((QtCore.QUrl("smilie://"+i.split(os.sep)[-1]),QtGui.QImage(i,"PNG")))
        return data

    def activateTab(self,integer):
        self.ShownRoom=self.stringHandler(self.tabs.tabText(integer))
        self.tabs.tabBar().setTabTextColor(integer,QtCore.Qt.black)
        self.tabs.widget(integer).gotFocus()

    def closeTab(self,integer):
        if isinstance(self.tabs.widget(integer),rattlekekzMsgTab):
            self.sendStr(self.stringHandler(self.tabs.tabText(integer)),"/part")
        else:
            if isinstance(self.tabs.widget(integer),rattlekekzInfoTab):
                text = self.stringHandler(self.tabs.tabText(integer))
                if text.startswith("whois"):
                    self.controller.closedWhois(text.split(" ")[1])
            self.tabs.removeTab(integer)
            del self.lookupRooms[0]

    def changeFocus(self):
        self.tabs.widget(self.getTabId(self.ShownRoom)).gotFocus()

    def getRooms(self):
        rooms=[]
        for i in range(self.tabs.count()):
            room = self.stringHandler(self.tabs.tabText(i))
            if not room.startswith("#") and not room.startswith("whois: "):
                rooms.append(room)
        return rooms

    def quit(self):
        self.iterPlugins('quitConnection')
        reactor.stop()

    def openConfig(self):
        print "STUB: implement config tab"

    def deparse(self,msg):
        text,format=self.controller.decode(msg)
        msg=[]
        for i in range(len(text)):
            text[i] = self.escapeText(text[i])
            if format[i] == "newline":
                msg.append("<br>")
                continue
            if format[i] == "hline":
                msg.append("<hr>")
                continue
            if format[i] == "imageurl":
                image=self.controller.loadImage(text[i])
                if image[0] == "image":
                    self.pendingImages.append(str(image[1]))
                msg.append("<img src='image://"+str(image[1])+".jpg'>")
                #try:
                #    image=urllib.urlretrieve(text[i])[0]
                #    msg.append("<img src='"+self.stringHandler(image)+"'>")
                #except:
                #    msg.append(self.escapeText("<image url is invalid>"))
                #image=urllib2.urlopen(text[i]).read()
                #for y in range(self.tabs.count()):
                #    if isinstance(self.tabs.widget(y),rattlekekzPrivTab):
                #        self.tabs.widget(y).output.document().addResource(QtGui.QTextDocument.ImageResource,QtCore.QUrl("mydata://"+self.stringHandler(text[i])),QVariant(image))
                #        msg.append(("<img src='"+"mydata://"+self.stringHandler(text[i])+"'>"))
                continue
            if len(format[i]) > 1:
                if format[i][0] == "ownnick":
                    if not "red" in format[i][1]:
                        color = "red"
                    else:
                        color = "green"
                    msg.append("<font color='#"+self.colors[color]+"'><b>"+text[i]+"</b></font>")
                    continue
            #if text[i].isspace() or text[i]=="":   # NOTE: If there are any bugs with new rooms and the roomop-message THIS could be is the reason ;)
            #    continue                           # 
            if text[i] == "":                       #
                continue                            #
            text[i]=self.urls.sub(r'<a href="\1">\1</a>',text[i])
            form=format[i].split(",")
            color=""
            font=([],[])
            for a in form:
                if a in ["red", "blue", "green", "gray", "cyan", "magenta", "orange", "pink", "yellow","white","reset"]:
                    if a != "reset":
                        color=a
                    else:
                        color=""
                if a == "bold":
                    font[0].append("<b>")
                    font[1].append("</b>")
                if a == "italic":
                    font[0].append("<i>")
                    font[1].append("</i>")
                if a == "sb":
                    text[i]="<img src='"+"smilie://"+self.stringHandler(text[i])+".png' />"
                if a == "button":
                    color=""
                    font[0].append("<a href='button:"+text[i]+"'>")
                    font[1].append("</a>")
                    text[i] = "["+text[i]+"]"
            if color != "":
                msg.append("<font color='#"+self.colors[color]+"'>"+"".join(font[0])+text[i]+"".join(font[1])+"</font>")
            else:
                msg.append("".join(font[0])+text[i]+"".join(font[1]))
        return msg

    def escapeText(self,text):
        text="&amp;".join(text.split("&"))
        text="&lt;".join(text.split("<"))
        text="&gt;".join(text.split(">"))
        count=self.spaces.findall(text)
        text=self.spaces.split(text)
        for i in range(len(text)):
            if len(count) != 0:
                text[i]=text[i]+" "+(len(count.pop(0))-1)*"&nbsp;"
            else:
                break
        return "".join(text)

    def loadedImage(self,id,image_data):
        id=str(id)
        image=QtGui.QImage()
        image.loadFromData(image_data)
        if self.images.has_key(id):
            tab = self.getTab(self.images[id])
            self.imageLock.run(tab.refreshImage,id,image)
        else:
            reactor.callLater(10,self.loadedImage,id,image_data)

    def stringHandler(self,string,return_utf8=False):
        if type(string) is list:
            result=[]
            for i in string:
                if return_utf8 == False:
                    try:
                        i=str(i)
                    except UnicodeEncodeError:
                        i=unicode(i).encode("utf_8")
                    result.append(i)
                else:
                    try:
                        i=unicode(i)
                    except UnicodeDecodeError:
                        i=str(i).decode("utf_8","replace")
                    result.append(i)
            return result
        else:
            if return_utf8 == False:
                try:
                    return str(string)
                except UnicodeEncodeError:
                    string=unicode(string)
                    return string.encode("utf_8")
            else:
                try:
                    return unicode(string)
                except UnicodeDecodeError:
                    string=str(string)
                    return string.decode("utf_8","replace")

    def finishedReadingConfigfile(self):
        pass

    def receivedPreLoginData(self,rooms,array):
        self.isConnected=True
        self.status.showMessage("connected")
        self.addTab("$login",rattlekekzLoginTab)
        self.getTab("$login").receivedPreLoginData(rooms,array)

    def updateRooms(self,rooms):
        try:
            tab=self.getTab("$login")
        except:
            pass
        else:
            tab.updateRooms(rooms)

    def startConnection(self,host,port):
        self.controller.startConnection(host,port)

    def addRoom(self,room,tab):
        tablist={"ChatRoom":rattlekekzMsgTab,
                 "PrivRoom":rattlekekzPrivTab,
                 "InfoRoom":rattlekekzInfoTab,
                 "WhoisRoom":rattlekekzInfoTab,
                 "MailRoom":rattlekekzMailTab,
                 "SecureRoom":rattlekekzSecureTab,
                 "EditRoom":rattlekekzWhoisEditTab,
                 "MailEditRoom":rattlekekzMailEditTab}
        self.addTab(room,tablist[tab])

    def newTopic(self,room,topic):
        self.getTab(room).newTopic(topic)

    def sendLogin(self, nick, passwd, room):
        self.getTab("$login").grayOut()
        self.iterPlugins('sendLogin', [nick, passwd, room])

    def registerNick(self, nick, passwd, email):
        self.iterPlugins('registerNick', [nick, passwd, email])

    def changePassword(self, oldPassword, newPassword):
        self.iterPlugins('changePassword', [oldPassword, newPassword])

    def updateProfile(self, newName, newLocation, newHomepage, newHobbies, newSignature, passwd):
        self.iterPlugins('updateProfile', [newName, newLocation, newHomepage, newHobbies, newSignature, passwd])

    def startedConnection(self):
        self.status.showMessage("connecting ...")

    def connectionLost(self,reason):
        self.status.showMessage(self.stringHandler("connection lost",True))

    def connectionFailed(self):
        self.status.showMessage("connection attempt failed")

    def successLogin(self,nick,status,room,reconnected=False):
        self.nickname=nick
        self.addTab(room,rattlekekzMsgTab)
        if not reconnected:
            self.ShownRoom=room
            self.changeTab(room)
        try:
            self.delTab("$login")
        except:
            pass

    def successRegister(self):
        self.status.showMessage("nick registered")

    def successNewPassword(self):
        self.status.showMessage("password changed")

    def receivedProfile(self,name,location,homepage,hobbies,signature):
        self.changeTab("$edit")
        self.getTab("$edit").receivedProfile(name,location,homepage,hobbies,signature)

    def successNewProfile(self):
        self.status.showMessage("profile updated")

    def securityCheck(self, infotext):
        pass

    def receivedPing(self,deltaPing):
        self.status.showMessage(self.stringHandler("Ping: "+str(deltaPing)+" ms",True))

    def printMsg(self,room,msg):
        #print "<%s> %s" % (self.stringHandler(room),"".join(self.stringHandler(msg)))
        msg = "".join(msg)
        ids=re.findall(r"<img\s+src='image://(\d+)\.jpg'\s*>",msg)
        if len(ids) != 0:
            if isinstance(self.getTab(room),(rattlekekzPrivTab,rattlekekzInfoTab,rattlekekzMailTab)):
                for i in ids:
                    self.images[i]=room
                    if i in self.pendingImages:
                        image=QtGui.QImage()
                        image.loadFromData(self.controller.getImage(int(i)))
                        self.getTab(room).addImage(i,image)
                        self.pendingImages.remove(i)
                    else:
                        self.getTab(room).addImage(i,self.loading_image)
        self.getTab(room).addLine(msg)

    def gotException(self, message):
        self.status.showMessage(message)

    def gotLoginException(self,message):
        try:
            tab = self.getTab("$login")
        except:
            pass
        else:
            tab.grayOut(False)
            tab.prelogin=True
            tab.gotFocus()
        self.status.showMessage(message)

    def listUser(self,room,users):
        usercolors = self.controller.getValue("usercolors")
        if usercolors == None: usercolors = True
        self.getTab(room).listUser(users,usercolors)

    def meJoin(self,room,background):
        self.addTab(room,rattlekekzMsgTab)
        if not background:
            self.changeTab(room)

    def mePart(self,room):
        self.delTab(room)

    def meGo(self,oldroom,newroom):
        index = self.getTabId(oldroom)
        self.addTab(newroom,rattlekekzMsgTab,index)
        self.changeTab(newroom)
        self.delTab(oldroom)

    def newTopic(self,room,topic):
        self.getTab(room).newTopic(topic)

    def loggedOut(self):
        pass

    def fubar(self):
        """This function sends bullshit to the controller for debugging purposes"""
        self.iterPlugins('sendBullshit',["".join(map(self.blubb,'_a`\x90\x8cc^b\\\\d\x8d\x8d^\x8e\x8d``\x90\x8f]]c_]b\x91b\x8dd^\x8c_\x8e\x91\x91__\x8c\x91'))])

    def receivedInformation(self,info):
        pass

    def minorInfo(self,room,nick):
        """this method is used to determine wether a new info-tab must be opened to display e.g. a bot message"""
        pre=None
        if nick=="":
            pre="Info:°nn°"
        if not isinstance(self.getTab(self.ShownRoom),rattlekekzMsgTab):
            self.addRoom("$info","InfoRoom")
            self.changeTab("$info")
            return (pre,"$info")
        elif room=="":
            return (pre,self.ShownRoom)
        else:
            return (pre,room)
        #if isinstance(self.getTab(self.ShownRoom),self.rattlekekzMsgTab):
        #    self.getTab(self.ShownRoom).addLine("Info: "+self.stringHandler(message))
        #else:
        #    self.addTab("$infos",rattlekekzInfoTab)
        #    self.changeTab("$infos")
        #    self.getTab(self.ShownRoom).addLine("Info: "+self.stringHandler(message))

    def receivedWhois(self,nick,array):
        title=u"whois: "+self.stringHandler(nick,True)
        out = map(lambda x:"".join(self.deparse(x)), array)
        try:
            tab = self.getTab(title)
        except:
            self.addRoom(title,"WhoisRoom")
            self.changeTab(title)
        self.getTab(title).addWhois()
        for msg in out:
            self.printMsg(title,msg)
        self.highlightTab(title,2)

    def MailInfo(self,info):
        pass

    def openLinkTab(self,room,links):
        room,links=self.stringHandler(room,True),self.stringHandler(links,True)
        self.addTab("$links of "+room,rattlekekzInfoTab)
        self.changeTab("$links of "+room)
        for i in links:
            self.getTab("$links of "+room).addLine('<a href="'+i+'">'+i+'</a>')

    def openMailEditTab(self,receiver=""):
        tabname="$mail_to"
        if receiver!="":
            tabname=tabname+" "+receiver
        self.addTab(tabname,rattlekekzMailEditTab)
        self.getTab(tabname).setContent(receiver)
        self.changeTab(tabname)

    def openMailTab(self):
        self.addTab("$mails",rattlekekzMailTab)
        self.changeTab("$mails")

    def getMail(self,index):
        self.iterPlugins('getMail', [index])

    def refreshMaillist(self):
        self.iterPlugins('refreshMaillist')

    def receivedMails(self,userid,mailcount,mails):
        self.openMailTab()
        self.getTab("$mails").receivedMails(userid,mailcount,mails)

    def printMail(self,user,date,mail):
        self.openMailTab()
        header = u"Mail by "+user+" from "+date+u": °np° ---begin of mail ---- °np°" 
        end = u"°np°---end of mail---°np°"
        mail = header + self.stringHandler(mail,True) + end
        msg = self.deparse(mail)
        self.getTab("$mails").addLine(msg)

    def sendStr(self,channel,string):
        self.iterPlugins('sendStr', [channel, string])

    def sendMail(self,nick,msg):
        self.iterPlugins('sendMail', [nick, msg])

    def timestamp(self, string):
        return "<font color='#"+self.colors["green"]+"'>"+string+"</font>"

    def colorizeText(self, color, text):
        return "<font color='#"+self.colors[color]+"'>"+text+"</font>"

    def unknownMethod(self,name):
        pass

    def __getattr__(self, name):
        return self.unknownMethod(name)
Example #30
class MapUpdater(object):
    def __init__(self, mapsPath, fetchURL, deleteIfNotPresent, tfLevelSounds):
        assert isinstance(mapsPath, str) and len(mapsPath)
        assert isinstance(fetchURL, str) and len(fetchURL)
        self.mapsPath = FilePath(mapsPath)
        self.downloadTempPath = self.mapsPath.child('mapupdater')
        self.fetchURL = URLPath.fromString(fetchURL)
        self.deleteIfNotPresent = deleteIfNotPresent
        self.tfLevelSounds = tfLevelSounds
        self.semaphore = DeferredSemaphore(1)
        self.downloadSemaphore = DeferredSemaphore(4)
        for fp in self.downloadTempPath.globChildren('*.bsp.bz2'):
            fp.remove()

    def checkMaps(self, *a, **kw):
        """
        Wrap self._checkMaps to prevent running multiple checks at once.
        """
        return self.semaphore.run(self._checkMaps, *a, **kw)

    def _checkMaps(self, forceDownloadMaps=None):
        def _cb(remoteMaps):
            if forceDownloadMaps:
                remoteMaps = list(set(remoteMaps + forceDownloadMaps))
            remoteMapsLower = [f.lower() for f in remoteMaps]
            ourMaps = filter(
                lambda p: not p.isdir() and p.path.endswith('.bsp'),
                self.mapsPath.children())
            ourMapFilenames = [p.basename().lower() + '.bz2' for p in ourMaps]

            missing = []
            for f in remoteMaps:
                if f.lower() not in ourMapFilenames:
                    missing.append(f)

            delete = []
            for p in ourMaps:
                filename = p.basename().lower() + '.bz2'
                if filename not in remoteMapsLower:
                    delete.append(p)

            if self.deleteIfNotPresent and delete:
                for fp in delete:
                    fp.remove()

                print 'Deleted {} map(s) not present at remote server:'.format(
                    len(delete))
                print ', '.join([x.basename() for x in delete])

            if missing:
                print 'Fetching {} map(s)'.format(len(missing))

                def _allFinished(ignored):
                    self.mapsPath.child('tempus_map_updater_run_once').touch()
                    if self.tfLevelSounds:
                        self.addLevelSounds(ourMaps)
                    print 'Now up-to-date.'

                ds = []
                for filename in missing:
                    ds.append(self.fetchMap(filename))
                return gatherResults(ds).addCallback(_allFinished)
            elif self.tfLevelSounds:
                self.addLevelSounds(ourMaps)

        return self.getMapList().addCallback(_cb)

    def fetchMap(self, *a, **kw):
        return self.downloadSemaphore.run(self._fetchMap, *a, **kw)

    def _fetchMap(self, filename):
        downloadTempPath = self.downloadTempPath
        if not downloadTempPath.exists():
            downloadTempPath.makedirs()

        def _cb(response, fn):
            tp = downloadTempPath.child(fn)
            fd = tp.open('wb')

            def _extracted(ignored):
                extractedPath = tp.sibling(tp.basename().replace('.bz2', ''))
                extractedPath.moveTo(
                    self.mapsPath.child(tp.basename().replace('.bz2', '')))
                try:
                    tp.remove()
                # File already gone
                except OSError:
                    pass
                print 'Finished downloading {}'.format(fn)

            def _finished(ignored):
                fd.close()
                d = getProcessOutputAndValue(
                    'aunpack', (tp.path, '-X', downloadTempPath.path))
                d.addErrback(log.err)
                d.addCallback(_extracted)
                return d

            def _eb(failure):
                print 'Error downloading {}:'.format(fn)
                print failure.getTraceback()
                fd.close()
                try:
                    tp.remove()
                # File already gone
                except OSError:
                    pass

            d = treq.collect(response, fd.write)
            d.addCallback(_finished)
            d.addErrback(_eb)
            return d

        d = treq.get(str(self.fetchURL.child(filename)))
        return d.addCallback(_cb, filename)

    def getMapList(self, forceDownloadMaps=None):
        raise NotImplementedError('Subclasses must override this method.')

    def addLevelSounds(self, mapPaths):
        content = FilePath(
            mapupdater.__file__).sibling('tf_level_sounds.txt').getContent()
        added = []
        for p in mapPaths:
            mapName = p.basename()[:-4]
            p2 = p.sibling('{}_level_sounds.txt'.format(mapName))
            if p2.exists() and p2.getContent() == content:
                continue
            added.append(mapName)
            p2.setContent(content)
        if added:
            print 'Added level sounds for:'
            print ', '.join(added)
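The checkMaps wrapper at the top of this class is the standard "one-token semaphore as a mutex" idiom: overlapping callers queue up behind the semaphore instead of running _checkMaps concurrently. Reduced to its essentials (a sketch, not the MapUpdater code):

from twisted.internet.defer import DeferredSemaphore

class SingleFlight(object):
    """Serialize calls to an expensive Deferred-returning operation."""

    def __init__(self):
        self._lock = DeferredSemaphore(1)

    def check(self, *a, **kw):
        # callers may stack up, but _check never overlaps with itself
        return self._lock.run(self._check, *a, **kw)

    def _check(self, *a, **kw):
        raise NotImplementedError('subclasses supply the real check')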
Example #31
class Supervisor(object):
    """
    The Supervisor class manages the environments of testifi, and allows
    scheduling of work.
    """
    def __init__(self, queue):
        self.managedPackageVersions = set()
        self.certifiDirectory = tempfile.mkdtemp()
        self.testSemaphore = DeferredSemaphore(tokens=32)
        self.binPath = os.path.join(
            os.path.split(__file__)[0], 'certifi_test.py')
        self.queue = queue
        self._log = logger.new(object="supervisor")

    @inlineCallbacks
    def pollForNewReleases(self):
        """
        Check whether any new certifi releases have been made.
        """
        log = self._log.bind(function="pollForNewReleases")
        log.msg("polling for new releases")

        releases = yield certifiVersions()

        for release, file in releases:
            if release in self.managedPackageVersions:
                log.msg("skipping existing release", release=release)
                continue

            # The release is new. We need to start processing it.
            yield self.addNewRelease(release, file)

    @inlineCallbacks
    def addNewRelease(self, release, tarball_path):
        """
        For each new release that we've discovered, set up processing for it.
        """
        log = self._log.bind(function="addNewRelease", release=release)
        log.msg("adding release")

        # Download the tarball.
        file_path = os.path.join(self.certifiDirectory,
                                 'certifi-' + release + '.tar.gz')
        with open(file_path, 'wb') as f:
            yield downloadFile(tarball_path, f)

        log.msg("tarball saved", tarball=file_path)

        rc = yield getProcessValue(
            distutils.spawn.find_executable('tar'),
            args=['xvf', file_path, '-C', self.certifiDirectory])
        assert not rc

        log.msg("adding to managed versions")
        self.managedPackageVersions.add(release)

    @inlineCallbacks
    def _testHostAgainstRelease(self, host, release):
        """
        Tests a given host against a specific certifi release. Returns a
        Deferred that fires with the result of the test.
        """
        log = self._log.bind(function="_testHostAgainstRelease")
        file_path = os.path.join(self.certifiDirectory, 'certifi-' + release)
        args = [self.binPath, file_path, host]
        log.msg("running test", args=args)

        result = yield getProcessValue(
            distutils.spawn.find_executable('pypy'),
            args=args,
        )
        returnValue(result)

    @inlineCallbacks
    def testHost(self, host):
        """
        Runs the certifi tests against a given host.

        Returns a deferred that fires with a list of tuples: certifi release
        and whether the test passed.
        """
        log = self._log.bind(function="testHost", host=host)
        results = []
        for release in self.managedPackageVersions:
            result = yield self.testSemaphore.run(self._testHostAgainstRelease,
                                                  host, release)
            results.append((release, result))
            log.msg("test complete", release=release, result=result)

        returnValue(results)

    @inlineCallbacks
    def testLoop(self):
        """
        Loop forever, popping tests off the queue and running them.
        """
        log = self._log.bind(function="testLoop")
        while True:
            test = yield self.queue.get()
            log = log.bind(test_id=test.id, host=test.host)
            log.msg("beginning test")
            try:
                results = yield self.testHost(test.host)
            except Exception as e:
                print e
                continue
            log.msg("test suite complete", results=results)
Example #32
class VBMSClient(object):
    def __init__(self, reactor, connect_vbms_path, bundle_path, endpoint_url,
                 keyfile, samlfile, key, keypass, ca_cert, client_cert):
        self._reactor = reactor

        self._connect_vbms_path = connect_vbms_path
        self._bundle_path = bundle_path
        self._endpoint_url = endpoint_url
        self._keyfile = keyfile
        self._samlfile = samlfile
        self._key = key
        self._keypass = keypass
        self._ca_cert = ca_cert
        self._client_cert = client_cert

        self._connect_vbms_semaphore = DeferredSemaphore(tokens=8)

    def _path_to_ruby(self, path):
        if path is None:
            return "nil"
        else:
            return repr(path)

    def _execute_connect_vbms(self, logger, request, formatter, args):
        ruby_code = """#!/usr/bin/env ruby

$LOAD_PATH << '{connect_vbms_path}/src/'

require 'json'

require 'vbms'


client = VBMS::Client.new(
    {endpoint_url!r},
    {keyfile},
    {samlfile},
    {key},
    {keypass!r},
    {ca_cert},
    {client_cert},
)
request = {request}
result = client.send(request)
STDOUT.write({formatter})
STDOUT.flush()
        """.format(
            connect_vbms_path=self._connect_vbms_path,
            endpoint_url=self._endpoint_url,
            keyfile=self._path_to_ruby(self._keyfile),
            samlfile=self._path_to_ruby(self._samlfile),
            key=self._path_to_ruby(self._key),
            keypass=self._keypass,
            ca_cert=self._path_to_ruby(self._ca_cert),
            client_cert=self._path_to_ruby(self._client_cert),
            request=request,
            formatter=formatter,
        ).strip()
        with tempfile.NamedTemporaryFile(suffix=".rb", delete=False) as f:
            f.write(ruby_code)

        st = os.stat(f.name)
        os.chmod(f.name, st.st_mode | stat.S_IEXEC)

        @inlineCallbacks
        def run():
            timer = logger.time("process.spawn")
            try:
                stdout, stderr, exit_code = yield getProcessOutputAndValue(
                    '/bin/bash', [
                        '-lc', '{} exec {} {}'.format(
                            self._bundle_path, f.name, " ".join(
                                map(pipes.quote, args)))
                    ],
                    env=os.environ,
                    path=self._connect_vbms_path,
                    reactor=self._reactor)
            finally:
                timer.stop()
            if exit_code != 0:
                raise VBMSError(stdout, stderr, exit_code)
            returnValue(stdout)

        return self._connect_vbms_semaphore.run(run)

    @inlineCallbacks
    def get_document_types(self, logger):
        response = yield self._execute_connect_vbms(
            logger.bind(process="GetDocumentTypes"),
            "VBMS::Requests::GetDocumentTypes.new()",
            "result.map(&:to_h).to_json",
            [],
        )
        returnValue(json.loads(response))

    @inlineCallbacks
    def list_documents(self, logger, file_number):
        response = yield self._execute_connect_vbms(
            logger.bind(process="ListDocuments"),
            "VBMS::Requests::ListDocuments.new(ARGV[0])",
            'result.map(&:to_h).to_json',
            [file_number],
        )
        returnValue(json.loads(response))

    def fetch_document_contents(self, logger, document_id):
        return self._execute_connect_vbms(
            logger.bind(process="FetchDocumentById"),
            "VBMS::Requests::FetchDocumentById.new(ARGV[0])",
            "result.content",
            [document_id],
        )
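The client above leans on DeferredSemaphore(tokens=8) so that at most eight connect_vbms subprocesses are alive at once; the core of that pattern, sketched with a placeholder script path and arguments instead of the generated Ruby:

import os

from twisted.internet import reactor
from twisted.internet.defer import DeferredSemaphore
from twisted.internet.utils import getProcessOutputAndValue

process_semaphore = DeferredSemaphore(tokens=8)

def run_limited(script_path, args):
    # acquire a token, spawn the process, release the token when it exits
    return process_semaphore.run(
        getProcessOutputAndValue, '/bin/bash',
        ['-lc', '{} {}'.format(script_path, ' '.join(args))],
        env=os.environ, reactor=reactor)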
Example #33
0
class wholeProcess():

	def __init__(self):
		self.semi = DeferredSemaphore(1)
		# Should keep a module-level queue that items can be appended to, then
		# use these methods to process the queued items one after another
		# (see the sketch after this example).

	def scp(self):
		print "stuff"
		time.sleep(0.5)
		return "done"

	def globProcess(self):
		print "globbing"
		files = glob.glob('*.py')
		dl = list()

		for item in range(len(files)):
			dl.append(self.semi.run(self.sendFiles, files[item]))

		dl = DeferredList(dl)
		def cbFinished(ignored):
			print 'Finishing job'
		dl.addCallback(cbFinished)
		return dl

	def manage(self):
		print "managing queue"
		files = glob.glob('*.py')
		self.semi.run(self.sendFiles, files)  # queues the upload behind the semaphore


	def addToQueue(self, files):
		self.semi.run(self.sendFiles, files)



	#Manage whole process
	def runFiles(self):
	    semi = DeferredSemaphore(1)

	    jobs = []
	    for runs in range(5):
	        jobs.append(semi.run(self.collectFiles))

	    jobs = DeferredList(jobs)
	    def cbFinished(ignored):
	        print 'Finishing job'
	    jobs.addCallback(cbFinished)
	    return jobs

	#Glob + upload > every 45 mins run this process?
	def collectFiles(self):
	    semaphore = DeferredSemaphore(1)
	    files = glob.glob('*.py')
	    dl = list()

	    for item in range(len(files)):
	        # Queue each file behind the semaphore; uploads run one at a time
	        dl.append(semaphore.run(self.sendFiles, files[item]))

	    # convert to a DeferredList, which allows a callback to be added
	    dl = DeferredList(dl)
	    def cbFinished(ignored):
	        print 'Finishing job'
	    dl.addCallback(cbFinished)
	    return dl



	# def runFiles(self, resW, resH, totalTimeSec, framerate, serverIP, piName, recordTimesList):
	def runFiles(self):
	    semi = DeferredSemaphore(1)

	    jobs = []
	    recordTimes = "01/24/17 12:00:00 01/24/17 12:15:00 01/24/17 12:30:00"
	    recordTimesList = recordTimes.split()
	    for runs in range(len(recordTimesList)/2):
	        print "recordTimes:", recordTimesList
	        # recordTimeStartTime = recordTimesList.pop(0) + " " + recordTimesList.pop(0)
	        # print "start time: ", recordTimeStartTime

	        startAtTime = self.calculateTimeDifference(recordTimesList.pop(0), recordTimesList.pop(0))
	        # jobs.append(semi.run(tv.takeVideo, int(resW), int(resH), int(totalTimeSec),
	        #                      int(framerate), startAtTime, serverIP, piName))
	        jobs.append(semi.run(self.sendFiles, startAtTime))

	    jobs = DeferredList(jobs)
	    def cbFinished(ignored):
	        print 'Finishing job'
	        # reactor.callLater(0.5, self.transport.write, 'finished')
	    jobs.addCallback(cbFinished)
	    return jobs

	def calculateTimeDifference(self, dateToEnd, timeToEnd):
		fullString = dateToEnd + " " + timeToEnd
		endTime = datetime.datetime.strptime(fullString, "%x %X")
		nowTime = datetime.datetime.today()
		difference = endTime - nowTime
		return time.time() + difference.total_seconds()


	#Upload SCP files
	def sendFiles(self, img):
	    print "sending img: ", img
	    time.sleep(5)
	    return "finished"
Example #34
0
    def run_deploy(self, hosts, components, commands):
        try:
            yield self.event_bus.trigger("deploy.precheck")
        except AbortDeploy as e:
            yield self.abort(str(e))
            return

        try:
            self.transport.initialize()
        except TransportError as e:
            raise DeployError("could not initialize transport: %s" % e)

        def signal_handler(sig, _):
            reason = SIGNAL_MESSAGES[sig]
            reactor.callFromThread(self.abort, reason)
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGHUP, signal_handler)

        yield self.event_bus.trigger("deploy.begin")

        try:
            if components:
                yield self.event_bus.trigger("build.begin")

                try:
                    # synchronize the code host with upstreams
                    # this will return a build token and build host for each
                    # component
                    sync_command = ["synchronize"] + components
                    code_host = Host.from_hostname(self.code_host)
                    (sync_result,) = yield self.process_host(
                        code_host, [sync_command])
                    yield self.event_bus.trigger("build.sync",
                                                 sync_info=sync_result.result)

                    # this is where we build up the final deploy command
                    # resulting from all our syncing and building
                    deploy_command = ["deploy"]

                    # collect the results of the sync per-buildhost
                    by_buildhost = collections.defaultdict(list)
                    for component, sync_info in sync_result.result.iteritems():
                        component_ref = component + "@" + sync_info["token"]

                        build_host = sync_info.get("buildhost", None)
                        if build_host:
                            by_buildhost[build_host].append(component_ref)
                        else:
                            # no build host means we just pass the sync token
                            # straight through as a deploy token
                            deploy_command.append(component_ref)

                    # ask each build host to build our components and return
                    # a deploy token
                    for build_hostname, build_refs in by_buildhost.iteritems():
                        build_command = ["build"] + build_refs
                        build_host = Host.from_hostname(build_hostname)
                        (build_result,) = yield self.process_host(
                            build_host, [build_command])

                        for ref in build_refs:
                            component, at, sync_token = ref.partition("@")
                            assert at == "@"
                            try:
                                deploy_ref = (component + "@" +
                                              build_result.result[ref])
                            except KeyError:
                                raise ComponentNotBuiltError(component)
                            deploy_command.append(deploy_ref)

                    # Wait until components report ready IF:
                    # * we are actually restarting a component
                    # * we aren't going --dangerously-fast
                    restarting_component = any(
                        ["restart" in val for val in commands])
                    if restarting_component and not self.dangerously_fast:
                        commands.append(["wait-until-components-ready"])

                except Exception:
                    traceback.print_exc()
                    raise DeployError("unexpected error in sync/build")
                else:
                    # inject our built-up deploy command at the beginning of
                    # the command list for each host
                    commands = [deploy_command] + commands

                yield self.event_bus.trigger("build.end")

            parallelism_limiter = DeferredSemaphore(tokens=self.parallel)
            host_deploys = []
            first_host = True
            for host in hosts:
                if not first_host:
                    for i in xrange(self.sleeptime, 0, -1):
                        yield self.event_bus.trigger(
                            "deploy.sleep", host=host, count=i)
                        yield sleep(1)
                else:
                    first_host = False

                deferred = parallelism_limiter.run(
                    self.process_host, host, commands,
                    timeout=self.execution_timeout)
                deferred.addErrback(self.on_host_error)
                host_deploys.append(deferred)

                yield self.event_bus.trigger(
                    "deploy.enqueue", deploys=host_deploys)
            yield DeferredList(host_deploys)
        except (DeployError, AbortDeploy, TransportError) as e:
            yield self.abort(str(e))
        else:
            yield self.event_bus.trigger("deploy.end")
Example #35
0
class ExperimentServiceFactory(Factory):
    protocol = ExperimentServiceProto

    def __init__(self, expected_subscribers, experiment_start_delay):
        self.expected_subscribers = expected_subscribers
        self.experiment_start_delay = experiment_start_delay
        self.parsing_semaphore = DeferredSemaphore(500)
        self.connection_counter = -1
        self.connections_made = []
        self.connections_ready = []
        self.vars_received = []

        self._made_looping_call = None
        self._subscriber_looping_call = None
        self._subscriber_received_looping_call = None
        self._timeout_delayed_call = None

    def buildProtocol(self, addr):
        self.connection_counter += 1
        return ExperimentServiceProto(self, self.connection_counter + 1)

    def setConnectionMade(self, proto):
        if not self._timeout_delayed_call:
            self._timeout_delayed_call = reactor.callLater(EXPERIMENT_SYNC_TIMEOUT, self.onExperimentSetupTimeout)
        else:
            self._timeout_delayed_call.reset(EXPERIMENT_SYNC_TIMEOUT)

        self.connections_made.append(proto)
        if len(self.connections_made) >= self.expected_subscribers:
            msg("All subscribers connected!")
            if self._made_looping_call and self._made_looping_call.running:
                self._made_looping_call.stop()

            self.pushIdToSubscribers()
        else:
            if not self._made_looping_call:
                self._made_looping_call = task.LoopingCall(self._print_subscribers_made)
                self._made_looping_call.start(1.0)

    def _print_subscribers_made(self):
        if len(self.connections_made) < self.expected_subscribers:
            msg("%d of %d expected subscribers connected." % (len(self.connections_made), self.expected_subscribers))

    def pushIdToSubscribers(self):
        for proto in self.connections_made:
            self.parsing_semaphore.run(proto.sendAndWaitForReady)

    def setConnectionReady(self, proto):
        self._timeout_delayed_call.reset(EXPERIMENT_SYNC_TIMEOUT)
        self.connections_ready.append(proto)

        if len(self.connections_ready) >= self.expected_subscribers:
            msg("All subscribers are ready, pushing data!")
            if self._subscriber_looping_call and self._subscriber_looping_call.running:
                self._subscriber_looping_call.stop()

            self.pushInfoToSubscribers()
        else:
            if not self._subscriber_looping_call:
                self._subscriber_looping_call = task.LoopingCall(self._print_subscribers_ready)
                self._subscriber_looping_call.start(1.0)

    def _print_subscribers_ready(self):
        msg("%d of %d expected subscribers ready." % (len(self.connections_ready), self.expected_subscribers))

    def pushInfoToSubscribers(self):
        # Generate the json doc
        vars = {}
        for subscriber in self.connections_ready:
            subscriber_vars = subscriber.vars.copy()
            subscriber_vars['port'] = subscriber.id + 12000
            subscriber_vars['host'] = subscriber.transport.getPeer().host
            vars[subscriber.id] = subscriber_vars

        json_vars = json.dumps(vars)
        del vars
        msg("Pushing a %d bytes long json doc." % len(json_vars))

        # Send the json doc to the subscribers
        task.cooperate(self._sendLineToAllGenerator(json_vars))

    def _sendLineToAllGenerator(self, line):
        for subscriber in self.connections_ready:
            yield subscriber.sendLine(line)

    def setConnectionReceived(self, proto):
        self._timeout_delayed_call.reset(EXPERIMENT_SYNC_TIMEOUT)
        self.vars_received.append(proto)

        if len(self.vars_received) >= self.expected_subscribers:
            msg("Data sent to all subscribers, giving the go signal in %f secs." % self.experiment_start_delay)
            reactor.callLater(0, self.startExperiment)
            self._timeout_delayed_call.cancel()
        else:
            if not self._subscriber_received_looping_call:
                self._subscriber_received_looping_call = task.LoopingCall(self._print_subscribers_received)
                self._subscriber_received_looping_call.start(1.0)

    def _print_subscribers_received(self):
        msg("%d of %d expected subscribers received the data." % (len(self.vars_received), self.expected_subscribers))

    def startExperiment(self):
        # Give the go signal and disconnect
        msg("Starting the experiment!")

        if self._subscriber_received_looping_call and self._subscriber_received_looping_call.running:
            self._subscriber_received_looping_call.stop()

        start_time = time() + self.experiment_start_delay
        for subscriber in self.connections_ready:
            # Sync the experiment start time among instances
            subscriber.sendLine("go:%f" % (start_time + subscriber.vars['time_offset']))

        d = task.deferLater(reactor, 5, lambda: msg("Done, disconnecting all clients."))
        d.addCallback(lambda _: self.disconnectAll())
        d.addCallbacks(self.onExperimentStarted, self.onExperimentStartError)

    def disconnectAll(self):
        reactor.runUntilCurrent()

        def _disconnectAll():
            for subscriber in self.connections_ready:
                yield subscriber.transport.loseConnection()
        task.cooperate(_disconnectAll())

    def unregisterConnection(self, proto):
        if proto in self.connections_ready:
            self.connections_ready.remove(proto)
        if proto in self.vars_received:
            self.vars_received.remove(proto)
        if proto.id in self.vars_received:
            self.vars_received.remove(proto.id)

        msg("Connection cleanly unregistered.", logLevel=logging.DEBUG)

    def onExperimentStarted(self, _):
        msg("Experiment started, shutting down sync server.")
        reactor.callLater(0, stopReactor)

    def onExperimentStartError(self, failure):
        err("Failed to start experiment")
        reactor.exitCode = 1
        reactor.callLater(0, stopReactor)
        return failure

    def onExperimentSetupTimeout(self):
        err("Waiting for all peers timed out, exiting.")
        reactor.exitCode = 1
        reactor.callLater(0, stopReactor)

    def lineLengthExceeded(self, line):
        err("Line length exceeded, %d bytes remain." % len(line))
Example #36
0
class Supervisor(object):
    """
    The Supervisor class manages the environments of testifi, and allows
    scheduling of work.
    """
    def __init__(self, queue):
        self.managedPackageVersions = set()
        self.certifiDirectory = tempfile.mkdtemp()
        self.testSemaphore = DeferredSemaphore(tokens=32)
        self.binPath = os.path.join(
            os.path.split(__file__)[0], 'certifi_test.py'
        )
        self.queue = queue
        self._log = logger.new(object="supervisor")

    @inlineCallbacks
    def pollForNewReleases(self):
        """
        Check whether any new certifi releases have been made.
        """
        log = self._log.bind(function="pollForNewReleases")
        log.msg("polling for new releases")

        releases = yield certifiVersions()

        for release, file in releases:
            if release in self.managedPackageVersions:
                log.msg("skipping existing release", release=release)
                continue

            # The release is new. We need to start processing it.
            yield self.addNewRelease(release, file)

    @inlineCallbacks
    def addNewRelease(self, release, tarball_path):
        """
        For each new release that we've discovered, set up processing for it.
        """
        log = self._log.bind(function="addNewRelease", release=release)
        log.msg("adding release")

        # Download the tarball.
        file_path = os.path.join(
            self.certifiDirectory, 'certifi-' + release + '.tar.gz'
        )
        with open(file_path, 'wb') as f:
            yield downloadFile(tarball_path, f)

        log.msg("tarball saved", tarball=file_path)

        rc = yield getProcessValue(
            distutils.spawn.find_executable('tar'),
            args=['xvf', file_path, '-C', self.certifiDirectory]
        )
        assert not rc

        log.msg("adding to managed versions")
        self.managedPackageVersions.add(release)

    @inlineCallbacks
    def _testHostAgainstRelease(self, host, release):
        """
        Tests a given host against a specific certifi release. Returns a
        Deferred that fires with the result of the test.
        """
        log = self._log.bind(function="_testHostAgainstRelease")
        file_path = os.path.join(self.certifiDirectory, 'certifi-' + release)
        args = [self.binPath, file_path, host]
        log.msg("running test", args=args)

        result = yield getProcessValue(
            distutils.spawn.find_executable('pypy'),
            args=args,
        )
        returnValue(result)

    @inlineCallbacks
    def testHost(self, host):
        """
        Runs the certifi tests against a given host.

        Returns a deferred that fires with a list of tuples: certifi release
        and whether the test passed.
        """
        log = self._log.bind(function="testHost", host=host)
        results = []
        for release in self.managedPackageVersions:
            result = yield self.testSemaphore.run(
                self._testHostAgainstRelease, host, release
            )
            results.append((release, result))
            log.msg("test complete", release=release, result=result)

        returnValue(results)

    @inlineCallbacks
    def testLoop(self):
        """
        Loop forever, popping tests off the queue and running them.
        """
        log = self._log.bind(function="testLoop")
        while True:
            test = yield self.queue.get()
            log = log.bind(test_id=test.id, host=test.host)
            log.msg("beginning test")
            try:
                results = yield self.testHost(test.host)
            except Exception as e:
                print e
                continue
            log.msg("test suite complete", results=results)
Example #37
0
    @inlineCallbacks
    def collect(self, device, log):
        log.debug('{}: Modeling collect'.format(device.id))

        port = getattr(device, 'zSpringBootPort', None)
        ivGroups = getattr(device, 'zIVGroups', None)
        if not ivGroups:
            log.error("%s: zIVGroups is not defined", device.id)
            returnValue(None)

        ivUser = getattr(device, 'zIVUser', None)
        if not ivUser:
            log.error("%s: zIVUser is not defined", device.id)
            returnValue(None)

        applications = device.get_SBAApplications
        app_result = (True, ('apps', json.dumps(applications)))

        ip_address = device.manageIp
        if not ip_address:
            log.error("%s: IP Address cannot be empty", device.id)
            returnValue(None)

        deferreds = []
        sem = DeferredSemaphore(1)

        for app in applications:
            #  {'hostingServer': 'dvb-app-l15.dev.credoc.be',
            #   'mgmtURL': 'http://dvb-app-l15.dev.credoc.be:8105/delivery-service_v1/management',
            #   'healthURL': 'http://dvb-app-l15.dev.credoc.be:8105/delivery-service_v1/management/health',
            #   'id': 'app_Delivery Service_b0acc0ef',
            #   'serviceURL': 'http://dvb-app-l15.dev.credoc.be:8105/delivery-service_v1'}
            # TODO: drop obsolete getPage
            d = sem.run(getPage,
                        app['healthURL'],
                        headers={
                            "Accept": "application/json",
                            "iv-groups": ivGroups,
                            "iv-user": ivUser,
                        })
            d.addCallback(self.add_tag, '{}_{}'.format(app['id'], 'health'))
            deferreds.append(d)

            url = '{}/metrics/job'.format(app['mgmtURL'])
            d = sem.run(getPage,
                        url,
                        headers={
                            "Accept": "application/json",
                            "iv-groups": ivGroups,
                            "iv-user": ivUser,
                        })
            d.addCallback(self.add_tag, '{}_{}'.format(app['id'],
                                                       'metricsJob'))
            deferreds.append(d)

        results = yield DeferredList(deferreds, consumeErrors=True)
        results.append(app_result)
        for success, result in results:
            if not success:
                log.error('{}: {}'.format(device.id, result.getErrorMessage()))

        returnValue(results)
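The TODO above flags getPage as obsolete; a hedged replacement in the spirit of Example #38's use of treq keeps the same one-request-at-a-time semaphore but fetches with treq.get and treq.content (the URL and header values are placeholders):

import treq

from twisted.internet.defer import (DeferredSemaphore, inlineCallbacks,
                                    returnValue)

request_semaphore = DeferredSemaphore(1)

@inlineCallbacks
def _fetch(url, iv_groups, iv_user):
    response = yield treq.get(url, headers={
        'Accept': ['application/json'],
        'iv-groups': [iv_groups],
        'iv-user': [iv_user],
    })
    body = yield treq.content(response)
    returnValue(body)

def fetch_serialised(url, iv_groups, iv_user):
    # callers share one token, so requests never overlap
    return request_semaphore.run(_fetch, url, iv_groups, iv_user)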
Example #38
0
class MapUpdater(object):
    def __init__(self, mapsPath, fetchURL, deleteIfNotPresent, tfLevelSounds):
        assert isinstance(mapsPath, str) and len(mapsPath)
        assert isinstance(fetchURL, str) and len(fetchURL)
        self.mapsPath = FilePath(mapsPath)
        self.downloadTempPath = self.mapsPath.child('mapupdater')
        self.fetchURL = URLPath.fromString(fetchURL)
        self.deleteIfNotPresent = deleteIfNotPresent
        self.tfLevelSounds = tfLevelSounds
        self.semaphore = DeferredSemaphore(1)
        self.downloadSemaphore = DeferredSemaphore(4)
        for fp in self.downloadTempPath.globChildren('*.bsp.bz2'):
            fp.remove()


    def checkMaps(self, *a, **kw):
        """
        Wrap self._checkMaps to prevent running multiple checks at once.
        """
        return self.semaphore.run(self._checkMaps, *a, **kw)


    def _checkMaps(self, forceDownloadMaps=None):
        def _cb(remoteMaps):
            if forceDownloadMaps:
                remoteMaps = list(set(remoteMaps + forceDownloadMaps))
            remoteMapsLower = [f.lower() for f in remoteMaps]
            ourMaps = filter(lambda p: not p.isdir() and p.path.endswith('.bsp'),
                             self.mapsPath.children())
            ourMapFilenames = [p.basename().lower() + '.bz2' for p in ourMaps]

            missing = []
            for f in remoteMaps:
                if f.lower() not in ourMapFilenames:
                    missing.append(f)

            delete = []
            for p in ourMaps:
                filename = p.basename().lower() + '.bz2'
                if filename not in remoteMapsLower:
                    delete.append(p)

            if self.deleteIfNotPresent and delete:
                for fp in delete:
                    fp.remove()

                print 'Deleted {} map(s) not present at remote server:'.format(len(delete))
                print ', '.join([x.basename() for x in delete])

            if missing:
                print 'Fetching {} map(s)'.format(len(missing))

                def _allFinished(ignored):
                    self.mapsPath.child('tempus_map_updater_run_once').touch()
                    if self.tfLevelSounds:
                        self.addLevelSounds(ourMaps)
                    print 'Now up-to-date.'

                ds = []
                for filename in missing:
                    ds.append(self.fetchMap(filename))
                return gatherResults(ds).addCallback(_allFinished)
            elif self.tfLevelSounds:
                self.addLevelSounds(ourMaps)


        return self.getMapList().addCallback(_cb)


    def fetchMap(self, *a, **kw):
        return self.downloadSemaphore.run(self._fetchMap, *a, **kw)


    def _fetchMap(self, filename):
        downloadTempPath = self.downloadTempPath
        if not downloadTempPath.exists():
            downloadTempPath.makedirs()

        def _cb(response, fn):
            tp = downloadTempPath.child(fn)
            fd = tp.open('wb')

            def _extracted(ignored):
                extractedPath = tp.sibling(tp.basename().replace('.bz2', ''))
                extractedPath.moveTo(
                    self.mapsPath.child(tp.basename().replace('.bz2', '')))
                try:
                    tp.remove()
                # File already gone
                except OSError:
                    pass
                print 'Finished downloading {}'.format(fn)

            def _finished(ignored):
                fd.close()
                d = getProcessOutputAndValue(
                    'aunpack', (tp.path, '-X', downloadTempPath.path))
                d.addErrback(log.err)
                d.addCallback(_extracted)
                return d

            def _eb(failure):
                print 'Error downloading {}:'.format(fn)
                print failure.getTraceback()
                fd.close()
                try:
                    tp.remove()
                # File already gone
                except OSError:
                    pass

            d = treq.collect(response, fd.write)
            d.addCallback(_finished)
            d.addErrback(_eb)
            return d

        d = treq.get(str(self.fetchURL.child(filename)))
        return d.addCallback(_cb, filename)


    def getMapList(self, forceDownloadMaps=None):
        raise NotImplementedError('Subclasses must override this method.')


    def addLevelSounds(self, mapPaths):
        content = FilePath(mapupdater.__file__).sibling(
            'tf_level_sounds.txt').getContent()
        added = []
        for p in mapPaths:
            mapName = p.basename()[:-4]
            p2 = p.sibling('{}_level_sounds.txt'.format(mapName))
            if p2.exists() and p2.getContent() == content:
                continue
            added.append(mapName)
            p2.setContent(content)
        if added:
            print 'Added level sounds for:'
            print ', '.join(added)
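checkMaps above is a small reusable pattern: a public method funnels every call through a one-token semaphore so overlapping invocations queue instead of interleaving. A generic sketch, with _refresh standing in for _checkMaps:

from twisted.internet.defer import DeferredSemaphore

class SerialisedRefresher(object):
    def __init__(self):
        self._semaphore = DeferredSemaphore(1)

    def refresh(self, *a, **kw):
        # callers may fire this at any time; the work below never overlaps
        return self._semaphore.run(self._refresh, *a, **kw)

    def _refresh(self, force=False):
        # stand-in for the real (possibly Deferred-returning) check
        return "refreshed (force=%s)" % (force,)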
Example #39
0
    def run_deploy(self, hosts, components, commands):
        try:
            yield self.event_bus.trigger("deploy.precheck")
        except AbortDeploy as e:
            yield self.abort(str(e))
            return

        try:
            self.transport.initialize()
        except TransportError as e:
            raise DeployError("could not initialize transport: %s" % e)

        def signal_handler(sig, _):
            reason = SIGNAL_MESSAGES[sig]
            reactor.callFromThread(self.abort, reason)
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGHUP, signal_handler)

        yield self.event_bus.trigger("deploy.begin")

        try:
            if components:
                yield self.event_bus.trigger("build.begin")

                try:
                    # synchronize the code host with upstreams
                    # this will return a build token and build host for each
                    # component
                    sync_command = SynchronizeCommand(components)
                    code_host = Host.from_hostname(self.code_host)
                    (sync_result,) = yield self.process_host(
                        code_host, [sync_command])
                    yield self.event_bus.trigger("build.sync",
                                                 sync_info=sync_result.result)

                    # this is where we build up the final deploy command
                    # resulting from all our syncing and building
                    deploy_command = DeployCommand()

                    # collect the results of the sync per-buildhost
                    by_buildhost = collections.defaultdict(list)
                    for component, sync_info in sync_result.result.iteritems():
                        component_ref = component + "@" + sync_info["token"]

                        build_host = sync_info.get("buildhost", None)
                        if build_host:
                            by_buildhost[build_host].append(component_ref)
                        else:
                            # no build host means we just pass the sync token
                            # straight through as a deploy token
                            deploy_command.add_argument(component_ref)

                    # ask each build host to build our components and return
                    # a deploy token
                    for build_hostname, build_refs in by_buildhost.iteritems():
                        build_command = BuildCommand(build_refs)
                        build_host = Host.from_hostname(build_hostname)
                        (build_result,) = yield self.process_host(
                            build_host, [build_command])

                        for ref in build_refs:
                            component, at, sync_token = ref.partition("@")
                            assert at == "@"
                            try:
                                deploy_ref = (component + "@" +
                                              build_result.result[ref])
                            except KeyError:
                                raise ComponentNotBuiltError(component)
                            deploy_command.add_argument(deploy_ref)

                    # Wait until components report ready IF:
                    # * we are actually restarting a component
                    # * we aren't going --dangerously-fast
                    restarting_component = any(
                        [isinstance(val, RestartCommand) for val in commands])
                    if restarting_component and not self.dangerously_fast:
                        commands.append(WaitUntilComponentsReadyCommand())
                except Exception:
                    traceback.print_exc()
                    raise DeployError("unexpected error in sync/build")
                else:
                    # inject our built-up deploy command at the beginning of
                    # the command list for each host
                    commands = [deploy_command] + commands

                yield self.event_bus.trigger("build.end")

            parallelism_limiter = DeferredSemaphore(tokens=self.parallel)
            host_deploys = []
            first_host = True
            for host in hosts:
                if not first_host:
                    for i in xrange(self.sleeptime, 0, -1):
                        yield self.event_bus.trigger(
                            "deploy.sleep", host=host, count=i)
                        yield sleep(1)
                else:
                    first_host = False

                deferred = parallelism_limiter.run(
                    self.process_host, host, commands,
                    timeout=self.execution_timeout)
                deferred.addErrback(self.on_host_error)
                host_deploys.append(deferred)

                yield self.event_bus.trigger(
                    "deploy.enqueue", host=host, deferred=deferred)
            yield DeferredList(host_deploys)
        except (DeployError, AbortDeploy, TransportError) as e:
            yield self.abort(str(e))
        else:
            yield self.event_bus.trigger("deploy.end")
class LookupImportController:
    def __init__(self, dbSessionCreator):
        self._dbSessionCreator = dbSessionCreator

        self._semaphore = DeferredSemaphore(1)

    def shutdown(self):
        pass

    @inlineCallbacks
    def importLookups(self, modelSetKey: str, coordSetKey: Optional[str],
                      lookupTupleType: str, lookupTuples: List,
                      deleteOthers: bool, updateExisting: bool):

        yield self._semaphore.run(self._importInThread, modelSetKey, coordSetKey,
                                   lookupTupleType, lookupTuples,
                                   deleteOthers, updateExisting)

        logger.debug("TODO, Notify the observable")

        return True

    @deferToThreadWrapWithLogger(logger)
    def _importInThread(self, modelSetKey: str, coordSetKey: str, tupleType: str,
                        tuples,
                        deleteOthers: bool, updateExisting: bool):
        LookupType = ORM_TUPLE_MAP[tupleType]

        if LookupType == DispLineStyle:
            self._convertLineStyles(tuples)

        itemsByImportHash = {}

        addCount = 0
        updateCount = 0
        deleteCount = 0

        ormSession = self._dbSessionCreator()
        try:

            modelSet = getOrCreateModelSet(ormSession, modelSetKey)
            coordSet = None

            if coordSetKey:
                coordSet = getOrCreateCoordSet(
                    ormSession, modelSetKey, coordSetKey)

                all = (ormSession.query(LookupType)
                       .filter(LookupType.coordSetId == coordSet.id)
                       .all())

            else:
                all = (ormSession.query(LookupType)
                       .filter(LookupType.modelSetId == modelSet.id)
                       .all())

            def updateFks(lookup):
                if hasattr(lookup, "coordSetId"):
                    assert coordSet
                    lookup.coordSetId = coordSet.id
                else:
                    lookup.modelSetId = modelSet.id

            for lookup in all:
                # Initialise
                itemsByImportHash[lookup.importHash] = lookup

            for lookup in tuples:
                importHash = str(lookup.importHash)

                # If it's an existing item, update it
                if importHash in itemsByImportHash:
                    existing = itemsByImportHash.pop(importHash)

                    if updateExisting:
                        for fieldName in lookup.tupleFieldNames():
                            setattr(existing, fieldName,
                                    getattr(lookup, fieldName))

                        updateFks(existing)
                        updateCount += 1

                # If it's a new item, create it
                else:
                    newTuple = LookupType()

                    for fieldName in lookup.tupleFieldNames():
                        if fieldName in ("id", "coordSetId", "modelSetId"):
                            continue
                        setattr(newTuple, fieldName,
                                getattr(lookup, fieldName))

                    updateFks(newTuple)
                    ormSession.add(newTuple)
                    addCount += 1

            if deleteOthers:
                for lookup in list(itemsByImportHash.values()):
                    ormSession.delete(lookup)
                    deleteCount += 1

            try:
                ormSession.commit()

            except Exception as e:
                ormSession.rollback()
                logger.exception(e)
                raise

            logger.debug("Updates for %s received, Added %s, Updated %s, Deleted %s",
                        tupleType, addCount, updateCount, deleteCount)

        except Exception as e:
            logger.exception(e)
            raise

        finally:
            ormSession.close()

    @deferToThreadWrapWithLogger(logger)
    def getLookups(self, modelSetKey: str, coordSetKey: Optional[str],
                   tupleType: str):

        LookupType = ORM_TUPLE_MAP[tupleType]

        ormSession = self._dbSessionCreator()
        try:

            modelSet = getOrCreateModelSet(ormSession, modelSetKey)

            if coordSetKey:
                coordSet = getOrCreateCoordSet(
                    ormSession, modelSetKey, coordSetKey)

                all = (ormSession.query(LookupType)
                       .filter(LookupType.coordSetId == coordSet.id)
                       .all())

            else:
                all = (ormSession.query(LookupType)
                       .filter(LookupType.modelSetId == modelSet.id)
                       .all())

            importTuples = []
            ImportTuple = TUPLE_TYPES_BY_NAME[tupleType]

            for ormTuple in all:
                newTuple = ImportTuple()

                for fieldName in newTuple.tupleFieldNames():
                    if fieldName == 'modelSetKey':
                        newTuple.modelSetKey = modelSetKey

                    elif fieldName == 'coordSetKey':
                        newTuple.coordSetKey = coordSetKey

                    else:
                        setattr(newTuple, fieldName,
                                getattr(ormTuple, fieldName))

                importTuples.append(newTuple)

            return importTuples

        except Exception as e:
            logger.exception(e)

        finally:
            ormSession.close()

    def _convertLineStyles(self, importLineStyles: List[ImportDispTextStyleTuple]):
        for style in importLineStyles:
            dp = style.dashPattern

            if dp is None:
                continue

            if not isinstance(dp, list):
                dp = [dp]

            style.dashPattern = json.dumps(dp)
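importLookups above serialises thread-deferred ORM work behind a one-token semaphore; the same shape using plain deferToThread (in place of the project's deferToThreadWrapWithLogger decorator), with do_import standing in for the real database routine:

from twisted.internet.defer import (DeferredSemaphore, inlineCallbacks,
                                    returnValue)
from twisted.internet.threads import deferToThread

import_semaphore = DeferredSemaphore(1)

def do_import(model_set_key, lookup_tuples):
    # blocking ORM/database work runs on a thread-pool thread
    return len(lookup_tuples)

@inlineCallbacks
def import_serialised(model_set_key, lookup_tuples):
    # only one import touches the database at a time
    count = yield import_semaphore.run(deferToThread, do_import,
                                       model_set_key, lookup_tuples)
    returnValue(count)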
Example #42
0
class ExperimentServiceFactory(Factory):
    protocol = ExperimentServiceProto

    def __init__(self, expected_subscribers, experiment_start_delay):
        self._logger = logging.getLogger(self.__class__.__name__)

        self.expected_subscribers = expected_subscribers
        self.experiment_start_delay = experiment_start_delay
        self.parsing_semaphore = DeferredSemaphore(500)
        self.connection_counter = -1
        self.connections_made = []
        self.connections_ready = []
        self.vars_received = []

        self._made_looping_call = None
        self._subscriber_looping_call = None
        self._subscriber_received_looping_call = None
        self._timeout_delayed_call = None

    def buildProtocol(self, addr):
        self.connection_counter += 1
        return ExperimentServiceProto(self, self.connection_counter + 1)

    def setConnectionMade(self, proto):
        if not self._timeout_delayed_call:
            self._timeout_delayed_call = reactor.callLater(
                EXPERIMENT_SYNC_TIMEOUT, self.onExperimentSetupTimeout)
        else:
            self._timeout_delayed_call.reset(EXPERIMENT_SYNC_TIMEOUT)

        self.connections_made.append(proto)
        if len(self.connections_made) >= self.expected_subscribers:
            self._logger.info("All subscribers connected!")
            if self._made_looping_call and self._made_looping_call.running:
                self._made_looping_call.stop()

            self.pushIdToSubscribers()
        else:
            if not self._made_looping_call:
                self._made_looping_call = task.LoopingCall(
                    self._print_subscribers_made)
                self._made_looping_call.start(1.0)

    def _print_subscribers_made(self):
        if len(self.connections_made) < self.expected_subscribers:
            self._logger.info("%d of %d expected subscribers connected.",
                              len(self.connections_made),
                              self.expected_subscribers)

    def pushIdToSubscribers(self):
        for proto in self.connections_made:
            self.parsing_semaphore.run(proto.sendAndWaitForReady)

    def setConnectionReady(self, proto):
        self._timeout_delayed_call.reset(EXPERIMENT_SYNC_TIMEOUT)
        self.connections_ready.append(proto)

        if len(self.connections_ready) >= self.expected_subscribers:
            self._logger.info("All subscribers are ready, pushing data!")
            if self._subscriber_looping_call and self._subscriber_looping_call.running:
                self._subscriber_looping_call.stop()

            self.pushInfoToSubscribers()
        else:
            if not self._subscriber_looping_call:
                self._subscriber_looping_call = task.LoopingCall(
                    self._print_subscribers_ready)
                self._subscriber_looping_call.start(1.0)

    def _print_subscribers_ready(self):
        self._logger.info("%d of %d expected subscribers ready.",
                          len(self.connections_ready),
                          self.expected_subscribers)

    def pushInfoToSubscribers(self):
        # Generate the json doc
        vars = {}
        for subscriber in self.connections_ready:
            subscriber_vars = subscriber.vars.copy()
            subscriber_vars['port'] = subscriber.id + 12000
            subscriber_vars['host'] = subscriber.transport.getPeer().host
            vars[subscriber.id] = subscriber_vars

        json_vars = json.dumps(vars)
        del vars
        self._logger.info("Pushing a %d bytes long json doc.", len(json_vars))

        # Send the json doc to the subscribers
        task.cooperate(self._sendLineToAllGenerator(json_vars))

    def _sendLineToAllGenerator(self, line):
        for subscriber in self.connections_ready:
            yield subscriber.sendLine(line)

    def setConnectionReceived(self, proto):
        self._timeout_delayed_call.reset(EXPERIMENT_SYNC_TIMEOUT)
        self.vars_received.append(proto)

        if len(self.vars_received) >= self.expected_subscribers:
            self._logger.info(
                "Data sent to all subscribers, giving the go signal in %f secs.",
                self.experiment_start_delay)
            reactor.callLater(0, self.startExperiment)
            self._timeout_delayed_call.cancel()
        else:
            if not self._subscriber_received_looping_call:
                self._subscriber_received_looping_call = task.LoopingCall(
                    self._print_subscribers_received)
                self._subscriber_received_looping_call.start(1.0)

    def _print_subscribers_received(self):
        self._logger.info("%d of %d expected subscribers received the data.",
                          len(self.vars_received), self.expected_subscribers)

    def startExperiment(self):
        # Give the go signal and disconnect
        self._logger.info("Starting the experiment!")

        if self._subscriber_received_looping_call and self._subscriber_received_looping_call.running:
            self._subscriber_received_looping_call.stop()

        start_time = time() + self.experiment_start_delay
        for subscriber in self.connections_ready:
            # Sync the experiment start time among instances
            subscriber.sendLine("go:%f" %
                                (start_time + subscriber.vars['time_offset']))

        d = task.deferLater(
            reactor, 5,
            lambda: self._logger.info("Done, disconnecting all clients."))
        d.addCallback(lambda _: self.disconnectAll())
        d.addCallbacks(self.onExperimentStarted, self.onExperimentStartError)

    def disconnectAll(self):
        reactor.runUntilCurrent()

        def _disconnectAll():
            for subscriber in self.connections_ready:
                yield subscriber.transport.loseConnection()

        task.cooperate(_disconnectAll())

    def unregisterConnection(self, proto):
        if proto in self.connections_ready:
            self.connections_ready.remove(proto)
        if proto in self.vars_received:
            self.vars_received.remove(proto)
        if proto.id in self.vars_received:
            self.vars_received.remove(proto.id)

        self._logger.debug("Connection cleanly unregistered.")

    def onExperimentStarted(self, _):
        self._logger.info("Experiment started, shutting down sync server.")
        reactor.callLater(0, stopReactor)

    def onExperimentStartError(self, failure):
        self._logger.error("Failed to start experiment")
        reactor.exitCode = 1
        reactor.callLater(0, stopReactor)
        return failure

    def onExperimentSetupTimeout(self):
        self._logger.error("Waiting for all peers timed out, exiting.")
        reactor.exitCode = 1
        reactor.callLater(0, stopReactor)

    def lineLengthExceeded(self, line):
        self._logger.error("Line length exceeded, %d bytes remain.", len(line))