Example #1
def application(environ, start_response):
    """
    application handler
    """
    gev = None
    path = environ.get("PATH_INFO", "").lstrip("/")

    for regex, callback in ROUTES:
        match = re.search(regex, path)
        if match is not None:
            environ["web.url_args"] = match.groups()
            qs = parse_qs(environ["QUERY_STRING"])
            environ["web.qs"] = {k: [escape(v) for v in qs[k]] for k in qs}
            gev = gevent.spawn(callback, environ)
            gevent.joinall([gev])
            break

    if gev is None:
        start_response("404 NOT FOUND", HEADERS)
        yield ("%s\n" % json.dumps({"error": "Not Found"})).encode("utf-8")
        return

    if gev.value[0]:
        start_response(STATUS, HEADERS)
        yield ("%s\n" % json.dumps(gev.value[1])).encode("utf-8")
        return
    else:
        start_response("500 INTERNAL SERVER ERROR", HEADERS)
        yield ("%s\n" % json.dumps({"error": gev.value[1]})).encode("utf-8")
        return
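The handler above relies on module-level scaffolding that the snippet omits. A minimal sketch of it, assuming Python 3 and the handlers-return-(ok, payload) convention the handler implies; the hello route is illustrative:

import re
import json
import gevent
from html import escape
from urllib.parse import parse_qs

STATUS = "200 OK"
HEADERS = [("Content-Type", "application/json")]

def hello(environ):
    # Handlers return (ok, payload); the application serializes payload as JSON
    return True, {"args": environ["web.url_args"]}

ROUTES = ((r"^hello/(\w+)$", hello),)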
Example #2
def main():
	gevent.joinall([
		gevent.spawn(setter),
		gevent.spawn(waiter),
		gevent.spawn(waiter),
		gevent.spawn(waiter)
	])
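setter and waiter are not shown in this snippet. A minimal sketch of them, assuming the shared gevent.event.Event from the classic gevent tutorial:

import gevent
from gevent.event import Event

evt = Event()

def setter():
    # Wake every waiter after a short pause
    gevent.sleep(3)
    evt.set()

def waiter():
    # Block cooperatively until the event is set
    evt.wait()
    print("done waiting")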
Example #3
def run_schema_transformer(args):
    global _vnc_lib

    def connection_state_update(status, message=None):
        ConnectionState.update(
            conn_type=ConnType.APISERVER, name='ApiServer',
            status=status, message=message or '',
            server_addrs=['%s:%s' % (args.api_server_ip,
                                     args.api_server_port)])
    # end connection_state_update

    # Retry till API server is up
    connected = False
    connection_state_update(ConnectionStatus.INIT)
    while not connected:
        try:
            _vnc_lib = VncApi(
                args.admin_user, args.admin_password, args.admin_tenant_name,
                args.api_server_ip, args.api_server_port, api_server_use_ssl=args.api_server_use_ssl)
            connected = True
            connection_state_update(ConnectionStatus.UP)
        except requests.exceptions.ConnectionError as e:
            # Update connection info
            connection_state_update(ConnectionStatus.DOWN, str(e))
            time.sleep(3)
        except (RuntimeError, ResourceExhaustionError):
            # auth failure or haproxy throws 503
            time.sleep(3)

    global transformer
    transformer = SchemaTransformer(args)
    gevent.joinall(transformer._vnc_kombu.greenlets())
Example #4
    def needFile(self, inner_path, *args, **kwargs):
        if inner_path.endswith("|all"):
            @util.Pooled(20)
            def pooledNeedBigfile(*args, **kwargs):
                return self.needFile(*args, **kwargs)

            inner_path = inner_path.replace("|all", "")
            file_info = self.needFileInfo(inner_path)
            file_size = file_info["size"]
            piece_size = file_info["piece_size"]

            piece_num = int(math.ceil(float(file_size) / piece_size))

            file_threads = []

            piecefield = self.storage.piecefields.get(file_info["sha512"])

            for piece_i in range(piece_num):
                piece_from = piece_i * piece_size
                piece_to = min(file_size, piece_from + piece_size)
                if not piecefield or not piecefield[piece_i]:
                    res = pooledNeedBigfile("%s|%s-%s" % (inner_path, piece_from, piece_to), blocking=False)
                    if res is not True and res is not False:
                        file_threads.append(res)
            gevent.joinall(file_threads)
        else:
            return super(SitePlugin, self).needFile(inner_path, *args, **kwargs)
Example #5
def run_svc_monitor(args=None):
    monitor = SvcMonitor(args)

    monitor._zookeeper_client = _zookeeper_client

    # Retry till API server is up
    connected = False
    monitor.logger.api_conn_status_update(ConnectionStatus.INIT)

    while not connected:
        try:
            vnc_api = VncApi(
                args.admin_user, args.admin_password, args.admin_tenant_name,
                args.api_server_ip, args.api_server_port,
                api_server_use_ssl=args.api_server_use_ssl)
            connected = True
            monitor.logger.api_conn_status_update(ConnectionStatus.UP)
        except requests.exceptions.ConnectionError as e:
            monitor.logger.api_conn_status_update(
                ConnectionStatus.DOWN, str(e))
            time.sleep(3)
        except ResourceExhaustionError:  # haproxy throws 503
            time.sleep(3)

    monitor.post_init(vnc_api, args)
    timer_task = gevent.spawn(launch_timer, monitor)
    gevent.joinall([timer_task])
Example #6
 def check_domain(self, domain, timeout):
     """Check specified domain."""
     resolver = dns.resolver.Resolver()
     try:
         answers = resolver.query(domain.name, "MX")
     except dns.resolver.NoAnswer:
         return
     ip_list = []
     for answer in answers:
         address = None
         try:
             ipaddress.ip_address(str(answer.exchange))
         except ValueError:
             try:
                 address = socket.gethostbyname(str(answer.exchange))
             except socket.gaierror:
                 pass
         else:
             address = str(answer.exchange)
         finally:
             if address is not None:
                 ip_list.append(address)
     if len(ip_list) == 0:
         return
     jobs = [
         gevent.spawn(self.query, ip_list, provider)
         for provider in self.providers]
     gevent.joinall(jobs, timeout)
     for job in jobs:
         if not job.successful():
             continue
         provider, results = job.value
         self.store_domain_result(domain, provider, results)
Example #7
def map(requests, stream=False, size=5, exception_handler=None):
    """Concurrently converts a list of Requests to Responses.

    :param requests: a collection of Request objects.
    :param stream: If True, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time. If None, no throttling occurs.
    :param exception_handler: Callback function, called when an exception occurs. Params: Request, Exception
    """

    requests = list(requests)

    pool = Pool(size) if size else None
    jobs = [send(r, pool, stream=stream) for r in requests]
    gevent.joinall(jobs)

    ret = []
    # Up to this point the code is identical to the grequests library;
    # the error handling below differs.
    for request in requests:
        if request.response:  # got a response (e.g. a 200)
            # Sometimes a 200 comes back with a bad page that has no real
            # content but raises no error; such pages are detected here by
            # their exact length of 9420 characters.
            if len(request.response.text) != 9420:
                ret.append(request.response)
            else:
                ret.append("fail:bad Page")  # a string logging the error
        else:
            ret.append(request.exception)
            del request.exception

    return ret
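map() depends on a send() helper in the style of grequests. A minimal sketch, assuming the request objects expose a send(stream=...) method and that Pool is gevent.pool.Pool:

import gevent

def send(r, pool=None, stream=False):
    # Spawn the request on the shared pool when one is given, otherwise on a
    # plain greenlet; either way, return the greenlet so joinall() can wait on it
    if pool is not None:
        return pool.spawn(r.send, stream=stream)
    return gevent.spawn(r.send, stream=stream)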
Example #8
    def start_service(self):
        """
        Start the service.
        :return:
        """
        args = ArgumentParser().args
        try:
            Jobs().add_interval_job(UPDATE_INTERVAL, self.update)

            if not self.is_sm:
                port = {"tcp": args.tcp_port}
                port.update({"https": args.http_port} if args.is_https else {"http": args.http_port})
                self.adv = ServiceAdvertiser(self.service_type, port, self.get_jid(), self.service_version)
                self.adv.advertise()

            checker_ls = []
            self.add_port_checker(args, checker_ls)
            PortChecker(checker_ls).start()

            self.services(args, self.thread_ls)
            self.__sys_services(args, self.thread_ls)

            logger.warn("start services for %s, args:%s" % (self.service_type, args))
            gevent.joinall([thread.start() for thread in self.thread_ls])
        except:
            logger.error(traceback.format_exc())
            sys.exit(0)
Example #9
def exploration(exp):
    global started
    if not started:
        return False

    evt.wait()
    do_alignment(robot.alignment())

    global sensors
    cur = exp.getRealTimeMap(sensors, robot.explored_map)
    if cur[1]:
        done_exploration()
        return False

    if cur[0]:
#         if robot.try_left:
#             evt.wait()
#             do_alignment(robot.alignment())
        send_cmd(cur[0])
        robot.step(cur[0])
        # Tues: add evt.wait() to see where it can slow down the simulator
#         evt.wait()
    print("[Tornado] exploration > %s" %(robot.current))
    msg1 = robot.msg_for_android()
    btq.append(msg1)
    gevent.joinall([
        gevent.spawn(delay_call, exploration, exp)
    ])
Example #10
def main():

    if len(sys.argv) > 1:
        code = sys.argv[1]
        print 'stock code %s' % (code)
        global TID
        TID = code

    with open('robot.yaml') as f:
        cfg = yaml.load(f)
    rip = cfg['robot_listen']['rip']
    rport = cfg['robot_listen']['rport']
    addr = (rip, rport)
    print addr
    tester = Tester(TID, addr)

    pq_ip = cfg['pre_quo']['pqip']
    pq_port = cfg['pre_quo']['port']
    print pq_ip, pq_port
    pre_quo = pre_quotation.PreQuotation(tester, (pq_ip, pq_port))
    tester.pre_quo = pre_quo

    q_ip = cfg['quo_server']['qip']
    q_port = cfg['quo_server']['port']
    print q_ip, q_port
    quota = quotation.Quotation(tester, (q_ip, q_port))
    tester.quo = quota

    jobs = []
    jobs.append(gevent.spawn(pre_quo.recv_data))
    jobs.append(gevent.spawn(quota.recv_data))
    jobs.append(gevent.spawn(tester.recv_data))
    gevent.joinall(jobs)

    return 0
Example #11
def client(cid):
    s = socket.create_connection(('127.0.0.1', 7890))
    w = s.makefile('w')
    r = s.makefile('r')

    def send():
        times = 0
        while times < 100:
            times += 1
            gevent.sleep(1)
            data = str(random.randint(10, 10000))
            w.write('%s, from client %d\n' % (data, cid))
            w.flush()
            print 'client', cid, 'send:', data

    def recv():
        while True:
            line = r.readline()
            print 'client', cid, 'recive:', line,
            if not line:
                break

    send_job = gevent.spawn_later(1, send)
    recv_job = gevent.spawn(recv)

    def clear(*args):
        gevent.killall([send_job, recv_job])
        s.close()

    send_job.link(clear)
    recv_job.link(clear)
    gevent.joinall([send_job, recv_job])
    print 'client', cid, 'finish'
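The client expects a line-echo server on 127.0.0.1:7890. A minimal sketch of one, using gevent's StreamServer:

from gevent.server import StreamServer

def handle(sock, address):
    # Echo every received line straight back to the client
    f = sock.makefile(mode='rw')
    for line in f:
        f.write(line)
        f.flush()

StreamServer(('127.0.0.1', 7890), handle).serve_forever()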
Example #12
def main():
    gevent.monkey.patch_all()
    signal.signal(signal.SIGTERM, lambda signum, frame: teardown())
    signal.signal(signal.SIGINT, lambda signum, frame: teardown())
    atexit.register(teardown)
    setup()
    gevent.joinall([gevent.spawn(process.communicate) for process in processes])
Example #13
    def _get_media(self, api, mid):
        media = spawn(api.media, mid)
        likes = spawn(api.media_likes, media_id=mid)
        gevent.joinall([media, likes])
        media, likes = media.get(), likes.get()
        errors = get_errors(media, likes)
        if errors:
            if any([e.error_type == 'APINotAllowedError' for e in errors]):
                return render('profile-noauth.html', ukey=request.ukey)
            app.logger.error([str(e) for e in errors])
            return notfound(u'服务器暂时出问题了')  # "the server is having temporary problems"

        ukey = media.user.id
        isfollow = False
        if request.ukey:
            try:
                isfollow = is_follow(ukey, api)
            except InstagramAPIError:
                return notfound(u'服务器暂时出问题了')  # "the server is having temporary problems"

        isstar = False
        for i in likes:
            if request.ukey and request.ukey == i.id:
                isstar = True

        isme = False
        if request.ukey and ukey == request.ukey:
            isme = True
        return dict(media=media, isme=isme, isfollow=isfollow,
                    likes=likes[:5], isstar=isstar)
Example #14
def fetch_remote(base_config, name="origin"):
    jobs = []
    seen = set()
    fetched = set()
    for path, config in base_config.span_configs():
        if path in seen:
            continue
        seen.add(path)
        git = get_git(path)
        print("  [{cwd}] fetching {name}".format(cwd=path, name=name))
        jobs.append(gevent.spawn(git.fetch, name))
        for branch in (b for b in config.branches if ":" in b):
            remote, branch = branch.split(":", 1)
            if remote not in git.remote().split():
                url = remote_url(git, remote)
                print("  [{path}] adding remote: {remote} -> {url}"
                      .format(**locals()))
                git.remote("add", remote, url)
            print("  [{path}] fetching {remote} {branch}".format(**locals()))
            jobs.append(gevent.spawn(git.fetch, remote, branch))
            fetched.add(remote)

        for pr in config.pull_requests:
            print("  [{path}] fetching pull request {pr}".format(**locals()))
            pr = 'pull/{pr}/head:enterprise-{pr}'.format(pr=pr)
            jobs.append(gevent.spawn(git.fetch, 'origin', pr))

    gevent.joinall(jobs)
    print("fetched {}".format(", ".join(['origin'] + sorted(fetched))))
Example #15
def main(args):

    S = Scraper(args)

    input_check(args)

    if args.targets:
        targets = get_targets_from_args(args.targets)
    elif args.shodansearch:
        targets = shodan_search(args.shodansearch, args.apikey, int(args.numpages), args.ipfile)
    elif args.ipfile:
        targets = get_ips_from_file(args.ipfile)

    if not targets:
        exit('[!] No valid targets')

    # Mechanize doesn't respect timeouts when it comes to reading/waiting for SSL info so this is necessary
    setdefaulttimeout(int(args.timeout))

    con = int(args.concurrent)

    # By default run 1000 concurrently at a time
    target_groups = [targets[x:x+con] for x in xrange(0, len(targets), con)]
    for chunk_targets in target_groups:
        jobs = [gevent.spawn(S.run, target) for target in chunk_targets]
        gevent.joinall(jobs)
Example #16
def createThread(nums):
    strnums = [str(x) for x in nums]
    threads = [gevent.spawn(getInfo, i) for i in strnums]
    print "start! -- %d" % len(threads)
    gevent.joinall(threads)
    # joinall blocks until every greenlet has finished
    print "It's done! "
Example #17
def main():
  """ Main Avogadro entrypoint.  Runs all metric collection agents. """

  parser = OptionParser(version=__version__.__version__)

  AvogadroAgent.addParserOptions(parser)

  try:
    (options, args) = parser.parse_args(sys.argv)

    gevent.joinall([AvogadroCPUTimesAgent.spawn(options=options),
                    AvogadroMemoryAgent.spawn(options=options),
                    AvogadroDiskReadBytesAgent.spawn(options=options),
                    AvogadroDiskWriteBytesAgent.spawn(options=options),
                    AvogadroDiskReadTimeAgent.spawn(options=options),
                    AvogadroDiskWriteTimeAgent.spawn(options=options),
                    AvogadroNetworkBytesSentAgent.spawn(options=options),
                    AvogadroNetworkBytesReceivedAgent.spawn(options=options),
                    AvogadroKeyCountAgent.spawn(options=options),
                    AvogadroKeyDownDownAgent.spawn(options=options),
                    AvogadroKeyUpDownAgent.spawn(options=options),
                    AvogadroKeyHoldAgent.spawn(options=options)])

  except IndexError:
    parser.print_help(sys.stderr)
    sys.exit()
Example #18
def main():
    global qtimes, qerr
    reps = int(sys.argv[1])
    
    for j in range(reps):
        jobs = [gevent.spawn(get_url, url) for url in urls]
        #print("Size of jobs is {n}".format(n=len(jobs)))
        gevent.joinall(jobs, timeout=30)
    
    if not qerr.empty():
        qerr.put(StopIteration)
        for err in qerr:
            print(err)
           
    print("jobs size {s}".format(s=len(jobs)))
    print("qstart size {n}".format(n=qstart.qsize()))
    print("qtimes size {n}".format(n=qtimes.qsize()))
    qtimes.put(StopIteration)
    times = []
    for item in qtimes:
        times.append(item)
        
    print("Min {t}".format(t=min(times)))
    print("Max {t}".format(t=max(times)))
    print("Mean {t}".format(t=stats.mean(times)))
    print("StdDev {t}".format(t=stats.stdev(times)))
    def fetch_info(self):
        """
        Fetch info for each entity in collection.
        """

        jobs = [gevent.spawn(entity.fetch_info) for entity in self._entities]
        gevent.joinall(jobs, timeout=5)
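Note that timeout=5 bounds the total wait, not each job: joinall() returns after five seconds even if some fetches are still running, and those greenlets keep running in the background rather than being killed.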
Example #20
 def do_net(self, dst, block, method, params):
     node = self.node
     uid = node.uid
     res = {}
     inc = False
     try:
         if uid in dst:
             inc = True
     except:
         pass
     if dst is None or inc:
         try:
             h = getattr(self, "net_" + method)
             res[uid] = h(*params)
         except GreenletExit:
             return
         except:
             pass
     if dst is None or not inc:
         if block:
             job = node.call(dst, self.uid, method, *params)
         else:
             node.notify(dst, self.uid, method, *params)
             job = None
     if block:
         if job is not None:
             joinall(job)
             for v in job.itervalues():
                 res.update(v.get())
         return res
Example #21
def run_all():
    gevent.signal(signal.SIGQUIT, gevent.kill)
    run_emitters()
    run_listeners()
    eventloop = gevent.spawn(run_eventloop)
    __jobs.append(eventloop)
    gevent.joinall(__jobs, raise_error=True)
Example #22
 def start(self, oneshot=False):
     redis_kwargs = {}
     for x in self.conf:
         if x.startswith('redis_'):
             redis_kwargs[x[6:]] = self.conf[x]
     self.redis = redis.StrictRedis(**redis_kwargs)
     try:
         self.init_handlers()
         # start the observer, or skip it in oneshot mode
         if not oneshot:
             self.start_observer()
         else:
             self._oneshot = True
         # set our signal handler and save reference to default
         self.default_signal_handler = signal.signal(
             signal.SIGINT, self.loop_interrupt_handler)
         # now block until finished
         loops = [
             gevent.spawn(self.process_loop),
             gevent.spawn(self.send_loop),
         ]
         gevent.joinall(loops)
         # save the state,
         signal.signal(signal.SIGINT, self.save_interrupt_handler)
         self.save_state()
     except redis.ConnectionError:
         raise click.ClickException(
             'connection to redis with:\n\n {0}\n\n failed!'.format(
                 pprint.pformat(self.redis.connection_pool.connection_kwargs)))
Example #23
    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')

        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGQUIT, self.die)

        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_mgmt()
        self.init_nat()
        self.init_ec2()
        self.logger.info('Started')

        # WebSockets server
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
Example #24
    def _stop_pending_timers(self):
        """
        Safely stops all pending and active timers.

        For all timers still waiting to run, calls kill on them. For active timers, let
        them exit naturally and prevent the reschedule by setting the _no_reschedule flag.
        """
        # prevent reschedules
        self._no_reschedule = True

        gls = []
        for timer_id in self.schedule_entries:
            spawns = self.__get_spawns(timer_id)

            for spawn in spawns:
                gls.append(spawn)
                # only kill spawns that haven't started yet
                if spawn._start_event is not None:
                    spawn.kill()

            log.debug("_stop_pending_timers: timer %s deleted", timer_id)

        self.schedule_entries.clear()

        # wait for running gls to finish up
        gevent.joinall(gls, timeout=10)

        # allow reschedules from here on out
        self._no_reschedule = False
Example #25
    def issues(page=0):
        def gh_request(issueUrl):
            req = urllib2.Request(
                "%s?access_token=%s" % (issueUrl, access_token)
                )
            response = urllib2.urlopen(req)
            respData = json.loads(response.read())
            return respData

        # we'll be getting issues from here:
        # def getPostedIssues(page = 0, recent = True, pageSize = 25):

        issues = getPostedIssues(page, True, int(settings.webappPageSize))
        issueUrls = issues['todoList']

        jobs = [gevent.spawn(gh_request, url) for url in issueUrls]
        gevent.joinall(jobs, timeout=8)
        
        data = [job.value for job in jobs]

        return jsonify(
            data = data,
            pageNumber = issues['pageNumber'],
            pageCount = issues['pageCount'],
            )
Example #26
def _split_commits_and_tags(obj_store, lst,
                            ignore_unknown=False, pool=None):
    """Split an object id list into two lists: commit SHA1s and tag SHA1s.

    Same implementation as object_store._split_commits_and_tags
    except we use gevent to parallelize object retrieval.
    """
    commits = set()
    tags = set()

    def find_commit_type(sha):
        try:
            o = obj_store[sha]
        except KeyError:
            if not ignore_unknown:
                raise
        else:
            if isinstance(o, Commit):
                commits.add(sha)
            elif isinstance(o, Tag):
                tags.add(sha)
                commits.add(o.object[1])
            else:
                raise KeyError('Not a commit or a tag: %s' % sha)
    jobs = [pool.spawn(find_commit_type, s) for s in lst]
    gevent.joinall(jobs)
    return (commits, tags)
Example #27
    def __init__(self, object_store, haves, wants,
                 progress=None, get_tagged=None,
                 concurrency=1, get_parents=None):

        def collect_tree_sha(sha):
            self.sha_done.add(sha)
            cmt = object_store[sha]
            _collect_filetree_revs(object_store, cmt.tree, self.sha_done)

        self.object_store = object_store
        p = pool.Pool(size=concurrency)

        have_commits, have_tags = \
            _split_commits_and_tags(object_store, haves,
                                    True, p)
        want_commits, want_tags = \
            _split_commits_and_tags(object_store, wants,
                                    False, p)
        all_ancestors = object_store._collect_ancestors(have_commits)[0]
        missing_commits, common_commits = \
            object_store._collect_ancestors(want_commits, all_ancestors)

        self.sha_done = set()
        jobs = [p.spawn(collect_tree_sha, c) for c in common_commits]
        gevent.joinall(jobs)
        for t in have_tags:
            self.sha_done.add(t)
        missing_tags = want_tags.difference(have_tags)
        wants = missing_commits.union(missing_tags)
        self.objects_to_send = set([(w, None, False) for w in wants])
        if progress is None:
            self.progress = lambda x: None
        else:
            self.progress = progress
        self._tagged = get_tagged and get_tagged() or {}
Example #28
    def parallelPost(self, payload):
        queue = gevent.queue.Queue()
        gList = []
        if type(self.resourceList) is list:
            for i in self.resourceList:
                queue.put(i)

            for t in range(len(self.resourceList)):
                gl = gevent.spawn(postRequest, queue, payload)
                gList.append(gl)

            print str(gList)
            gevent.joinall(gList)

            return GreenletRequests.NodeResponsesPost

        if type(self.resourceList) is dict:
            for k, v in self.resourceList.iteritems():
                print k
                queue.put(k)

            for k, v in self.resourceList.iteritems():
                gl = gevent.spawn(postRequest, queue, v)
                gList.append(gl)

            print str(gList)
            gevent.joinall(gList)

            return GreenletRequests.NodeResponsesPost
Example #29
def search_weekends():
  if not (request.args.get("departureCity") and
          request.args.get("arrivalCity")):
    raise Exception("missing args")

  greenlets = []

  for delta in [0, 1]:
    departure_date = date.today() + timedelta(days=7 * delta)
    friday = next_weekday(departure_date, 4)
    sunday = next_weekday(departure_date, 6)

    greenlets.append(gevent.spawn(
      ScrapyRTClient.get_round_trip,
      request.args["departureCity"],
      request.args["arrivalCity"],
      friday, sunday
    ))

  gevent.joinall(greenlets)
  round_trips = [g.value for g in greenlets]

  return jsonify(
    results=[rt.to_json() for rt in round_trips],
    success=True
  )
Example #30
def main(args):
    users = User.objects.filter(username__in=args.users.split(","))
    try:
        provider = Provider.objects.get(id=args.provider_id)
    except Provider.DoesNotExist:
        print("A provider for id=%s could not be found" % args.provider_id)
        sys.exit(1)

    async_request_list = []

    for user in users:
        identity = Identity.objects.get(created_by=user, provider=provider)
        user_tokens = Token.objects.filter(user=user).order_by('-issuedTime')
        if user_tokens.count() == 0:
            print("No tokens for user: "******". No instances will launch on their account.")
            continue

        latest_token = user_tokens[0]

        headers = {
            'Authorization': 'Token ' + latest_token.key
        }

        instances = Instance.objects.filter(only_current(), created_by=user,
                                            name=args.name)

        for instance in instances:
            launch_url = settings.SERVER_URL + "/api/v1/provider/" + provider.uuid + "/identity/" + identity.uuid + "/instance/" + instance.provider_alias
            job = gevent.spawn(delete_instance, launch_url, headers, provider, user)
            async_request_list.append(job)

    print "Sending requests to Atmosphere..."
    gevent.joinall(async_request_list)
    print "Script finished running successfully!"
Example #31
def process_start(url_list):
    # with Loc:
    tasks = []
    for data in url_list:
        tasks.append(gevent.spawn(get_data, data))
    gevent.joinall(tasks)  # execute the tasks with coroutines
Example #32
def asynchronous():
    threads = []
    for i in range(1, 10):
        threads.append(gevent.spawn(fetch, i))
    gevent.joinall(threads)
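fetch() is not shown. A minimal sketch, assuming a monkey-patched standard library so the HTTP calls yield to other greenlets; the URL is a placeholder:

from gevent import monkey; monkey.patch_all()
import urllib.request

def fetch(pk):
    # Any blocking I/O here cooperatively yields to the other greenlets
    body = urllib.request.urlopen('http://example.com/item/%d' % pk).read()
    print(pk, len(body))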
Example #33
import gevent
from gevent import socket

hosts = ['www.crappytaxidermy.com',
         'www.walterpottertaxidermy.com',
#         'www.antique-taxidermy.com']
         'www.yahoo.com']

jobs = [gevent.spawn(gevent.socket.gethostbyname, host) for host in hosts]
gevent.joinall(jobs, timeout=5)
for job in jobs:
    print(job.value)
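With timeout=5, joinall() returns after five seconds whether or not every lookup finished; a job that is still pending (or that raised) has job.value of None, so job.successful() is the reliable way to tell completed lookups apart.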

Example #34
 def close_spider(self, spider):
     self.file.close()
     # block on the greenlet pool until it drains
     print(len(self.gevent_pools))
     gevent.joinall(self.gevent_pools)
     print('close_spider----', spider.name)
Example #35
 def start(self, *entry_points):
     """Run entry point."""
     gevent.joinall([gevent.spawn(ep) for ep in entry_points])
Example #36
                Lock.release()
    return


try:
    print "\n\n\nGoGo Tester Py - Ver %s\n\nDeveloped by NKUCodingcat <*****@*****.**>\n\n" % Ver
    log = open(root + "log.log", "w")
    jobs = [
        gevent.spawn(LimitCheck),
    ] + [
        gevent.spawn(Socket_TestNext, ippool)
        for i in range(int(sock_thread_num) / 2)
    ] + [
        gevent.spawn(Socket_TestNext, ippool, False)
        for i in range(int(sock_thread_num) - int(sock_thread_num) / 2)
    ] + [gevent.spawn(SSL_TestNext) for i in range(int(ssl_thread_num))]
    gevent.joinall(jobs)

finally:
    print "\n\nSearch Complete. The result will be saved at %s/res" % root
    if not os.path.exists(root + "res"):
        os.mkdir(root + "res")
    HTMLGEN.HTMLGEN(
        json.dumps([i for i in Succ if IPy.IP(i[0][1]).version() == 4]),
        open(root + "res/ip_4.txt", "w")).close()
    HTMLGEN.HTMLGEN(
        json.dumps([i for i in Succ if IPy.IP(i[0][1]).version() == 6]),
        open(root + "res/ip_6.txt", "w")).close()
    log.close()
    print "\n\n"
Example #37
def query_using_greenlets(module):
    g1 = gevent.spawn(query, module, 10, 100)
    g2 = gevent.spawn(query, module, 100, 1000)
    g3 = gevent.spawn(query, module, 1000, 5000)
    gevent.joinall([g1, g2, g3])
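query() is not shown. A minimal sketch, assuming module is a DB-API driver such as sqlite3 and an illustrative items table; note that a plain C driver like sqlite3 blocks the gevent hub, so real concurrency here needs a cooperative driver or a thread pool:

def query(module, low, high):
    # Count rows whose size falls in the [low, high) bucket
    conn = module.connect('example.db')
    cur = conn.cursor()
    cur.execute('SELECT COUNT(*) FROM items WHERE size >= ? AND size < ?',
                (low, high))
    print(low, high, cur.fetchone()[0])
    conn.close()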
Example #38
    def announce(self, force=False, mode="start", pex=True):
        if time.time() < self.time_announce + 30 and not force:
            return  # No reannouncing within 30 secs
        self.time_announce = time.time()

        trackers = config.trackers
        # Filter trackers based on supported networks
        if config.disable_udp:
            trackers = [
                tracker for tracker in trackers
                if not tracker.startswith("udp://")
            ]
        if not self.connection_server.tor_manager.enabled:
            trackers = [
                tracker for tracker in trackers if ".onion" not in tracker
            ]

        if mode == "update" or mode == "more":  # Only announce on one tracker; increment the queried tracker id
            self.last_tracker_id += 1
            self.last_tracker_id = self.last_tracker_id % len(trackers)
            trackers = [trackers[self.last_tracker_id]]  # The only tracker we are going to use

        errors = []
        slow = []
        add_types = []
        if self.connection_server:
            my_peer_id = self.connection_server.peer_id

            # Type of addresses they can reach me
            if self.connection_server.port_opened:
                add_types.append("ip4")
            if self.connection_server.tor_manager.enabled and self.connection_server.tor_manager.start_onions:
                add_types.append("onion")
        else:
            my_peer_id = ""

        s = time.time()
        announced = 0
        threads = []
        fileserver_port = config.fileserver_port

        for tracker in trackers:  # Start announce threads
            tracker_protocol, tracker_address = tracker.split("://")
            thread = gevent.spawn(self.announceTracker, tracker_protocol,
                                  tracker_address, fileserver_port, add_types,
                                  my_peer_id, mode)
            threads.append(thread)
            thread.tracker_address = tracker_address
            thread.tracker_protocol = tracker_protocol

        gevent.joinall(threads, timeout=10)  # Wait for announce finish

        for thread in threads:
            if thread.value:
                if thread.value > 1:
                    slow.append("%.2fs %s://%s" %
                                (thread.value, thread.tracker_protocol,
                                 thread.tracker_address))
                announced += 1
            else:
                if thread.ready():
                    errors.append(
                        "%s://%s" %
                        (thread.tracker_protocol, thread.tracker_address))
                else:  # Still running
                    slow.append(
                        "10s+ %s://%s" %
                        (thread.tracker_protocol, thread.tracker_address))

        # Save peers num
        self.settings["peers"] = len(self.peers)
        self.saveSettings()

        if len(errors) < len(threads):  # Fewer errors than the total number of trackers
            self.log.debug(
                "Announced types %s in mode %s to %s trackers in %.3fs, errors: %s, slow: %s"
                % (add_types, mode, announced, time.time() - s, errors, slow))
        else:
            if mode != "update":
                self.log.error("Announce to %s trackers in %.3fs, failed" %
                               (announced, time.time() - s))

        if pex:
            if not [
                    peer for peer in self.peers.values()
                    if peer.connection and peer.connection.connected
            ]:
                # If no connected peer yet then wait for connections
                gevent.spawn_later(3, self.announcePex,
                                   need_num=10)  # Spawn 3 secs later
            else:  # Else announce immediately
                if mode == "more":  # Need more peers
                    self.announcePex(need_num=10)
                else:
                    self.announcePex()
Example #39
    def downloadContent(self,
                        inner_path,
                        download_files=True,
                        peer=None,
                        check_modifications=False):
        s = time.time()
        self.log.debug("Downloading %s..." % inner_path)
        found = self.needFile(inner_path,
                              update=self.bad_files.get(inner_path))
        content_inner_dir = helper.getDirname(inner_path)
        if not found:
            self.log.debug("Download %s failed, check_modifications: %s" %
                           (inner_path, check_modifications))
            if check_modifications:  # Download failed, but check modifications in case it succeeds later
                self.onFileDone.once(
                    lambda file_name: self.checkModifications(0),
                    "check_modifications")
            return False  # Could not download content.json

        self.log.debug("Got %s" % inner_path)
        changed, deleted = self.content_manager.loadContent(
            inner_path, load_includes=False)

        # Start download files
        file_threads = []
        if download_files:
            for file_relative_path in self.content_manager.contents[
                    inner_path].get("files", {}).keys():
                file_inner_path = content_inner_dir + file_relative_path
                # Start the download and don't wait for it to finish; keep the returned event
                res = self.needFile(file_inner_path,
                                    blocking=False,
                                    update=self.bad_files.get(file_inner_path),
                                    peer=peer)
                if res is not True and res is not False:  # Need downloading and file is allowed
                    file_threads.append(res)  # Append evt

            # Optionals files
            if inner_path == "content.json":
                gevent.spawn(self.updateHashfield)

            if self.settings.get("autodownloadoptional"):
                for file_relative_path in self.content_manager.contents[
                        inner_path].get("files_optional", {}).keys():
                    file_inner_path = content_inner_dir + file_relative_path
                    # Start the download and don't wait for it to finish; keep the returned event
                    res = self.needFile(
                        file_inner_path,
                        blocking=False,
                        update=self.bad_files.get(file_inner_path),
                        peer=peer)
                    if res is not True and res is not False:  # Need downloading and file is allowed
                        file_threads.append(res)  # Append evt

        # Wait for includes download
        include_threads = []
        for file_relative_path in self.content_manager.contents[
                inner_path].get("includes", {}).keys():
            file_inner_path = content_inner_dir + file_relative_path
            include_thread = gevent.spawn(self.downloadContent,
                                          file_inner_path,
                                          download_files=download_files,
                                          peer=peer)
            include_threads.append(include_thread)

        self.log.debug("%s: Downloading %s includes..." %
                       (inner_path, len(include_threads)))
        gevent.joinall(include_threads)
        self.log.debug("%s: Includes download ended" % inner_path)

        if check_modifications:  # Check if every file is up-to-date
            self.checkModifications(0)

        self.log.debug("%s: Downloading %s files, changed: %s..." %
                       (inner_path, len(file_threads), len(changed)))
        gevent.joinall(file_threads)
        self.log.debug("%s: DownloadContent ended in %.2fs" %
                       (inner_path, time.time() - s))

        if not self.worker_manager.tasks:
            self.onComplete()  # No more task trigger site complete

        return True
Example #40
                          protocols=['http-only', 'chat'])
        ws.daemon = False
        ws2.daemon = False
        ws.name = "Reader/Writer"
        ws2.name = "Reader"
        ws.connect()
        ws2.connect()
        ''' what we're doing here is that we're sending new entities and getting them
            back on the websocket '''
        greenlets = [
            gevent.spawn(ws.incoming),
            gevent.spawn(ws.outgoing),
        ]

        gws2 = gevent.spawn(ws2.incoming)
        gevent.joinall(greenlets)
        ws2.close()
        gws2.join(timeout=1)
        # here's our final test
        print("Counts: %s %s" % (ws.count, ws2.count))
        assert ws.count == calls, ("Expected Responses were given! %d %d" %
                                   (ws.count, calls))
        assert ws2.count >= (9 * calls / 10), (
            "2nd Client got less than 9/10 of the results! %s" % ws2.count)
        print("Looks like the tests passed!")
    finally:
        #except KeyboardInterrupt:
        ws.close()
        ws2.close()
        gevent.sleep(1)
        os.system("kill -9 $(lsof -t -i:8000)")
Example #41
def process_start(tasks, myip, queue2, cntl):
    spawns = []
    for task in tasks:
        spawns.append(gevent.spawn(detect_proxy, myip, task, queue2))
    gevent.joinall(spawns)
    cntl.put(os.getpid())  # 子进程退出是加入控制队列
Example #42
    def publish(self, limit="default", inner_path="content.json"):
        published = []  # Successfully published (Peer)
        publishers = []  # Publisher threads

        if not self.peers:
            self.announce()

        threads = 5
        if limit == "default":
            if len(self.peers) > 50:
                limit = 3
                threads = 3
            else:
                limit = 5

        connected_peers = self.getConnectedPeers()
        if len(connected_peers) > limit * 2:  # Publish to already connected peers if possible
            peers = connected_peers
        else:
            peers = self.peers.values()

        self.log.info(
            "Publishing %s to %s/%s peers (connected: %s)..." %
            (inner_path, limit, len(self.peers), len(connected_peers)))

        if not peers:
            return 0  # No peers found

        random.shuffle(peers)
        event_done = gevent.event.AsyncResult()
        for i in range(min(len(self.peers), limit, threads)):
            publisher = gevent.spawn(self.publisher, inner_path, peers,
                                     published, limit, event_done)
            publishers.append(publisher)

        event_done.get()  # Wait for done
        if len(published) < min(len(self.peers), limit):
            time.sleep(0.2)  # If less than we need sleep a bit
        if len(published) == 0:
            gevent.joinall(publishers)  # No successful publish; wait for every publisher

        # Make sure the connected passive peers got the update
        passive_peers = [
            peer for peer in peers
            if peer.connection and not peer.connection.closed
            and peer.key.endswith(":0") and peer not in published
        ]  # Every connected passive peer that we did not publish to

        self.log.info(
            "Successfully published %s to %s peers, publishing to %s more passive peers"
            % (inner_path, len(published), len(passive_peers)))

        for peer in passive_peers[0:3]:
            gevent.spawn(self.publisher,
                         inner_path,
                         passive_peers,
                         published,
                         limit=limit + 3)

        # Send my hashfield to every connected peer if changed
        gevent.spawn(self.sendMyHashfield, 100)

        return len(published)
Example #43
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("config")

    args = parser.parse_args()

    with open(args.config, "r") as handler:
        config = json.load(handler)

    # validate the endpoints
    node_to_endpoint = dict()
    node_to_address = dict()
    for node_name, node_info in config["nodes"].items():
        if urlsplit(node_info["endpoint"]).scheme == "":
            raise ValueError("'endpoint' must have the protocol defined")

        url_deposit = f"{node_info['endpoint']}/api/{API_VERSION}/address"
        result = requests.get(url_deposit).json()

        if result["our_address"] != node_info["address"]:
            raise ValueError(
                f"Address mismatch, configuration {node_info['address']}, "
                f"API response {result['our_address']}")

        node_to_endpoint[node_name] = node_info["endpoint"]
        node_to_address[node_name] = node_info["address"]

    nodeaddress_to_channelopenqueue: OpenQueue = defaultdict(list)
    nodeaddress_to_channeldepositqueue: DepositQueue = defaultdict(list)

    # Schedule the requests to evenly distribute the load. This is important
    # because as of 0.100.5 channels cannot be opened concurrently; by dividing
    # the load evenly we make sure the channels are opened as fast as possible.
    for token_address, channels_to_open in config["networks"].items():
        for channel in channels_to_open:
            node1 = channel["node1"]
            node2 = channel["node2"]

            participant1 = node_to_address[node1]
            participant2 = node_to_address[node2]

            current_channel1 = channel_details(node_to_endpoint[node1],
                                               token_address, participant2)
            current_channel2 = channel_details(node_to_endpoint[node2],
                                               token_address, participant1)

            nodes_are_synchronized = bool(current_channel1) == bool(
                current_channel2)
            msg = (f"The channel must exist in both or neither of the nodes.\n"
                   f"{current_channel1}\n"
                   f"{current_channel2}")
            assert nodes_are_synchronized, msg

            if current_channel1 is None:
                queue_channel_open(
                    nodeaddress_to_channelopenqueue,
                    nodeaddress_to_channeldepositqueue,
                    channel,
                    token_address,
                    node_to_address,
                    node_to_endpoint,
                )
            else:
                assert current_channel1 and current_channel2

                queue_channel_deposit(
                    nodeaddress_to_channeldepositqueue,
                    channel,
                    current_channel1,
                    current_channel2,
                    token_address,
                    node_to_address,
                    node_to_endpoint,
                )

    open_greenlets = set(
        gevent.spawn(channel_open, open_queue)
        for open_queue in nodeaddress_to_channelopenqueue.values())
    deposit_greenlets = set(
        gevent.spawn(channel_deposit_with_the_same_token_network,
                     deposit_queue)
        for deposit_queue in nodeaddress_to_channeldepositqueue.values())

    all_greenlets = set()
    all_greenlets.update(open_greenlets)
    all_greenlets.update(deposit_greenlets)

    gevent.joinall(all_greenlets, raise_error=True)
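raise_error=True makes joinall() re-raise the first exception from any greenlet instead of swallowing it, so a failed channel open or deposit aborts the whole script rather than passing silently.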
Example #44
import gevent, time


def foo1():
    print('a')
    gevent.sleep(2)
    print(3)


def foo2():
    print(2)
    gevent.sleep(2)
    print(4)


a = gevent.spawn(foo1)
b = gevent.spawn(foo2)

gevent.joinall([a, b])
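Because the two sleeps overlap, this prints 'a' and 2 immediately, then 3 and 4 roughly two seconds later; the whole run takes about two seconds rather than four.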
Example #45
def main():
    """Entry point for RWS.

    return (int): exit code (0 on success, 1 on error)

    """
    parser = argparse.ArgumentParser(
        description="Ranking for CMS.")
    parser.add_argument("--config", type=argparse.FileType("rt"),
                        help="override config file")
    parser.add_argument("-d", "--drop", action="store_true",
                        help="drop the data already stored")
    args = parser.parse_args()

    config.load(args.config)

    if args.drop:
        print("Are you sure you want to delete directory %s? [y/N]" %
              config.lib_dir, end='')
        ans = raw_input().lower()
        if ans in ['y', 'yes']:
            print("Removing directory %s." % config.lib_dir)
            shutil.rmtree(config.lib_dir)
        else:
            print("Not removing directory %s." % config.lib_dir)
        return 1

    Contest.store.load_from_disk()
    Task.store.load_from_disk()
    Team.store.load_from_disk()
    User.store.load_from_disk()
    Submission.store.load_from_disk()
    Subchange.store.load_from_disk()

    Scoring.store.init_store()

    toplevel_handler = RoutingHandler(DataWatcher(), ImageHandler(
        os.path.join(config.lib_dir, '%(name)s'),
        os.path.join(config.web_dir, 'img', 'logo.png')))

    wsgi_app = SharedDataMiddleware(DispatcherMiddleware(
        toplevel_handler, {
            '/contests': StoreHandler(Contest.store),
            '/tasks': StoreHandler(Task.store),
            '/teams': StoreHandler(Team.store),
            '/users': StoreHandler(User.store),
            '/submissions': StoreHandler(Submission.store),
            '/subchanges': StoreHandler(Subchange.store),
            '/faces': ImageHandler(
                os.path.join(config.lib_dir, 'faces', '%(name)s'),
                os.path.join(config.web_dir, 'img', 'face.png')),
            '/flags': ImageHandler(
                os.path.join(config.lib_dir, 'flags', '%(name)s'),
                os.path.join(config.web_dir, 'img', 'flag.png')),
        }), {'/': config.web_dir})

    servers = list()
    if config.http_port is not None:
        http_server = WSGIServer(
            (config.bind_address, config.http_port), wsgi_app)
        servers.append(http_server)
    if config.https_port is not None:
        https_server = WSGIServer(
            (config.bind_address, config.https_port), wsgi_app,
            certfile=config.https_certfile, keyfile=config.https_keyfile)
        servers.append(https_server)

    try:
        gevent.joinall(list(gevent.spawn(s.serve_forever) for s in servers))
    except KeyboardInterrupt:
        pass
    finally:
        gevent.joinall(list(gevent.spawn(s.stop) for s in servers))
    return 0
Example #46
import gevent
from gevent import Greenlet


def foo(message, n):
    gevent.sleep(0)
    print message


thread1 = Greenlet.spawn(foo, "Hello", 1)
thread2 = Greenlet.spawn(foo, "I live!", 2)
thread3 = gevent.spawn(lambda x: (x + 1), 2)

threads = [thread1, thread2, thread3]
gevent.joinall(threads)
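After joinall() returns, each greenlet's result is available as .value: thread3.value is 3 here, while the two foo greenlets return None.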
Example #47
 def run(self):
     generator_run_tasks = [gen.run_generator() for gen in self._generators]
     generator_tasks = [gen_task for gen_task_sublist in generator_run_tasks
                        for gen_task in gen_task_sublist]
     gevent.joinall(generator_tasks)
Example #48
def run(
    privatekey,
    registry_contract_address,
    secret_registry_contract_address,
    discovery_contract_address,
    listen_address,
    structlog,
    logfile,
    scenario,
    stage_prefix,
):  # pylint: disable=unused-argument

    # TODO: only enable structlog on "initiators"
    structlog.configure(structlog, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    privatekey_bin = decode_hex(privatekey)

    rpc_client = JSONRPCClient(
        '127.0.0.1',
        8545,
        privatekey_bin,
    )

    blockchain_service = BlockChainService(privatekey_bin, rpc_client)

    discovery = ContractDiscovery(
        blockchain_service,
        decode_hex(discovery_contract_address),
    )

    registry = blockchain_service.token_network_registry(
        registry_contract_address, )

    secret_registry = blockchain_service.secret_registry(
        secret_registry_contract_address, )

    throttle_policy = TokenBucket(
        config['protocol']['throttle_capacity'],
        config['protocol']['throttle_fill_rate'],
    )

    transport = UDPTransport(
        discovery=discovery,
        udpsocket=gevent.server._udp_socket((listen_host, listen_port)),
        throttle_policy=throttle_policy,
        config=config['protocol'],
    )

    app = App(
        config=config,
        chain=blockchain_service,
        query_start_block=0,
        default_registry=registry,
        default_secret_registry=secret_registry,
        transport=transport,
        discovery=discovery,
    )
    app.start()

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    from_block = 0
    app.raiden.install_all_blockchain_filters(
        app.raiden.default_registry,
        app.raiden.default_secret_registry,
        from_block,
    )

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = hexlify(app.raiden.address)
        log.warning('our address is {}'.format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            partner_nodes = [node for node in nodes if node != our_node]

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token(registry_contract_address)

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            #       probably will get to higher throughput

            log.warning('Waiting for all nodes to come online')

            api = RaidenAPI(app.raiden)

            for node in partner_nodes:
                api.start_health_check_for(node)

            while True:
                all_reachable = all(
                    api.get_node_network_state(node) == NODE_NETWORK_REACHABLE
                    for node in partner_nodes)

                if all_reachable:
                    break

                gevent.sleep(5)

            log.warning('All nodes are online')

            if our_node != nodes[-1]:
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.token_network_register(
                    app.raiden.default_registry.address, token_address)
                amount = transfers_with_amount[nodes[-1]]

                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning(
                            'Error: peer {} not found in discovery'.format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Opening channel with {} for {}'.format(
                            peer, token_address))
                        api.channel_open(app.raiden.default_registry.address,
                                         token_address, peer)
                        break
                    except KeyError:
                        log.warning(
                            'Error: could not open channel with {}'.format(
                                peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Funding channel with {} for {}'.format(
                            peer, token_address))
                        api.channel_deposit(
                            app.raiden.default_registry.address,
                            token_address,
                            peer,
                            amount,
                        )
                        break
                    except Exception:
                        log.warning(
                            'Error: could not deposit {} for {}'.format(
                                amount, peer))
                        time.sleep(random.randrange(30))

                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning('Done with initialization, waiting to continue...')
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer,
                     is_async):
            def transfer_():
                log.warning('Making {} transfers to {}'.format(
                    total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in range(total_transfers):
                    RaidenAPI(app.raiden).transfer(
                        app.raiden.default_registry.address,
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning('Making {} transfers took {}'.format(
                    total_transfers, transfer_results['total_time']))
                log.warning('Times: {}'.format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning('Waiting for termination')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning('Waiting for transfers to finish, will write results...')
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning('No scenario file supplied, doing nothing!')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
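The stage gating above repeats one small idiom: park the main greenlet on an Event and let a UNIX signal set it. A standalone sketch of that pattern (the helper name is ours; `gevent.signal(...)` is the older callable used above, spelled `gevent.signal_handler` in later gevent releases):

import signal

import gevent
import gevent.event


def wait_for_signal(signum):
    # Block the calling greenlet until `signum` is delivered to the process.
    event = gevent.event.Event()
    gevent.signal(signum, event.set)  # the handler runs in the hub and wakes us
    event.wait()

# e.g. pause a staged benchmark until the operator sends SIGUSR2:
# wait_for_signal(signal.SIGUSR2)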
Exemplo n.º 49
0
    def startFindOptional(self,
                          reset_task=False,
                          find_more=False,
                          high_priority=False):
        # Wait for more file requests
        if len(self.tasks) < 20 or high_priority:
            time.sleep(0.01)
        if len(self.tasks) > 90:
            time.sleep(5)
        else:
            time.sleep(0.5)

        optional_tasks = [
            task for task in self.tasks if task["optional_hash_id"]
        ]
        if not optional_tasks:
            return False
        optional_hash_ids = set(
            [task["optional_hash_id"] for task in optional_tasks])
        time_tasks = self.time_task_added

        self.log.debug(
            "Finding peers for optional files: %s (reset_task: %s, find_more: %s)"
            % (optional_hash_ids, reset_task, find_more))
        found = self.findOptionalTasks(optional_tasks, reset_task=reset_task)

        if found:
            found_peers = set(
                [peer for peers in found.values() for peer in peers])
            self.startWorkers(found_peers)

        if len(found) < len(optional_hash_ids) or find_more or (
                high_priority
                and any(len(peers) < 10 for peers in found.values())):
            self.log.debug("No local result for optional files: %s" %
                           (optional_hash_ids - set(found)))

            # Query hashfield from connected peers
            threads = []
            peers = self.site.getConnectedPeers()
            if not peers:
                peers = self.site.getConnectablePeers()
            for peer in peers:
                if not peer.time_hashfield:
                    threads.append(gevent.spawn(peer.updateHashfield))
            gevent.joinall(threads, timeout=5)

            if time_tasks != self.time_task_added:  # New task added since start
                optional_tasks = [
                    task for task in self.tasks if task["optional_hash_id"]
                ]
                optional_hash_ids = set(
                    [task["optional_hash_id"] for task in optional_tasks])

            found = self.findOptionalTasks(optional_tasks)
            self.log.debug(
                "Found optional files after query hashtable connected peers: %s/%s"
                % (len(found), len(optional_hash_ids)))

            if found:
                found_peers = set([
                    peer for hash_id_peers in found.values()
                    for peer in hash_id_peers
                ])
                self.startWorkers(found_peers)

        if len(found) < len(optional_hash_ids) or find_more:
            self.log.debug(
                "No connected hashtable result for optional files: %s" %
                (optional_hash_ids - set(found)))

            # Try to query connected peers
            threads = []
            peers = [
                peer for peer in self.site.getConnectedPeers()
                if peer not in self.asked_peers
            ]
            if not peers:
                peers = self.site.getConnectablePeers()

            for peer in peers:
                threads.append(
                    gevent.spawn(peer.findHashIds, list(optional_hash_ids)))
                self.asked_peers.append(peer)

            for i in range(5):
                time.sleep(1)
                thread_values = [
                    thread.value for thread in threads if thread.value
                ]
                if not thread_values:
                    continue

                found_ips = helper.mergeDicts(thread_values)
                found = self.addOptionalPeers(found_ips)
                self.log.debug(
                    "Found optional files after findhash connected peers: %s/%s (asked: %s)"
                    % (len(found), len(optional_hash_ids), len(threads)))

                if found:
                    found_peers = set([
                        peer for hash_id_peers in found.values()
                        for peer in hash_id_peers
                    ])
                    self.startWorkers(found_peers)

                if len(thread_values) == len(threads):
                    # Got result from all started thread
                    break

        if len(found) < len(optional_hash_ids):
            self.log.debug("No findHash result, try random peers: %s" %
                           (optional_hash_ids - set(found)))
            # Try to query random peers

            if time_tasks != self.time_task_added:  # New task added since start
                optional_tasks = [
                    task for task in self.tasks if task["optional_hash_id"]
                ]
                optional_hash_ids = set(
                    [task["optional_hash_id"] for task in optional_tasks])

            threads = []
            peers = self.site.getConnectablePeers(ignore=self.asked_peers)

            for peer in peers:
                threads.append(
                    gevent.spawn(peer.findHashIds, list(optional_hash_ids)))
                self.asked_peers.append(peer)

            gevent.joinall(threads, timeout=15)

            found_ips = helper.mergeDicts(
                [thread.value for thread in threads if thread.value])
            found = self.addOptionalPeers(found_ips)
            self.log.debug(
                "Found optional files after findhash random peers: %s/%s" %
                (len(found), len(optional_hash_ids)))

            if found:
                found_peers = set([
                    peer for hash_id_peers in found.values()
                    for peer in hash_id_peers
                ])
                self.startWorkers(found_peers)

        if len(found) < len(optional_hash_ids):
            self.log.debug("No findhash result for optional files: %s" %
                           (optional_hash_ids - set(found)))
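The five-round polling loop above (spawn every findHashIds query, then act on whichever results have already arrived) is a reusable pattern. A minimal sketch, assuming each job is a zero-argument callable whose truthy return value signals completion:

import gevent


def poll_for_results(jobs, attempts=5, interval=1.0):
    # Spawn everything up front, then check once per interval so partial
    # results can be used before the slowest greenlet finishes.
    threads = [gevent.spawn(job) for job in jobs]
    for _ in range(attempts):
        gevent.sleep(interval)
        finished = [t.value for t in threads if t.value]
        if len(finished) == len(threads):
            break  # every greenlet has reported back
    return [t.value for t in threads if t.value]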
Exemplo n.º 50
0
def mesos_cpu_metrics_provider(
    marathon_service_config,
    marathon_tasks,
    mesos_tasks,
    log_utilization_data={},
    noop=False,
    **kwargs,
):
    """
    Gets the mean cpu utilization of a service across all of its tasks.

    :param marathon_service_config: the MarathonServiceConfig to get data from
    :param marathon_tasks: Marathon tasks to get data from
    :param mesos_tasks: Mesos tasks to get data from
    :param log_utilization_data: A dict used to transfer utilization data to autoscale_marathon_instance()

    :returns: the service's mean utilization, from 0 to 1
    """

    autoscaling_root = compose_autoscaling_zookeeper_root(
        service=marathon_service_config.service,
        instance=marathon_service_config.instance,
    )
    zk_last_time_path = '%s/cpu_last_time' % autoscaling_root
    zk_last_cpu_data = '%s/cpu_data' % autoscaling_root

    with ZookeeperPool() as zk:
        try:
            last_time = zk.get(zk_last_time_path)[0].decode('utf8')
            last_cpu_data = zk.get(zk_last_cpu_data)[0].decode('utf8')
            log_utilization_data[last_time] = last_cpu_data
            last_time = float(last_time)
            last_cpu_data = (datum for datum in last_cpu_data.split(',')
                             if datum)
        except NoNodeError:
            last_time = 0.0
            last_cpu_data = []

    monkey.patch_socket()
    jobs = [gevent.spawn(task.stats_callable) for task in mesos_tasks]
    gevent.joinall(jobs, timeout=60)
    mesos_tasks_first_run = dict(
        zip([task['id'] for task in mesos_tasks], [job.value for job in jobs]))

    current_time = int(datetime.now().strftime('%s'))
    time_delta = current_time - last_time

    # Mesos slave statistics endpoint returns a crazy CPU value when a container is
    # in the process of being killed. So we fetch twice, and use the lower of the two.
    # First, we clear the cache.
    for task in mesos_tasks:
        del task.slave._cache['stats']
    jobs = [gevent.spawn(task.stats_callable) for task in mesos_tasks]
    gevent.joinall(jobs, timeout=60)
    mesos_tasks_second_run = dict(
        zip([task['id'] for task in mesos_tasks], [job.value for job in jobs]))

    mesos_cpu_data = {}
    for task_id, stats in mesos_tasks_first_run.items():
        stats2 = mesos_tasks_second_run.get(task_id)
        if stats is not None and stats2 is not None:
            try:
                utime = min(float(stats['cpus_user_time_secs']),
                            float(stats2['cpus_user_time_secs']))
                stime = min(float(stats['cpus_system_time_secs']),
                            float(stats2['cpus_system_time_secs']))
                limit = float(stats['cpus_limit']) - .1
                mesos_cpu_data[task_id] = (stime + utime) / limit
            except KeyError:
                pass

    if not mesos_cpu_data:
        raise MetricsProviderNoDataError(
            "Couldn't get any cpu data from Mesos")

    cpu_data_csv = ','.join('%s:%s' % (cpu_seconds, task_id)
                            for task_id, cpu_seconds in mesos_cpu_data.items())
    log_utilization_data[str(current_time)] = cpu_data_csv

    if not noop:
        with ZookeeperPool() as zk:
            zk.ensure_path(zk_last_cpu_data)
            zk.ensure_path(zk_last_time_path)
            zk.set(zk_last_cpu_data, str(cpu_data_csv).encode('utf8'))
            zk.set(zk_last_time_path, str(current_time).encode('utf8'))

    utilization = {}
    for datum in last_cpu_data:
        last_cpu_seconds, task_id = datum.split(':')
        if task_id in mesos_cpu_data:
            utilization[task_id] = (mesos_cpu_data[task_id] -
                                    float(last_cpu_seconds)) / time_delta

    if not utilization:
        raise MetricsProviderNoDataError(
            """The mesos_cpu metrics provider doesn't have Zookeeper data for this service.
                                         This is expected for its first run."""
        )

    task_utilization = utilization.values()
    mean_utilization = mean(task_utilization)

    return mean_utilization
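The utilization returned above is a plain rate: the growth in a task's cumulative CPU seconds between two ZooKeeper snapshots, divided by the wall-clock interval. A worked sketch with illustrative numbers:

def cpu_utilization(prev_cpu_seconds, curr_cpu_seconds, time_delta):
    # Fraction of one CPU consumed over the window.
    return (curr_cpu_seconds - prev_cpu_seconds) / time_delta

# 4.5 CPU-seconds consumed over a 60 s window -> 0.075, i.e. 7.5% of a core
assert cpu_utilization(120.0, 124.5, 60.0) == 0.075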
Exemplo n.º 51
0
import time

import gevent
from gevent import select  # cooperative select: polling yields to the hub

start = time.time()
tic = lambda: 'at %1.1f seconds' % (time.time() - start)


def gr1():
    # Waits in select for up to two seconds, but we don't want to stick around...
    print('Started Polling: %s' % tic())
    select.select([], [], [], 2)
    print('Ended Polling: %s' % tic())


def gr2():
    # Waits in select for up to two seconds, but we don't want to stick around...
    print('Started Polling: %s' % tic())
    select.select([], [], [], 2)
    print('Ended Polling: %s' % tic())


def gr3():
    print("Hey lets do some stuff while the greenlets poll, %s" % tic())
    gevent.sleep(1)


gevent.joinall([
    gevent.spawn(gr1),
    gevent.spawn(gr2),
    gevent.spawn(gr3),
])
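With gevent's cooperative select imported above, both pollers yield immediately and gr3 runs in the gap; the output looks roughly like this:

# Started Polling: at 0.0 seconds
# Started Polling: at 0.0 seconds
# Hey lets do some stuff while the greenlets poll, at 0.0 seconds
# Ended Polling: at 2.0 seconds
# Ended Polling: at 2.0 seconds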
Exemplo n.º 52
0
    def run(self):
        logger.info('Starting databridge')
        for n, w in self.workers.items():
            logger.info('starting {}'.format(n))
            w.start()
        gevent.joinall([w.g for w in self.workers.values()])
Exemplo n.º 53
0
def by_urllib2():
    jobs = [gevent.spawn(worker, url, True) for url in urls]
    gevent.joinall(jobs)
Exemplo n.º 54
0
    def tearDown(self):
        self.kamstrup_management_server.stop()
        gevent.joinall([self.server_greenlet])
        # tidy up (again)...
        conpot_core.get_sessionManager().purge_sessions()
Exemplo n.º 55
0
import gevent


def test(n):
    for x in range(n):
        print(gevent.getcurrent(), x)
        # Include a blocking operation here: when a running greenlet hits
        # one, execution switches to another greenlet.
        # Without it the output ordering is different.
        # Inside a greenlet, sleep like this:
        gevent.sleep(0.5)


gevent.joinall(
    [gevent.spawn(test, 5),
     gevent.spawn(test, 5),
     gevent.spawn(test, 5)])
Exemplo n.º 56
0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# import time
import gevent
from gevent import monkey, time
monkey.patch_all()


def func1():
    print(123)
    time.sleep(3)
    print(456)


def func2():
    print('---------')
    time.sleep(1)
    print('=========')


g1 = gevent.spawn(func1)
g2 = gevent.spawn(func2)
gevent.joinall([g1, g2])
print('main')
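Because monkey.patch_all() makes time.sleep cooperative, func1 yields during its three-second sleep and func2 finishes first; the expected output is:

# 123
# ---------
# =========
# 456
# main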
Exemplo n.º 57
0
#!/usr/bin/env python
# -*-coding:utf-8-*-
# date:2018/8/7

from urllib.request import urlopen
import gevent


def f(url):
    print("GET: %s" % url)
    resp = urlopen(url)
    data = resp.read()
    with open("xx.html", "wb") as f:
        f.write(data)

    print("%d bytes received from %s" % (len(data), url))


f('http://www.ifeng.com/?_zbs_firefox_gg')
f('http://www.ifeng.com/?_zbs_firefox_gg')
f('http://www.ifeng.com/?_zbs_firefox_gg')

# The gevent.joinall version below is much faster than the sequential calls above

gevent.joinall([
    gevent.spawn(f, 'http://www.ifeng.com/?_zbs_firefox_gg'),
    gevent.spawn(f, 'http://www.ifeng.com/?_zbs_firefox_gg'),
    gevent.spawn(f, 'http://www.ifeng.com/?_zbs_firefox_gg')
])
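One caveat: urlopen only overlaps across greenlets when the socket layer is cooperative. As written the snippet still runs, but the three downloads happen serially; a common prelude (our addition, not part of the original) makes them concurrent:

from gevent import monkey
monkey.patch_all()  # ideally done as early as possible in the program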
Exemplo n.º 58
0
def by_requests():
    jobs = [gevent.spawn(worker, url) for url in urls]
    gevent.joinall(jobs)
Exemplo n.º 59
0
import time

import gevent
from gevent import monkey

# Assumed prelude: the interleaved output shown below only happens if
# time.sleep is monkey-patched to be cooperative.
monkey.patch_all()


def func1():
    print("func1 start")
    time.sleep(2)
    print("func1 end")


def func2():
    print("func2 start")
    time.sleep(2)
    print("func2 end")


f1 = gevent.spawn(func1)
f2 = gevent.spawn(func2)

gevent.joinall([f1, f2])

"""
func1 start
func2 start
func1 end
func2 end
"""

"""
	gevent.spawn(func.argv)
		功能:生成协程对象
		参数:
			func: 协程函数
			argv: 协程函数的参数(不定参)
	
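A minimal illustration of the signature described above (the function and argument names are ours):

import gevent


def greet(name, times):
    for _ in range(times):
        print('hello, %s' % name)
        gevent.sleep(0)  # yield so other greenlets may run

g = gevent.spawn(greet, 'world', 2)  # the function, then its (variadic) arguments
gevent.joinall([g])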
Exemplo n.º 60
0
def test_regression_multiple_revealsecret(raiden_network, token_addresses):
    """ Multiple RevealSecret messages arriving at the same time must be
    handled properly.

    Secret handling followed these steps:

        The Secret message arrives
        The secret is registered
        The channel is updated and the corresponding lock is removed
        * A balance proof for the new channel state is created and sent to the
          payer
        The channel is unregistered for the given hashlock

    The step marked with an asterisk above introduced a context switch. This
    allowed a second RevealSecret message to be handled before the channel was
    unregistered; because the channel had already been updated, an exception
    was raised for an unknown secret.
    """
    app0, app1 = raiden_network
    token = token_addresses[0]

    identifier = 1
    secret = sha3('test_regression_multiple_revealsecret')
    hashlock = sha3(secret)
    expiration = app0.raiden.get_block_number() + 100
    amount = 10

    mediated_transfer = channel(app0, app1, token).create_mediatedtransfer(
        transfer_initiator=app0.raiden.address,
        transfer_target=app1.raiden.address,
        fee=0,
        amount=amount,
        identifier=identifier,
        expiration=expiration,
        hashlock=hashlock,
    )
    app0.raiden.sign(mediated_transfer)

    message_data = mediated_transfer.encode()
    app1.raiden.protocol.receive(message_data)

    reveal_secret = RevealSecret(secret)
    app0.raiden.sign(reveal_secret)
    reveal_secret_data = reveal_secret.encode()

    secret = Secret(
        identifier=identifier,
        nonce=mediated_transfer.nonce + 1,
        channel=channel(app0, app1, token).channel_address,
        transferred_amount=amount,
        locksroot=EMPTY_MERKLE_ROOT,
        secret=secret,
    )
    app0.raiden.sign(secret)
    secret_data = secret.encode()

    messages = [
        secret_data,
        reveal_secret_data,
    ]

    wait = [
        gevent.spawn_later(
            .1,
            app1.raiden.protocol.receive,
            data,
        ) for data in messages
    ]

    gevent.joinall(wait)
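For reference, gevent.spawn_later(delay, func, *args) schedules func to run after delay seconds and returns a Greenlet, so joinall can wait on it like any other. A standalone sketch:

import gevent


def deliver(payload):
    print('delivered: %r' % payload)

# Both deliveries fire ~0.1 s from now, close enough together to race.
waiters = [gevent.spawn_later(0.1, deliver, data) for data in ('a', 'b')]
gevent.joinall(waiters)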