def testCallAsync(self):
    """RateLimit.callAsync: first call fires instantly; queued calls collapse to the newest."""
    obj1 = ExampleClass()
    obj2 = ExampleClass()

    def queue_count(back):
        # Shorthand for queuing obj1.count on the shared "counting async" limiter
        return RateLimit.callAsync("counting async", allowed_again=0.1, func=obj1.count, back=back)

    began = time.time()
    queue_count("call #1").join()
    assert obj1.counted == 1  # The very first call runs immediately
    assert around(time.time() - began, 0.0)

    # Calls inside the rate-limit window are delayed; each newer call replaces the pending one
    began = time.time()
    pending = queue_count("call #2")  # Dumped by the next call
    time.sleep(0.03)
    pending = queue_count("call #3")  # Dumped by the next call
    time.sleep(0.03)
    last = queue_count("call #4")  # This is the one that actually runs
    assert obj1.counted == 1  # Delay still in progress: nothing executed yet
    last.join()
    assert last.value == "call #4"
    assert around(time.time() - began, 0.1)  # Only the final queued call executed
    assert obj1.counted == 2
    assert obj1.last_called == "call #4"

    # Having just been called, the event is rate-limited again
    assert not RateLimit.isAllowed("counting async", 0.1)
    began = time.time()
    queue_count("call #5").join()
    assert obj1.counted == 3
    assert around(time.time() - began, 0.1)

    assert not RateLimit.isAllowed("counting async", 0.1)
    time.sleep(0.11)  # Let the window expire
    assert RateLimit.isAllowed("counting async", 0.1)
def testCall(self):
    """RateLimit.call: synchronous calls are serialized by the allowed_again delay."""
    obj1 = ExampleClass()
    obj2 = ExampleClass()

    began = time.time()
    assert RateLimit.call("counting", allowed_again=0.1, func=obj1.count) == "counted"
    assert around(time.time() - began, 0.0)  # First call is allowed instantly
    assert obj1.counted == 1

    # A second call inside the window blocks until the window elapses;
    # an unrelated event name is unaffected
    assert not RateLimit.isAllowed("counting", 0.1)
    assert RateLimit.isAllowed("something else", 0.1)
    assert RateLimit.call("counting", allowed_again=0.1, func=obj1.count) == "counted"
    assert around(time.time() - began, 0.1)  # Delayed second call within interval
    assert obj1.counted == 2

    # Three concurrent greenlets queue back to back: instant, 0.1s, 0.2s
    began = time.time()
    assert obj2.counted == 0
    threads = [
        gevent.spawn(lambda: RateLimit.call("counting", allowed_again=0.1, func=obj2.count))
        for _ in range(3)
    ]
    gevent.joinall(threads)
    assert [thread.value for thread in threads] == ["counted", "counted", "counted"]
    assert around(time.time() - began, 0.2)

    # Queue drained, so the next call is instant again
    began = time.time()
    assert RateLimit.isAllowed("counting", 0.1)
    assert RateLimit.call("counting", allowed_again=0.1, func=obj2.count) == "counted"
    assert around(time.time() - began, 0.0)
    assert obj2.counted == 4
def actionSitePublish(self, to, privatekey=None, inner_path="content.json", sign=True):
    """Sign (optionally) and publish the site's content file to peers.

    to: websocket request id to respond to
    privatekey: private key used when sign=True
    inner_path: content file to publish
    sign: sign the content file before publishing
    """
    if sign:
        inner_path = self.actionSiteSign(to, privatekey, inner_path, response_ok=False)
        if not inner_path:
            return
    # Publishing
    if not self.site.settings["serving"]:  # Enable site if paused
        self.site.settings["serving"] = True
        self.site.saveSettings()
        self.site.announce()

    event_name = "publish %s %s" % (self.site.address, inner_path)
    called_instantly = RateLimit.isAllowed(event_name, 30)
    thread = RateLimit.callAsync(event_name, 30, self.doSitePublish, self.site, inner_path)  # Only publish once in 30 seconds
    # `linked` flags that a callback is already attached to this pending thread.
    # FIX: hasattr() replaces the slower, unidiomatic `"linked" not in dir(thread)` scan.
    notification = not hasattr(thread, "linked")  # Only display notification on first callback
    thread.linked = True
    if called_instantly:  # Allowed to call instantly
        # At the end callback with request id and thread
        self.cmd("progress", ["publish", _["Content published to {0}/{1} peers."].format(0, 5), 0])
        thread.link(lambda thread: self.cbSitePublish(to, self.site, thread, notification, callback=notification))
    else:
        self.cmd(
            "notification",
            ["info", _["Content publish queued for {0:.0f} seconds."].format(RateLimit.delayLeft(event_name, 30)), 5000]
        )
        self.response(to, "ok")
        # At the end display notification
        thread.link(lambda thread: self.cbSitePublish(to, self.site, thread, notification, callback=False))
def route(self, cmd, req_id, params):
    """Dispatch an incoming peer request to its action handler."""
    self.req_id = req_id
    if cmd == "update":
        # Collapse repeated updates for the same file: only the newest call
        # within the rate-limit window is actually executed
        event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
        if not RateLimit.isAllowed(event):  # There was already an update for this file in the last 10 second
            self.response({"ok": "File update queued"})
        # If called more than once within 10 sec only keep the last update
        RateLimit.callAsync(event, 10, self.actionUpdate, params)
        return
    # Uniform handlers that take the params dict directly
    handlers = {
        "getFile": self.actionGetFile,
        "streamFile": self.actionStreamFile,
        "pex": self.actionPex,
        "listModified": self.actionListModified,
        "getHashfield": self.actionGetHashfield,
        "findHashIds": self.actionFindHashIds,
    }
    if cmd in handlers:
        handlers[cmd](params)
    elif cmd == "ping":
        self.actionPing()  # Takes no params
    else:
        self.actionUnknown(cmd, params)
def route(self, cmd, req_id, params):
    """Route an incoming request, enforcing the connection's site lock.

    Returns False (after an error response) on a site-lock violation.
    """
    self.req_id = req_id
    # Don't allow other sites than locked
    if (
        "site" in params and self.connection.site_lock and
        self.connection.site_lock not in (params["site"], "global")
    ):
        self.response({"error": "Invalid site"})
        # BUG FIX: the format string has two placeholders but the original passed a
        # single boolean (site_lock != params["site"]), raising TypeError when logging
        self.log.error("Site lock violation: %s != %s" % (self.connection.site_lock, params["site"]))
        return False

    if cmd == "update":
        event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
        if not RateLimit.isAllowed(event):  # There was already an update for this file in the last 10 second
            self.response({"ok": "File update queued"})
        # If called more than once within 10 sec only keep the last update
        RateLimit.callAsync(event, 10, self.actionUpdate, params)
    else:
        # Generic dispatch: "pex" -> actionPex, etc.
        func_name = "action" + cmd[0].upper() + cmd[1:]
        func = getattr(self, func_name, None)
        if func:
            func(params)
        else:
            self.actionUnknown(cmd, params)
def route(self, cmd, req_id, params):
    """Route a request; penalize site-lock violations and throttle CPU-hungry connections.

    Returns False (after an error response and a badAction penalty) on a
    site-lock violation.
    """
    self.req_id = req_id
    # Don't allow other sites than locked
    if "site" in params and self.connection.site_lock and self.connection.site_lock not in (params["site"], "global"):
        self.response({"error": "Invalid site"})
        # BUG FIX: the format string has two placeholders but the original passed a
        # single boolean (site_lock != params["site"]), raising TypeError when logging
        self.log.error("Site lock violation: %s != %s" % (self.connection.site_lock, params["site"]))
        self.connection.badAction(5)
        return False

    if cmd == "update":
        event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
        if not RateLimit.isAllowed(event):  # There was already an update for this file in the last 10 second
            time.sleep(5)  # Slow down the flooding peer before answering
            self.response({"ok": "File update queued"})
        # If called more than once within 15 sec only keep the last update
        RateLimit.callAsync(event, max(self.connection.bad_actions, 15), self.actionUpdate, params)
    else:
        func_name = "action" + cmd[0].upper() + cmd[1:]
        func = getattr(self, func_name, None)
        if cmd not in ["getFile", "streamFile"]:  # Skip IO bound functions
            s = time.time()
            if self.connection.cpu_time > 0.5:
                # This connection already used a lot of CPU: delay it proportionally
                self.log.debug(
                    "Delay %s %s, cpu_time used by connection: %.3fs" %
                    (self.connection.ip, cmd, self.connection.cpu_time)
                )
                time.sleep(self.connection.cpu_time)
                if self.connection.cpu_time > 5:
                    self.connection.close()
        if func:
            func(params)
        else:
            self.actionUnknown(cmd, params)
        if cmd not in ["getFile", "streamFile"]:
            # Account the wall time of the handler against this connection
            taken = time.time() - s
            self.connection.cpu_time += taken
def actionFindHashIds(self, params):
    # Respond with peers (IPv4 and onion) known to have the requested hash_ids,
    # adding ourselves to the result when our own hashfield matches.
    site = self.sites.get(params["site"])
    s = time.time()  # For the verbose timing log at the end
    if not site or not site.settings["serving"]:  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False
    event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
    # Expensive lookup: a busy connection, or a repeat of the same query within
    # 5 minutes, only gets a limited (10 result) search plus a small delay
    if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
        time.sleep(0.1)
        back_ip4, back_onion = self.findHashIds(site, params["hash_ids"], limit=10)
    else:
        back_ip4, back_onion = self.findHashIds(site, params["hash_ids"])
    RateLimit.called(event_key)

    # Check my hashfield: pick my own packed address and which result dict
    # (onion vs ip4) to advertise myself in. Note: my_back ALIASES one of the
    # dicts sent in the response below, so appending here edits the response.
    if self.server.tor_manager and self.server.tor_manager.site_onions.get(site.address):  # Running onion
        my_ip = helper.packOnionAddress(self.server.tor_manager.site_onions[site.address], self.server.port)
        my_back = back_onion
    elif config.ip_external:  # External ip defined
        my_ip = helper.packAddress(config.ip_external, self.server.port)
        my_back = back_ip4
    elif self.server.ip and self.server.ip != "*":  # No external ip defined
        my_ip = helper.packAddress(self.server.ip, self.server.port)
        my_back = back_ip4
    else:
        my_ip = None  # No usable address: listed hash_ids still get an (empty) entry
        my_back = back_ip4

    my_hashfield_set = set(site.content_manager.hashfield)  # O(1) membership tests
    for hash_id in params["hash_ids"]:
        if hash_id in my_hashfield_set:
            if hash_id not in my_back:
                my_back[hash_id] = []
            if my_ip:
                my_back[hash_id].append(my_ip)  # Add myself

    if config.verbose:
        self.log.debug(
            "Found: IP4: %s, Onion: %s for %s hashids in %.3fs" %
            (len(back_ip4), len(back_onion), len(params["hash_ids"]), time.time() - s)
        )
    self.response({"peers": back_ip4, "peers_onion": back_onion})
def actionMergerSiteAdd(self, to, addresses):
    """Add one or more merged sites to this merger site.

    A single address within the rate limit is added without confirmation;
    otherwise the user is asked to confirm.
    """
    if not isinstance(addresses, list):  # Single site add
        # FIX: isinstance() instead of `type(addresses) != list`, which wrongly
        # re-wrapped list subclasses into a one-element list
        addresses = [addresses]
    # Check if the site has merger permission
    merger_types = merger_db.get(self.site.address)
    if not merger_types:
        return self.response(to, {"error": "Not a merger site"})

    if RateLimit.isAllowed(self.site.address + "-MergerSiteAdd", 10) and len(addresses) == 1:
        # Without confirmation if only one site address and not called in last 10 sec
        self.cbMergerSiteAdd(to, addresses)
    else:
        # FIX: `lambda (res):` is Python2-only tuple-parameter syntax (a
        # SyntaxError on Python 3); a plain parameter behaves identically
        self.cmd(
            "confirm",
            ["Add <b>%s</b> new site?" % len(addresses), "Add"],
            lambda res: self.cbMergerSiteAdd(to, addresses)
        )
    self.response(to, "ok")
def actionFindHashIds(self, params):
    """Respond with peers (ipv4/ipv6/onion) having the requested hash_ids,
    plus the subset of hash_ids we hold ourselves ("my")."""
    site = self.sites.get(params["site"])
    s = time.time()  # For the verbose timing log below
    if not site or not site.settings["serving"]:  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
    # Expensive lookup: busy connection or a repeat of the same query within
    # 5 minutes only gets a limited (10 result) search plus a small delay
    if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
        time.sleep(0.1)
        back = self.findHashIds(site, params["hash_ids"], limit=10)
    else:
        back = self.findHashIds(site, params["hash_ids"])
    RateLimit.called(event_key)

    # Requested hash_ids that are present in our own hashfield
    my_hashfield_set = set(site.content_manager.hashfield)
    my_hashes = [hash_id for hash_id in params["hash_ids"] if hash_id in my_hashfield_set]

    if config.verbose:
        # FIX: items() instead of Python2-only iteritems() — identical result in
        # the dict comprehension, but also runs on Python 3
        self.log.debug(
            "Found: %s for %s hashids in %.3fs" %
            ({key: len(val) for key, val in back.items()}, len(params["hash_ids"]), time.time() - s)
        )
    self.response({
        "peers": back["ipv4"],
        "peers_onion": back["onion"],
        "peers_ipv6": back["ipv6"],
        "my": my_hashes
    })
def actionFindHashIds(self, params):
    # Respond with peers (IPv4, onion and I2P) known to have the requested
    # hash_ids, adding ourselves to the result when our hashfield matches.
    site = self.sites.get(params["site"])
    s = time.time()  # For the verbose timing log at the end
    if not site or not site.settings["serving"]:  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False
    event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
    # Expensive lookup: a busy connection, or a repeat of the same query within
    # 5 minutes, only gets a limited (10 result) search plus a small delay
    if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
        time.sleep(0.1)
        back_ip4, back_onion, back_i2p = self.findHashIds(site, params["hash_ids"], limit=10)
    else:
        back_ip4, back_onion, back_i2p = self.findHashIds(site, params["hash_ids"])
    RateLimit.called(event_key)

    # Check my hashfield: pick my own packed address and which result dict to
    # advertise myself in. Note: my_back ALIASES one of the dicts sent in the
    # response below, so appending here edits the response.
    if self.server.tor_manager and self.server.tor_manager.site_onions.get(site.address):  # Running onion
        my_ip = helper.packOnionAddress(self.server.tor_manager.site_onions[site.address], self.server.port)
        my_back = back_onion
    # NOTE(review): i2p_manager.site_onions mirrors the tor_manager attribute
    # name — confirm the I2P manager really exposes `site_onions`
    elif self.server.i2p_manager and self.server.i2p_manager.site_onions.get(site.address):  # Running i2p
        my_ip = helper.packI2PAddress(self.server.i2p_manager.site_onions[site.address], self.server.port)
        my_back = back_i2p
    elif config.ip_external:  # External ip defined
        my_ip = helper.packAddress(config.ip_external, self.server.port)
        my_back = back_ip4
    elif self.server.ip and self.server.ip != "*":  # No external ip defined
        my_ip = helper.packAddress(self.server.ip, self.server.port)
        my_back = back_ip4
    else:
        my_ip = None  # No usable address: listed hash_ids still get an (empty) entry
        my_back = back_ip4

    my_hashfield_set = set(site.content_manager.hashfield)  # O(1) membership tests
    for hash_id in params["hash_ids"]:
        if hash_id in my_hashfield_set:
            if hash_id not in my_back:
                my_back[hash_id] = []
            if my_ip:
                my_back[hash_id].append(my_ip)  # Add myself

    if config.verbose:
        self.log.debug(
            "Found: IP4: %s, Onion: %s, I2P: %s for %s hashids in %.3fs" %
            (len(back_ip4), len(back_onion), len(back_i2p), len(params["hash_ids"]), time.time() - s)
        )
    self.response({"peers": back_ip4, "peers_onion": back_onion, "peers_i2p": back_i2p})