def init_my_peer(cls, config, mongo, network):
    import socket
    from miniupnpc import UPnP

    try:
        # pick a free local port to serve on
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind((config.serve_host, 0))
        server_port = sock.getsockname()[1]
        sock.close()

        # find a free external port and map it to the local serve port via UPnP
        eport = server_port
        u = UPnP(None, None, 200, 0)
        u.discover()
        u.selectigd()
        r = u.getspecificportmapping(eport, 'TCP')
        while r is not None and eport < 65536:
            eport = eport + 1
            r = u.getspecificportmapping(eport, 'TCP')
        b = u.addportmapping(eport, 'TCP', u.lanaddr, server_port,
                             'UPnP YadaCoin Serve port %u' % eport, '')
        config.serve_host = '0.0.0.0'
        config.serve_port = server_port
        config.peer_host = u.externalipaddress()
        # NOTE: the externally mapped port is eport, which may differ from server_port
        # if server_port was already mapped on the gateway
        config.peer_port = server_port
    except Exception as e:
        print(e)
        # leave the configured host/port values unchanged
        print('UPnP failed: you must forward and/or whitelist port', config.peer_port)

    cls.save_my_peer(config, mongo, network)
    return cls(config, mongo, config.peer_host, config.peer_port)
def ensure_port_is_forwarded(protocol: SupportedTransportProtocols = SupportedTransportProtocols.TCP,
                             external_port: int = LISTENING_PORT,
                             internal_port: int = LISTENING_PORT):
    """
    Ensures that the given external to internal port mapping has been forwarded to allow inbound
    connections to this host on this port. If already done, whether by lack of NAT, firewall, or
    static port forwarding, no action is taken. Otherwise, the port is forwarded dynamically using
    UPnP (if supported by the router and OS) for the duration of the application's runtime.

    :param protocol: Transport protocol of port to be mapped
    :param external_port: Port as viewable from the internet
    :param internal_port: Port as viewable from the LAN
    """
    upnp = UPnP()
    upnp.discoverdelay = 20  # gives up after 20 ms

    port_forwarded = False
    discovered_count = upnp.discover()
    if discovered_count > 0:
        upnp.selectigd()
        port_forwarded = upnp.addportmapping(external_port, protocol.value, upnp.lanaddr,
                                             internal_port, 'UChat P2P Messaging', '')

    if not port_forwarded:
        # Signal failure and explain how to set up static port forwarding instead
        print_err(2, "Unable to open UPnP {} port {}".format(protocol.value, external_port))
    else:
        print("UPnP Port Mapping added")
def upnp():
    u = UPnP()
    u.discoverdelay = 2000
    if u.discover():
        u.selectigd()
        eport = 81
        r = u.getspecificportmapping(eport, 'TCP')
        while r is not None and eport < 65536:
            eport = eport + 1
            r = u.getspecificportmapping(eport, 'TCP')
        u.addportmapping(eport, 'TCP', u.lanaddr, 80, 'HubDNS port forwarding', '')
def delete_port_mapping(protocol: SupportedTransportProtocols = SupportedTransportProtocols.TCP,
                        external_port: int = LISTENING_PORT):
    """
    Removes any UPnP temporary port forwards, to be executed on the application's closure

    :param protocol: Transport protocol of port to be deleted
    :param external_port: Port as viewable from internet
    """
    port_forward_deleted = False
    upnp = UPnP()
    upnp.discoverdelay = 20  # gives up after 20 ms

    try:
        discovered_count = upnp.discover()
        if discovered_count > 0:
            upnp.selectigd()
            port_forward_deleted = upnp.deleteportmapping(external_port, protocol.value)
        if not port_forward_deleted:
            # Signal failure and explain how to clean up static port forwarding instead
            print_err(2, "Failed to delete UPnP Port Mapping on {} port {}".format(
                protocol.value, external_port))
    except Exception as e:
        print(type(e))
        print(str(e))

    if port_forward_deleted:
        print("UPnP Port Mapping Deleted")
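# A minimal usage sketch for the two helpers above, assuming the SupportedTransportProtocols
# enum, the LISTENING_PORT constant, and both functions from these excerpts. It forwards the
# listening port at startup and registers the cleanup so the temporary UPnP mapping is removed
# when the process exits. This pairing is an illustrative assumption, not part of the original
# module.
import atexit

def setup_port_forwarding():
    ensure_port_is_forwarded(SupportedTransportProtocols.TCP, LISTENING_PORT, LISTENING_PORT)
    # undo the dynamic mapping on normal interpreter shutdown
    atexit.register(delete_port_mapping, SupportedTransportProtocols.TCP, LISTENING_PORT)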
def __init__(self, debug=False, config_dir=None):
    self.platform_config = PlatformConfig(config_dir=config_dir)
    if not logger.factory_instance:
        console = True if debug else False
        level = logging.DEBUG if debug else logging.INFO
        logger.init(level, console, join(self.platform_config.get_platform_log()))

    self.user_platform_config = PlatformUserConfig(self.platform_config.get_user_config())
    self.log_aggregator = Aggregator(self.platform_config)
    self.platform_app_paths = AppPaths(PLATFORM_APP_NAME, self.platform_config)
    self.platform_app_paths.get_data_dir()
    self.versions = Versions(self.platform_config)
    self.redirect_service = RedirectService(self.user_platform_config, self.versions)
    self.port_config = PortConfig(self.platform_app_paths.get_data_dir())
    self.nat_pmp_port_mapper = NatPmpPortMapper()
    self.upnp_port_mapper = UpnpPortMapper(UPnP())
    self.port_mapper_factory = PortMapperFactory(self.nat_pmp_port_mapper, self.upnp_port_mapper)
    self.port_drill_factory = PortDrillFactory(self.user_platform_config, self.port_config,
                                               self.port_mapper_factory)
    self.device_info = DeviceInfo(self.user_platform_config, self.port_config)
    if self.platform_config.get_installer() == 'sam':
        self.sam = SamStub(self.platform_config, self.device_info)
    else:
        self.sam = Snap(self.platform_config, self.device_info)
    self.platform_cron = PlatformCron(self.platform_config)
    self.systemctl = Systemctl(self.platform_config)
    self.ldap_auth = LdapAuth(self.platform_config, self.systemctl)
    self.event_trigger = EventTrigger(self.sam, self.platform_config)
    self.nginx = Nginx(self.platform_config, self.systemctl, self.device_info)
    self.certbot_genetator = CertbotGenerator(self.platform_config, self.user_platform_config,
                                              self.device_info, self.sam)
    self.tls = CertificateGenerator(self.platform_config, self.user_platform_config,
                                    self.device_info, self.nginx, self.certbot_genetator)
    self.device = Device(self.platform_config, self.user_platform_config, self.redirect_service,
                         self.port_drill_factory, self.sam, self.platform_cron, self.ldap_auth,
                         self.event_trigger, self.tls, self.nginx)
    self.internal = Internal(self.platform_config, self.device, self.redirect_service, self.log_aggregator)
    self.path_checker = PathChecker(self.platform_config)
    self.lsblk = Lsblk(self.platform_config, self.path_checker)
    self.hardware = Hardware(self.platform_config, self.event_trigger, self.lsblk,
                             self.path_checker, self.systemctl)
    self.network = Network()
    self.public = Public(self.platform_config, self.user_platform_config, self.device,
                         self.device_info, self.sam, self.hardware, self.redirect_service,
                         self.log_aggregator, self.certbot_genetator, self.port_mapper_factory,
                         self.network, self.port_config)
    self.udev = Udev(self.platform_config)
def initupnp(port):
    from miniupnpc import UPnP

    upnp = UPnP()
    upnp.discover()
    upnp.selectigd()
    map_res = upnp.addportmapping(port, 'TCP', upnp.lanaddr, port, 'some str', '')
    return map_res
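# Hypothetical caller for initupnp() above (the port number 8333 is only an example, not from
# the original project). Depending on the miniupnpc version, a failed mapping may return a
# falsy value or raise an exception, so both cases are handled here.
try:
    if not initupnp(8333):
        print('UPnP mapping failed; forward TCP port 8333 on your router manually')
except Exception as exc:
    print('UPnP mapping failed ({}); forward TCP port 8333 on your router manually'.format(exc))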
def __init__(self, debug=False):
    self.platform_config = PlatformConfig()
    if not logger.factory_instance:
        console = True if debug else False
        level = logging.DEBUG if debug else logging.INFO
        logger.init(level, console, join(self.platform_config.get_platform_log()))

    self.user_platform_config = PlatformUserConfig(self.platform_config.get_user_config())
    self.log_aggregator = Aggregator(self.platform_config)
    self.platform_app_paths = AppPaths(PLATFORM_APP_NAME, self.platform_config)
    self.platform_app_paths.get_data_dir()
    self.redirect_service = RedirectService(self.user_platform_config, platform_version)
    self.port_config = PortConfig(self.platform_app_paths.get_data_dir())
    self.nat_pmp_port_mapper = NatPmpPortMapper()
    self.upnp_port_mapper = UpnpPortMapper(UPnP())
    self.port_drill_factory = PortDrillFactory(self.user_platform_config, self.port_config,
                                               self.nat_pmp_port_mapper, self.upnp_port_mapper)
    self.info = DeviceInfo(self.user_platform_config, self.port_config)
    self.sam = SamStub(self.platform_config, self.info)
    self.platform_cron = PlatformCron(self.platform_config)
    self.ldap_auth = LdapAuth(self.platform_config)
    self.event_trigger = EventTrigger(self.sam)
    self.nginx = Nginx(self.platform_config)
    self.tls = Tls(self.platform_config, self.info, self.nginx)
    self.device = Device(self.platform_config, self.user_platform_config, self.redirect_service,
                         self.port_drill_factory, self.sam, self.platform_cron, self.ldap_auth,
                         self.event_trigger, self.tls)
    self.internal = Internal(self.platform_config, self.device, self.redirect_service, self.log_aggregator)
    self.path_checker = PathChecker(self.platform_config)
    self.lsblk = Lsblk(self.platform_config, self.path_checker)
    self.hardware = Hardware(self.platform_config, self.event_trigger, self.lsblk, self.path_checker)
    self.public = Public(self.platform_config, self.user_platform_config, self.device, self.info,
                         self.sam, self.hardware, self.redirect_service, self.log_aggregator)
    self.udev = Udev(self.platform_config)
@pytest.fixture(scope="module") def http_server(request): server = SomeHttpServer(18088) server.start() def fin(): server.stop() request.addfinalizer(fin) return server ids = [] mappers = [] mapper = provide_mapper(NatPmpPortMapper(), UpnpPortMapper(UPnP())) if mapper is not None: ids.append(mapper.name()) mappers.append(mapper) @pytest.mark.parametrize("mapper", mappers, ids=ids) def test_external_ip(mapper): external_ip = mapper.external_ip() assert external_ip is not None @pytest.mark.parametrize("mapper", mappers, ids=ids) def test_add_mapping_simple(http_server, mapper): external_port = mapper.add_mapping(http_server.port, http_server.port, 'TCP') assert external_port is not None
class miniUPNP(object):

    def __init__(self, cport, dport, log=None):
        self.cport = cport
        self.dport = dport
        self.log = log

    def start(self):
        self.upnp = UPnP()
        self.upnp.discoverdelay = 3000
        devices = self.upnp.discover()
        if not devices:
            reactor.callFromThread(self.log.error, 'no upnp device found')
            raise ValueError('no devices found')
        f = self.upnp.selectigd()
        reactor.callFromThread(self.log.info, 'upnp device found')
        self.ip = self.upnp.lanaddr
        newcport = self.addPortMapping(self.cport)
        if newcport:
            newdport = self.addPortMapping(self.dport)
            if newdport:
                return (newcport, newdport)
            else:
                raise ValueError('could not forward data port')
        else:
            raise ValueError('could not forward control port')

    def addPortMapping(self, port):
        reactor.callFromThread(self.log.info, 'trying to forward port %d', port)
        pm = self.upnp.getspecificportmapping(port, 'UDP')
        if pm:
            if self.ip == pm[0]:
                print('port is already forwarded for this ip')
                reactor.callFromThread(self.log.info, 'port %d is already forwarded for %s', port, self.ip)
                return port
            else:
                return self.addPortMapping(port=findNextUDPPort(port))
        try:
            b = self.upnp.addportmapping(port, 'UDP', self.ip, port, 'P2NER', '')
        except Exception:
            reactor.callFromThread(self.log.warning, 'a problem occurred trying to forward port %d', port)
            reactor.callFromThread(self.log.warning, 'validating if port %d was correctly forwarded', port)
            self.upnp.discover()
            d = self.upnp.selectigd()
            pm = self.upnp.getspecificportmapping(port, 'UDP')
            b = False
            if pm and self.ip in pm:
                b = True
        if b:
            reactor.callFromThread(self.log.info, 'port %d was successfully forwarded', port)
            return port
        else:
            reactor.callFromThread(self.log.warning, "couldn't forward port %d", port)
            return False
def http_server(request):
    server = SomeHttpServer(18088)
    server.start()

    def fin():
        server.stop()

    request.addfinalizer(fin)
    return server


ids = []
mappers = []
mapper = PortMapperFactory(NatPmpPortMapper(), UpnpPortMapper(UPnP())).provide_mapper()
if mapper is not None:
    ids.append(mapper.name())
    mappers.append(mapper)


# @pytest.mark.parametrize("mapper", mappers, ids=ids)
# def test_external_ip(mapper):
#     external_ip = mapper.external_ip()
#     assert external_ip is not None
#
#
# @pytest.mark.parametrize("mapper", mappers, ids=ids)
# def test_add_mapping_simple(http_server, mapper):
#     external_port = mapper.add_mapping(http_server.port, http_server.port, 'TCP')
#     assert external_port is not None
#     external_ip = mapper.external_ip()
from yaml import load, FullLoader
from miniupnpc import UPnP
import requests

# load the configuration file
with open('config.yaml') as f:
    config = load(f, Loader=FullLoader)

# use UPnP to get the external IP address
upnp = UPnP()
upnp.discoverdelay = 200
upnp.discover()
upnp.selectigd()
ip = upnp.externalipaddress()

# set up variables needed for api access
api_url = 'https://api.cloudflare.com/client/v4'
headers = {
    'Authorization': 'Bearer ' + config['api_token'],
    'Content-Type': 'application/json'
}

# get the DNS record so we can save the ID and check the IP
url = '{}/zones/{}/dns_records?name={}'.format(api_url, config['zone_id'], config['hostname'])
response = requests.get(url, headers=headers).json()

# nothing to do if the record already points at the current external IP
if response['result'][0]['content'] == ip:
    exit()
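# The excerpt above stops at the early exit when the record already matches the external IP.
# One possible continuation (an assumption, not part of the original script) updates the A
# record through the Cloudflare v4 API when the address has changed:
record = response['result'][0]
update_url = '{}/zones/{}/dns_records/{}'.format(api_url, config['zone_id'], record['id'])
payload = {
    'type': 'A',
    'name': config['hostname'],
    'content': ip,
    'proxied': record.get('proxied', False),
}
update = requests.put(update_url, headers=headers, json=payload)
update.raise_for_status()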
class P2PServer:
    """ An asynchronous TLS server for quip client communication """

    def __init__(self, host, port, certfile, keyfile, phrase, profileId):
        """
        Peer to Peer server constructor

        @param host: IP to listen on
        @param port: TCP port to listen for incoming commands
        @param certfile: SSL certificate file path
        @param keyfile: SSL key file path
        @param phrase: logged in user's pass phrase
        @param profileId: logged in profile ID
        @return: P2P server object
        """
        self.upnpClient = UPnP()
        # port forward successful
        self.forwarded = False
        self.server = None
        self.sock = None
        self.timeout = int(Config.idle_timeout)
        #self.loop = asyncio.get_event_loop()
        self.host = host
        self.port = port
        self.certfile = certfile
        self.keyfile = keyfile
        self.safe = SecretBox(getLocalAuth(profileId, phrase))
        self.profileId = profileId
        self.uid, _ = getAccount(self.safe, profileId)
        self.hashchain = defaultdict(bytes)
        # messages awaiting to be read. uid -> [message, ]
        self.messages = defaultdict(list)
        # auth tokens awaiting server storage
        self.auth = []
        # new avatar received
        self.avatar = False
        # incoming transfer requests
        self.fileRequests = FileRequests(self.safe, self.profileId)
        # outgoing transfers
        self.fileRequestsOut = FileRequests(self.safe, self.profileId, outgoing=True)
        # friend uid->mask container
        self.friendMasks = Masks(self.safe, self.profileId)
        logging.basicConfig(filename='Logs/{:s}.log'.format(datetime.date(datetime.now()).isoformat()),
                            level=logging.DEBUG, format='%(asctime)s %(message)s')

    def _createSocket(self):
        """
        Create a TCP socket

        @return: socket object
        """
        # TCP socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((self.host, self.port))
        return self.sock

    def _createSSLContext(self):
        """
        Create a secure TLS context.

        @return: SSLContext object
        """
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        # disallow other protocols
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.options |= ssl.OP_NO_TLSv1
        context.options |= ssl.OP_NO_TLSv1_1
        # implement correct cipher suite setup
        # NOTE: secp256k1 used until openssl 1.0.2 is stable, where brainpoolp256t1 is the potentially chosen curve.
        # Additionally, see http://safecurves.cr.yp.to and http://safecurves.cr.yp.to/rigid.html for recommended curves
        context.set_ecdh_curve('secp256k1')
        context.set_ciphers('ECDHE-ECDSA-AES256-GCM-SHA384')
        # disable compression on ssl channel due to issues like CRIME and BREACH attacks
        context.options |= ssl.OP_NO_COMPRESSION
        # Prevents re-use of the same ECDH key for distinct SSL sessions
        context.options |= ssl.OP_SINGLE_ECDH_USE
        # Add DH re-use prevention in case of future cipher change
        context.options |= ssl.OP_SINGLE_DH_USE
        # Enforce server's cipher ordering preference
        context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE
        # Client certificate validation not used
        context.verify_mode = ssl.CERT_NONE
        # cert loading
        if not self.keyfile:
            context.load_cert_chain(self.certfile)
        else:
            context.load_cert_chain(self.certfile, self.keyfile)
        return context

    @asyncio.coroutine
    def _close_connection(self, writer, reason=None):
        """
        Close the connection

        @param writer: StreamWriter object
        @param reason: (Optional) bytes/bytestring reason connection is closed (API errno)
        @return: True if successful, else False
        """
        try:
            if reason is not None:
                writer.write(reason)
                yield from writer.drain()
            writer.close()
        except Exception:
            return False
        return True

    @asyncio.coroutine
    def _command_dispatch(self, client_reader, client_writer, command, data):
        """
        Process client command

        @param client_reader: connected TLS client StreamReader object
        @param client_writer: connected TLS client StreamWriter object
        @param command: authorised command
        @param data: first line of data
        @return: command output
        """
        returnData = b''
        ###############################
        # Authorised Command Execution
        ###############################
        mask = self.friendMasks[data[-36:]]
        if command is receiveMessage:
            msg = yield from receiveMessage(self.safe, self.profileId, mask, data)
            if msg:
                # uid-> [(rowid, message, tstamp), ...]
                self.messages[msg[1]].append((msg[0], msg[2], None))
                returnData = BTRUE
            else:
                returnData = BFALSE
        elif command is receiveAvatar:
            returnData = yield from receiveAvatar(client_reader, client_writer, self.safe,
                                                  self.profileId, mask, data[:-36 - COMMAND_LENGTH])
            if len(returnData) > 3:
                self.avatar = True
        elif command is requestSendFile:
            # timeout set to config file_timeout
            fdata = yield from requestSendFile(self.safe, self.profileId, mask, data)
            if fdata:
                # reload available incoming file transfer requests
                self.fileRequests.reload()
            returnData = BTRUE if fdata else BFALSE
        elif command is sendFile:
            success = yield from sendFile(client_writer, self.safe, self.profileId, mask,
                                          data[:-36 - COMMAND_LENGTH], Config.file_expiry, Config.max_chunk)
            if not success and (mask, data[:-36 - COMMAND_LENGTH]) in self.fileRequestsOut.keys():
                self.fileRequestsOut.reload()
            # return no data as all communication is handled in the sendFile handler
            returnData = b''
        elif command is inviteChat:
            # chat invite
            returnData = yield from inviteChat(client_reader, data)
        return returnData

    @asyncio.coroutine
    def _handle_client(self, client_reader, client_writer, address):
        """
        This method actually does the work to handle the requests for a specific client.
        The protocol is byte AND line oriented, the command is read first (8 bytes) and matched
        against available commands, the complete line (up to 64k) is read into memory and must
        end with a newline character. The received data is verified against the received user
        id's public key. Once verified, the matched command is executed.

        @param client_reader: StreamReader object
        @param client_writer: StreamWriter object
        """
        while True:
            # received incoming data time stamp
            stamp = int(time())
            # read command first, only take first 8 bytes
            cmd = yield from client_reader.readexactly(COMMAND_LENGTH)
            if not cmd:
                # nothing received from client
                break

            ################
            # Data checking
            ################
            # check received command is valid
            try:
                command = Commands[int(cmd)]
            except ValueError:
                logging.info("\t".join(("Invalid Command Provided By Client",
                                        "IP: {!r}".format(address),
                                        "Command: {!r}".format(cmd))))
                yield from self._close_connection(client_writer, INVALID_COMMAND)
                return
            try:
                # use of encoded data allows for direct readline()
                data = yield from client_reader.readline()
            except asyncio.futures.TimeoutError:
                yield from self._close_connection(client_writer, TIMEOUT)
                return
            # clear up data before proceeding (i.e. newline char)
            data = data.strip()

            ################
            # Authorisation
            ################
            if command != friendAcceptance:
                try:
                    data = a85decode(data, foldspaces=True)
                except ValueError:
                    logging.info("Invalid data received, unable to decode as ASCII85")
                    yield from self._close_connection(client_writer, INVALID_COMMAND)
                    return
                # verify data integrity
                try:
                    # using getAuthority hits the disk db
                    data = VerifyKey(getAuthority(self.safe, self.profileId, self.friendMasks[data[-36:]])[0],
                                     encoder=HexEncoder).verify(data)
                except (BadSignatureError, TypeError):
                    # signed data does not match stored public key for provided user id
                    logging.warning('\t'.join(("Unable to verify sent data",
                                               "IP: {!r}".format(address),
                                               "Data: {!r}".format(data),
                                               "Sent ID: {!r}".format(data[-36:]))))
                    yield from self._close_connection(client_writer, INVALID_DATA)
                    return

                # data received as: timestamp, hash chain hex, destination user id, data, sender user id
                tstamp, chain, dest, origin = data[:10], data[10:50], data[50:86], data[-36:]
                data = data[86:-36]

                ###########################
                # Message integrity checks
                ###########################
                integrity = True
                # validation checks
                if dest != self.uid:
                    logging.warning("Message Integrity Failure: User id destination {!r} differs from logged in user".format(dest))
                    integrity = False
                try:
                    # reject timestamps outside the allowed window around the receive time
                    if integrity and not (stamp - LIMIT_MESSAGE_TIME) < int(tstamp) < (stamp + LIMIT_MESSAGE_TIME):
                        logging.warning("Message Integrity Failure: Message is either too old or in the future, current time '{}' received stamp '{}'".format(stamp, int(tstamp)))
                        integrity = False
                except ValueError:
                    logging.warning("Message Integrity Failure: Message time {!r} is invalid".format(tstamp))
                if integrity and isValidUUID(origin):
                    hchain = bytes(sha1(b''.join((self.hashchain[address], data))).hexdigest(), encoding='ascii')
                    if hchain != chain:
                        logging.warning("Message Integrity Failure: Provided hash chain {!r} does not match local {!r}".format(chain, hchain))
                        integrity = False
                else:
                    logging.warning("Message Integrity Failure: Invalid UUID provided by {!r}".format(origin))
                    integrity = False
                if not integrity:
                    yield from self._close_connection(client_writer, INVALID_COMMAND)
                    return
                try:
                    # ensure command integrity for authorised commands
                    assert Commands[int(data[-COMMAND_LENGTH:])] == command
                except (ValueError, AssertionError):
                    logging.info('\t'.join(("Client unsigned CMD data does not equal signed CMD",
                                            "IP: {!r}".format(address),
                                            "Unsigned Command: {!r}".format(cmd),
                                            "Signed Command: {!r}".format(data[-COMMAND_LENGTH:]))))
                    yield from self._close_connection(client_writer, INVALID_DATA)
                    return

                # all checks cleared, update hash chain
                self.hashchain[address] = hchain
                # data verified, execute command with data and origin user id
                returnData = yield from self._command_dispatch(client_reader, client_writer, command,
                                                               b''.join((data, origin)))
            else:
                returnData, authToken = yield from friendAcceptance(client_reader, client_writer,
                                                                    self.safe, self.profileId, data)
                if authToken is not None:
                    self.auth.append(authToken)

            # send command result to client
            client_writer.write(returnData)
            # Flush buffer
            yield from client_writer.drain()

    @asyncio.coroutine
    def _accept_client(self, client_reader, client_writer):
        """
        Callback method used by start_server (or create_server).

        Accepts a new client connection and dispatch asynchronous client handling
        """
        address = client_writer.transport.get_extra_info('peername')
        # reset hash chain on new connection
        self.hashchain[address] = b''
        # start a new Task to handle this specific client connection
        #task = asyncio.Task(self._handle_client(client_reader, client_writer, address))
        #loop = asyncio.get_event_loop()
        # task = loop.create_task(self._handle_client(client_reader, client_writer, address))
        yield from self._handle_client(client_reader, client_writer, address)

    def portForward(self, port=None, protocol='TCP'):
        if port:
            self.port = port
        try:
            # clear any previous port-forwarding rule on router
            self.upnpClient.deleteportmapping(self.port, protocol)
        except Exception as e:
            # if the port is already removed, a base Exception will be thrown
            pass
        # (externalPort, protocol, internalHost, internalPort, desc, remoteHost)
        self.forwarded = self.upnpClient.addportmapping(self.port, protocol,
                                                        self.host if self.host else self.upnpClient.lanaddr,
                                                        self.port, 'Quip Client', '')

    def start(self, loop):
        """
        Starts the TLS server to listen for incoming peers

        For each client that connects, the accept_client method gets called.
        This method runs the loop until the server sockets are ready to accept connections.
        """
        # attempt automatic port-forwarding
        self.upnpClient.discover()
        try:
            self.upnpClient.selectigd()
            self.portForward()
        except Exception as e:
            # unable to find router which supports automatic port forwarding
            pass
        # use created SSLContext with create_server() or start_server() (abstracts create_server(),
        # takes direct callback instead of Protocol Factory) (see PEP 3156)
        self.server = loop.run_until_complete(
            asyncio.streams.start_server(self._accept_client,
                                         loop=loop,
                                         ssl=self._createSSLContext(),
                                         sock=self._createSocket()))

    def stop(self, loop):
        """
        Stops the TCP server, closes the listening socket(s).
        """
        # clear port-forwarding rule on router
        if self.forwarded:
            try:
                self.upnpClient.deleteportmapping(self.port, 'TCP')
            except Exception as e:
                logging.warning("Unable to delete port-forwarded port", exc_info=True)
        if self.server is not None:
            self.server.close()
            loop.run_until_complete(self.server.wait_closed())
            self.server = None
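# A rough start/stop sketch for the P2PServer class above. The certificate paths, pass phrase,
# and profile id shown here are placeholders for illustration only; in the real application they
# come from the quip configuration and login flow.
import asyncio

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    server = P2PServer('0.0.0.0', 8443, 'cert.pem', 'key.pem',
                       phrase='passphrase', profileId='profile-uuid')
    server.start(loop)   # attempts UPnP port forwarding, then starts the TLS listener
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.stop(loop)  # removes the UPnP mapping and closes the listening socket
        loop.close()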
                 'graphnewmessages'), methods=['GET', 'POST'])
app.add_url_rule('/wallet', view_func=endpoints.WalletView.as_view('wallet'))
app.add_url_rule('/faucet', view_func=endpoints.FaucetView.as_view('faucet'))
app = socketio.Middleware(sio, app)

# deploy as an eventlet WSGI server
try:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((Config.serve_host, 0))
    server_port = sock.getsockname()[1]
    sock.close()
    eport = server_port
    u = UPnP(None, None, 200, 0)
    u.discover()
    u.selectigd()
    r = u.getspecificportmapping(eport, 'TCP')
    while r is not None and eport < 65536:
        eport = eport + 1
        r = u.getspecificportmapping(eport, 'TCP')
    b = u.addportmapping(eport, 'TCP', u.lanaddr, server_port,
                         'UPnP YadaCoin Serve port %u' % eport, '')
    Config.serve_host = '0.0.0.0'
    Config.serve_port = server_port
    Config.peer_host = u.externalipaddress()
    Config.peer_port = server_port
    print("http://{}:{}/".format(u.externalipaddress(), server_port))
except:
    Config.serve_host = Config.serve_host
def create_upnp_mapping(cls, config):
    from miniupnpc import UPnP

    config = get_config()
    try:
        u = UPnP(None, None, 200, 0)
        u.discover()
        config.igd = u.selectigd()
    except Exception:
        config.igd = ""
    if config.use_pnp:
        import socket
        try:
            # map the peer port, replacing any stale mapping for it
            server_port = config.peer_port
            eport = server_port
            r = u.getspecificportmapping(eport, 'TCP')
            if r:
                u.deleteportmapping(eport, 'TCP')
            u.addportmapping(eport, 'TCP', u.lanaddr, server_port,
                             'UPnP YadaCoin Serve port %u' % eport, '')
            config.peer_host = u.externalipaddress()
            if 'web' in config.modes:
                # map the web serve port as well
                server_port = config.serve_port
                eport = server_port
                r = u.getspecificportmapping(eport, 'TCP')
                if r:
                    u.deleteportmapping(eport, 'TCP')
                u.addportmapping(eport, 'TCP', u.lanaddr, server_port,
                                 'UPnP YadaCoin Serve port %u' % eport, '')
        except Exception as e:
            print(e)
            # leave the configured host/port values unchanged
            print('UPnP failed: you must forward and/or whitelist port', config.peer_port)
def test_many_ports_until_fail():
    mapper = UpnpPortMapper(UPnP())
    base_port = 11000
    for port in range(base_port, base_port + 50, 1):
        mapper.add_mapping(port, port, 'TCP')
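# A possible cleanup companion for the stress test above (an assumption, not part of the
# original test module): remove whatever mappings the loop managed to create so repeated
# runs start from a clean gateway state.
def cleanup_many_ports(base_port=11000, count=50):
    upnp = UPnP()
    if upnp.discover() > 0:
        upnp.selectigd()
        for port in range(base_port, base_port + count):
            try:
                upnp.deleteportmapping(port, 'TCP')
            except Exception:
                # the mapping may not exist if the test failed part-way through
                pass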