def deadlock():
    logger.error(
        'Server resource deadlocked, check for mismatching datetime',
        'server',
        server_id=self.server.id,
        instance_id=self.id,
    )
def reserve(cls, reserve_id, reserve_data, block=False, block_timeout=90):
    if block:
        cursor_id = messenger.get_cursor_id('queue')

    doc = cls.collection.find_and_modify({
        'state': PENDING,
        'reserve_id': reserve_id,
        'reserve_data': None,
    }, {'$set': {
        'reserve_data': reserve_data,
    }}, new=True)

    if not doc:
        return

    if block:
        for msg in messenger.subscribe('queue', cursor_id=cursor_id,
                timeout=block_timeout):
            try:
                if msg['message'] == [COMPLETE, doc['_id']]:
                    return doc
                elif msg['message'] == [ERROR, doc['_id']]:
                    raise QueueTaskError('Error occurred running ' +
                        'queue task', {
                            'queue_id': doc['_id'],
                            'queue_type': doc['type'],
                        })
            except TypeError:
                pass

        logger.error('Blocking queue reserve timed out', {
            'queue_id': doc['_id'],
            'queue_type': doc['type'],
        })

        return doc
    else:
        return doc
def _keep_alive_thread(self):
    while True:
        time.sleep(self.ttl - 6)
        if self.queue_com.state in (COMPLETE, STOPPED):
            break

        response = self.collection.update({
            '_id': self.id,
            'runner_id': self.runner_id,
        }, {'$set': {
            'ttl_timestamp': utils.now() + \
                datetime.timedelta(seconds=self.ttl),
        }})

        if response['updatedExisting']:
            messenger.publish('queue', [UPDATE, self.id])
        else:
            self.queue_com.state_lock.acquire()
            try:
                self.queue_com.state = STOPPED
            finally:
                self.queue_com.state_lock.release()

            logger.error('Lost reserve, queue stopped', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
def _init_hosts(self):
    self.running_lock.acquire()
    try:
        self.running = True
    finally:
        self.running_lock.release()

    doc = self.vxlan_collection.find_one({
        '_id': self.vxlan_id,
        'server_id': self.server_id,
    })
    if not doc:
        logger.error('Lost vxlan doc', 'vxlan',
            vxlan_id=self.vxlan_id,
            server_id=self.server_id,
        )
        return

    for host_vxlan_id, data in enumerate(doc['hosts']):
        host_dst = data.get('host_dst')
        host_dst6 = data.get('host_dst6')
        vxlan_mac = data.get('vxlan_mac')

        if not host_dst or not vxlan_mac:
            continue

        self.add_host(host_vxlan_id + 1, vxlan_mac, host_dst, host_dst6)
def __init__(self, network):
    self.host_interface_data = utils.find_interface(network)
    if not self.host_interface_data:
        logger.error("Failed to find bridged network interface", "server",
            network=network,
        )
        raise BridgeLookupError("Failed to find bridged network interface")

    self.bridge_interface = "br" + self.host_interface_data["interface"]
    self.interfaces = set()
def setup_server():
    db_ver_int = utils.get_db_ver_int()

    listener.add_listener('system', on_system_msg)

    if db_ver_int > settings.local.version_int:
        logger.error('Database version is newer than server version',
            'setup',
            db_version=db_ver_int,
            server_version=settings.local.version_int,
        )
        exit(75)

    global db_setup
    db_setup = not settings.conf.mongodb_uri

    global server_upgrade
    server_upgrade = db_ver_int < settings.local.version_int

    if db_setup or server_upgrade:
        logger.info('Starting setup server', 'setup')

        if not db_setup:
            upgrade_database()

        settings.local.server_start.clear()

        thread = threading.Thread(target=server_thread)
        thread.daemon = True
        thread.start()

        setup_ready.wait()

    utils.set_db_ver(__version__)
def check_output_logged(*args, **kwargs):
    if 'stdout' in kwargs or 'stderr' in kwargs:
        raise ValueError('Output arguments not allowed, it will be overridden')

    process = subprocess.Popen(stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, *args, **kwargs)

    stdoutdata, stderrdata = process.communicate()
    return_code = process.poll()

    if return_code:
        from pritunl import logger

        cmd = kwargs.get('args', args[0])

        logger.error('Popen returned error exit code', 'utils',
            cmd=cmd,
            return_code=return_code,
            stdout=stdoutdata,
            stderr=stderrdata,
        )

        raise subprocess.CalledProcessError(
            return_code, cmd, output=stdoutdata)

    return stdoutdata
def _keep_alive_thread(self):
    while not self.interrupt:
        try:
            doc = self.collection.find_and_modify({
                '_id': self.server.id,
                'instances.instance_id': self.id,
            }, {'$set': {
                'instances.$.ping_timestamp': utils.now(),
            }}, fields={
                '_id': False,
                'instances': True,
            }, new=True)

            yield

            if not doc:
                logger.error(
                    'Instance doc lost, stopping server', 'server',
                    server_id=self.server.id,
                )

                if self.stop_process():
                    break
                else:
                    time.sleep(0.1)
                    continue
        except:
            logger.exception('Failed to update server ping', 'server',
                server_id=self.server.id,
            )

        yield interrupter_sleep(settings.vpn.server_ping)
def _auth(self, factor):
    params = {
        'username': self.username,
        'factor': factor,
    }

    if self.remote_ip:
        params['ipaddr'] = self.remote_ip

    if factor in ('push', 'phone'):
        params['device'] = 'auto'

    if factor == 'push':
        if self.auth_type:
            params['type'] = self.auth_type

        if self.info:
            params['pushinfo'] = urllib.urlencode(self.info)

    if factor == 'passcode':
        params['passcode'] = self.passcode

    headers = _sign('POST', '/auth/v2/auth', params)
    url = 'https://%s/auth/v2/auth' % settings.app.sso_duo_host

    try:
        response = requests.post(url,
            headers=headers,
            params=params,
            timeout=settings.app.sso_timeout,
        )
    except httplib.HTTPException:
        return

    data = response.json()
    resp_data = data.get('response')

    if resp_data and resp_data.get('result') == 'allow':
        if resp_data.get('status') == 'bypass':
            if settings.app.sso == DUO_AUTH:
                logger.error('Cannot use Duo bypass with Duo sso', 'sso',
                    data=resp_data,
                )
                return
            else:
                logger.info('Skipping Duo auth with bypass', 'sso',
                    username=self.username,
                )

        self._valid = True
    elif data.get('code') == 40002:
        if factor == 'push' and self.factor == 'push_phone':
            self._auth('phone')
        else:
            raise InvalidUser('Invalid username')
    else:
        logger.error('Duo authentication failure', 'sso',
            data=data,
        )
def timeout():
    logger.error('Server startup timed out, stopping server', 'server',
        server_id=self.server.id,
        instance_id=self.id,
        state=self.state,
    )
    self.stop_process()
def error_log(self, msg='', level=None, traceback=False):
    if not settings.app.log_web_errors:
        return

    if traceback:
        logger.exception(msg, 'app')
    else:
        logger.error(msg, 'app')
def _monitoring_thread():
    while True:
        try:
            mode = settings.app.monitoring
            prometheus_port = settings.app.prometheus_port
            datadog_api_key = settings.app.datadog_api_key

            if not mode:
                yield interrupter_sleep(3)
                continue

            process = subprocess.Popen(
                ["pritunl-monitor"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=dict(os.environ, **{
                    "HOST_ID": settings.local.host_id,
                    "MODE": mode,
                    "DB": settings.conf.mongodb_uri,
                    "DB_PREFIX": settings.conf.mongodb_collection_prefix or "",
                    "PROMETHEUS_PORT": str(prometheus_port),
                    "DATADOG_API_KEY": str(datadog_api_key),
                }),
            )

            while True:
                if (settings.app.monitoring != mode or
                        settings.app.prometheus_port != prometheus_port or
                        settings.app.datadog_api_key != datadog_api_key):
                    process.terminate()
                    yield interrupter_sleep(3)
                    process.kill()
                    break
                elif process.poll() is not None:
                    output = None
                    try:
                        output = process.stdout.read()
                        output += process.stderr.read()
                    except:
                        pass

                    logger.error("Monitoring service stopped unexpectedly",
                        "setup",
                        output=output,
                    )
                    break

                yield interrupter_sleep(3)

            process = None
        except GeneratorExit:
            raise
        except:
            logger.exception("Error in monitoring service", "setup")
            yield interrupter_sleep(1)
def poll_thread():
    if process.wait() and process_state and not check_global_interrupt():
        stdout, stderr = process._communicate(None)
        logger.error("Web server process exited unexpectedly", "app",
            stdout=stdout,
            stderr=stderr,
        )
        time.sleep(1)
        restart_server(1)
def setup_server():
    global setup_state

    last_error = time.time() - 24
    while True:
        try:
            db_ver_int = utils.get_db_ver_int()
            break
        except:
            time.sleep(0.5)
            if time.time() - last_error > 30:
                last_error = time.time()
                logger.exception('Error connecting to mongodb server')

    listener.add_listener('system', on_system_msg)

    if db_ver_int > settings.local.version_int:
        logger.error('Database version is newer than server version',
            'setup',
            db_version=db_ver_int,
            server_version=settings.local.version_int,
        )
        exit(75)

    if not settings.conf.mongodb_uri:
        setup_state = 'setup'
    elif check_db_ver(db_ver_int):
        setup_state = 'upgrade'

    if setup_state:
        logger.info('Starting setup server', 'setup')

        if setup_state == 'upgrade':
            upgrade_database()

        settings.local.server_start.clear()

        thread = threading.Thread(target=server_thread)
        thread.daemon = True
        thread.start()

        setup_ready.wait()

        time.sleep(1)

        upgrade.database_clean_up()

    last_error = time.time() - 24
    while True:
        try:
            utils.set_db_ver(__version__)
            break
        except:
            time.sleep(0.5)
            if time.time() - last_error > 30:
                last_error = time.time()
                logger.exception('Error connecting to mongodb server')
def _keep_alive_thread(self):
    try:
        error_count = 0

        while not self.interrupt:
            try:
                doc = self.collection.find_and_modify({
                    '_id': self.server.id,
                    'availability_group': \
                        settings.local.host.availability_group,
                    'instances.instance_id': self.id,
                }, {'$set': {
                    'instances.$.ping_timestamp': utils.now(),
                }}, fields={
                    '_id': False,
                    'instances': True,
                }, new=True)

                yield

                if not doc:
                    logger.error(
                        'Instance doc lost, stopping server', 'server',
                        server_id=self.server.id,
                        instance_id=self.id,
                        cur_timestamp=utils.now(),
                    )

                    if self.stop_process():
                        break
                    else:
                        time.sleep(0.1)
                        continue
                else:
                    error_count = 0

                yield
            except:
                error_count += 1
                if error_count >= 2 and self.stop_process():
                    logger.exception(
                        'Failed to update server ping, stopping server',
                        'server',
                        server_id=self.server.id,
                    )
                    break

                logger.exception('Failed to update server ping', 'server',
                    server_id=self.server.id,
                )
                time.sleep(2)

            yield interrupter_sleep(settings.vpn.server_ping)
    except GeneratorExit:
        self.stop_process()
def get_interfaces():
    gateway = get_gateway()
    if not gateway:
        from pritunl import logger
        logger.error('Failed to find gateway address', 'utils')
    gateway_inf, gateway_addr = gateway

    output = check_output_logged(['ifconfig'])
    interfaces = {}

    for interface in output.split('\n\n'):
        data = {}

        interface_name = re.findall(r'[a-z0-9]+', interface, re.IGNORECASE)
        if not interface_name:
            continue
        interface_name = interface_name[0]
        data['interface'] = interface_name

        addr = re.findall(r'inet.{0,10}' + IP_REGEX, interface,
            re.IGNORECASE)
        if not addr:
            continue
        addr = re.findall(IP_REGEX, addr[0], re.IGNORECASE)
        if not addr:
            continue
        data['address'] = addr[0]

        netmask = re.findall(r'mask.{0,10}' + IP_REGEX, interface,
            re.IGNORECASE)
        if not netmask:
            continue
        netmask = re.findall(IP_REGEX, netmask[0], re.IGNORECASE)
        if not netmask:
            continue
        data['netmask'] = netmask[0]

        broadcast = re.findall(r'broadcast.{0,10}' + IP_REGEX, interface,
            re.IGNORECASE)
        if not broadcast:
            broadcast = re.findall(r'bcast.{0,10}' + IP_REGEX, interface,
                re.IGNORECASE)
        if not broadcast:
            continue
        broadcast = re.findall(IP_REGEX, broadcast[0], re.IGNORECASE)
        if not broadcast:
            continue
        data['broadcast'] = broadcast[0]

        if data['interface'] == gateway_inf:
            data['gateway'] = gateway_addr
        else:
            data['gateway'] = None

        interfaces[interface_name] = data

    return interfaces
def wait_for_socket(self):
    for _ in xrange(10000):
        if os.path.exists(self.socket_path):
            return
        time.sleep(0.001)

    logger.error('Server management socket path not found', 'server',
        server_id=self.server.id,
        instance_id=self.instance.id,
        socket_path=self.socket_path,
    )
def __init__(self, network):
    self.host_interface_data = utils.find_interface(network)
    if not self.host_interface_data:
        logger.error('Failed to find bridged network interface', 'server',
            network=network,
        )
        raise BridgeLookupError(
            'Failed to find bridged network interface')

    self.bridge_interface = 'br' + self.host_interface_data['interface']
    self.interfaces = set()
def check_db_ver(db_ver_int):
    if db_ver_int > settings.local.version_int:
        logger.error('Database version is newer than server version',
            'setup',
            db_version=db_ver_int,
            server_version=settings.local.version_int,
        )
        exit(75)

    return db_ver_int and db_ver_int < settings.local.version_int
def add_host(self, host_vxlan_id, vxlan_mac, host_dst):
    if settings.local.host.local_addr == host_dst:
        return

    self.running_lock.acquire()
    try:
        if not self.running:
            return

        for i in xrange(2):
            try:
                if i == 0:
                    check_func = utils.check_call_silent
                else:
                    check_func = utils.check_output_logged

                check_func([
                    'bridge',
                    'fdb',
                    'add',
                    vxlan_mac,
                    'dev', self.iface_name,
                    'dst', host_dst,
                ])

                break
            except subprocess.CalledProcessError:
                if i == 0:
                    utils.check_output_logged([
                        'bridge',
                        'fdb',
                        'del',
                        vxlan_mac,
                        'dev', self.iface_name,
                    ])
                else:
                    raise

        utils.check_output_logged([
            'arp',
            '-s',
            self.get_host_addr(host_vxlan_id),
            vxlan_mac,
        ])
    except:
        logger.error('Failed to add vxlan host', 'vxlan',
            vxlan_id=self.vxlan_id,
            server_id=self.server_id,
        )
        raise
    finally:
        self.running_lock.release()
def _dns_thread():
    from pritunl import host

    while True:
        process = None
        try:
            if not host.dns_mapping_servers:
                yield interrupter_sleep(3)
                continue

            process = subprocess.Popen(
                ['pritunl-dns'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=dict(os.environ, **{
                    'DB': settings.conf.mongodb_uri,
                    'DB_PREFIX': settings.conf.mongodb_collection_prefix or '',
                }),
            )

            while True:
                if not host.dns_mapping_servers:
                    process.terminate()
                    yield interrupter_sleep(3)
                    process.kill()
                    process = None
                    break
                elif process.poll() is not None:
                    output = None
                    try:
                        output = process.stdout.read()
                        output += process.stderr.read()
                    except:
                        pass

                    logger.error(
                        'DNS mapping service stopped unexpectedly',
                        'setup',
                        output=output,
                    )
                    process = None
                    break

                yield interrupter_sleep(1)
        except GeneratorExit:
            if process:
                process.terminate()
                time.sleep(1)
                process.kill()
            return
        except:
            logger.exception('Error in dns service', 'setup')
            yield interrupter_sleep(1)
def stop_process(self):
    terminated = utils.stop_process(self.process)

    if not terminated:
        logger.error('Failed to stop server process. %r' % {
            'server_id': self.server.id,
            'instance_id': self.instance_id,
        })
        return False

    return terminated
def host_interface_data(self):
    data = utils.find_interface(self.server.network)
    if not data:
        self.server.output.push_output(
            'ERROR Failed to find bridged network interface')
        logger.error('Failed to find bridged network interface', 'server',
            server_id=self.server.id,
            network=self.server.network,
        )
        raise ValueError('Failed to find bridged network interface')
    return data
def poll_thread():
    time.sleep(0.5)
    if process.wait() and process_state:
        time.sleep(0.25)
        if not check_global_interrupt():
            stdout, stderr = process._communicate(None)
            logger.error('Web server process exited unexpectedly', 'app',
                stdout=stdout,
                stderr=stderr,
            )
            time.sleep(1)
            restart_server(1)
def stop_process(self):
    self.sock_interrupt = True

    terminated = utils.stop_process(self.process)
    if not terminated:
        logger.error('Failed to stop server process', 'server',
            server_id=self.server.id,
            instance_id=self.id,
        )
        return False

    return terminated
def _init_host(self):
    local_addr = settings.local.host.local_addr
    local_addr6 = None
    if self.ipv6:
        local_addr6 = settings.local.host.local_addr6

    doc = self.vxlan_collection.find_and_modify({
        '_id': self.vxlan_id,
        'server_id': self.server_id,
        'hosts.host_dst': {'$nin': [local_addr]},
    }, {'$push': {
        'hosts': {
            'vxlan_mac': self.vxlan_mac,
            'host_dst': local_addr,
            'host_dst6': local_addr6,
        },
    }}, new=True)

    if not doc:
        doc = self.vxlan_collection.find_and_modify({
            '_id': self.vxlan_id,
            'server_id': self.server_id,
            'hosts.host_dst': local_addr,
        }, {'$set': {
            'hosts.$.vxlan_mac': self.vxlan_mac,
            'hosts.$.host_dst': local_addr,
            'hosts.$.host_dst6': local_addr6,
        }}, new=True)

    if doc:
        for host_vxlan_id, data in enumerate(doc['hosts']):
            if data['host_dst'] == local_addr:
                self.host_vxlan_id = host_vxlan_id + 1

    if not self.host_vxlan_id:
        logger.error('Failed to get host vxlan id', 'vxlan',
            vxlan_id=self.vxlan_id,
            server_id=self.server_id,
            host_id=settings.local.host_id,
            local_addr=local_addr,
            local_addr6=local_addr6,
        )
        raise ValueError('Failed to get host vxlan id')

    messenger.publish('vxlan', {
        'vxlan_id': self.vxlan_id,
        'server_id': self.server_id,
        'host_vxlan_id': self.host_vxlan_id,
        'vxlan_mac': self.vxlan_mac,
        'host_dst': local_addr,
        'host_dst6': local_addr6,
    })
def set_ip6tables_rule(self, rule):
    for i in xrange(3):
        try:
            utils.check_output_logged(['ip6tables', '-I'] + rule)
            break
        except:
            if i == 2:
                raise
            logger.error(
                'Failed to insert ip6tables rule, retrying...',
                'instance',
                rule=rule,
            )
            time.sleep(1)
def iter_hosts(self):
    for host_id in self.hosts:
        hst = host.get_host(id=host_id)
        if hst:
            yield hst
        else:
            logger.error('Removing non-existent host ' +
                'from server. %r' % {
                    'server_id': self.id,
                    'host_id': host_id,
                })
            self.remove_host(host_id)
            self.commit('hosts')
            event.Event(type=SERVER_HOSTS_UPDATED, resource_id=self.id)
def poll_thread():
    time.sleep(0.5)
    if web_process.wait() and web_process_state:
        time.sleep(0.25)
        if not check_global_interrupt():
            stdout, stderr = web_process._communicate(None)
            logger.error(
                'Setup web server process exited unexpectedly', 'setup',
                stdout=stdout,
                stderr=stderr,
            )
            set_global_interrupt()
    else:
        server.interrupt = ServerStop('Stop server')
def _socket_thread(self):
    try:
        self.connect()

        time.sleep(1)
        self.sock.send('bytecount %s\n' % self.bandwidth_rate)

        data = ''
        while True:
            data += self.sock.recv(SOCKET_BUFFER)
            if not data:
                if not self.instance.sock_interrupt and \
                        not check_global_interrupt():
                    self.instance.stop_process()
                    self.push_output(
                        'ERROR Management socket exited unexpectedly')
                    logger.error('Management socket exited unexpectedly')
                return

            lines = data.split('\n')
            data = lines.pop()

            for line in lines:
                line = line.strip()
                if not line:
                    continue

                try:
                    self.parse_line(line)
                except:
                    logger.exception(
                        'Failed to parse line from vpn com', 'server',
                        server_id=self.server.id,
                        instance_id=self.instance.id,
                        line=line,
                    )
    except:
        self.push_output('ERROR Management socket exception')
        logger.exception('Error in management socket thread', 'server',
            server_id=self.server.id,
            instance_id=self.instance.id,
        )
        self.instance.stop_process()
def setup_server():
    last_error = time.time() - 24
    while True:
        try:
            db_ver_int = utils.get_db_ver_int()
            break
        except pymongo.errors.ConnectionFailure:
            time.sleep(0.5)
            if time.time() - last_error > 30:
                last_error = time.time()
                logger.exception('Error connecting to mongodb server')

    listener.add_listener('system', on_system_msg)

    if db_ver_int > settings.local.version_int:
        logger.error('Database version is newer than server version',
            'setup',
            db_version=db_ver_int,
            server_version=settings.local.version_int,
        )
        exit(75)

    global db_setup
    db_setup = not settings.conf.mongodb_uri

    global server_upgrade
    server_upgrade = db_ver_int and db_ver_int < settings.local.version_int

    if db_setup or server_upgrade:
        logger.info('Starting setup server', 'setup')

        if not db_setup:
            upgrade_database()

        settings.local.server_start.clear()

        thread = threading.Thread(target=server_thread)
        thread.daemon = True
        thread.start()

        setup_ready.wait()

    utils.set_db_ver(__version__)
def _check_primary(self):
    if self.user.disabled:
        self.user.audit_event('user_connection',
            'User connection to "%s" denied. User is disabled' % (
                self.server.name),
            remote_addr=self.remote_ip,
        )
        raise AuthError('User is disabled')

    if self.user.link_server_id:
        return

    if not self.server.check_groups(self.user.groups):
        self.user.audit_event(
            'user_connection',
            ('User connection to "%s" denied. User not in ' +
                'servers groups') % (self.server.name),
            remote_addr=self.remote_ip,
        )
        raise AuthError('User not in servers groups')

    if self.server.allowed_devices:
        if self.server.allowed_devices == 'mobile':
            platforms = MOBILE_PLATFORMS
        elif self.server.allowed_devices == 'desktop':
            platforms = DESKTOP_PLATFORMS
        else:
            logger.error('Unknown allowed devices option', 'server',
                server_id=self.server.id,
                allowed_devices=self.server.allowed_devices,
            )
            platforms = {}

        if self.platform not in platforms:
            self.user.audit_event(
                'user_connection',
                ('User connection to "%s" denied. User platform ' +
                    'not allowed') % (self.server.name),
                remote_addr=self.remote_ip,
            )
            raise AuthError(
                'User platform %s not allowed' % self.platform)
def stop_watch(self):
    try:
        while True:
            if self.stop_event.wait(1):
                return
            yield
    finally:
        try:
            if not utils.stop_process(self.process):
                logger.error('Failed to stop openvpn link process',
                    'server',
                    server_id=self.server.id,
                )
        finally:
            if self.interface:
                utils.interface_release(
                    self.linked_server.adapter_type, self.interface)
                self.interface = None
def _keep_alive_thread(self):
    try:
        while not self.interrupt:
            try:
                doc = self.collection.find_and_modify({
                    '_id': self.server.id,
                    'availability_group': \
                        settings.local.host.availability_group,
                    'instances.instance_id': self.id,
                }, {'$set': {
                    'instances.$.ping_timestamp': utils.now(),
                }}, fields={
                    '_id': False,
                    'instances': True,
                }, new=True)

                yield

                if not doc:
                    logger.error(
                        'Instance doc lost, stopping server', 'server',
                        server_id=self.server.id,
                    )

                    if self.stop_process():
                        break
                    else:
                        time.sleep(0.1)
                        continue

                yield
            except:
                logger.exception('Failed to update server ping', 'server',
                    server_id=self.server.id,
                )
                time.sleep(1)

            yield interrupter_sleep(settings.vpn.server_ping)
    except GeneratorExit:
        self.stop_process()
def stop_process(self):
    self.sock_interrupt = True

    for instance_link in self.server_links:
        instance_link.stop()

    if self.process:
        terminated = utils.stop_process(self.process)
    else:
        terminated = True

    if not terminated:
        logger.error('Failed to stop server process', 'server',
            server_id=self.server.id,
            instance_id=self.id,
        )
        return False

    return terminated
def sync_time():
    nounce = None
    doc = {}

    try:
        collection = mongo.get_collection('time_sync')

        nounce = ObjectId()
        collection.insert({
            'nounce': nounce,
        }, manipulate=False, w=1)

        mongo_time_start = datetime.datetime.utcnow()

        cur_mongo_time = settings.local.mongo_time

        doc = collection.find_one({
            'nounce': nounce,
        })
        mongo_time = doc['_id'].generation_time.replace(tzinfo=None)

        settings.local.mongo_time = (mongo_time_start, mongo_time)

        if cur_mongo_time:
            time_diff = abs(_now(cur_mongo_time) - now())
            if time_diff > datetime.timedelta(milliseconds=1000):
                from pritunl import logger
                logger.error('Unexpected time deviation from mongodb',
                    'utils',
                    deviation=str(time_diff),
                )

        collection.remove(doc['_id'])
    except:
        from pritunl import logger
        logger.exception('Failed to sync time',
            nounce=nounce,
            doc_id=doc.get('id'),
        )
        raise
def _route_ad_keep_alive_thread(self):
    try:
        while not self.interrupt:
            try:
                for ra_id in self.route_advertisements.copy():
                    yield

                    response = self.routes_collection.update_one({
                        '_id': ra_id,
                        'instance_id': self.id,
                    }, {'$set': {
                        'timestamp': utils.now(),
                    }})

                    if not response.modified_count:
                        logger.error(
                            'Lost route advertisement reserve', 'server',
                            server_id=self.server.id,
                            instance_id=self.id,
                            route_id=ra_id,
                        )

                        try:
                            self.route_advertisements.remove(ra_id)
                        except KeyError:
                            pass

                    yield
            except GeneratorExit:
                pass
            except:
                logger.exception(
                    'Failed to update route advertisement', 'server',
                    server_id=self.server.id,
                )
                time.sleep(1)

            yield interrupter_sleep(settings.vpn.route_ping)
    except GeneratorExit:
        pass
def stop_watch(self):
    try:
        while True:
            if self.stop_event.wait(1):
                return
            yield
    finally:
        try:
            if not utils.stop_process(self.process):
                logger.error('Failed to stop openvpn link process',
                    'server',
                    server_id=self.server.id,
                )
        finally:
            if self.interface:
                utils.interface_release(
                    'tap' if self.linked_server.network_mode == BRIDGE
                    else 'tun',
                    self.interface,
                )
                self.interface = None
def set_iptables_rules(self, log=False):
    logger.debug('Setting iptables rules', 'server',
        server_id=self.server.id,
    )

    self.iptables_lock.acquire()
    try:
        if self.iptables_rules is not None:
            for rule in self.iptables_rules:
                if not self.exists_iptables_rule(rule):
                    if log and not self.interrupt:
                        logger.error(
                            'Unexpected loss of iptables rule, ' +
                            'adding again...',
                            'instance',
                            rule=rule,
                        )
                    self.set_iptables_rule(rule)

        if self.ip6tables_rules is not None:
            for rule in self.ip6tables_rules:
                if not self.exists_ip6tables_rule(rule):
                    if log and not self.interrupt:
                        logger.error(
                            'Unexpected loss of ip6tables rule, ' +
                            'adding again...',
                            'instance',
                            rule=rule,
                        )
                    self.set_ip6tables_rule(rule)
    except subprocess.CalledProcessError as error:
        logger.exception('Failed to apply iptables ' +
            'routing rule', 'server',
            server_id=self.server.id,
            output=error.output,
        )
        raise
    finally:
        self.iptables_lock.release()
def get_user_id(username):
    try:
        response = requests.get(
            _getokta_url() + '/api/v1/users/%s' % urllib.quote(username),
            headers={
                'Accept': 'application/json',
                'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
            },
        )
    except httplib.HTTPException:
        logger.exception('Okta api error', 'sso',
            username=username,
        )
        return None

    if response.status_code != 200:
        logger.error('Okta api error', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    data = response.json()

    user_id = data.get('id')
    if not user_id:
        logger.error('Okta username not found', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    if data['status'].lower() != 'active':
        logger.warning('Okta user is not active', 'sso',
            username=username,
        )
        return None

    return user_id
def upsert_rules(self, log=False):
    self._lock.acquire()
    try:
        if not self._accept:
            return

        for rule in self._accept:
            if not self._exists_iptables_rule(rule):
                if log:
                    logger.error(
                        'Unexpected loss of iptables rule, ' +
                        'adding again...',
                        'instance',
                        rule=rule,
                    )
                self._insert_iptables_rule(rule)

        for rule in self._accept6:
            if not self._exists_iptables_rule(rule, ipv6=True):
                if log:
                    logger.error(
                        'Unexpected loss of ip6tables rule, ' +
                        'adding again...',
                        'instance',
                        rule=rule,
                    )
                self._insert_iptables_rule(rule, ipv6=True)

        for rule in self._drop:
            if not self._exists_iptables_rule(rule):
                if log:
                    logger.error(
                        'Unexpected loss of iptables drop rule, ' +
                        'adding again...',
                        'instance',
                        rule=rule,
                    )
                self._append_iptables_rule(rule)

        for rule in self._drop6:
            if not self._exists_iptables_rule(rule, ipv6=True):
                if log:
                    logger.error(
                        'Unexpected loss of ip6tables drop rule, ' +
                        'adding again...',
                        'instance',
                        rule=rule,
                    )
                self._append_iptables_rule(rule, ipv6=True)
    finally:
        self._lock.release()
def _insert_iptables_rule_cmd(self, rule, ipv6=False):
    rule = self._parse_rule(rule)

    _global_lock.acquire()
    try:
        for i in xrange(3):
            try:
                utils.check_output_logged(
                    ['ip6tables' if ipv6 else 'iptables', '-I'] + rule)
                break
            except:
                if i == 2:
                    raise
                logger.error(
                    'Failed to insert iptables rule, retrying...',
                    'instance',
                    rule=rule,
                )
                time.sleep(0.5)
    finally:
        _global_lock.release()
def check_output_logged(*args, **kwargs):
    if 'stdout' in kwargs or 'stderr' in kwargs:
        raise ValueError('Output arguments not allowed, it will be overridden')

    try:
        ignore_states = kwargs.pop('ignore_states')
    except KeyError:
        ignore_states = None

    process = subprocess.Popen(stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, *args, **kwargs)

    stdoutdata, stderrdata = process.communicate()
    return_code = process.poll()

    if return_code:
        from pritunl import logger

        cmd = kwargs.get('args', args[0])

        if ignore_states:
            for ignore_state in ignore_states:
                if ignore_state in stdoutdata or \
                        ignore_state in stderrdata:
                    return stdoutdata

        logger.error('Popen returned error exit code', 'utils',
            cmd=cmd,
            return_code=return_code,
            stdout=stdoutdata,
            stderr=stderrdata,
        )

        raise subprocess.CalledProcessError(return_code, cmd,
            output=stdoutdata)

    return stdoutdata
def setup_server():
    db_ver_int = utils.get_db_ver_int()

    listener.add_listener('system', on_system_msg)

    if db_ver_int > settings.local.version_int:
        logger.error(
            'Database version is newer than server version', 'setup',
            db_version=db_ver_int,
            server_version=settings.local.version_int,
        )
        exit(75)

    # Fix for replaced conf file in 0.10.x upgrade
    if db_ver_int < 10000000000004000 and not settings.conf.mongodb_uri:
        settings.conf.mongodb_uri = 'mongodb://localhost:27017/pritunl'
        settings.conf.commit()

    global db_setup
    db_setup = not settings.conf.mongodb_uri

    global server_upgrade
    server_upgrade = db_ver_int and db_ver_int < settings.local.version_int

    if db_setup or server_upgrade:
        logger.info('Starting setup server', 'setup')

        if not db_setup:
            upgrade_database()

        settings.local.server_start.clear()

        thread = threading.Thread(target=server_thread)
        thread.daemon = True
        thread.start()

        setup_ready.wait()

    utils.set_db_ver(__version__)
def _keep_alive_thread(self):
    while True:
        time.sleep(self.ttl - 6)
        if self.queue_com.state in (COMPLETE, STOPPED):
            break

        response = self.collection.update({
            '_id': self.id,
            'runner_id': self.runner_id,
        }, {'$set': {
            'ttl_timestamp': utils.now() + \
                datetime.timedelta(seconds=self.ttl),
        }})

        if response['updatedExisting']:
            logger.debug('Queue keep alive updated', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            messenger.publish('queue', [UPDATE, self.id])
        else:
            logger.debug('Queue keep alive lost reserve', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )

            self.queue_com.state_lock.acquire()
            try:
                self.queue_com.state = STOPPED
            finally:
                self.queue_com.state_lock.release()

            logger.error('Lost reserve, queue stopped', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )

    logger.debug('Queue keep alive thread ended', 'queue',
        queue_id=self.id,
        queue_type=self.type,
    )
def get_ip_pool(self, network, network_start):
    ip_pool = network.iterhosts()

    if network_start:
        network_start = ipaddress.IPv4Address(network_start)
        network_break = network_start - 1

        while True:
            try:
                ip_addr = ip_pool.next()
            except StopIteration:
                logger.error('Failed to find network start', 'server',
                    server_id=self.server.id,
                )
                return

            if ip_addr == network_break:
                break
    else:
        ip_pool.next()

    return ip_pool
def get_factor_id(user_id):
    try:
        response = requests.get(
            _getokta_url() + '/api/v1/users/%s/factors' % user_id,
            headers={
                'Accept': 'application/json',
                'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
            },
        )
    except httplib.HTTPException:
        logger.exception('Okta api error', 'sso',
            user_id=user_id,
        )
        return None

    if response.status_code != 200:
        logger.error('Okta api error', 'sso',
            user_id=user_id,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    not_active = False
    data = response.json()
    for factor in data:
        if 'id' not in factor or 'provider' not in factor or \
                'factorType' not in factor or 'status' not in factor:
            continue

        if factor['provider'].lower() != 'okta' or \
                factor['factorType'].lower() != 'push':
            continue

        if factor['status'].lower() != 'active':
            not_active = True
            continue

        return factor['id']

    if settings.app.sso_okta_skip_unavailable:
        return True
    elif not_active:
        logger.error('Okta push not active', 'sso',
            user_id=user_id,
        )
    else:
        logger.error('Okta push not available', 'sso',
            user_id=user_id,
        )

    return None
def run(self, timeout=None):
    from pritunl import logger

    thread = threading.Thread(target=self._proc_thread)
    thread.daemon = True
    thread.start()

    self._event.wait(timeout)

    cmd = self._kwargs.get('args', self._args[0])

    if not self._event.is_set():
        logger.error('Popen process timeout', 'utils',
            cmd=cmd,
            timeout=timeout,
        )

        try:
            self._process.kill()
        except:
            pass

        raise subprocess.CalledProcessError(-99, cmd, output='')
    elif self._return_code:
        if self._ignore_states:
            for ignore_state in self._ignore_states:
                if ignore_state in self._stdoutdata or \
                        ignore_state in self._stderrdata:
                    return self._stdoutdata

        logger.error('Popen returned error exit code', 'utils',
            cmd=cmd,
            timeout=timeout,
            return_code=self._return_code,
            stdout=self._stdoutdata,
            stderr=self._stderrdata,
        )

        raise subprocess.CalledProcessError(
            self._return_code, cmd, output=self._stdoutdata)
def _append_iptables_rule_cmd(self, rule, ipv6=False):
    rule = self._parse_rule(rule)

    _global_lock.acquire()
    try:
        for i in range(3):
            try:
                utils.Process(
                    ['ip6tables' if ipv6 else 'iptables', '-A'] + rule,
                ).run(15)
                break
            except:
                if i == 2:
                    raise
                logger.error(
                    'Failed to append iptables rule, retrying...',
                    'instance',
                    rule=rule,
                )
                time.sleep(0.5)
    finally:
        _global_lock.release()
def _vault_thread():
    while True:
        process = None
        try:
            process = subprocess.Popen(
                ['/home/cloud/go/bin/pritunl-vault'],
                env=dict(os.environ, **{
                    'CLIENT_KEY': settings.local.se_client_pub_key,
                }),
            )

            while True:
                if process.poll() is not None:
                    if check_global_interrupt():
                        return

                    logger.error('Vault service stopped unexpectedly',
                        'setup',
                    )
                    process = None
                    yield interrupter_sleep(1)
                    break

                time.sleep(0.5)
                yield
        except GeneratorExit:
            if process:
                process.terminate()
                time.sleep(1)
                process.kill()
            return
        except:
            logger.exception('Error in vault service', 'setup')
            yield interrupter_sleep(1)
def auth_yubico(yubikey):
    yubikey_collection = mongo.get_collection('yubikey')

    if len(yubikey) != 44:
        return False, None

    yubikey = yubikey.lower()
    public_id = yubikey[:12]

    client = yubico_client.Yubico(
        client_id=settings.app.sso_yubico_client,
        key=settings.app.sso_yubico_secret,
        api_urls=settings.app.sso_yubico_servers,
        ca_certs_bundle_path=certifi.where(),
    )

    try:
        if client.verify(yubikey) is not True:
            return False, None
    except:
        logger.exception('Yubico authentication error', 'sso')
        return False, None

    yubikey_hash = hashlib.sha512()
    yubikey_hash.update(yubikey.encode())
    yubikey_hash = base64.b64encode(yubikey_hash.digest()).decode()

    try:
        yubikey_collection.insert({
            '_id': yubikey_hash,
            'timestamp': utils.now(),
        })
    except pymongo.errors.DuplicateKeyError:
        logger.error('Yubico replay error', 'sso')
        return False, None

    return True, public_id
def reserve(cls, reserve_id, reserve_data, block=False, block_timeout=90):
    if block:
        cursor_id = messenger.get_cursor_id('queue')

    doc = cls.collection.find_and_modify({
        'state': PENDING,
        'reserve_id': reserve_id,
        'reserve_data': None,
    }, {'$set': {
        'reserve_data': reserve_data,
    }}, new=True)

    if not doc:
        return

    if block:
        for msg in messenger.subscribe('queue', cursor_id=cursor_id,
                timeout=block_timeout):
            try:
                if msg['message'] == [COMPLETE, doc['_id']]:
                    return doc
                elif msg['message'] == [ERROR, doc['_id']]:
                    raise QueueTaskError('Error occurred running ' +
                        'queue task', {
                            'queue_id': doc['_id'],
                            'queue_type': doc['type'],
                        })
            except TypeError:
                pass

        logger.error('Blocking queue reserve timed out', {
            'queue_id': doc['_id'],
            'queue_type': doc['type'],
        })

        return doc
    else:
        return doc
def _get_access_token():
    response = requests.post(
        _get_base_url() + '/auth/oauth2/token',
        headers={
            'Authorization': 'client_id:%s, client_secret:%s' % (
                settings.app.sso_onelogin_id,
                settings.app.sso_onelogin_secret,
            ),
            'Content-Type': 'application/json',
        },
        json={
            'grant_type': 'client_credentials',
        },
    )

    if response.status_code != 200:
        logger.error('OneLogin api error', 'sso',
            status_code=response.status_code,
            response=response.content,
        )
        return None

    return response.json()['data'][0]['access_token']
def auth_onelogin(username):
    try:
        response = utils.request.get(
            ONELOGIN_URL + '/api/v3/users/username/%s' % (
                urllib.quote(username)),
            auth=(settings.app.sso_onelogin_key, 'x'),
        )
    except httplib.HTTPException:
        logger.exception('OneLogin api error', 'sso',
            username=username,
        )
        return False

    if response.status_code == 200:
        return True
    elif response.status_code == 404:
        logger.error('OneLogin user not found', 'sso',
            username=username,
        )
    elif response.status_code == 406:
        logger.error('OneLogin user disabled', 'sso',
            username=username,
        )
    else:
        logger.error('OneLogin api error', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )

    return False
def sso_auth_check(self, password, remote_ip):
    sso_mode = settings.app.sso or ''

    auth_server = AUTH_SERVER
    if settings.app.dedicated:
        auth_server = settings.app.dedicated

    if GOOGLE_AUTH in self.auth_type and GOOGLE_AUTH in sso_mode:
        if settings.user.skip_remote_sso_check:
            return True

        try:
            resp = requests.get(auth_server +
                '/update/google?user=%s&license=%s' % (
                    urllib.quote(self.email),
                    settings.app.license,
                ))

            if resp.status_code != 200:
                logger.error('Google auth check request error', 'user',
                    user_id=self.id,
                    user_name=self.name,
                    status_code=resp.status_code,
                    content=resp.content,
                )
                return False

            valid, google_groups = sso.verify_google(self.email)
            if not valid:
                logger.error('Google auth check failed', 'user',
                    user_id=self.id,
                    user_name=self.name,
                )
                return False

            if settings.app.sso_google_mode == 'groups':
                cur_groups = set(self.groups)
                new_groups = set(google_groups)

                if cur_groups != new_groups:
                    self.groups = list(new_groups)
                    self.commit('groups')

            return True
        except:
            logger.exception('Google auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False
    elif AZURE_AUTH in self.auth_type and AZURE_AUTH in sso_mode:
        if settings.user.skip_remote_sso_check:
            return True

        try:
            resp = requests.get(auth_server +
                ('/update/azure?user=%s&license=%s&' +
                    'directory_id=%s&app_id=%s&app_secret=%s') % (
                    urllib.quote(self.name),
                    settings.app.license,
                    urllib.quote(settings.app.sso_azure_directory_id),
                    urllib.quote(settings.app.sso_azure_app_id),
                    urllib.quote(settings.app.sso_azure_app_secret),
                ))

            if resp.status_code != 200:
                logger.error('Azure auth check request error', 'user',
                    user_id=self.id,
                    user_name=self.name,
                    status_code=resp.status_code,
                    content=resp.content,
                )
                return False

            valid, azure_groups = sso.verify_azure(self.name)
            if not valid:
                logger.error('Azure auth check failed', 'user',
                    user_id=self.id,
                    user_name=self.name,
                )
                return False

            if settings.app.sso_azure_mode == 'groups':
                cur_groups = set(self.groups)
                new_groups = set(azure_groups)

                if cur_groups != new_groups:
                    self.groups = list(new_groups)
                    self.commit('groups')

            return True
        except:
            logger.exception('Azure auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False
    elif SLACK_AUTH in self.auth_type and SLACK_AUTH in sso_mode:
        if settings.user.skip_remote_sso_check:
            return True

        if not isinstance(settings.app.sso_match, list):
            raise TypeError('Invalid sso match')

        try:
            resp = requests.get(auth_server +
                '/update/slack?user=%s&team=%s&license=%s' % (
                    urllib.quote(self.name),
                    urllib.quote(settings.app.sso_match[0]),
                    settings.app.license,
                ))

            if resp.status_code != 200:
                logger.error('Slack auth check request error', 'user',
                    user_id=self.id,
                    user_name=self.name,
                    status_code=resp.status_code,
                    content=resp.content,
                )
                return False

            return True
        except:
            logger.exception('Slack auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False
    elif SAML_ONELOGIN_AUTH in self.auth_type and \
            SAML_ONELOGIN_AUTH in sso_mode:
        if settings.user.skip_remote_sso_check:
            return True

        try:
            return sso.auth_onelogin(self.name)
        except:
            logger.exception('OneLogin auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False
    elif SAML_OKTA_AUTH in self.auth_type and \
            SAML_OKTA_AUTH in sso_mode:
        if settings.user.skip_remote_sso_check:
            return True

        try:
            return sso.auth_okta(self.name)
        except:
            logger.exception('Okta auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False
    elif RADIUS_AUTH in self.auth_type and RADIUS_AUTH in sso_mode:
        try:
            return sso.verify_radius(self.name, password)[0]
        except:
            logger.exception('Radius auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False
    elif PLUGIN_AUTH in self.auth_type:
        try:
            return sso.plugin_login_authenticate(
                user_name=self.name,
                password=password,
                remote_ip=remote_ip,
            )[0]
        except:
            logger.exception('Plugin auth check error', 'user',
                user_id=self.id,
                user_name=self.name,
            )

        return False

    return True
def check_session(csrf_check):
    auth_token = flask.request.headers.get('Auth-Token', None)
    if auth_token:
        auth_timestamp = flask.request.headers.get('Auth-Timestamp', None)
        auth_nonce = flask.request.headers.get('Auth-Nonce', None)
        auth_signature = flask.request.headers.get('Auth-Signature', None)
        if not auth_token or not auth_timestamp or not auth_nonce or \
                not auth_signature:
            return False
        auth_nonce = auth_nonce[:32]

        try:
            if abs(int(auth_timestamp) - int(utils.time_now())) > \
                    settings.app.auth_time_window:
                return False
        except ValueError:
            return False

        administrator = find_user(token=auth_token)
        if not administrator:
            return False
        if not administrator.auth_api:
            return False

        auth_string = '&'.join([
            auth_token, auth_timestamp, auth_nonce,
            flask.request.method, flask.request.path])

        if len(auth_string) > AUTH_SIG_STRING_MAX_LEN or \
                len(auth_nonce) < 8:
            return False

        if not administrator.secret or len(administrator.secret) < 8:
            return False

        auth_test_signature = base64.b64encode(hmac.new(
            administrator.secret.encode(), auth_string,
            hashlib.sha256).digest())
        if auth_signature != auth_test_signature:
            return False

        try:
            Administrator.nonces_collection.insert({
                'token': auth_token,
                'nonce': auth_nonce,
                'timestamp': utils.now(),
            })
        except pymongo.errors.DuplicateKeyError:
            return False
    else:
        if not flask.session:
            return False

        admin_id = utils.session_opt_str('admin_id')
        if not admin_id:
            return False
        admin_id = utils.ObjectId(admin_id)

        session_id = utils.session_opt_str('session_id')

        signature = utils.session_opt_str('signature')
        if not signature:
            return False

        if not utils.check_flask_sig():
            return False

        if csrf_check:
            csrf_token = flask.request.headers.get('Csrf-Token', None)
            if not validate_token(csrf_token):
                logger.error('CSRF token check failed', 'auth',
                    method=flask.request.method,
                    path=flask.request.path,
                )
                return False

        administrator = get_user(admin_id, session_id)
        if not administrator:
            return False

        if not settings.app.reverse_proxy and \
                not settings.app.allow_insecure_session and \
                not settings.app.server_ssl and \
                utils.session_opt_str('source') != utils.get_remote_addr():
            flask.session.clear()
            clear_session(admin_id, session_id)
            return False

        session_timeout = settings.app.session_timeout
        if session_timeout and int(utils.time_now()) - \
                utils.session_int('timestamp') > session_timeout:
            flask.session.clear()
            clear_session(admin_id, session_id)
            return False

        flask.session['timestamp'] = int(utils.time_now())
        utils.set_flask_sig()

    if administrator.disabled:
        return False

    flask.g.administrator = administrator
    return True
def _auth_radius(username, password):
    sso_mode = settings.app.sso

    valid, org_names, groups = sso.verify_radius(username, password)
    if not valid:
        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    org_id = settings.app.sso_org
    if org_names:
        for org_name in org_names:
            org = organization.get_by_name(org_name, fields=('_id'))
            if org:
                org_id = org.id
                break

    valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
        sso_type='radius',
        user_name=username,
        user_email=None,
        remote_ip=utils.get_remote_addr(),
    )
    if valid:
        org_id = org_id_new or org_id
    else:
        logger.error('Radius plugin authentication not valid', 'sso',
            username=username,
        )
        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    groups = ((groups or set()) | (groups2 or set())) or None

    if DUO_AUTH in sso_mode:
        try:
            duo_auth = sso.Duo(
                username=username,
                factor=settings.app.sso_duo_mode,
                remote_ip=utils.get_remote_addr(),
                auth_type='Key',
            )
            valid = duo_auth.authenticate()
        except InvalidUser:
            logger.error('Duo authentication username not valid', 'sso',
                username=username,
            )
            return utils.jsonify({
                'error': AUTH_INVALID,
                'error_msg': AUTH_INVALID_MSG,
            }, 401)

        if valid:
            valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
                sso_type='duo',
                user_name=username,
                user_email=None,
                remote_ip=utils.get_remote_addr(),
            )
            if valid:
                org_id = org_id_new or org_id
            else:
                logger.error('Duo plugin authentication not valid', 'sso',
                    username=username,
                )
                return utils.jsonify({
                    'error': AUTH_INVALID,
                    'error_msg': AUTH_INVALID_MSG,
                }, 401)

            groups = ((groups or set()) | (groups2 or set())) or None
        else:
            logger.error('Duo authentication not valid', 'sso',
                username=username,
            )
            return utils.jsonify({
                'error': AUTH_INVALID,
                'error_msg': AUTH_INVALID_MSG,
            }, 401)

        groups = ((groups or set()) | (groups2 or set())) or None

    org = organization.get_by_id(org_id)
    if not org:
        return flask.abort(405)

    usr = org.find_user(name=username)
    if not usr:
        usr = org.new_user(name=username, type=CERT_CLIENT,
            auth_type=sso_mode,
            groups=list(groups) if groups else None)
        usr.audit_event('user_created',
            'User created with single sign-on',
            remote_addr=utils.get_remote_addr(),
        )

        event.Event(type=ORGS_UPDATED)
        event.Event(type=USERS_UPDATED, resource_id=org.id)
        event.Event(type=SERVERS_UPDATED)
    else:
        if usr.disabled:
            return utils.jsonify({
                'error': AUTH_DISABLED,
                'error_msg': AUTH_DISABLED_MSG,
            }, 403)

        if groups and groups - set(usr.groups or []):
            usr.groups = list(set(usr.groups or []) | groups)
            usr.commit('groups')

        if usr.auth_type != sso_mode:
            usr.auth_type = sso_mode
            usr.set_pin(None)
            usr.commit(('auth_type', 'pin'))

    key_link = org.create_user_key_link(usr.id, one_time=True)

    usr.audit_event('user_profile',
        'User profile viewed from single sign-on',
        remote_addr=utils.get_remote_addr(),
    )

    return utils.jsonify({
        'redirect': utils.get_url_root() + key_link['view_url'],
    }, 202)
def _auth_plugin(username, password):
    if settings.local.sub_plan != 'enterprise':
        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    valid, org_id, groups = sso.plugin_login_authenticate(
        user_name=username,
        password=password,
        remote_ip=utils.get_remote_addr(),
    )

    if not valid:
        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    if not org_id:
        logger.error(
            'Login plugin did not return valid organization name',
            'auth',
            org_name=org_id,
            user_name=username,
        )
        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    org = organization.get_by_id(org_id)
    if not org:
        return flask.abort(405)

    usr = org.find_user(name=username)
    if not usr:
        usr = org.new_user(name=username, type=CERT_CLIENT,
            auth_type=PLUGIN_AUTH,
            groups=list(groups) if groups else None)
        usr.audit_event('user_created',
            'User created with plugin authentication',
            remote_addr=utils.get_remote_addr(),
        )

        event.Event(type=ORGS_UPDATED)
        event.Event(type=USERS_UPDATED, resource_id=org.id)
        event.Event(type=SERVERS_UPDATED)
    else:
        if usr.disabled:
            return utils.jsonify({
                'error': AUTH_DISABLED,
                'error_msg': AUTH_DISABLED_MSG,
            }, 403)

        if groups and groups - set(usr.groups or []):
            usr.groups = list(set(usr.groups or []) | groups)
            usr.commit('groups')

        if usr.auth_type != PLUGIN_AUTH:
            usr.auth_type = PLUGIN_AUTH
            usr.set_pin(None)
            usr.commit(('auth_type', 'pin'))

    key_link = org.create_user_key_link(usr.id, one_time=True)

    usr.audit_event('user_profile',
        'User profile viewed from plugin authentication',
        remote_addr=utils.get_remote_addr(),
    )

    return utils.jsonify({
        'redirect': utils.get_url_root() + key_link['view_url'],
    }, 202)
def ping_thread(self):
    try:
        while True:
            try:
                try:
                    client_id = self.clients_queue.popleft()
                except IndexError:
                    if self.interrupter_sleep(10):
                        return
                    continue

                client = self.clients.find_id(client_id)
                if not client:
                    continue

                diff = settings.vpn.client_ttl - 150 - \
                    (time.time() - client['timestamp'])

                if diff > settings.vpn.client_ttl:
                    logger.error('Client ping time diff out of range',
                        'server',
                        time_diff=diff,
                        server_id=self.server.id,
                        instance_id=self.instance.id,
                    )
                    if self.interrupter_sleep(10):
                        return
                elif diff > 1:
                    if self.interrupter_sleep(diff):
                        return

                if self.instance.sock_interrupt:
                    return

                try:
                    updated = self.clients.update_id(client_id, {
                        'timestamp': time.time(),
                    })
                    if not updated:
                        continue

                    response = self.collection.update({
                        '_id': client['doc_id'],
                    }, {
                        '$set': {
                            'timestamp': utils.now(),
                        },
                    })
                    if not response['updatedExisting']:
                        logger.error('Client lost unexpectedly', 'server',
                            server_id=self.server.id,
                            instance_id=self.instance.id,
                        )
                        self.instance_com.client_kill(client_id)
                        continue
                except:
                    self.clients_queue.append(client_id)
                    logger.exception('Failed to update client', 'server',
                        server_id=self.server.id,
                        instance_id=self.instance.id,
                    )
                    yield interrupter_sleep(1)
                    continue

                self.clients_queue.append(client_id)

                yield
                if self.instance.sock_interrupt:
                    return
            except GeneratorExit:
                raise
            except:
                logger.exception('Error in client thread', 'server',
                    server_id=self.server.id,
                    instance_id=self.instance.id,
                )
                yield interrupter_sleep(3)
                if self.instance.sock_interrupt:
                    return
    finally:
        doc_ids = []
        for client in self.clients.find_all():
            doc_id = client.get('doc_id')
            if doc_id:
                doc_ids.append(doc_id)

        try:
            self.collection.remove({
                '_id': {'$in': doc_ids},
            })
        except:
            logger.exception('Error removing client', 'server',
                server_id=self.server.id,
            )
def popen(self, args):
    while True:
        self.wait_status()

        process = subprocess.Popen(args, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        process_data = [process, False]
        self.processes.append(process_data)
        return_code = process.wait()
        self.processes.remove(process_data)

        if return_code:
            # If process_data[1] is set the process was paused, restart
            # and wait for wait_status()
            if not process_data[1]:
                stdoutdata, stderrdata = process.communicate()
                logger.error('Popen returned error exit code', 'queue',
                    cmd=args,
                    return_code=return_code,
                    stdout=stdoutdata,
                    stderr=stderrdata,
                )
                raise ValueError('Popen returned ' +
                    'error exit code %r' % return_code)
        else:
            break