def load_public_ip(attempts=1, timeout=5):
    """Best-effort lookup of this host's public IPv4/IPv6 addresses.

    Queries the configured public ip servers up to ``attempts`` times,
    sleeping between retries, and stores the results on
    ``settings.local``. Lookup failures are logged, never raised; a
    warning is emitted when no IPv4 address could be determined.
    """
    for i in xrange(attempts):
        if settings.local.public_ip:
            return
        if i:
            time.sleep(3)
            logger.info('Retrying get public ip address', 'setup')
        logger.debug('Getting public ip address', 'setup')

        try:
            request = urllib2.Request(
                settings.app.public_ip_server)
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = json.load(response)['ip']
            break
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit propagate; the lookup stays best-effort.
            pass

    logger.debug('Getting public ipv6 address', 'setup')
    try:
        request = urllib2.Request(
            settings.app.public_ip6_server)
        response = urllib2.urlopen(request, timeout=timeout)
        settings.local.public_ip6 = json.load(response)['ip']
    except Exception:
        pass

    if not settings.local.public_ip:
        logger.warning('Failed to get public ip address', 'setup')
def remove_host(self, host_id):
    """Remove a host from this server's host list.

    Pulls the host and any of its instance documents from the server
    document; if an active instance was removed, republishes a start
    event on the remaining preferred hosts. When the last host is
    removed the server is marked OFFLINE.
    """
    if host_id not in self.hosts:
        logger.warning(
            "Attempted to remove host that does not exists", "server",
            server_id=self.id, host_id=host_id
        )
        return

    logger.debug("Removing host from server", "server",
        server_id=self.id, host_id=host_id)

    self.hosts.remove(host_id)

    # Pull the host and its instance record in one update; only matches
    # when the host currently has a running instance on this server.
    response = self.collection.update(
        {"_id": self.id, "instances.host_id": host_id},
        {"$pull": {"hosts": host_id, "instances": {"host_id": host_id}},
            "$inc": {"instances_count": -1}},
    )

    if response["updatedExisting"]:
        # An active instance was removed, restart on remaining hosts.
        self.publish("start", extra={"prefered_hosts":
            host.get_prefered_hosts(self.hosts, self.replica_count)})

    # Pull the host even when it had no instance; new=True returns the
    # post-update document so the remaining host list can be checked.
    doc = self.collection.find_and_modify(
        {"_id": self.id}, {"$pull": {"hosts": host_id}},
        {"hosts": True}, new=True
    )

    if doc and not doc["hosts"]:
        self.status = OFFLINE
        self.commit("status")
def rollback_task(self):
    """Undo a partial ip pool assignment.

    Drops the addresses generated for the new network, restores the
    previous network value and releases this queue's network lock.
    """
    svr = self.server
    if not svr:
        logger.warning('Tried to run assign_ip_pool rollback queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Remove every pool entry created for the new network.
    svr.ip_pool.collection.remove({
        'network': self.network,
        'server_id': self.server_id,
    })

    server_coll = svr.collection

    # Restore the old network value, but only if the new one is still set.
    server_coll.update(
        {'_id': self.server_id, 'network': self.network},
        {'$set': {'network': self.old_network}},
    )

    # Release the network lock held by this queue.
    server_coll.update(
        {'_id': self.server_id, 'network_lock': self.id},
        {'$unset': {'network_lock': ''}},
    )
def sync_public_ip(attempts=1, timeout=5, update=False):
    """Best-effort refresh of the host's public IPv4/IPv6 addresses.

    Retries the IPv4 lookup up to ``attempts`` times, then attempts a
    single IPv6 lookup, storing results on ``settings.local``. When
    ``update`` is True the host document's auto public addresses are
    written back as well. Failures are logged, never raised.
    """
    from pritunl import logger

    for i in xrange(attempts):
        if i:
            time.sleep(3)
            logger.info('Retrying get public ip address', 'utils')

        try:
            request = urllib2.Request(
                settings.app.public_ip_server)
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = str(json.load(response)['ip'])
            break
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit propagate; the lookup stays best-effort.
            pass

    try:
        request = urllib2.Request(
            settings.app.public_ip6_server)
        response = urllib2.urlopen(request, timeout=timeout)
        settings.local.public_ip6 = str(json.load(response)['ip'])
    except Exception:
        pass

    if not settings.local.public_ip:
        logger.warning('Failed to get public ip address', 'utils')

    if update:
        settings.local.host.collection.update({
            '_id': settings.local.host.id,
        }, {'$set': {
            'auto_public_address': settings.local.public_ip,
            'auto_public_address6': settings.local.public_ip6,
        }})
def task(self):
    """Lock the server network and generate its ip pool.

    Raises ServerNetworkLocked when another queue already holds the
    network lock.
    """
    svr = self.server
    if not svr:
        logger.warning('Tried to run assign_ip_pool task queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Acquire the lock atomically: match either our own lock id or the
    # absence of any lock, then set network and lock in one update.
    spec = {
        '_id': self.server_id,
        '$or': [
            {'network_lock': self.id},
            {'network_lock': {'$exists': False}},
        ],
    }
    update = {'$set': {
        'network': self.network,
        'network_lock': self.id,
    }}
    response = self.server_collection.update(spec, update)

    if not response['updatedExisting']:
        raise ServerNetworkLocked('Server network is locked', {
            'server_id': self.server_id,
            'queue_id': self.id,
            'queue_type': self.type,
        })

    svr.ip_pool.assign_ip_pool(self.network)
def sync_public_ip(attempts=1, timeout=5):
    """Best-effort refresh of the host's public IPv4/IPv6 addresses.

    Uses the dedicated server's /ip endpoint when one is configured,
    otherwise the public ip server. The IPv6 lookup is skipped in
    dedicated mode. Failures are logged, never raised.
    """
    from pritunl import logger

    for i in xrange(attempts):
        url = settings.app.public_ip_server
        if settings.app.dedicated:
            url = settings.app.dedicated + '/ip'

        if i:
            time.sleep(3)
            logger.info('Retrying get public ip address', 'utils')

        try:
            request = urllib2.Request(url)
            request.add_header('User-Agent', 'pritunl')
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = str(json.load(response)['ip'])
            break
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit propagate; the lookup stays best-effort.
            pass

    if not settings.app.dedicated:
        try:
            request = urllib2.Request(
                settings.app.public_ip6_server)
            request.add_header('User-Agent', 'pritunl')
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip6 = str(json.load(response)['ip'])
        except Exception:
            pass

    if not settings.local.public_ip:
        logger.warning('Failed to get public ip address', 'utils')
def resources_acquire(self):
    """Acquire the resources needed to start this server instance.

    Registers this instance as the single running instance for the
    server, stopping any duplicate that is already registered, then
    acquires a network interface for the server's adapter type.

    Raises:
        TypeError: if an interface was already acquired.
    """
    if self.interface:
        raise TypeError('Server resource already acquired')

    _instances_lock.acquire()
    try:
        instance = _instances.get(self.server.id)
        if instance:
            logger.warning(
                'Stopping duplicate instance', 'server',
                server_id=self.server.id,
                instance_id=instance.id,
            )

            try:
                instance.stop_process()
            except Exception:
                # Narrowed from a bare except so interrupts propagate;
                # a failed stop is logged and given time to settle.
                logger.exception(
                    'Failed to stop duplicate instance', 'server',
                    server_id=self.server.id,
                    instance_id=instance.id,
                )
                time.sleep(5)

        _instances[self.server.id] = self
    finally:
        _instances_lock.release()

    self.interface = utils.interface_acquire(self.server.adapter_type)
def rollback_actions(self):
    """Roll back a failed transaction's committed actions.

    Extends the transaction ttl while rolling back, runs the rollback
    actions and removes the transaction document on success.
    """
    logger.warning('Transaction failed rolling back...', 'transaction',
        actions=self.action_sets,
    )

    # Extend the ttl so the transaction is not reaped mid-rollback;
    # only matches while the transaction is still in ROLLBACK state.
    response = self.transaction_collection.update({
        '_id': self.id,
        'state': ROLLBACK,
    }, {
        '$set': {
            'ttl_timestamp': utils.now() + \
                datetime.timedelta(seconds=self.ttl),
        },
    })

    if not response['updatedExisting']:
        # Another runner already owns this rollback.
        return

    try:
        self._rollback_actions()
    except:
        # Broad on purpose: log any rollback failure before re-raising.
        logger.exception('Error occurred rolling back ' +
            'transaction actions', 'transaction',
            transaction_id=self.id,
        )
        raise

    self.transaction_collection.remove(self.id)
def setup_all():
    """Run all setup stages for the pritunl server process.

    Order matters: local state and logging first so later stages can
    log. Any failure is logged and re-raised.
    """
    from pritunl import logger

    setup_local()
    setup_logger()

    try:
        setup_temp_path()
        setup_app()
        setup_signal_handler()
        setup_server()
        setup_mongo()
        setup_cache()
        setup_public_ip()
        setup_host()
        setup_server_listeners()
        setup_dns()
        setup_monitoring()
        setup_poolers()
        setup_host_fix()
        setup_subscription()
        setup_runners()
        setup_handlers()
        setup_check()

        # Warn when the open file limit is too low for busy servers.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 25000 or hard < 25000:
            logger.warning(
                'Open file ulimit is lower then recommended', 'setup',
            )
    except:
        # Broad on purpose: log whatever broke setup, then re-raise.
        logger.exception('Pritunl setup failed', 'setup')
        raise
def assign_ip_pool(self, network, network_start, network_end,
        network_hash):
    """Generate the ip pool for a server network range.

    Assigns sequential addresses from get_ip_pool to every user
    (including pooled users) of every org on the server, stopping at
    network_end when one is set. Documents are upserted in a bulk
    operation keyed on the integer address.
    """
    server_id = self.server.id
    pool_end = False
    network = ipaddress.IPv4Network(network)

    if network_start:
        network_start = ipaddress.IPv4Address(network_start)
    if network_end:
        network_end = ipaddress.IPv4Address(network_end)

    ip_pool = self.get_ip_pool(network, network_start)
    if not ip_pool:
        return

    bulk = self.collection.initialize_unordered_bulk_op()
    bulk_empty = True

    for org in self.server.iter_orgs():
        org_id = org.id
        for user in org.iter_users(include_pool=True):
            try:
                remote_ip_addr = ip_pool.next()
                # Treat passing network_end the same as exhaustion.
                if network_end and remote_ip_addr > network_end:
                    raise StopIteration()
            except StopIteration:
                pool_end = True
                break
            doc_id = int(remote_ip_addr)

            spec = {
                '_id': doc_id,
            }
            doc = {'$set': {
                '_id': doc_id,
                'network': network_hash,
                'server_id': server_id,
                'org_id': org_id,
                'user_id': user.id,
                'address': '%s/%s' % (remote_ip_addr,
                    network.prefixlen),
            }}

            if bulk:
                bulk.find(spec).upsert().update(doc)
                bulk_empty = False
            else:
                self.collection.update(spec, doc, upsert=True)

        if pool_end:
            logger.warning('Failed to assign ip addresses ' +
                'to server, ip pool empty', 'server',
                server_id=server_id,
                org_id=org_id,
            )
            break

    if not bulk_empty:
        bulk.execute()
def complete_task(self):
    """Fire USERS_UPDATED events for every organization on the server."""
    svr = self.server
    if not svr:
        logger.warning('Tried to run assign_ip_pool complete queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Notify listeners that user ip assignments may have changed.
    for org_id in svr.organizations:
        event.Event(type=USERS_UPDATED, resource_id=org_id)
def assign_ip_pool(self, network):
    """Generate the ip pool for a server network.

    Assigns one address from the network to every user (including
    pooled users) in every organization attached to the server. Uses a
    bulk upsert when the mongo driver supports it.
    """
    server_id = self.server.id
    pool_end = False
    ip_network = ipaddress.IPv4Network(network)
    ip_pool = ip_network.iterhosts()
    # Discard the first host address; it is not assigned to users.
    ip_pool.next()

    if mongo.has_bulk:
        bulk = self.collection.initialize_unordered_bulk_op()
        bulk_empty = True
    else:
        bulk = None
        bulk_empty = None

    for org in self.server.iter_orgs():
        org_id = org.id
        for user in org.iter_users(include_pool=True):
            try:
                remote_ip_addr = ip_pool.next()
            except StopIteration:
                # Network exhausted before every user got an address.
                pool_end = True
                break
            doc_id = int(remote_ip_addr)

            spec = {
                '_id': doc_id,
            }
            doc = {'$set': {
                '_id': doc_id,
                'network': network,
                'server_id': server_id,
                'org_id': org_id,
                'user_id': user.id,
                'address': '%s/%s' % (remote_ip_addr,
                    ip_network.prefixlen),
            }}

            if bulk:
                bulk.find(spec).upsert().update(doc)
                bulk_empty = False
            else:
                self.collection.update(spec, doc, upsert=True)

        if pool_end:
            logger.warning('Failed to assign ip addresses ' +
                'to server, ip pool empty', 'server',
                org_id=org_id,
            )
            break

    if bulk and not bulk_empty:
        bulk.execute()
def fill_user():
    """Pooler that tops up the pooled user reserve for every org.

    Counts existing pooled users plus queued pooled-user creations per
    (org, type), then creates new pooled users for the pools furthest
    below their configured sizes, round-robin across orgs.
    """
    collection = mongo.get_collection("users")
    # NOTE(review): org_collection is unused in this function.
    org_collection = mongo.get_collection("organizations")
    queue_collection = mongo.get_collection("queue")

    orgs = {}
    orgs_count = utils.LeastCommonCounter()
    type_to_size = {CERT_CLIENT_POOL: settings.app.user_pool_size,
        CERT_SERVER_POOL: settings.app.server_user_pool_size}

    # Seed every org with zero counts so empty pools are still visited.
    for org in organization.iter_orgs(type=None):
        orgs[org.id] = org
        orgs_count[org.id, CERT_CLIENT_POOL] = 0
        orgs_count[org.id, CERT_SERVER_POOL] = 0

    # Count pooled users that already exist.
    pools = collection.aggregate(
        [
            {"$match": {"type": {"$in": (CERT_CLIENT_POOL,
                CERT_SERVER_POOL)}}},
            {"$project": {"org_id": True, "type": True}},
            {"$group": {"_id": {"org_id": "$org_id", "type": "$type"},
                "count": {"$sum": 1}}},
        ]
    )
    for pool in pools:
        orgs_count[pool["_id"]["org_id"],
            pool["_id"]["type"]] += pool["count"]

    # Also count pooled users that are queued for creation.
    pools = queue_collection.aggregate(
        [
            {"$match": {"type": "init_user_pooled",
                "user_doc.type": {"$in": (CERT_CLIENT_POOL,
                    CERT_SERVER_POOL)}}},
            {"$project": {"user_doc.org_id": True,
                "user_doc.type": True}},
            {"$group": {"_id": {"org_id": "$user_doc.org_id",
                "type": "$user_doc.type"}, "count": {"$sum": 1}}},
        ]
    )
    for pool in pools:
        orgs_count[pool["_id"]["org_id"],
            pool["_id"]["type"]] += pool["count"]

    new_users = []
    # least_common yields the lowest counts first, so the first pool
    # at or above its target size ends the scan.
    for org_id_user_type, count in orgs_count.least_common():
        org_id, user_type = org_id_user_type
        pool_size = type_to_size[user_type]
        if count >= pool_size:
            break

        org = orgs.get(org_id)
        if not org:
            logger.warning("Pooler cannot find org from user_count",
                "pooler", org_id=org_id, user_type=user_type)
            continue

        new_users.append([(org, user_type)] * (pool_size - count))

    # Interleave creation across pools so no single org starves others.
    for org, user_type in utils.roundrobin(*new_users):
        org.new_user(type=user_type, block=False)
def assign_ip_addr(self, org_id, user_id):
    """Assign an ip address from the server pool to a user.

    First tries to claim an existing unassigned pool address; failing
    that, scans past the highest allocated address and inserts a new
    document, relying on the unique _id to avoid double assignment.
    Logs a warning when the pool is exhausted.
    """
    network = self.server.network
    server_id = self.server.id

    # Fast path: claim any pooled address with no user attached.
    response = self.collection.update({
        'network': network,
        'server_id': server_id,
        'user_id': {'$exists': False},
    }, {'$set': {
        'org_id': org_id,
        'user_id': user_id,
    }})
    if response['updatedExisting']:
        return

    ip_network = ipaddress.IPv4Network(network)
    ip_pool = ip_network.iterhosts()
    # Discard the first host address; it is not assigned to users.
    ip_pool.next()

    try:
        # Advance the iterator past the highest address already in use.
        doc = self.collection.find({
            'network': network,
            'server_id': server_id,
        }).sort('_id', pymongo.DESCENDING)[0]
        if doc:
            last_addr = doc['_id']
            for remote_ip_addr in ip_pool:
                if int(remote_ip_addr) == last_addr:
                    break
    except IndexError:
        # No addresses allocated yet, start from the beginning.
        pass

    for remote_ip_addr in ip_pool:
        try:
            self.collection.insert({
                '_id': int(remote_ip_addr),
                'network': network,
                'server_id': server_id,
                'org_id': org_id,
                'user_id': user_id,
                'address': '%s/%s' % (remote_ip_addr,
                    ip_network.prefixlen),
            })
            return
        except pymongo.errors.DuplicateKeyError:
            # Address raced by another assignment, try the next one.
            pass

    logger.warning('Failed to assign ip address ' +
        'to user, ip pool empty', 'server',
        org_id=org_id,
        user_id=user_id,
    )
def remove_host(self, host_id):
    """Remove a host from this server.

    Pulls the host and any running instance record from the server
    document. When an active instance was removed, a start event is
    published to a random sample of the remaining hosts. The server is
    marked OFFLINE when its last host is removed.
    """
    if host_id not in self.hosts:
        logger.warning('Attempted to remove host that does not exists',
            'server',
            server_id=self.id,
            host_id=host_id,
        )
        return

    logger.debug('Removing host from server', 'server',
        server_id=self.id,
        host_id=host_id,
    )

    self.hosts.remove(host_id)

    # Pull the host and its instance entry together; this only matches
    # when the host has a running instance on this server.
    response = self.collection.update({
        '_id': self.id,
        'instances.host_id': host_id,
    }, {
        '$pull': {
            'hosts': host_id,
            'instances': {
                'host_id': host_id,
            },
        },
        '$inc': {
            'instances_count': -1,
        },
    })

    if response['updatedExisting']:
        # Restart on up to replica_count of the remaining hosts.
        prefered_host = random.sample(self.hosts,
            min(self.replica_count, len(self.hosts)))
        self.publish('start', extra={
            'prefered_hosts': prefered_host,
        })

    # Pull the host even when it had no instance. Fix: new=True was
    # missing, so the returned document reflected the host list from
    # before the pull and the last-host OFFLINE check never fired for
    # the final host. Matches the sibling remove_host implementation.
    doc = self.collection.find_and_modify({
        '_id': self.id,
    }, {
        '$pull': {
            'hosts': host_id,
        },
    }, {
        'hosts': True,
    }, new=True)

    if doc and not doc['hosts']:
        self.status = OFFLINE
        self.commit('status')
def assign_ip_pool(self, network):
    """Generate ip pool documents for every user on the server.

    Iterates all orgs and users (including pooled users), assigning
    sequential host addresses from the network. Uses a bulk upsert
    when the driver supports it, falling back to per-doc updates.
    """
    server_id = self.server.id
    pool_end = False
    ip_network = ipaddress.IPv4Network(network)
    ip_pool = ip_network.iterhosts()
    # Discard the first host address; it is not assigned to users.
    ip_pool.next()

    if mongo.has_bulk:
        bulk = self.collection.initialize_unordered_bulk_op()
        bulk_empty = True
    else:
        bulk = None
        bulk_empty = None

    for org in self.server.iter_orgs():
        org_id = org.id
        for user in org.iter_users(include_pool=True):
            try:
                remote_ip_addr = ip_pool.next()
            except StopIteration:
                # Network exhausted before every user got an address.
                pool_end = True
                break
            doc_id = int(remote_ip_addr)

            spec = {"_id": doc_id}
            doc = {
                "$set": {
                    "_id": doc_id,
                    "network": network,
                    "server_id": server_id,
                    "org_id": org_id,
                    "user_id": user.id,
                    "address": "%s/%s" % (remote_ip_addr,
                        ip_network.prefixlen),
                }
            }

            if bulk:
                bulk.find(spec).upsert().update(doc)
                bulk_empty = False
            else:
                self.collection.update(spec, doc, upsert=True)

        if pool_end:
            logger.warning("Failed to assign ip addresses " +
                "to server, ip pool empty", "server", org_id=org_id)
            break

    if bulk and not bulk_empty:
        bulk.execute()
def remove_rule(self, rule):
    """Untrack an iptables rule and delete it from the tables.

    Logs a warning when the rule was not tracked.
    """
    if self.cleared:
        # Rules were already flushed, nothing to remove.
        return

    self._lock.acquire()
    try:
        try:
            self._other.remove(rule)
            self._remove_iptables_rule(rule)
        except ValueError:
            logger.warning('Lost iptables rule', 'iptables',
                rule=rule,
            )
    finally:
        self._lock.release()
def remove_rule6(self, rule, silent=False):
    """Untrack an ip6tables rule and delete it from the tables.

    Logs a warning when the rule was not tracked unless silent is set.
    """
    if self.cleared:
        # Rules were already flushed, nothing to remove.
        return

    self._lock.acquire()
    try:
        try:
            self._other6.remove(rule)
            self._remove_iptables_rule(rule, ipv6=True)
        except ValueError:
            if not silent:
                logger.warning('Lost ip6tables rule', 'iptables',
                    rule=rule,
                )
    finally:
        self._lock.release()
def _upgrade_auth():
    """Migrate admin credentials from the legacy json db file.

    Reads the file referenced by settings.conf.db_path and upserts its
    auth username/password into the administrators collection with
    fresh token/secret values. Logs a warning when the file or the
    credentials are missing.
    """
    username = None
    password = None
    administrators_db = get_collection('administrators')

    db_path = settings.conf.db_path
    if db_path and os.path.exists(db_path):
        with open(db_path, 'r') as db_file:
            db_data = json.loads(db_file.read())

        # Legacy rows are (key, _, _, value) tuples.
        for key, _, _, value in db_data['data']:
            if key == 'auth':
                username = value.get('username')
                password = value.get('password')
    else:
        logger.warning(
            'No db file found in upgraded', 'upgrade',
            path=db_path,
        )

    if username and password:
        update_doc = {
            'username': username,
            'password': password,
            'token': utils.generate_secret(),
            'secret': utils.generate_secret(),
            'default': False,
            'sessions': [],
        }

        # Replace the existing admin document when present, otherwise
        # upsert keyed on the username.
        doc = administrators_db.find_one()
        if doc:
            spec = {
                '_id': doc['_id'],
            }
        else:
            spec = {
                'username': username,
            }
        administrators_db.update(spec, update_doc, upsert=True)
    else:
        logger.warning(
            'Username and password not upgraded', 'upgrade',
            path=db_path,
        )
def task(self):
    """Lock the server network and build its ip pool.

    Sets the new network range and a ttl'd network lock in a single
    atomic update, then generates the address pool. Raises
    ServerNetworkLocked if another queue holds the lock.
    """
    svr = self.server
    if not svr:
        logger.warning(
            'Tried to run assign_ip_pool task queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Lock expires on its own if this queue dies mid-assignment.
    lock_ttl = utils.now() + datetime.timedelta(minutes=6)

    response = self.server_collection.update({
        '_id': self.server_id,
        '$or': [
            {'network_lock': self.id},
            {'network_lock': {'$exists': False}},
        ],
    }, {'$set': {
        'network': self.network,
        'network_start': self.network_start,
        'network_end': self.network_end,
        'network_lock': self.id,
        'network_lock_ttl': lock_ttl,
    }})

    if not response['updatedExisting']:
        raise ServerNetworkLocked(
            'Server network is locked', {
                'server_id': self.server_id,
                'queue_id': self.id,
                'queue_type': self.type,
            })

    svr.ip_pool.assign_ip_pool(self.network, self.network_start,
        self.network_end, self.network_hash)
def auth_okta(username):
    """Check that an Okta user is assigned to the configured app.

    Returns True when assigned (or when no app id is configured),
    False when the user is unknown or unassigned, None on api errors.
    """
    user_id = get_user_id(username)
    if not user_id:
        return False

    okta_app_id = settings.app.sso_okta_app_id
    if not okta_app_id:
        # No application check configured, the user lookup is enough.
        return True

    try:
        response = requests.get(
            _getokta_url() +
                '/api/v1/apps?filter=user.id+eq+"%s"' % user_id,
            headers={
                'Accept': 'application/json',
                'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
            },
        )
    except httplib.HTTPException:
        logger.exception(
            'Okta api error', 'sso',
            username=username,
        )
        return None

    if response.status_code != 200:
        logger.error(
            'Okta api error', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    # Response lists the applications assigned to the user.
    if any(app['id'] == okta_app_id for app in response.json()):
        return True

    logger.warning(
        'Okta user is not assigned to application', 'sso',
        username=username,
        okta_app_id=okta_app_id,
    )
    return False
def _check_whitelist(self):
    """Set self.whitelisted when the client address falls inside any
    configured sso whitelist network."""
    whitelist = settings.app.sso_whitelist
    if not whitelist:
        return

    remote_ip = ipaddress.IPAddress(self.remote_ip)
    for network_str in whitelist:
        try:
            network = ipaddress.IPNetwork(network_str)
        except (ipaddress.AddressValueError, ValueError):
            # Skip unparsable entries but keep checking the rest.
            logger.warning('Invalid whitelist network', 'authorize',
                network=network_str,
            )
            continue

        if remote_ip in network:
            self.whitelisted = True
            break
def remove_rule(self, rule):
    """Remove a tracked iptables rule under the instance lock.

    Warns when the rule was not in the tracked list.
    """
    if self.cleared:
        return

    self._lock.acquire()
    try:
        try:
            # list.remove raises ValueError when untracked.
            self._other.remove(rule)
            self._remove_iptables_rule(rule)
        except ValueError:
            logger.warning(
                'Lost iptables rule', 'iptables',
                rule=rule,
            )
    finally:
        self._lock.release()
def load_public_ip(attempts=1, timeout=5):
    """Best-effort lookup of this host's public IPv4 address.

    Queries the configured public ip server up to ``attempts`` times,
    sleeping between retries, and stores the result on
    ``settings.local.public_ip``. Failures are logged, never raised.
    """
    for i in xrange(attempts):
        if settings.local.public_ip:
            return
        if i:
            time.sleep(3)
            logger.info('Retrying get public ip address', 'setup')
        logger.debug('Getting public ip address', 'setup')

        try:
            request = urllib2.Request(settings.app.public_ip_server)
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = json.load(response)['ip']
            break
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit propagate; the lookup stays best-effort.
            pass

    if not settings.local.public_ip:
        logger.warning('Failed to get public ip address', 'setup')
def auth_okta(username):
    """Check that an Okta user is assigned to the configured app via
    the direct app-user endpoint.

    Returns True when assigned (or when no app id is configured),
    False when the user is unknown, None on api errors.
    """
    user_id = get_user_id(username)
    if not user_id:
        return False

    okta_app_id = settings.app.sso_okta_app_id
    if not okta_app_id:
        # No application check configured, the user lookup is enough.
        return True

    url = _getokta_url() + \
        '/api/v1/apps/%s/users/%s' % (okta_app_id, user_id)
    try:
        response = requests.get(
            url,
            headers={
                'Accept': 'application/json',
                'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
            },
        )
    except httplib.HTTPException:
        logger.exception('Okta api error', 'sso',
            username=username,
            okta_app_id=okta_app_id,
            user_id=user_id,
        )
        return None

    if response.status_code != 200:
        logger.error('Okta api error', 'sso',
            username=username,
            okta_app_id=okta_app_id,
            user_id=user_id,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    # A non-empty body means the user is assigned to the application.
    if response.json():
        return True

    logger.warning('Okta user is not assigned to application', 'sso',
        username=username,
        okta_app_id=okta_app_id,
        user_id=user_id,
    )
    return False
def remove_host(self, host_id):
    """Remove a host from this server.

    Pulls the host and any running instance record from the server
    document; when an active instance was removed, publishes a start
    event with new preferred hosts. Marks the server OFFLINE if no
    hosts remain after the pull.
    """
    if host_id not in self.hosts:
        logger.warning('Attempted to remove host that does not exists',
            'server',
            server_id=self.id,
            host_id=host_id,
        )
        return

    self.hosts.remove(host_id)

    # Pull both the host and its instance entry; matches only when an
    # instance is currently running on that host.
    response = self.collection.update({
        '_id': self.id,
        'instances.host_id': host_id,
    }, {
        '$pull': {
            'hosts': host_id,
            'instances': {
                'host_id': host_id,
            },
        },
        '$inc': {
            'instances_count': -1,
        },
    })

    if response['updatedExisting']:
        # An active instance was removed, restart on preferred hosts.
        self.publish('start', extra={
            'prefered_hosts': host.get_prefered_hosts(
                self.hosts, self.replica_count),
        })

    # Pull the host even when no instance matched; new=True returns the
    # post-update document so the remaining host list can be checked.
    doc = self.collection.find_and_modify({
        '_id': self.id,
    }, {
        '$pull': {
            'hosts': host_id,
        },
    }, {
        'hosts': True,
    }, new=True)

    if doc and not doc['hosts']:
        self.status = OFFLINE
        self.commit('status')
def _upgrade_auth():
    """Migrate admin credentials from the legacy json db file.

    Reads the file referenced by settings.conf.db_path and upserts its
    auth username/password into the administrators collection with
    fresh token/secret values. Logs a warning when the file or the
    credentials are missing.
    """
    username = None
    password = None
    administrators_db = get_collection('administrators')

    db_path = settings.conf.db_path
    if db_path and os.path.exists(db_path):
        with open(db_path, 'r') as db_file:
            db_data = json.loads(db_file.read())

        # Legacy rows are (key, _, _, value) tuples.
        for key, _, _, value in db_data['data']:
            if key == 'auth':
                username = value.get('username')
                password = value.get('password')
    else:
        logger.warning('No db file found in upgraded', 'upgrade',
            path=db_path,
        )

    if username and password:
        update_doc = {
            'username': username,
            'password': password,
            'token': utils.generate_secret(),
            'secret': utils.generate_secret(),
            'default': False,
            'sessions': [],
        }

        # Replace the existing admin document when present, otherwise
        # upsert keyed on the username.
        doc = administrators_db.find_one()
        if doc:
            spec = {
                '_id': doc['_id'],
            }
        else:
            spec = {
                'username': username,
            }
        administrators_db.update(spec, update_doc, upsert=True)
    else:
        logger.warning('Username and password not upgraded', 'upgrade',
            path=db_path,
        )
def task(self):
    """Atomically take the server network lock and assign the ip pool.

    Raises ServerNetworkLocked when another queue holds the lock.
    """
    if not self.server:
        logger.warning(
            "Tried to run assign_ip_pool task queue " +
            "but server is no longer available", "queues",
            server_id=self.server_id,
        )
        return

    # Match servers that are unlocked or already locked by this queue.
    lock_ours = {"network_lock": self.id}
    lock_free = {"network_lock": {"$exists": False}}
    response = self.server_collection.update(
        {"_id": self.server_id, "$or": [lock_ours, lock_free]},
        {"$set": {"network": self.network, "network_lock": self.id}},
    )

    if not response["updatedExisting"]:
        raise ServerNetworkLocked(
            "Server network is locked",
            {"server_id": self.server_id, "queue_id": self.id,
                "queue_type": self.type},
        )

    self.server.ip_pool.assign_ip_pool(self.network)
def setup_all():
    """Run every setup stage for the pritunl server process.

    Local state and logging come first so later stages can log. On any
    failure the error is logged, the global interrupt is set so other
    threads shut down, and the exception is re-raised.
    """
    from pritunl import logger

    setup_local()
    setup_logger()

    try:
        setup_clean()
        setup_temp_path()
        setup_signal_handler()
        setup_vault()
        setup_server()
        setup_mongo()
        setup_settings()
        setup_boto_conf()
        setup_public_ip()
        setup_host()
        setup_cache()
        setup_server_listeners()
        setup_dns()
        setup_monitoring()
        setup_poolers()
        setup_host_fix()
        setup_subscription()
        setup_ndppd()
        setup_runners()
        setup_handlers()
        setup_check()
        setup_plugins()
        setup_demo()

        # Warn when the open file limit is too low for busy servers.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 25000 or hard < 25000:
            logger.warning(
                'Open file ulimit is lower then recommended', 'setup',
            )
    except:
        # Broad on purpose: any setup failure must trip the global
        # interrupt before re-raising.
        logger.exception('Pritunl setup failed', 'setup')
        set_global_interrupt()
        raise
def get_user_id(username):
    """Look up an Okta user id for a username.

    Returns the user id string for an active user, or None when the
    lookup fails, the user is unknown or the account is not active.
    """
    url = _getokta_url() + '/api/v1/users/%s' % urllib.quote(username)
    headers = {
        'Accept': 'application/json',
        'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
    }

    try:
        response = requests.get(url, headers=headers)
    except httplib.HTTPException:
        logger.exception('Okta api error', 'sso',
            username=username,
        )
        return None

    if response.status_code != 200:
        logger.error('Okta api error', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    data = response.json()
    user_id = data.get('id')
    if not user_id:
        logger.error('Okta username not found', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )
        return None

    # Reject deactivated, suspended or pending accounts.
    if data['status'].lower() != 'active':
        logger.warning('Okta user is not active', 'sso',
            username=username,
        )
        return None

    return user_id
def post_task(self):
    """Clean up after a successful ip pool assignment.

    Removes the address documents for the old network and releases the
    network lock held by this queue.
    """
    svr = self.server
    if not svr:
        logger.warning(
            'Tried to run assign_ip_pool post queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Old network addresses are no longer valid, drop them.
    svr.ip_pool.collection.remove({
        'network': self.old_network,
        'server_id': self.server_id,
    })

    # Release the lock only if this queue still owns it.
    svr.collection.update(
        {
            '_id': self.server_id,
            'network_lock': self.id,
        },
        {'$unset': {
            'network_lock': '',
        }})
def sso_authenticate_post():
    """Handle a Duo key-based sso authentication request.

    Only available when sso mode is Duo and Duo is not in passcode
    mode. Tries the supplied username and, for email-style usernames,
    the local part as well, then runs the Duo plugin authentication
    before validating the user.
    """
    if settings.app.sso != DUO_AUTH or \
            settings.app.sso_duo_mode == 'passcode':
        return flask.abort(405)

    username = utils.json_filter_str('username')
    usernames = [username]
    email = None
    if '@' in username:
        email = username
        # Also try the local part of an email-style username.
        usernames.append(username.split('@')[0])

    valid = False
    for i, username in enumerate(usernames):
        try:
            duo_auth = sso.Duo(
                username=username,
                factor=settings.app.sso_duo_mode,
                remote_ip=utils.get_remote_addr(),
                auth_type='Key',
            )
            valid = duo_auth.authenticate()
            break
        except InvalidUser:
            # Only warn once every candidate username has failed.
            if i == len(usernames) - 1:
                logger.warning(
                    'Invalid duo username', 'sso',
                    username=username,
                )

    if valid:
        # Plugin hook may veto the login or override the org/groups.
        valid, org_id, groups = sso.plugin_sso_authenticate(
            sso_type='duo',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
        )
        if not valid:
            logger.warning(
                'Duo plugin authentication not valid', 'sso',
                username=username,
            )
            return flask.abort(401)
        groups = set(groups or [])
    else:
        logger.warning(
            'Duo authentication not valid', 'sso',
            username=username,
        )
        return flask.abort(401)

    if not org_id:
        org_id = settings.app.sso_org

    return _validate_user(username, email, DUO_AUTH, org_id, groups)
def task(self):
    """Wait for any network lock to clear, then assign the user's ip."""
    svr = server.get_by_id(self.server_id)
    if not svr:
        logger.warning(
            "Tried to run assign_ip_addr queue " +
            "but server is no longer available", "queues",
            server_id=self.server_id,
        )
        return

    # Poll up to five times, two seconds apart, for the lock to clear.
    for _ in xrange(5):
        if not svr.network_lock:
            break
        time.sleep(2)
        svr.load()

    if svr.network_lock:
        raise ServerNetworkLocked(
            "Server network is locked",
            {"server_id": svr.id, "queue_id": self.id,
                "queue_type": self.type},
        )

    svr.ip_pool.assign_ip_addr(self.org_id, self.user_id)
def task(self):
    """Take the network lock on the server and build its ip pool.

    Raises ServerNetworkLocked when another queue holds the lock.
    """
    svr = self.server
    if not svr:
        logger.warning(
            'Tried to run assign_ip_pool queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Match servers that are unlocked or already locked by this queue.
    spec = {
        '_id': svr.id,
        '$or': [
            {'network_lock': self.id},
            {'network_lock': {'$exists': False}},
        ],
    }
    update = {'$set': {
        'network': self.network,
        'network_lock': self.id,
    }}
    response = svr.collection.update(spec, update)

    if not response['updatedExisting']:
        raise ServerNetworkLocked(
            'Server network is locked', {
                'server_id': svr.id,
                'queue_id': self.id,
                'queue_type': self.type,
            })

    svr.ip_pool.assign_ip_pool(self.network)
def task(self):
    """Assign an ip address to a user once the network lock clears."""
    svr = server.get_by_id(self.server_id)
    if not svr:
        logger.warning('Tried to run assign_ip_addr queue ' +
            'but server is no longer available', 'queues',
            server_id=self.server_id,
        )
        return

    # Give an in-progress pool rebuild up to five reloads to finish.
    attempts = 5
    while attempts and svr.network_lock:
        attempts -= 1
        time.sleep(2)
        svr.load()

    if svr.network_lock:
        raise ServerNetworkLocked('Server network is locked', {
            'server_id': svr.id,
            'queue_id': self.id,
            'queue_type': self.type,
        })

    svr.ip_pool.assign_ip_addr(self.org_id, self.user_id)
def sync_public_ip(attempts=1, timeout=5, update=False):
    """Best-effort refresh of the host's public IPv4/IPv6 addresses.

    Queries the configured public ip servers up to ``attempts`` times,
    storing results on ``settings.local``. When ``update`` is True the
    host document's auto public addresses are written back as well.
    Lookup failures are logged, never raised.
    """
    from pritunl import logger

    for i in xrange(attempts):
        if i:
            time.sleep(3)
            logger.info('Retrying get public ip address', 'utils')
        logger.debug('Getting public ip address', 'utils')

        try:
            request = urllib2.Request(settings.app.public_ip_server)
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = json.load(response)['ip']
            break
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit propagate; the lookup stays best-effort.
            pass

    logger.debug('Getting public ipv6 address', 'utils')
    try:
        request = urllib2.Request(settings.app.public_ip6_server)
        response = urllib2.urlopen(request, timeout=timeout)
        settings.local.public_ip6 = json.load(response)['ip']
    except Exception:
        pass

    if not settings.local.public_ip:
        logger.warning('Failed to get public ip address', 'utils')

    if update:
        settings.local.host.collection.update({
            '_id': settings.local.host.id,
        }, {
            '$set': {
                'auto_public_address': settings.local.public_ip,
                'auto_public_address6': settings.local.public_ip6,
            }
        })
def auth_jumpcloud(username):
    """Verify a JumpCloud user by email.

    Returns True when a matching, activated, unlocked and unsuspended
    system user exists; False on api errors or when the user is
    missing or disabled.
    """
    query = '/api/systemusers?filter=email:$eq:%s' % (
        urllib.parse.quote(username))
    try:
        response = requests.get(
            JUMPCLOUD_URL + query,
            headers={
                'Accept': 'application/json',
                'X-Api-Key': settings.app.sso_jumpcloud_secret,
            },
        )
    except http.client.HTTPException:
        logger.exception('JumpCloud api error', 'sso',
            username=username,
        )
        return False

    if response.status_code != 200:
        logger.error('JumpCloud api error', 'sso',
            username=username,
            status_code=response.status_code,
            response=response.content,
        )
        return False

    data = response.json()
    total = data.get('totalCount')
    if not total or total < 1:
        logger.warning('JumpCloud user not found', 'sso',
            username=username,
        )
        return False

    for user_data in data.get('results') or []:
        if user_data.get('email') != username:
            continue

        disabled = (user_data.get('account_locked') or
            user_data.get('suspended') or
            not user_data.get('activated'))
        if disabled:
            logger.warning('JumpCloud user disabled', 'sso',
                username=username,
            )
            return False

        return True

    logger.warning('JumpCloud user not found', 'sso',
        username=username,
    )
    return False
def setup_check():
    """Warn when the pymongo or bson c extensions failed to load.

    Pure-python fallbacks still work, only slower.
    """
    if not pymongo.has_c():
        logger.warning('Failed to load pymongo c bindings')

    if not bson.has_c():
        logger.warning('Failed to load bson c bindings')
def sso_duo_post():
    """Complete the Duo second factor for a pending sso login.

    Consumes the one-time sso token, runs Duo authentication in either
    passcode or push mode and, in push mode, applies the Duo plugin
    authentication before validating the user.
    """
    sso_mode = settings.app.sso
    token = utils.filter_str(flask.request.json.get('token')) or None
    passcode = utils.filter_str(flask.request.json.get('passcode')) or ''

    if sso_mode not in (DUO_AUTH, AZURE_DUO_AUTH, GOOGLE_DUO_AUTH,
            SLACK_DUO_AUTH, SAML_DUO_AUTH, SAML_OKTA_DUO_AUTH,
            SAML_ONELOGIN_DUO_AUTH, RADIUS_DUO_AUTH):
        return flask.abort(404)

    if not token:
        return utils.jsonify({
            'error': TOKEN_INVALID,
            'error_msg': TOKEN_INVALID_MSG,
        }, 401)

    # Single-use token: find_and_modify with remove pops it atomically.
    tokens_collection = mongo.get_collection('sso_tokens')
    doc = tokens_collection.find_and_modify(query={
        '_id': token,
    }, remove=True)

    if not doc or doc['_id'] != token or doc['type'] != DUO_AUTH:
        return utils.jsonify({
            'error': TOKEN_INVALID,
            'error_msg': TOKEN_INVALID_MSG,
        }, 401)

    username = doc['username']
    email = doc['email']
    org_id = doc['org_id']
    groups = set(doc['groups'] or [])

    if settings.app.sso_duo_mode == 'passcode':
        duo_auth = sso.Duo(
            username=username,
            factor=settings.app.sso_duo_mode,
            remote_ip=utils.get_remote_addr(),
            auth_type='Key',
            passcode=passcode,
        )
        valid = duo_auth.authenticate()
        if not valid:
            logger.warning(
                'Duo authentication not valid', 'sso',
                username=username,
            )
            return utils.jsonify({
                'error': PASSCODE_INVALID,
                'error_msg': PASSCODE_INVALID_MSG,
            }, 401)
    else:
        duo_auth = sso.Duo(
            username=username,
            factor=settings.app.sso_duo_mode,
            remote_ip=utils.get_remote_addr(),
            auth_type='Key',
        )
        valid = duo_auth.authenticate()
        if not valid:
            logger.warning(
                'Duo authentication not valid', 'sso',
                username=username,
            )
            return utils.jsonify({
                'error': DUO_FAILED,
                'error_msg': DUO_FAILED_MSG,
            }, 401)

        # Push mode also runs the plugin hook, which may override the
        # org and extend the group set.
        valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
            sso_type='duo',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.warning(
                'Duo plugin authentication not valid', 'sso',
                username=username,
            )
            return flask.abort(401)
        groups = groups | set(groups2 or [])

    return _validate_user(username, email, sso_mode, org_id, groups)
def sso_callback_get():
    """Handle the signed callback from an external SSO provider.

    Verifies the HMAC-SHA512 signature over the query string using the
    secret stored with the one-time state token, then dispatches on the
    token's provider type (SAML, Slack, Google, Azure) to resolve the
    username, org and groups.  If the configured SSO mode also requires
    Duo or Yubico, a follow-up token is minted and the corresponding
    second-factor page is returned; otherwise the user is validated
    immediately.
    """
    sso_mode = settings.app.sso

    if sso_mode not in (AZURE_AUTH, AZURE_DUO_AUTH, AZURE_YUBICO_AUTH,
            GOOGLE_AUTH, GOOGLE_DUO_AUTH, GOOGLE_YUBICO_AUTH,
            SLACK_AUTH, SLACK_DUO_AUTH, SLACK_YUBICO_AUTH,
            SAML_AUTH, SAML_DUO_AUTH, SAML_YUBICO_AUTH,
            SAML_OKTA_AUTH, SAML_OKTA_DUO_AUTH, SAML_OKTA_YUBICO_AUTH,
            SAML_ONELOGIN_AUTH, SAML_ONELOGIN_DUO_AUTH,
            SAML_ONELOGIN_YUBICO_AUTH):
        return flask.abort(405)

    state = flask.request.args.get('state')
    sig = flask.request.args.get('sig')

    # Atomically consume the state token (single use).
    tokens_collection = mongo.get_collection('sso_tokens')
    doc = tokens_collection.find_and_modify(query={
        '_id': state,
    }, remove=True)

    if not doc:
        return flask.abort(404)

    # Signature covers everything before '&sig='; constant-time compare
    # avoids timing side channels.
    query = flask.request.query_string.split('&sig=')[0]
    test_sig = base64.urlsafe_b64encode(hmac.new(str(doc['secret']),
        query, hashlib.sha512).digest())
    if not utils.const_compare(sig, test_sig):
        return flask.abort(401)

    params = urlparse.parse_qs(query)

    if doc.get('type') == SAML_AUTH:
        username = params.get('username')[0]
        email = params.get('email', [None])[0]

        # Org list may be ';' or ',' delimited; blanks dropped, sorted
        # so matching order is deterministic.
        org_names = []
        if params.get('org'):
            org_names_param = params.get('org')[0]
            if ';' in org_names_param:
                org_names = org_names_param.split(';')
            else:
                org_names = org_names_param.split(',')
            org_names = [x for x in org_names if x]
        org_names = sorted(org_names)

        groups = []
        if params.get('groups'):
            groups_param = params.get('groups')[0]
            if ';' in groups_param:
                groups = groups_param.split(';')
            else:
                groups = groups_param.split(',')
            groups = [x for x in groups if x]
        groups = set(groups)

        if not username:
            return flask.abort(406)

        org_id = settings.app.sso_org
        if org_names:
            not_found = False
            for org_name in org_names:
                # NOTE(review): fields=('_id') is a plain string, not a
                # one-element tuple ('_id',) — verify against
                # organization.get_by_name's expectations.
                org = organization.get_by_name(
                    utils.filter_unicode(org_name),
                    fields=('_id'),
                )
                if org:
                    not_found = False
                    org_id = org.id
                    break
                else:
                    not_found = True

            if not_found:
                logger.warning('Supplied org names do not exists',
                    'sso',
                    sso_type=doc.get('type'),
                    user_name=username,
                    user_email=email,
                    org_names=org_names,
                )

        valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
            sso_type='saml',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
            sso_org_names=org_names,
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error('Saml plugin authentication not valid', 'sso',
                username=username,
            )
            return flask.abort(401)
        groups = groups | set(groups2 or [])
    elif doc.get('type') == SLACK_AUTH:
        username = params.get('username')[0]
        email = None
        user_team = params.get('team')[0]
        org_names = params.get('orgs', [''])[0]
        org_names = sorted(org_names.split(','))

        # Slack team must match the configured match list's first entry.
        if user_team != settings.app.sso_match[0]:
            return flask.abort(401)

        not_found = False
        org_id = settings.app.sso_org
        for org_name in org_names:
            org = organization.get_by_name(
                utils.filter_unicode(org_name),
                fields=('_id'),
            )
            if org:
                not_found = False
                org_id = org.id
                break
            else:
                not_found = True

        if not_found:
            logger.warning('Supplied org names do not exists',
                'sso',
                sso_type=doc.get('type'),
                user_name=username,
                user_email=email,
                org_names=org_names,
            )

        valid, org_id_new, groups = sso.plugin_sso_authenticate(
            sso_type='slack',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
            sso_org_names=org_names,
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error('Slack plugin authentication not valid', 'sso',
                username=username,
            )
            return flask.abort(401)
        groups = set(groups or [])
    elif doc.get('type') == GOOGLE_AUTH:
        username = params.get('username')[0]
        email = username

        # Re-verify the Google account server side and fetch its groups.
        valid, google_groups = sso.verify_google(username)
        if not valid:
            return flask.abort(401)

        org_id = settings.app.sso_org

        valid, org_id_new, groups = sso.plugin_sso_authenticate(
            sso_type='google',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error('Google plugin authentication not valid', 'sso',
                username=username,
            )
            return flask.abort(401)
        groups = set(groups or [])

        # 'groups' mode maps Google groups to Pritunl groups; otherwise
        # they are treated as candidate organization names.
        if settings.app.sso_google_mode == 'groups':
            groups = groups | set(google_groups)
        else:
            not_found = False
            google_groups = sorted(google_groups)
            for org_name in google_groups:
                org = organization.get_by_name(
                    utils.filter_unicode(org_name),
                    fields=('_id'),
                )
                if org:
                    not_found = False
                    org_id = org.id
                    break
                else:
                    not_found = True

            if not_found:
                logger.warning('Supplied org names do not exists',
                    'sso',
                    sso_type=doc.get('type'),
                    user_name=username,
                    user_email=email,
                    org_names=google_groups,
                )
    elif doc.get('type') == AZURE_AUTH:
        username = params.get('username')[0]
        email = None

        # Username arrives as '<tenant>/<user>'; the tenant must match
        # the configured Azure directory ID.
        tenant, username = username.split('/', 2)
        if tenant != settings.app.sso_azure_directory_id:
            logger.error('Azure directory ID mismatch', 'sso',
                username=username,
            )
            return flask.abort(401)

        valid, azure_groups = sso.verify_azure(username)
        if not valid:
            return flask.abort(401)

        org_id = settings.app.sso_org

        valid, org_id_new, groups = sso.plugin_sso_authenticate(
            sso_type='azure',
            user_name=username,
            user_email=email,
            remote_ip=utils.get_remote_addr(),
        )
        if valid:
            org_id = org_id_new or org_id
        else:
            logger.error('Azure plugin authentication not valid', 'sso',
                username=username,
            )
            return flask.abort(401)
        groups = set(groups or [])

        if settings.app.sso_azure_mode == 'groups':
            groups = groups | set(azure_groups)
        else:
            not_found = False
            azure_groups = sorted(azure_groups)
            for org_name in azure_groups:
                org = organization.get_by_name(
                    utils.filter_unicode(org_name),
                    fields=('_id'),
                )
                if org:
                    not_found = False
                    org_id = org.id
                    break
                else:
                    not_found = True

            if not_found:
                logger.warning('Supplied org names do not exists',
                    'sso',
                    sso_type=doc.get('type'),
                    user_name=username,
                    user_email=email,
                    org_names=azure_groups,
                )
    else:
        logger.error('Unknown sso type', 'sso',
            sso_type=doc.get('type'),
        )
        return flask.abort(401)

    # Second factor: Duo page when the mode string includes Duo.
    if DUO_AUTH in sso_mode:
        token = utils.generate_secret()

        tokens_collection = mongo.get_collection('sso_tokens')
        tokens_collection.insert({
            '_id': token,
            'type': DUO_AUTH,
            'username': username,
            'email': email,
            'org_id': org_id,
            'groups': list(groups) if groups else None,
            'timestamp': utils.now(),
        })

        duo_page = static.StaticFile(settings.conf.www_path,
            'duo.html', cache=False, gzip=False)

        sso_duo_mode = settings.app.sso_duo_mode
        if sso_duo_mode == 'passcode':
            duo_mode = 'passcode'
        elif sso_duo_mode == 'phone':
            duo_mode = 'phone'
        else:
            duo_mode = 'push'

        body_class = duo_mode
        if settings.app.theme == 'dark':
            body_class += ' dark'

        # Fill the template placeholders in the static page.
        duo_page.data = duo_page.data.replace('<%= body_class %>', body_class)
        duo_page.data = duo_page.data.replace('<%= token %>', token)
        duo_page.data = duo_page.data.replace('<%= duo_mode %>', duo_mode)

        return duo_page.get_response()

    # Second factor: Yubico page when the mode string includes Yubico.
    if YUBICO_AUTH in sso_mode:
        token = utils.generate_secret()

        tokens_collection = mongo.get_collection('sso_tokens')
        tokens_collection.insert({
            '_id': token,
            'type': YUBICO_AUTH,
            'username': username,
            'email': email,
            'org_id': org_id,
            'groups': list(groups) if groups else None,
            'timestamp': utils.now(),
        })

        yubico_page = static.StaticFile(settings.conf.www_path,
            'yubico.html', cache=False, gzip=False)

        if settings.app.theme == 'dark':
            yubico_page.data = yubico_page.data.replace(
                '<body>', '<body class="dark">')
        yubico_page.data = yubico_page.data.replace('<%= token %>', token)

        return yubico_page.get_response()

    return _validate_user(username, email, sso_mode, org_id, groups,
        http_redirect=True)
def on_system_msg(msg):
    """Handle a message from the system channel.

    Only the SHUT_DOWN message is acted on: it is logged and the
    process-wide interrupt flag is raised.  All other messages are
    ignored.
    """
    if msg['message'] != SHUT_DOWN:
        return
    logger.warning('Received shut down event', 'setup')
    set_global_interrupt()
def fill_user():
    """Top up the pre-generated (pooled) user pools for every org.

    Counts existing pooled users plus pooled users already queued for
    creation, then queues new users for the least-filled (org, type)
    pairs, round-robining across orgs so no single org monopolizes the
    queue.
    """
    collection = mongo.get_collection('users')
    # NOTE(review): org_collection is assigned but never used below.
    org_collection = mongo.get_collection('organizations')
    queue_collection = mongo.get_collection('queue')

    orgs = {}
    orgs_count = utils.LeastCommonCounter()
    # Target pool size per pooled-user type.
    type_to_size = {
        CERT_CLIENT_POOL: settings.app.user_pool_size,
        CERT_SERVER_POOL: settings.app.server_user_pool_size,
    }

    # Seed every org with zero counts so empty pools are still visited.
    for org in organization.iter_orgs(type=None):
        orgs[org.id] = org
        orgs_count[org.id, CERT_CLIENT_POOL] = 0
        orgs_count[org.id, CERT_SERVER_POOL] = 0

    # Count pooled users that already exist.
    pools = collection.aggregate([
        {'$match': {
            'type': {'$in': (CERT_CLIENT_POOL, CERT_SERVER_POOL)},
        }},
        {'$project': {
            'org_id': True,
            'type': True,
        }},
        {'$group': {
            '_id': {
                'org_id': '$org_id',
                'type': '$type',
            },
            'count': {'$sum': 1},
        }},
    ])['result']

    for pool in pools:
        orgs_count[pool['_id']['org_id'],
            pool['_id']['type']] += pool['count']

    # Also count pooled users whose creation is still queued, so they
    # are not double-created.
    pools = queue_collection.aggregate([
        {'$match': {
            'type': 'init_user_pooled',
            'user_doc.type': {'$in': (CERT_CLIENT_POOL, CERT_SERVER_POOL)},
        }},
        {'$project': {
            'user_doc.org_id': True,
            'user_doc.type': True,
        }},
        {'$group': {
            '_id': {
                'org_id': '$user_doc.org_id',
                'type': '$user_doc.type',
            },
            'count': {'$sum': 1},
        }},
    ])['result']

    for pool in pools:
        orgs_count[pool['_id']['org_id'],
            pool['_id']['type']] += pool['count']

    new_users = []
    # least_common yields (org, type) pairs ordered emptiest-first; once
    # a pair meets its target, everything after it does too, so break.
    for org_id_user_type, count in orgs_count.least_common():
        org_id, user_type = org_id_user_type
        pool_size = type_to_size[user_type]

        if count >= pool_size:
            break

        org = orgs.get(org_id)
        if not org:
            logger.warning('Pooler cannot find org from user_count',
                'pooler',
                org_id=org_id,
                user_type=user_type,
            )
            continue

        # One sublist per (org, type); roundrobin below interleaves them.
        new_users.append([(org, user_type)] * (pool_size - count))

    # block=False queues the creation instead of doing it inline.
    for org, user_type in utils.roundrobin(*new_users):
        org.new_user(type=user_type, block=False)
def assign_ip_pool_org(self, org_id):
    """Assign VPN addresses from this server's pool to every user of an org.

    First reuses already-reserved but unassigned address documents, then
    hands out fresh addresses from the pool iterator.  Uses a bulk
    upsert when the pymongo build supports it, falling back to
    per-document updates otherwise.  Logs a warning if the pool is
    exhausted before every user is covered.
    """
    org = organization.get_by_id(org_id)
    network_hash = self.server.network_hash
    server_id = self.server.id
    org_id = org.id
    ip_pool_avial = True
    pool_end = False

    network = ipaddress.IPv4Network(self.server.network)
    network_start = self.server.network_start
    network_end = self.server.network_end

    if network_start:
        network_start = ipaddress.IPv4Address(network_start)
    if network_end:
        network_end = ipaddress.IPv4Address(network_end)

    ip_pool = self.get_ip_pool(network, network_start)
    if not ip_pool:
        return

    # Fast-forward the pool iterator past the highest address already
    # recorded for this network/server, so new assignments continue
    # from where the last run stopped.  IndexError: no docs yet.
    try:
        doc = self.collection.find({
            'network': network_hash,
            'server_id': server_id,
        }).sort('_id', pymongo.DESCENDING)[0]
        if doc:
            last_addr = doc['_id']
            for remote_ip_addr in ip_pool:
                if int(remote_ip_addr) == last_addr:
                    break
                if network_end and remote_ip_addr > network_end:
                    break
    except IndexError:
        pass

    if mongo.has_bulk:
        bulk = self.collection.initialize_unordered_bulk_op()
        bulk_empty = True
    else:
        bulk = None
        bulk_empty = None

    for user in org.iter_users(include_pool=True):
        if ip_pool_avial:
            # Try to claim an existing reserved address doc that has no
            # user yet; cheaper than allocating a new address.
            response = self.collection.update({
                'network': network_hash,
                'server_id': server_id,
                'user_id': {'$exists': False},
            }, {'$set': {
                'org_id': org_id,
                'user_id': user.id,
            }})
            if response['updatedExisting']:
                continue
            # No free reserved docs remain; allocate fresh from here on.
            ip_pool_avial = False

        try:
            remote_ip_addr = ip_pool.next()
            # Treat passing network_end the same as pool exhaustion.
            if network_end and remote_ip_addr > network_end:
                raise StopIteration()
        except StopIteration:
            pool_end = True
            break
        doc_id = int(remote_ip_addr)

        spec = {
            '_id': doc_id,
        }
        doc = {'$set': {
            '_id': doc_id,
            'network': network_hash,
            'server_id': server_id,
            'org_id': org_id,
            'user_id': user.id,
            'address': '%s/%s' % (remote_ip_addr, network.prefixlen),
        }}

        if bulk:
            bulk.find(spec).upsert().update(doc)
            bulk_empty = False
        else:
            self.collection.update(spec, doc, upsert=True)

    if bulk and not bulk_empty:
        bulk.execute()

    if pool_end:
        logger.warning('Failed to assign ip addresses ' +
            'to org, ip pool empty', 'server',
            org_id=org_id,
        )
def update():
    """Refresh subscription state from the Pritunl subscription server.

    With no license key, clears all local subscription fields.  With a
    key, queries the subscription endpoint (one retry on failure),
    stores the result in ``settings.local``, persists a changed plan,
    and emits subscription events when the stored active/plan document
    actually changed.
    """
    license = settings.app.license
    collection = mongo.get_collection('settings')

    if not license:
        settings.local.sub_active = False
        settings.local.sub_status = None
        settings.local.sub_plan = None
        settings.local.sub_amount = None
        settings.local.sub_period_end = None
        settings.local.sub_trial_end = None
        settings.local.sub_cancel_at_period_end = None
        settings.local.sub_url_key = None
    else:
        # Two attempts: first failure sleeps and retries, second failure
        # clears the subscription state.
        for i in xrange(2):
            try:
                response = utils.request.get(
                    'https://app.pritunl.com/subscription',
                    json_data={
                        'license': license,
                        'version': settings.local.version_int,
                    },
                    timeout=max(settings.app.http_request_timeout, 10),
                )

                # License key invalid
                if response.status_code == 470:
                    logger.warning('License key is invalid', 'subscription')
                    update_license(None)
                    # Re-run with the license removed, then stop.
                    update()
                    return

                if response.status_code == 473:
                    raise ValueError(('Version %r not recognized by ' +
                        'subscription server') % settings.local.version_int)

                data = response.json()

                settings.local.sub_active = data['active']
                settings.local.sub_status = data['status']
                settings.local.sub_plan = data['plan']
                settings.local.sub_amount = data['amount']
                settings.local.sub_period_end = data['period_end']
                settings.local.sub_trial_end = data['trial_end']
                settings.local.sub_cancel_at_period_end = data[
                    'cancel_at_period_end']
                settings.local.sub_url_key = data.get('url_key')
                settings.local.sub_styles[data['plan']] = data['styles']
            except:
                if i < 1:
                    # NOTE(review): second argument looks like it should be
                    # the plain 'subscription' category — confirm.
                    logger.exception('Failed to check subscription status',
                        'subscription, retrying...')
                    time.sleep(1)
                    continue
                logger.exception('Failed to check subscription status',
                    'subscription')
                settings.local.sub_active = False
                settings.local.sub_status = None
                settings.local.sub_plan = None
                settings.local.sub_amount = None
                settings.local.sub_period_end = None
                settings.local.sub_trial_end = None
                settings.local.sub_cancel_at_period_end = None
                settings.local.sub_url_key = None
            break

    # Persist a changed (non-empty) plan to the shared settings.
    if settings.app.license_plan != settings.local.sub_plan and \
            settings.local.sub_plan:
        settings.app.license_plan = settings.local.sub_plan
        settings.commit()

    # Only matches when active or plan actually differ, so events fire
    # once per transition.
    response = collection.update({
        '_id': 'subscription',
        '$or': [
            {'active': {'$ne': settings.local.sub_active}},
            {'plan': {'$ne': settings.local.sub_plan}},
        ],
    }, {'$set': {
        'active': settings.local.sub_active,
        'plan': settings.local.sub_plan,
    }})
    if response['updatedExisting']:
        if settings.local.sub_active:
            if settings.local.sub_plan == 'premium':
                event.Event(type=SUBSCRIPTION_PREMIUM_ACTIVE)
            elif settings.local.sub_plan == 'enterprise':
                event.Event(type=SUBSCRIPTION_ENTERPRISE_ACTIVE)
            else:
                event.Event(type=SUBSCRIPTION_NONE_INACTIVE)
        else:
            if settings.local.sub_plan == 'premium':
                event.Event(type=SUBSCRIPTION_PREMIUM_INACTIVE)
            elif settings.local.sub_plan == 'enterprise':
                event.Event(type=SUBSCRIPTION_ENTERPRISE_INACTIVE)
            else:
                event.Event(type=SUBSCRIPTION_NONE_INACTIVE)
def assign_ip_pool_org(self, org_id):
    """Assign VPN addresses from this server's pool to every user of an org.

    Bulk-write variant: reuses already-reserved but unassigned address
    documents first, then hands out fresh addresses from the pool
    iterator via an unordered bulk upsert.  Logs a warning if the pool
    is exhausted before every user is covered.
    """
    org = organization.get_by_id(org_id)
    network_hash = self.server.network_hash
    server_id = self.server.id
    org_id = org.id
    ip_pool_avial = True
    pool_end = False

    network = ipaddress.IPv4Network(self.server.network)
    network_start = self.server.network_start
    network_end = self.server.network_end

    if network_start:
        network_start = ipaddress.IPv4Address(network_start)
    if network_end:
        network_end = ipaddress.IPv4Address(network_end)

    ip_pool = self.get_ip_pool(network, network_start)
    if not ip_pool:
        return

    # Fast-forward the pool iterator past the highest address already
    # recorded, so new assignments continue where the last run stopped.
    try:
        doc = self.collection.find({
            'network': network_hash,
            'server_id': server_id,
        }).sort('_id', pymongo.DESCENDING)[0]
        if doc:
            last_addr = doc['_id']
            for remote_ip_addr in ip_pool:
                if int(remote_ip_addr) == last_addr:
                    break
                if network_end and remote_ip_addr > network_end:
                    break
    except IndexError:
        pass

    bulk = self.collection.initialize_unordered_bulk_op()
    bulk_empty = True

    for user in org.iter_users(include_pool=True):
        if ip_pool_avial:
            # Claim an existing reserved doc with no user attached.
            response = self.collection.update({
                'network': network_hash,
                'server_id': server_id,
                'user_id': {'$exists': False},
            }, {'$set': {
                'org_id': org_id,
                'user_id': user.id,
            }})
            if response['updatedExisting']:
                continue
            ip_pool_avial = False

        try:
            remote_ip_addr = ip_pool.next()
            # Passing network_end counts as pool exhaustion.
            if network_end and remote_ip_addr > network_end:
                raise StopIteration()
        except StopIteration:
            pool_end = True
            break
        doc_id = int(remote_ip_addr)

        spec = {
            '_id': doc_id,
        }
        doc = {'$set': {
            '_id': doc_id,
            'network': network_hash,
            'server_id': server_id,
            'org_id': org_id,
            'user_id': user.id,
            'address': '%s/%s' % (remote_ip_addr, network.prefixlen),
        }}

        bulk.find(spec).upsert().update(doc)
        bulk_empty = False

    if not bulk_empty:
        bulk.execute()

    if pool_end:
        logger.warning('Failed to assign ip addresses ' +
            'to org, ip pool empty', 'server',
            org_id=org_id,
        )
def update():
    """Refresh subscription state from the Pritunl subscription server.

    Variant using ``requests`` directly and sending the installation id.
    With no license key, clears all local subscription fields.  With a
    key, queries the subscription endpoint (one retry on failure),
    stores the result in ``settings.local``, persists a changed plan,
    and emits subscription events when the stored active/plan document
    actually changed.

    Returns False when the license key was rejected (470), True
    otherwise.
    """
    license = settings.app.license
    collection = mongo.get_collection('settings')

    # Lazily generate a stable installation id on first run.
    if not settings.app.id:
        settings.app.id = utils.random_name()
        settings.commit()

    if not license:
        settings.local.sub_active = False
        settings.local.sub_status = None
        settings.local.sub_plan = None
        settings.local.sub_amount = None
        settings.local.sub_period_end = None
        settings.local.sub_trial_end = None
        settings.local.sub_cancel_at_period_end = None
        settings.local.sub_url_key = None
    else:
        # Two attempts: first failure sleeps and retries, second failure
        # clears the subscription state.
        for i in xrange(2):
            try:
                response = requests.get(
                    'https://app.pritunl.com/subscription',
                    json={
                        'id': settings.app.id,
                        'license': license,
                        'version': settings.local.version_int,
                    },
                    timeout=max(settings.app.http_request_timeout, 10),
                )

                # License key invalid
                if response.status_code == 470:
                    logger.warning('License key is invalid', 'subscription')
                    update_license(None)
                    # Re-run with the license removed, report failure.
                    update()
                    return False

                if response.status_code == 473:
                    raise ValueError(
                        ('Version %r not recognized by ' +
                        'subscription server') % settings.local.version_int)

                data = response.json()

                settings.local.sub_active = data['active']
                settings.local.sub_status = data['status']
                settings.local.sub_plan = data['plan']
                settings.local.sub_amount = data['amount']
                settings.local.sub_period_end = data['period_end']
                settings.local.sub_trial_end = data['trial_end']
                settings.local.sub_cancel_at_period_end = data[
                    'cancel_at_period_end']
                settings.local.sub_url_key = data.get('url_key')
                settings.local.sub_styles[data['plan']] = data['styles']
            except:
                if i < 1:
                    # NOTE(review): second argument looks like it should be
                    # the plain 'subscription' category — confirm.
                    logger.exception('Failed to check subscription status',
                        'subscription, retrying...')
                    time.sleep(1)
                    continue
                logger.exception('Failed to check subscription status',
                    'subscription')
                settings.local.sub_active = False
                settings.local.sub_status = None
                settings.local.sub_plan = None
                settings.local.sub_amount = None
                settings.local.sub_period_end = None
                settings.local.sub_trial_end = None
                settings.local.sub_cancel_at_period_end = None
                settings.local.sub_url_key = None
            break

    # Persist a changed (non-empty) plan to the shared settings.
    if settings.app.license_plan != settings.local.sub_plan and \
            settings.local.sub_plan:
        settings.app.license_plan = settings.local.sub_plan
        settings.commit()

    # Only matches when active or plan actually differ, so events fire
    # once per transition.
    response = collection.update({
        '_id': 'subscription',
        '$or': [
            {'active': {'$ne': settings.local.sub_active}},
            {'plan': {'$ne': settings.local.sub_plan}},
        ],
    }, {'$set': {
        'active': settings.local.sub_active,
        'plan': settings.local.sub_plan,
    }})
    if response['updatedExisting']:
        if settings.local.sub_active:
            if settings.local.sub_plan == 'premium':
                event.Event(type=SUBSCRIPTION_PREMIUM_ACTIVE)
            elif settings.local.sub_plan == 'enterprise':
                event.Event(type=SUBSCRIPTION_ENTERPRISE_ACTIVE)
            else:
                event.Event(type=SUBSCRIPTION_NONE_INACTIVE)
        else:
            if settings.local.sub_plan == 'premium':
                event.Event(type=SUBSCRIPTION_PREMIUM_INACTIVE)
            elif settings.local.sub_plan == 'enterprise':
                event.Event(type=SUBSCRIPTION_ENTERPRISE_INACTIVE)
            else:
                event.Event(type=SUBSCRIPTION_NONE_INACTIVE)

    return True
def verify_radius(username, password):
    """Authenticate a user against the configured RADIUS server(s).

    Tries each comma-separated host in order; connection errors and
    rejections fail over to the next host, and only the last host's
    failure is surfaced (raised or returned).  On accept, extracts org
    names and group names from vendor attribute 97 in the reply.

    Returns (True, org_names, groups) on accept or
    (False, None, None) on reject.
    """
    hosts = settings.app.sso_radius_host.split(',')

    for i, host in enumerate(hosts):
        # Optional ':port' suffix per host.
        # NOTE(review): default port 1645 is the legacy RADIUS auth
        # port (modern standard is 1812) — confirm intentional.
        host = host.split(':')
        if len(host) > 1:
            port = int(host[1])
        else:
            port = 1645
        host = host[0]

        conn = client.Client(
            server=host,
            authport=port,
            secret=settings.app.sso_radius_secret.encode(),
            dict=dictionary.Dictionary(StringIO.StringIO(RADIUS_DICTONARY)),
        )

        if settings.app.sso_radius_timeout:
            conn.timeout = settings.app.sso_radius_timeout

        req = conn.CreateAuthPacket(
            code=packet.AccessRequest,
            User_Name=(settings.app.sso_radius_prefix or '') +
                username.encode(),
        )
        req['User-Password'] = req.PwCrypt(password.encode())

        try:
            reply = conn.SendPacket(req)
        except:
            # Re-raise only if this was the last host; otherwise fail
            # over to the next one.
            if i == len(hosts) - 1:
                raise
            else:
                continue

        if reply.code != packet.AccessAccept:
            if i == len(hosts) - 1:
                logger.warning('Radius server rejected authentication',
                    'sso',
                    username=username,
                    reply_code=reply.code,
                )
                return False, None, None
            else:
                continue

        # Accepted — stop trying further hosts.
        break

    # Vendor attribute 97: sub-id 0 carries org names, sub-id 1 carries
    # group names; a bare 97 lookup is the legacy fallback for orgs.
    # Best-effort: missing attributes are ignored.
    org_names = []
    try:
        org_names = reply.get((97, 0)) or []
    except:
        pass

    group_names = []
    try:
        group_names = reply.get((97, 1)) or []
    except:
        pass

    org_names2 = []
    try:
        org_names2 = reply.get(97) or []
    except:
        pass

    org_names = org_names or org_names2

    groups = set()
    for group in group_names:
        groups.add(group)

    return True, org_names, groups
def setup_mongo():
    """Connect to MongoDB and bootstrap collections, indexes and secrets.

    Blocks until a connection succeeds (logging at most once per 30s),
    binds all named collections into ``mongo.collections``, creates the
    capped messages/log collections on first run, ensures every index,
    creates the default administrator if none exists, and generates the
    cookie secret and server API key on first run.
    """
    if not pymongo.has_c():
        logger.warning('Failed to load pymongo c bindings')

    if not bson.has_c():
        logger.warning('Failed to load bson c bindings')

    prefix = settings.conf.mongodb_collection_prefix or ''

    # Retry forever; throttle the error log to one entry per ~30s.
    last_error = time.time() - 24
    while True:
        try:
            client = pymongo.MongoClient(settings.conf.mongodb_url,
                connectTimeoutMS=2000)
            break
        except pymongo.errors.ConnectionFailure:
            time.sleep(0.5)
            if time.time() - last_error > 30:
                last_error = time.time()
                logger.exception('Error connecting to mongodb server')

    database = client.get_default_database()
    cur_collections = database.collection_names()

    # Capped collection backing the pub/sub message channel.
    if prefix + 'messages' not in cur_collections:
        database.create_collection(prefix + 'messages', capped=True,
            size=100000)

    mongo.collections.update({
        'transaction': getattr(database, prefix + 'transaction'),
        'queue': getattr(database, prefix + 'queue'),
        'task': getattr(database, prefix + 'task'),
        'system': getattr(database, prefix + 'system'),
        'messages': getattr(database, prefix + 'messages'),
        'administrators': getattr(database, prefix + 'administrators'),
        'users': getattr(database, prefix + 'users'),
        'users_key_link': getattr(database, prefix + 'users_key_link'),
        'organizations': getattr(database, prefix + 'organizations'),
        'hosts': getattr(database, prefix + 'hosts'),
        'hosts_usage': getattr(database, prefix + 'hosts_usage'),
        'servers': getattr(database, prefix + 'servers'),
        'servers_output': getattr(database, prefix + 'servers_output'),
        'servers_bandwidth': getattr(database,
            prefix + 'servers_bandwidth'),
        'servers_ip_pool': getattr(database, prefix + 'servers_ip_pool'),
        'dh_params': getattr(database, prefix + 'dh_params'),
        'auth_nonces': getattr(database, prefix + 'auth_nonces'),
        'auth_limiter': getattr(database, prefix + 'auth_limiter'),
        'otp': getattr(database, prefix + 'otp'),
        'otp_cache': getattr(database, prefix + 'otp_cache'),
    })

    # Capped log collection sized from the configured entry limit.
    if prefix + 'log_entries' not in cur_collections:
        log_limit = settings.app.log_entry_limit
        database.create_collection(prefix + 'log_entries', capped=True,
            size=log_limit * 256 * 2, max=log_limit)
    mongo.collections.update({
        'log_entries': getattr(database, prefix + 'log_entries'),
    })

    # Tag each collection with its logical name for logging/debugging.
    for collection_name, collection in mongo.collections.items():
        collection.name_str = collection_name

    settings.init()

    mongo.collections['transaction'].ensure_index('lock_id', unique=True)
    mongo.collections['transaction'].ensure_index([
        ('ttl_timestamp', pymongo.ASCENDING),
        ('state', pymongo.ASCENDING),
        ('priority', pymongo.DESCENDING),
    ])
    mongo.collections['queue'].ensure_index('runner_id')
    mongo.collections['queue'].ensure_index('ttl_timestamp')
    mongo.collections['task'].ensure_index('type', unique=True)
    mongo.collections['task'].ensure_index('ttl_timestamp')
    mongo.collections['log_entries'].ensure_index([
        ('timestamp', pymongo.DESCENDING),
    ])
    mongo.collections['messages'].ensure_index('channel')
    mongo.collections['administrators'].ensure_index('username',
        unique=True)
    mongo.collections['users'].ensure_index([
        ('type', pymongo.ASCENDING),
        ('org_id', pymongo.ASCENDING),
    ])
    mongo.collections['users'].ensure_index([
        ('org_id', pymongo.ASCENDING),
        ('name', pymongo.ASCENDING),
    ])
    mongo.collections['users_key_link'].ensure_index('key_id')
    mongo.collections['users_key_link'].ensure_index('short_id',
        unique=True)
    mongo.collections['organizations'].ensure_index('type')
    mongo.collections['hosts'].ensure_index('name')
    mongo.collections['hosts_usage'].ensure_index([
        ('host_id', pymongo.ASCENDING),
        ('timestamp', pymongo.ASCENDING),
    ])
    mongo.collections['servers'].ensure_index('name')
    mongo.collections['servers'].ensure_index('ping_timestamp')
    mongo.collections['servers_output'].ensure_index([
        ('server_id', pymongo.ASCENDING),
        ('timestamp', pymongo.ASCENDING),
    ])
    mongo.collections['servers_bandwidth'].ensure_index([
        ('server_id', pymongo.ASCENDING),
        ('period', pymongo.ASCENDING),
        ('timestamp', pymongo.ASCENDING),
    ])
    mongo.collections['servers_ip_pool'].ensure_index([
        ('server_id', pymongo.ASCENDING),
        ('user_id', pymongo.ASCENDING),
    ])
    mongo.collections['servers_ip_pool'].ensure_index('user_id')
    mongo.collections['dh_params'].ensure_index('dh_param_bits')
    mongo.collections['auth_nonces'].ensure_index([
        ('token', pymongo.ASCENDING),
        ('nonce', pymongo.ASCENDING),
    ], unique=True)

    # TTL indexes: Mongo expires these documents automatically.
    mongo.collections['users_key_link'].ensure_index('timestamp',
        expireAfterSeconds=settings.app.key_link_timeout)
    mongo.collections['auth_nonces'].ensure_index('timestamp',
        expireAfterSeconds=settings.app.auth_time_window * 2.1)
    mongo.collections['auth_limiter'].ensure_index('timestamp',
        expireAfterSeconds=settings.app.auth_limiter_ttl)
    mongo.collections['otp'].ensure_index('timestamp',
        expireAfterSeconds=120)
    mongo.collections['otp_cache'].ensure_index('timestamp',
        expireAfterSeconds=settings.user.otp_cache_ttl)

    # First run: create the default admin account.
    if not auth.Administrator.collection.find_one():
        auth.Administrator(
            username=DEFAULT_USERNAME,
            password=DEFAULT_PASSWORD,
            default=True,
        ).commit()

    # Generate and persist the session cookie secret on first run.
    secret_key = settings.app.cookie_secret
    if not secret_key:
        secret_key = re.sub(r'[\W_]+', '',
            base64.b64encode(os.urandom(128)))[:64]
        settings.app.cookie_secret = secret_key
        settings.commit()
    app.app.secret_key = secret_key.encode()

    # Generate and persist the inter-server API key on first run.
    server_api_key = settings.app.server_api_key
    if not server_api_key:
        server_api_key = re.sub(r'[\W_]+', '',
            base64.b64encode(os.urandom(128)))[:64]
        settings.app.server_api_key = server_api_key
        settings.commit()
def generate_iptables_rules(self):
    """Build the iptables rule argument lists for this server instance.

    Reads the kernel routing table (``route -n``) to map destination
    networks to interfaces, then generates ACCEPT rules for the VPN
    interface, NAT MASQUERADE rules for each local network (and each
    linked server's network), and stateful FORWARD rules per interface.
    Every rule is tagged with a 'pritunl_<server_id>' comment so it can
    be found and removed later.

    Returns a list of argument lists (one per iptables rule).
    Raises IptablesError if no default route exists.
    """
    rules = []

    try:
        routes_output = utils.check_output_logged(['route', '-n'])
    except subprocess.CalledProcessError:
        logger.exception('Failed to get IP routes', 'server',
            server_id=self.server.id,
        )
        raise

    # Map destination network -> interface name from `route -n` output
    # (column 0 = destination, column 7 = iface).
    routes = {}
    for line in routes_output.splitlines():
        line_split = line.split()
        if len(line_split) < 8 or not re.match(IP_REGEX, line_split[0]):
            continue
        routes[line_split[0]] = line_split[7]

    if '0.0.0.0' not in routes:
        raise IptablesError('Failed to find default network interface', {
            'server_id': self.server.id,
        })
    default_interface = routes['0.0.0.0']

    rules.append(['INPUT', '-i', self.interface, '-j', 'ACCEPT'])
    rules.append(['FORWARD', '-i', self.interface, '-j', 'ACCEPT'])

    interfaces = set()
    # No local networks configured means NAT everything (default route).
    for network_address in self.server.local_networks or ['0.0.0.0/0']:
        args_base = ['POSTROUTING', '-t', 'nat']
        network = utils.parse_network(network_address)[0]

        if network not in routes:
            logger.warning('Failed to find interface for local ' + \
                'network route, using default route', 'server',
                server_id=self.server.id,
            )
            interface = default_interface
        else:
            interface = routes[network]
        interfaces.add(interface)

        # The catch-all network needs no destination filter.
        if network != '0.0.0.0':
            args_base += ['-d', network_address]

        args_base += [
            '-o', interface,
            '-j', 'MASQUERADE',
        ]

        # NAT this server's VPN subnet and every linked server's subnet.
        rules.append(args_base + ['-s', self.server.network])

        for link_svr in self.server.iter_links(fields=('_id', 'network')):
            rules.append(args_base + ['-s', link_svr.network])

    # Allow established/related traffic both directions per interface.
    for interface in interfaces:
        rules.append([
            'FORWARD',
            '-i', interface,
            '-o', self.interface,
            '-m', 'state',
            '--state', 'ESTABLISHED,RELATED',
            '-j', 'ACCEPT',
        ])
        rules.append([
            'FORWARD',
            '-i', self.interface,
            '-o', interface,
            '-m', 'state',
            '--state', 'ESTABLISHED,RELATED',
            '-j', 'ACCEPT',
        ])

    # Comment tag identifies rules owned by this server for cleanup.
    extra_args = [
        '-m', 'comment',
        '--comment', 'pritunl_%s' % self.server.id,
    ]

    if settings.local.iptables_wait:
        extra_args.append('--wait')

    rules = [x + extra_args for x in rules]

    return rules
def task(self):
    """Periodic server-state maintenance task (cooperative generator).

    Each ``yield`` hands control back to the task scheduler between
    phases.  Phases: (1) prune server instance documents whose ping has
    expired, (2) snapshot online hosts and their availability groups,
    (3) find online servers running fewer instances than their replica
    count, rebalance their availability group toward the group with the
    most eligible hosts, and publish 'start' messages to recover up to
    three servers per run.
    """
    if settings.app.demo_mode:
        return

    try:
        timestamp = utils.now()
        # Instances whose last ping predates this are considered dead.
        timestamp_spec = timestamp - datetime.timedelta(
            seconds=settings.vpn.server_ping_ttl)

        docs = self.server_collection.find({
            'instances.ping_timestamp': {'$lt': timestamp_spec},
        }, {
            '_id': True,
            'instances': True,
        })

        yield

        # Phase 1: pull expired instances and decrement the counter.
        for doc in docs:
            for instance in doc['instances']:
                if instance['ping_timestamp'] < timestamp_spec:
                    logger.warning('Removing instance doc', 'server',
                        server_id=doc['_id'],
                        instance_id=instance['instance_id'],
                        cur_timestamp=timestamp,
                        ttl_timestamp=timestamp_spec,
                        ping_timestamp=instance['ping_timestamp'],
                    )

                    self.server_collection.update({
                        '_id': doc['_id'],
                        'instances.instance_id': instance['instance_id'],
                    }, {
                        '$pull': {
                            'instances': {
                                'instance_id': instance['instance_id'],
                            },
                        },
                        '$inc': {
                            'instances_count': -1,
                        },
                    })

        yield

        # Phase 2: map online host id -> availability group.
        docs = self.host_collection.find({
            'status': ONLINE,
        }, {
            '_id': True,
            'availability_group': True,
        })

        yield

        hosts_group = {}
        for doc in docs:
            hosts_group[doc['_id']] = doc.get('availability_group', DEFAULT)

        yield

        # Phase 3: online servers missing instances (replica_count
        # minus instances_count > 0) that have been started long enough.
        response = self.server_collection.aggregate([
            {'$match': {
                'status': ONLINE,
                'start_timestamp': {'$lt': timestamp_spec},
            }},
            {'$project': {
                '_id': True,
                'hosts': True,
                'instances': True,
                'replica_count': True,
                'availability_group': True,
                'offline_instances_count': {
                    '$subtract': [
                        '$replica_count',
                        '$instances_count',
                    ],
                }
            }},
            {'$match': {
                'offline_instances_count': {'$gt': 0},
            }},
        ])

        yield

        recover_count = 0

        for doc in response:
            cur_avail_group = doc.get('availability_group', DEFAULT)

            hosts_set = set(doc['hosts'])

            # Pick the availability group holding the most of this
            # server's online hosts; ties prefer the current group.
            group_best = None
            group_len_max = 0
            server_groups = collections.defaultdict(set)

            for hst in hosts_set:
                avail_zone = hosts_group.get(hst)
                if not avail_zone:
                    continue

                server_groups[avail_zone].add(hst)
                group_len = len(server_groups[avail_zone])

                if group_len > group_len_max:
                    group_len_max = group_len
                    group_best = avail_zone
                elif group_len == group_len_max and \
                        avail_zone == cur_avail_group:
                    group_best = avail_zone

            if group_best and cur_avail_group != group_best:
                logger.info('Rebalancing server availability group',
                    'server',
                    server_id=doc['_id'],
                    current_availability_group=cur_avail_group,
                    new_availability_group=group_best,
                )

                # Reset instance state so the server restarts cleanly
                # in the new group.
                self.server_collection.update({
                    '_id': doc['_id'],
                    'status': ONLINE,
                }, {'$set': {
                    'instances': [],
                    'instances_count': 0,
                    'availability_group': group_best,
                }})

                messenger.publish('servers', 'rebalance', extra={
                    'server_id': doc['_id'],
                    'availability_group': group_best,
                })

                prefered_hosts = server_groups[group_best]
            else:
                prefered_hosts = server_groups[cur_avail_group]

            # Exclude hosts that already run an instance.
            active_hosts = set([x['host_id'] for x in doc['instances']])
            prefered_hosts = list(prefered_hosts - active_hosts)
            if not prefered_hosts:
                continue

            # Cap recoveries per task run to avoid start storms.
            if recover_count >= 3:
                continue
            recover_count += 1

            logger.info('Recovering server state', 'server',
                server_id=doc['_id'],
                prefered_hosts=prefered_hosts,
            )

            messenger.publish('servers', 'start', extra={
                'server_id': doc['_id'],
                'send_events': True,
                'prefered_hosts': host.get_prefered_hosts(
                    prefered_hosts, doc['replica_count'])
            })
    except GeneratorExit:
        raise
    except:
        logger.exception('Error checking server states', 'tasks')
def auth_duo(username, strong=False, ipaddr=None, type=None, info=None,
        factor='push', thread=True):
    """Authenticate a user against the Duo auth API.

    Returns a ``(allow, org_id)`` tuple. ``org_id`` is always None in the
    direct (non-threaded) path.

    When ``factor == 'push'`` and ``thread`` is True, two daemon threads
    race: a push attempt starts immediately, while a phone-call backup
    waits ``ssu_duo_backup_delay`` seconds before trying. The shared
    ``state`` dict is guarded by ``state_lock``; whichever thread wins
    sets ``interrupt`` so the loser abandons its result. The main thread
    blocks on ``state_event`` and re-raises any captured exception.

    Raises:
        UserDuoPushUnavailable: push factor requested but not enrolled.
        InvalidUser: Duo reports the user unenrolled (code 40002) and
            unavailable users may not be skipped.
    """
    if factor == 'push' and thread:
        state = {
            'interrupt': False,
            'valid': None,
            'org_id': None,
            'exception': None,
        }
        state_lock = threading.Lock()
        state_event = threading.Event()

        def phone_thread():
            # Backup factor: only fires if push hasn't finished within
            # the configured delay.
            start = time.time()
            backup_delay = settings.app.ssu_duo_backup_delay

            while True:
                if state['interrupt']:
                    # Push thread already produced a result.
                    return
                if time.time() - start >= backup_delay:
                    break
                time.sleep(0.1)

            try:
                valid, org_id = auth_duo(
                    username,
                    strong=strong,
                    ipaddr=ipaddr,
                    type=type,
                    info=info,
                    factor='phone',
                    thread=False,
                )
            except Exception as error:
                state_lock.acquire()
                try:
                    if state['interrupt']:
                        return
                    state['interrupt'] = True
                    state['exception'] = error
                    state_event.set()
                finally:
                    state_lock.release()
                return

            if not valid:
                # A phone denial does not interrupt the push attempt.
                return

            state_lock.acquire()
            try:
                if state['interrupt']:
                    return
                state['interrupt'] = True
                state['valid'] = valid
                state['org_id'] = org_id
                state_event.set()
            finally:
                state_lock.release()

        def push_thread():
            try:
                valid, org_id = auth_duo(
                    username,
                    strong=strong,
                    ipaddr=ipaddr,
                    type=type,
                    info=info,
                    factor='push',
                    thread=False,
                )
            except UserDuoPushUnavailable:
                # Push not enrolled: claim the result slot, then fall
                # back to a phone call from this thread.
                state_lock.acquire()
                try:
                    if state['interrupt']:
                        return
                    state['interrupt'] = True
                finally:
                    state_lock.release()

                try:
                    valid, org_id = auth_duo(
                        username,
                        strong=strong,
                        ipaddr=ipaddr,
                        type=type,
                        info=info,
                        factor='phone',
                        thread=False,
                    )
                except Exception as error:
                    # NOTE(review): these writes skip state_lock; safe
                    # only because interrupt was claimed above -- confirm.
                    state['interrupt'] = True
                    state['exception'] = error
                    state_event.set()
                    return

                state['valid'] = valid
                state['org_id'] = org_id
                state_event.set()
                return
            except Exception as error:
                state_lock.acquire()
                try:
                    if state['interrupt']:
                        return
                    state['interrupt'] = True
                    state['exception'] = error
                    state_event.set()
                finally:
                    state_lock.release()
                return

            state_lock.acquire()
            try:
                if state['interrupt']:
                    return
                state['interrupt'] = True
                state['valid'] = valid
                state['org_id'] = org_id
                state_event.set()            # push result wins the race
            finally:
                state_lock.release()

        thread = threading.Thread(target=phone_thread)
        thread.daemon = True
        thread.start()
        thread = threading.Thread(target=push_thread)
        thread.daemon = True
        thread.start()

        # Block until either thread posts a result or an exception.
        state_event.wait()

        if state['exception']:
            raise state['exception']

        return state['valid'], state['org_id']

    # Direct (non-threaded) Duo API call below.
    params = {
        'username': username,
        'factor': factor,
        'device': 'auto',
    }

    if ipaddr:
        params['ipaddr'] = ipaddr

    if factor == 'push':
        if type:
            params['type'] = type
        if info:
            params['pushinfo'] = urllib.urlencode(info)

    headers = _sign('POST', '/auth/v2/auth', params)
    url = 'https://%s/auth/v2/auth' % settings.app.sso_duo_host

    try:
        response = requests.post(url,
            headers=headers,
            params=params,
            timeout=settings.app.sso_timeout,
        )
    except httplib.HTTPException:
        return False, None

    data = response.json()
    resp_data = data.get('response')
    if resp_data and resp_data.get('result') == 'allow':
        if strong and resp_data.get('status') == 'bypass':
            # Bypass means Duo didn't actually challenge the user; only
            # acceptable when the SAML+Duo skip setting allows it.
            if SAML_DUO_AUTH in settings.app.sso and \
                    settings.app.sso_saml_duo_skip_unavailable:
                logger.warning('Skipping duo auth with bypass', 'sso',
                    username=username,
                )
                allow = True
            else:
                allow = False
                logger.error('Cannot use Duo bypass with profile login',
                    'sso',
                    data=resp_data,
                )
        else:
            allow = True
    elif data.get('code') == 40002:
        # 40002: user not enrolled / factor unavailable.
        if factor == 'push':
            raise UserDuoPushUnavailable('Duo push is unavailable')

        if SAML_DUO_AUTH in settings.app.sso and \
                settings.app.sso_saml_duo_skip_unavailable:
            logger.warning('Skipping duo auth for unavailable user', 'sso',
                username=username,
            )
            allow = True
        else:
            raise InvalidUser('Invalid username')
    else:
        allow = False
        logger.error('Duo authentication failure', 'sso',
            data=data,
        )

    return allow, None
def generate_iptables_rules(self):
    """Build the iptables rule argument lists for this VPN server.

    Reads the kernel routing table to map destination networks to
    interfaces, then emits ACCEPT rules for the VPN interface, NAT
    masquerade rules for each local network (and linked servers), and
    stateful FORWARD rules between the VPN interface and every egress
    interface. Each rule is tagged with a pritunl comment so it can be
    identified later.

    Raises:
        subprocess.CalledProcessError: `route -n` failed.
        IptablesError: no default (0.0.0.0) route was found.
    """
    try:
        route_table = utils.check_output_logged(['route', '-n'])
    except subprocess.CalledProcessError:
        logger.exception('Failed to get IP routes', 'server',
            server_id=self.server.id,
        )
        raise

    # destination network -> egress interface, parsed from `route -n`
    # output (interface is the 8th column).
    iface_by_dest = {}
    for route_line in route_table.splitlines():
        fields = route_line.split()
        if len(fields) >= 8 and re.match(IP_REGEX, fields[0]):
            iface_by_dest[fields[0]] = fields[7]

    default_iface = iface_by_dest.get('0.0.0.0')
    if default_iface is None:
        raise IptablesError('Failed to find default network interface', {
            'server_id': self.server.id,
        })

    rules = [
        ['INPUT', '-i', self.interface, '-j', 'ACCEPT'],
        ['FORWARD', '-i', self.interface, '-j', 'ACCEPT'],
    ]

    egress_ifaces = set()
    for network_address in self.server.local_networks or ['0.0.0.0/0']:
        nat_args = ['POSTROUTING', '-t', 'nat']
        dest_network = utils.parse_network(network_address)[0]

        egress_iface = iface_by_dest.get(dest_network)
        if egress_iface is None:
            # No specific route for this local network; fall back to the
            # default route's interface.
            logger.warning('Failed to find interface for local '
                'network route, using default route', 'server',
                server_id=self.server.id,
            )
            egress_iface = default_iface
        egress_ifaces.add(egress_iface)

        # The catch-all network masquerades everything, so no -d filter.
        if dest_network != '0.0.0.0':
            nat_args += ['-d', network_address]

        nat_args += ['-o', egress_iface, '-j', 'MASQUERADE']

        # NAT traffic sourced from this server and every linked server.
        rules.append(nat_args + ['-s', self.server.network])
        for link_svr in self.server.iter_links(fields=('_id', 'network')):
            rules.append(nat_args + ['-s', link_svr.network])

    # Allow established/related traffic both directions between the VPN
    # interface and each egress interface.
    for egress_iface in egress_ifaces:
        for in_iface, out_iface in (
                (egress_iface, self.interface),
                (self.interface, egress_iface)):
            rules.append([
                'FORWARD',
                '-i', in_iface,
                '-o', out_iface,
                '-m', 'state',
                '--state', 'ESTABLISHED,RELATED',
                '-j', 'ACCEPT',
            ])

    tag_args = [
        '-m', 'comment',
        '--comment', 'pritunl_%s' % self.server.id,
    ]
    if settings.local.iptables_wait:
        tag_args.append('--wait')

    return [rule + tag_args for rule in rules]
def _auth_radius(username, password, remote_addr):
    """Handle a Radius single sign-on key request.

    Verifies the credentials against Radius, resolves the organization
    (from the Radius-supplied org names, a plugin override, or the
    configured default), optionally runs secondary Duo authentication,
    creates or updates the user, and returns a 202 response with a
    one-time key link. Any failure returns a 401/403 JSON error or
    aborts with 405 if the resolved organization is missing.
    """
    sso_mode = settings.app.sso

    valid, org_names, groups = sso.verify_radius(username, password)
    if not valid:
        journal.entry(
            journal.SSO_AUTH_FAILURE,
            user_name=username,
            remote_address=remote_addr,
            reason=journal.SSO_AUTH_REASON_RADIUS_FAILED,
            reason_long='Radius authentication failed',
        )

        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    org_id = settings.app.sso_org
    if org_names:
        not_found = False
        for org_name in org_names:
            # Fix: fields must be a tuple -- ('_id') is just the string
            # '_id', which would be iterated character by character.
            org = organization.get_by_name(org_name, fields=('_id',))
            if org:
                not_found = False
                org_id = org.id
                break
            else:
                not_found = True

        if not_found:
            # Fall through to the default org rather than failing.
            logger.warning('Supplied org names do not exist', 'sso',
                sso_type='radius',
                user_name=username,
                org_names=org_names,
            )

    # Plugins may veto the login or override the organization.
    valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
        sso_type='radius',
        user_name=username,
        user_email=None,
        remote_ip=utils.get_remote_addr(),
    )
    if valid:
        org_id = org_id_new or org_id
    else:
        journal.entry(
            journal.SSO_AUTH_FAILURE,
            user_name=username,
            remote_address=remote_addr,
            reason=journal.SSO_AUTH_REASON_PLUGIN_FAILED,
            reason_long='Radius plugin authentication failed',
        )

        logger.error('Radius plugin authentication not valid', 'sso',
            username=username,
        )

        return utils.jsonify({
            'error': AUTH_INVALID,
            'error_msg': AUTH_INVALID_MSG,
        }, 401)

    groups = ((groups or set()) | (groups2 or set())) or None

    if DUO_AUTH in sso_mode:
        try:
            duo_auth = sso.Duo(
                username=username,
                factor=settings.app.sso_duo_mode,
                remote_ip=utils.get_remote_addr(),
                auth_type='Key',
            )
            valid = duo_auth.authenticate()
        except InvalidUser:
            logger.error('Duo authentication username not valid', 'sso',
                username=username,
            )

            journal.entry(
                journal.SSO_AUTH_FAILURE,
                user_name=username,
                remote_address=remote_addr,
                reason=journal.SSO_AUTH_REASON_DUO_FAILED,
                reason_long='Duo authentication invalid username',
            )

            return utils.jsonify({
                'error': AUTH_INVALID,
                'error_msg': AUTH_INVALID_MSG,
            }, 401)

        if valid:
            valid, org_id_new, groups2 = sso.plugin_sso_authenticate(
                sso_type='duo',
                user_name=username,
                user_email=None,
                remote_ip=utils.get_remote_addr(),
            )
            if valid:
                org_id = org_id_new or org_id
            else:
                journal.entry(
                    journal.SSO_AUTH_FAILURE,
                    user_name=username,
                    remote_address=remote_addr,
                    reason=journal.SSO_AUTH_REASON_PLUGIN_FAILED,
                    reason_long='Duo plugin authentication failed',
                )

                logger.error('Duo plugin authentication not valid', 'sso',
                    username=username,
                )

                return utils.jsonify({
                    'error': AUTH_INVALID,
                    'error_msg': AUTH_INVALID_MSG,
                }, 401)

            groups = ((groups or set()) | (groups2 or set())) or None
        else:
            logger.error('Duo authentication not valid', 'sso',
                username=username,
            )

            journal.entry(
                journal.SSO_AUTH_FAILURE,
                user_name=username,
                remote_address=remote_addr,
                reason=journal.SSO_AUTH_REASON_DUO_FAILED,
                reason_long='Duo authentication failed',
            )

            return utils.jsonify({
                'error': AUTH_INVALID,
                'error_msg': AUTH_INVALID_MSG,
            }, 401)

        # NOTE: re-merging groups2 here is redundant (union is
        # idempotent); kept for parity with the original flow.
        groups = ((groups or set()) | (groups2 or set())) or None

    org = organization.get_by_id(org_id)
    if not org:
        logger.error('Organization for sso does not exist', 'auth',
            org_id=org_id,
        )
        return flask.abort(405)

    usr = org.find_user(name=username)
    if not usr:
        usr = org.new_user(name=username, type=CERT_CLIENT,
            auth_type=sso_mode,
            groups=list(groups) if groups else None)
        usr.audit_event('user_created',
            'User created with single sign-on',
            remote_addr=remote_addr,
        )

        journal.entry(
            journal.USER_CREATE,
            usr.journal_data,
            event_long='User created with single sign-on',
            remote_address=remote_addr,
        )

        event.Event(type=ORGS_UPDATED)
        event.Event(type=USERS_UPDATED, resource_id=org.id)
        event.Event(type=SERVERS_UPDATED)
    else:
        if usr.disabled:
            return utils.jsonify({
                'error': AUTH_DISABLED,
                'error_msg': AUTH_DISABLED_MSG,
            }, 403)

        # Merge in any groups the SSO flow added; never remove groups.
        if groups and groups - set(usr.groups or []):
            usr.groups = list(set(usr.groups or []) | groups)
            usr.commit('groups')

        # Switching auth type invalidates the user's pin.
        if usr.auth_type != sso_mode:
            usr.auth_type = sso_mode
            usr.set_pin(None)
            usr.commit(('auth_type', 'pin'))

    key_link = org.create_user_key_link(usr.id, one_time=True)

    journal.entry(
        journal.SSO_AUTH_SUCCESS,
        usr.journal_data,
        key_id_hash=hashlib.md5(key_link['id'].encode()).hexdigest(),
        remote_address=remote_addr,
    )

    usr.audit_event('user_profile',
        'User profile viewed from single sign-on',
        remote_addr=utils.get_remote_addr(),
    )

    journal.entry(
        journal.USER_PROFILE_SUCCESS,
        usr.journal_data,
        event_long='User profile viewed from single sign-on',
        remote_address=remote_addr,
    )

    return utils.jsonify({
        'redirect': utils.get_url_root() + key_link['view_url'],
    }, 202)
def assign_ip_pool(self, network, network_start, network_end, network_hash):
    """Pre-assign pool IP addresses to every user on this server.

    Walks all organizations and users attached to the server, drawing
    addresses from the pool iterator for ``network`` (optionally bounded
    by ``network_start``/``network_end``), and upserts one pool document
    per address keyed by the address's integer value. Uses an unordered
    bulk upsert when the mongo driver supports it. Stops and logs a
    warning once the pool (or the end bound) is exhausted.
    """
    server_id = self.server.id
    exhausted = False

    network = ipaddress.IPv4Network(network)
    if network_start:
        network_start = ipaddress.IPv4Address(network_start)
    if network_end:
        network_end = ipaddress.IPv4Address(network_end)

    ip_pool = self.get_ip_pool(network, network_start)
    if not ip_pool:
        return

    # Prefer a single unordered bulk write when available.
    bulk = None
    bulk_empty = None
    if mongo.has_bulk:
        bulk = self.collection.initialize_unordered_bulk_op()
        bulk_empty = True

    for org in self.server.iter_orgs():
        org_id = org.id

        for user in org.iter_users(include_pool=True):
            try:
                remote_ip_addr = ip_pool.next()
                # Treat passing the end bound the same as pool exhaustion.
                if network_end and remote_ip_addr > network_end:
                    raise StopIteration()
            except StopIteration:
                exhausted = True
                break

            doc_id = int(remote_ip_addr)
            spec = {
                '_id': doc_id,
            }
            doc = {'$set': {
                '_id': doc_id,
                'network': network_hash,
                'server_id': server_id,
                'org_id': org_id,
                'user_id': user.id,
                'address': '%s/%s' % (remote_ip_addr, network.prefixlen),
            }}

            if bulk:
                bulk.find(spec).upsert().update(doc)
                bulk_empty = False
            else:
                self.collection.update(spec, doc, upsert=True)

        if exhausted:
            logger.warning('Failed to assign ip addresses '
                'to server, ip pool empty', 'server',
                server_id=server_id,
                org_id=org_id,
            )
            break

    if bulk and not bulk_empty:
        bulk.execute()
def auth_okta_secondary(username, passcode, remote_ip, okta_mode):
    """Run Okta secondary (MFA) verification for a user.

    ``okta_mode`` selects the factor: contains 'push' for Okta Verify
    push, 'passcode' for TOTP; contains 'none' to allow skipping when no
    factor is enrolled. Returns True on success, False on any failure.

    Flow: resolve the Okta user id, list the user's factors, pick an
    active Okta/Google factor matching the mode (preferring provider
    'okta'), start verification, then poll the verify transaction until
    success/rejection or ``sso_timeout`` elapses.
    """
    user_id = get_user_id(username)
    if not user_id:
        return False

    if 'passcode' in okta_mode and not passcode:
        logger.error('Okta passcode empty', 'sso',
            username=username,
            okta_user_id=user_id,
        )
        return False

    # List the user's enrolled factors.
    try:
        response = requests.get(
            _getokta_url() + '/api/v1/users/%s/factors' % user_id,
            headers={
                'Accept': 'application/json',
                'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
            },
        )
    except httplib.HTTPException:
        logger.exception('Okta api error', 'sso',
            username=username,
            okta_user_id=user_id,
        )
        return False

    if response.status_code != 200:
        logger.error('Okta api error', 'sso',
            username=username,
            okta_user_id=user_id,
            status_code=response.status_code,
            response=response.content,
        )
        return False

    # NOTE(review): not_active is never set True below, so the
    # 'Okta secondary not active' branch is currently unreachable.
    not_active = False
    factor_id = None
    data = response.json()
    for factor in data:
        if not factor.get('id') or not factor.get('provider') or \
                not factor.get('status'):
            continue

        # Only active Okta/Google factors are considered.
        if factor.get('provider').lower() not in ('okta', 'google') or \
                factor.get('status').lower() != 'active':
            continue

        # Match the factor type to the configured mode.
        if 'push' in okta_mode:
            if factor['factorType'].lower() != 'push':
                continue
        elif 'passcode' in okta_mode:
            if factor['factorType'].lower() != 'token:software:totp':
                continue
        else:
            continue

        # Prefer the native Okta provider over Google.
        if factor_id is None or factor.get('provider').lower() == 'okta':
            factor_id = factor['id']

    if not factor_id:
        if 'none' in okta_mode:
            # Mode allows users without a secondary factor.
            logger.info('Okta secondary not available, skipped', 'sso',
                username=username,
                okta_user_id=user_id,
            )
            return True
        elif not_active:
            logger.warning('Okta secondary not active', 'sso',
                username=username,
                okta_user_id=user_id,
            )
            return False
        else:
            logger.warning('Okta secondary not available', 'sso',
                username=username,
                okta_user_id=user_id,
            )
            return False

    verify_data = {}
    if passcode:
        verify_data['passCode'] = passcode

    logger.info('Sending Okta verify', 'sso',
        username=username,
        okta_user_id=user_id,
        okta_factor_id=factor_id,
    )

    # Start the verification transaction for the chosen factor.
    try:
        response = requests.post(
            _getokta_url() + '/api/v1/users/%s/factors/%s/verify' % (
                user_id, factor_id),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
                'X-Forwarded-For': remote_ip,
            },
            json=verify_data,
        )
    except httplib.HTTPException:
        logger.exception('Okta api error', 'sso',
            username=username,
            user_id=user_id,
            factor_id=factor_id,
        )
        return False

    if response.status_code != 200 and response.status_code != 201:
        logger.error('Okta api error', 'sso',
            username=username,
            user_id=user_id,
            factor_id=factor_id,
            status_code=response.status_code,
            response=response.content,
        )
        return False

    # Poll the transaction until it resolves or sso_timeout elapses.
    # NOTE(review): on timeout the loop falls through and the function
    # implicitly returns None (falsy) -- confirm callers treat this as
    # a failure.
    poll_url = None
    start = time.time()
    while time.time() - start < settings.app.sso_timeout:
        data = response.json()

        result = data.get('factorResult').lower()
        if result == 'success':
            return True
        elif result == 'waiting':
            # Push not yet approved; keep polling.
            pass
        else:
            logger.warning('Okta push rejected', 'sso',
                username=username,
                user_id=user_id,
                factor_id=factor_id,
                result=result,
            )
            return False

        # The poll URL comes from the first response's _links section.
        if not poll_url:
            links = data.get('_links')
            if not links:
                logger.error('Okta cant find links', 'sso',
                    username=username,
                    user_id=user_id,
                    factor_id=factor_id,
                    data=data,
                )
                return False

            poll = links.get('poll')
            if not poll:
                logger.error('Okta cant find poll', 'sso',
                    username=username,
                    user_id=user_id,
                    factor_id=factor_id,
                    data=data,
                )
                return False

            poll_url = poll.get('href')
            if not poll_url:
                logger.error('Okta cant find href', 'sso',
                    username=username,
                    user_id=user_id,
                    factor_id=factor_id,
                    data=data,
                )
                return False

        time.sleep(settings.app.sso_okta_poll_rate)

        try:
            response = requests.get(
                poll_url,
                headers={
                    'Accept': 'application/json',
                    'Authorization': 'SSWS %s' % settings.app.sso_okta_token,
                },
            )
        except httplib.HTTPException:
            logger.exception('Okta poll api error', 'sso',
                username=username,
                user_id=user_id,
                factor_id=factor_id,
            )
            return False

        if response.status_code != 200:
            logger.error('Okta poll api error', 'sso',
                username=username,
                user_id=user_id,
                factor_id=factor_id,
                status_code=response.status_code,
                response=response.content,
            )
            return False