def commit(self, init=False):
    """Persist every Mongo-backed settings group in one transaction.

    Builds a bulk upsert for each group whose type is GROUP_MONGO,
    publishes the changed docs on the 'setting' channel within the same
    transaction, then executes the bulk and commits. If no group
    produced a commit doc, nothing is written.

    :param init: forwarded to each group's get_commit_doc.
    """
    from pritunl import messenger
    from pritunl import transaction
    # Removed unused `from pritunl import mongo` (never referenced here).

    docs = []
    has_docs = False
    # Note: the local deliberately shadows the transaction module below.
    transaction = transaction.Transaction()
    collection = transaction.collection(
        self.collection.name_str)

    for group in self.groups:
        group_cls = getattr(self, group)
        if group_cls.type != GROUP_MONGO:
            continue

        doc = group_cls.get_commit_doc(init)
        if doc:
            has_docs = True
            # Upsert by _id so first-time groups are created in place.
            collection.bulk().find({
                '_id': doc['_id'],
            }).upsert().update({
                '$set': doc,
            })
            docs.append(doc)

    # Published inside the transaction so listeners only observe
    # committed settings.
    messenger.publish('setting', docs, transaction=transaction)

    if not has_docs:
        return

    collection.bulk_execute()
    transaction.commit()
def task(self):
    # Periodic generator task: publishes route advertisements for any
    # route docs whose ping timestamp has gone stale. The bare `yield`
    # points are cooperative checkpoints for the task scheduler.
    try:
        # Routes not pinged within route_ping_ttl are considered stale.
        timestamp_spec = utils.now() - datetime.timedelta(
            seconds=settings.vpn.route_ping_ttl)

        docs = self.routes_collection.find({
            'timestamp': {'$lt': timestamp_spec},
        })

        yield

        for doc in docs:
            server_id = doc['server_id']
            vpc_region = doc['vpc_region']
            vpc_id = doc['vpc_id']
            network = doc['network']

            # Ask an instance to re-advertise the stale route.
            messenger.publish('instance', ['route_advertisement',
                server_id, vpc_region, vpc_id, network])
    except GeneratorExit:
        # Let the scheduler close the generator cleanly.
        raise
    except:
        # Best-effort task: log and keep the interrupter loop alive.
        logger.exception('Error checking route states', 'tasks')

    # Sleep (interruptibly) until the next check cycle.
    yield interrupter_sleep(settings.vpn.server_ping)
def commit(self, init=False):
    """Persist every Mongo-backed settings group in one transaction.

    Builds a bulk upsert per GROUP_MONGO group, publishes the changed
    docs on the 'setting' channel within the same transaction, then
    executes the bulk and commits. Writes nothing when no group
    produced a commit doc.

    :param init: forwarded to each group's get_commit_doc.
    """
    from pritunl import messenger
    from pritunl import transaction

    docs = []
    has_docs = False
    # Note: the local deliberately shadows the transaction module below.
    transaction = transaction.Transaction()
    collection = transaction.collection(self.collection.name_str)

    for group in self.groups:
        group_cls = getattr(self, group)
        if group_cls.type != GROUP_MONGO:
            continue

        doc = group_cls.get_commit_doc(init)
        if doc:
            has_docs = True
            # Upsert by _id so first-time groups are created in place.
            collection.bulk().find({
                '_id': doc['_id'],
            }).upsert().update({
                '$set': doc,
            })
            docs.append(doc)

    # Published inside the transaction so listeners only observe
    # committed settings.
    messenger.publish('setting', docs, transaction=transaction)

    if not has_docs:
        return

    collection.bulk_execute()
    transaction.commit()
def commit(self, *args, **kwargs):
    """Commit server changes; if the network changed, start an IP pool
    reassignment queue inside a transaction and lock the network.

    Raises ServerNetworkLocked when a reassignment is already pending.
    """
    tran = None

    if 'network' in self.loaded_fields and \
            self.network != self._orig_network:
        tran = transaction.Transaction()
        if self.network_lock:
            # A previous reassignment queue still owns the network.
            raise ServerNetworkLocked('Server network is locked', {
                'server_id': self.id,
                'lock_id': self.network_lock,
            })
        else:
            # Queue the pool move in the same transaction as the commit
            # so both apply atomically.
            queue_ip_pool = queue.start('assign_ip_pool',
                transaction=tran,
                server_id=self.id,
                network=self.network,
                old_network=self._orig_network,
            )
            self.network_lock = queue_ip_pool.id

    # Org membership changes are applied immediately, outside tran.
    for org_id in self._orgs_added:
        self.ip_pool.assign_ip_pool_org(org_id)

    for org_id in self._orgs_removed:
        self.ip_pool.unassign_ip_pool_org(org_id)

    mongo.MongoObject.commit(self, transaction=tran, *args, **kwargs)

    if tran:
        # Notify runners only after the transaction commits.
        messenger.publish('queue', 'queue_updated', transaction=tran)
        tran.commit()
def __init__(self, type, resource_id=None, delay=None):
    """Emit an event, optionally deferring it by ``delay`` seconds."""
    if not delay:
        messenger.publish('events', (type, resource_id))
        return
    # Deferred events go through the queue so duplicates raised within
    # the delay window can be collapsed instead of each publishing.
    fire_at = time.time() + delay
    event_queue.put((fire_at, type, resource_id))
def _keep_alive_thread(self):
    # Background thread: refresh this runner's TTL reservation on the
    # queue document until the task completes or the reserve is lost.
    while True:
        # Refresh slightly before the TTL expires (6s margin).
        time.sleep(self.ttl - 6)
        if self.queue_com.state in (COMPLETE, STOPPED):
            break

        # Matching on runner_id means only the current reserve holder
        # can extend the TTL.
        response = self.collection.update({
            '_id': self.id,
            'runner_id': self.runner_id,
        }, {'$set': {
            'ttl_timestamp': utils.now() + \
                datetime.timedelta(seconds=self.ttl),
        }})
        if response['updatedExisting']:
            messenger.publish('queue', [UPDATE, self.id])
        else:
            # Another runner took the reserve; stop this one under lock.
            self.queue_com.state_lock.acquire()
            try:
                self.queue_com.state = STOPPED
            finally:
                self.queue_com.state_lock.release()
            logger.error(
                'Lost reserve, queue stopped',
                'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
def publish(self, message, transaction=None, extra=None):
    """Publish ``message`` on the 'servers' channel, tagging the extra
    payload with this object's id."""
    if not extra:
        extra = {}
    # Intentionally mutates a caller-supplied non-empty extra dict,
    # matching the pre-existing contract.
    extra['server_id'] = self.id
    messenger.publish(
        'servers', message, extra=extra, transaction=transaction)
def _disconnected(self, client):
    # Handle an OpenVPN client disconnect: audit the user, tell peers
    # to drop routed addresses, log, and emit an event.
    org_id = client['org_id']
    user_id = client['user_id']
    remote_ip = client['real_address']

    org = self.get_org(org_id)
    if org:
        # Only _id is needed for the audit write.
        user = org.get_user(user_id, fields=('_id', ))
        if user:
            user.audit_event(
                'user_connection',
                'User disconnected from "%s"' % self.server.name,
                remote_addr=remote_ip,
            )

    if self.route_clients:
        # state=False tells other hosts to remove this client's routes.
        messenger.publish(
            'client', {
                'state': False,
                'virt_address': client['virt_address'],
                'virt_address6': client['virt_address6'],
                'host_address': settings.local.host.local_addr,
                'host_address6': settings.local.host.local_addr6,
            })

    self.instance_com.push_output('User disconnected user_id=%s' %
        client['user_id'])
    self.send_event()
def commit(self, *args, **kwargs):
    """Commit server changes; if the network changed, start an IP pool
    reassignment queue inside a transaction and lock the network.

    Raises ServerNetworkLocked when a reassignment is already pending.
    """
    tran = None

    if 'network' in self.loaded_fields and \
            self.network != self._orig_network:
        tran = transaction.Transaction()
        if self.network_lock:
            # A previous reassignment queue still owns the network.
            raise ServerNetworkLocked('Server network is locked', {
                'server_id': self.id,
                'lock_id': self.network_lock,
            })
        else:
            # Queue the pool move in the same transaction as the commit
            # so both apply atomically.
            queue_ip_pool = queue.start(
                'assign_ip_pool',
                transaction=tran,
                server_id=self.id,
                network=self.network,
                old_network=self._orig_network,
            )
            self.network_lock = queue_ip_pool.id

    # Org membership changes are applied immediately, outside tran.
    for org_id in self._orgs_added:
        self.ip_pool.assign_ip_pool_org(org_id)

    for org_id in self._orgs_removed:
        self.ip_pool.unassign_ip_pool_org(org_id)

    mongo.MongoObject.commit(self, transaction=tran, *args, **kwargs)

    if tran:
        # Notify runners only after the transaction commits.
        messenger.publish('queue', 'queue_updated', transaction=tran)
        tran.commit()
def publish(self, message, transaction=None, extra=None):
    """Publish ``message`` on the 'servers' channel, tagging the extra
    payload with the owning server's id."""
    if not extra:
        extra = {}
    # Intentionally mutates a caller-supplied non-empty extra dict,
    # matching the pre-existing contract.
    extra['server_id'] = self.server.id
    messenger.publish(
        'servers', message, extra=extra, transaction=transaction)
def commit(self, *args, **kwargs):
    """Commit server changes; when the network changed, start an IP
    pool reassignment queue inside a transaction and lock the network.

    Raises ServerNetworkLocked when a reassignment is already pending.
    """
    tran = None

    if self.network != self._orig_network:
        tran = transaction.Transaction()
        if self.network_lock:
            # A previous reassignment queue still owns the network.
            raise ServerNetworkLocked('Server network is locked', {
                'server_id': self.id,
                'lock_id': self.network_lock,
            })
        else:
            queue_ip_pool = queue.start('assign_ip_pool',
                transaction=tran,
                server_id=self.id,
                network=self.network,
                old_network=self._orig_network,
            )
            self.network_lock = queue_ip_pool.id
    elif self._orgs_changed:
        # TODO update ip pool
        pass

    mongo.MongoObject.commit(self, transaction=tran, *args, **kwargs)

    if tran:
        # Notify runners only after the transaction commits.
        messenger.publish('queue', 'queue_updated', transaction=tran)
        tran.commit()
def _disconnected(self, client):
    # Handle an OpenVPN client disconnect: audit the user, tell peers
    # to drop routed addresses, log, and emit an event.
    org_id = client['org_id']
    user_id = client['user_id']
    remote_ip = client['real_address']

    org = self.get_org(org_id)
    if org:
        # Only _id is needed for the audit write.
        user = org.get_user(user_id, fields=('_id',))
        if user:
            user.audit_event(
                'user_connection',
                'User disconnected from "%s"' % self.server.name,
                remote_addr=remote_ip,
            )

    if self.route_clients:
        # state=False tells other hosts to remove this client's routes.
        messenger.publish('client', {
            'state': False,
            'virt_address': client['virt_address'],
            'virt_address6': client['virt_address6'],
            'host_address': settings.local.host.local_addr,
            'host_address6': settings.local.host.local_addr6,
        })

    self.instance_com.push_output(
        'User disconnected user_id=%s' % client['user_id'])
    self.send_event()
def _keep_alive_thread(self):
    # Background thread: refresh this runner's TTL reservation on the
    # queue document until the task completes or the reserve is lost.
    while True:
        # Refresh slightly before the TTL expires (6s margin).
        time.sleep(self.ttl - 6)
        if self.queue_com.state in (COMPLETE, STOPPED):
            break

        # Matching on runner_id means only the current reserve holder
        # can extend the TTL.
        response = self.collection.update({
            '_id': self.id,
            'runner_id': self.runner_id,
        }, {'$set': {
            'ttl_timestamp': utils.now() + \
                datetime.timedelta(seconds=self.ttl),
        }})
        if response['updatedExisting']:
            messenger.publish('queue', [UPDATE, self.id])
        else:
            # Another runner took the reserve; stop this one under lock.
            self.queue_com.state_lock.acquire()
            try:
                self.queue_com.state = STOPPED
            finally:
                self.queue_com.state_lock.release()
            logger.error('Lost reserve, queue stopped', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
def commit(self, *args, **kwargs):
    """Commit server changes; when the network changed, start an IP
    pool reassignment queue inside a transaction and lock the network.

    Raises ServerNetworkLocked when a reassignment is already pending.
    """
    tran = None

    if self.network != self._orig_network:
        tran = transaction.Transaction()
        if self.network_lock:
            # A previous reassignment queue still owns the network.
            raise ServerNetworkLocked('Server network is locked', {
                'server_id': self.id,
                'lock_id': self.network_lock,
            })
        else:
            queue_ip_pool = queue.start(
                'assign_ip_pool',
                transaction=tran,
                server_id=self.id,
                network=self.network,
                old_network=self._orig_network,
            )
            self.network_lock = queue_ip_pool.id
    elif self._orgs_changed:
        # TODO update ip pool
        pass

    mongo.MongoObject.commit(self, transaction=tran, *args, **kwargs)

    if tran:
        # Notify runners only after the transaction commits.
        messenger.publish('queue', 'queue_updated', transaction=tran)
        tran.commit()
def update_license(license):
    """Store a new license key, revalidate the subscription, and notify
    listeners.

    The key is persisted and the 'subscription' update is published
    regardless of validity; LicenseInvalid is raised afterwards when
    validation fails.
    """
    settings.app.license = license
    settings.app.license_plan = None
    settings.commit()

    is_valid = update()
    messenger.publish('subscription', 'updated')

    if not is_valid:
        raise LicenseInvalid('License key is invalid')
def host_put(hst=None):
    """Flask handler: update a host's editable fields from the JSON
    request body and return the updated host as JSON.

    Only keys present in the request are applied. Returns 400 with a
    specific error code for invalid IPv6 subnet input.
    """
    if settings.app.demo_mode:
        return utils.demo_blocked()

    # `hst` arrives as an id from the route; resolve to the host object.
    hst = host.get_by_id(hst)

    if 'name' in flask.request.json:
        # Empty/filtered-out names fall back to a random name.
        hst.name = utils.filter_str(
            flask.request.json['name']) or utils.random_name()

    if 'public_address' in flask.request.json:
        hst.public_address = utils.filter_str(
            flask.request.json['public_address'])

    if 'public_address6' in flask.request.json:
        hst.public_address6 = utils.filter_str(
            flask.request.json['public_address6'])

    if 'routed_subnet6' in flask.request.json:
        routed_subnet6 = flask.request.json['routed_subnet6']
        if routed_subnet6:
            try:
                routed_subnet6 = ipaddress.IPv6Network(
                    flask.request.json['routed_subnet6'])
            except (ipaddress.AddressValueError, ValueError):
                return utils.jsonify({
                    'error': IPV6_SUBNET_INVALID,
                    'error_msg': IPV6_SUBNET_INVALID_MSG,
                }, 400)
            # Subnets smaller than /64 are rejected.
            if routed_subnet6.prefixlen > 64:
                return utils.jsonify({
                    'error': IPV6_SUBNET_SIZE_INVALID,
                    'error_msg': IPV6_SUBNET_SIZE_INVALID_MSG,
                }, 400)
            routed_subnet6 = str(routed_subnet6)
        else:
            routed_subnet6 = None

        if hst.routed_subnet6 != routed_subnet6:
            # Changing the routed subnet is disallowed while any IPv6
            # server is online.
            if server.get_online_ipv6_count():
                return utils.jsonify({
                    'error': IPV6_SUBNET_ONLINE,
                    'error_msg': IPV6_SUBNET_ONLINE_MSG,
                }, 400)
            hst.routed_subnet6 = routed_subnet6

    if 'link_address' in flask.request.json:
        hst.link_address = utils.filter_str(
            flask.request.json['link_address'])

    # Persist only the fields that actually changed.
    hst.commit(hst.changed)
    event.Event(type=HOSTS_UPDATED)
    messenger.publish('hosts', 'updated')
    return utils.jsonify(hst.dict())
def run(self):
    """Execute the queue task state machine.

    PENDING tasks run the main task (with retry/rollback bookkeeping);
    COMMITTED/ROLLBACK states run post/rollback work. Each state
    transition is guarded by claim_commit so only one runner proceeds.
    """
    self.queue_com.state = RUNNING
    try:
        if self.state == PENDING:
            self.attempts += 1
            if self.attempts > 1 and not self.retry:
                # Non-retryable task that already ran once: discard.
                self.remove()
                return
            elif self.attempts > settings.mongo.queue_max_attempts:
                # Too many attempts: flip to rollback.
                self.state = ROLLBACK
                if not self.claim_commit('state'):
                    return
            else:
                if not self.claim_commit('attempts'):
                    return
                self.task()
                if self.attempts > 1:
                    self.repeat_task()
                if self.has_post_work:
                    self.state = COMMITTED
                    if not self.claim_commit('state'):
                        return

        if self.has_post_work or self.state == ROLLBACK:
            # Re-claim if this runner doesn't already hold the queue.
            if not self.claimed and not self.claim_commit():
                return
            if self.state == COMMITTED:
                self.post_task()
            elif self.state == ROLLBACK:
                self.rollback_task()
            if self.has_post_work:
                self.complete_task()

        if self.claimed:
            self.complete()
    except:
        # Suppress errors caused by an intentional stop.
        if self.queue_com.state is not STOPPED:
            logger.exception(
                'Error running task in queue', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            messenger.publish('queue', [ERROR, self.id])
    finally:
        # Always mark the communicator complete, under its lock.
        self.queue_com.state_lock.acquire()
        try:
            self.queue_com.state = COMPLETE
        finally:
            self.queue_com.state_lock.release()
def _init_host(self):
    """Register this host in the vxlan document and derive its vxlan id.

    First tries to push a new hosts entry (only when this host's
    address is absent); otherwise updates the existing entry's MAC.
    The host's vxlan id is its 1-based position in the hosts array.
    """
    local_addr = settings.local.host.local_addr

    # Attempt to append ourselves; $nin guards against duplicates.
    doc = self.vxlan_collection.find_and_modify(
        {
            '_id': self.vxlan_id,
            'server_id': self.server_id,
            'hosts.host_dst': {
                '$nin': [local_addr]
            },
        },
        {
            '$push': {
                'hosts': {
                    'vxlan_mac': self.vxlan_mac,
                    'host_dst': local_addr,
                },
            }
        }, new=True)

    if not doc:
        # Already present: refresh the stored MAC for our entry.
        doc = self.vxlan_collection.find_and_modify(
            {
                '_id': self.vxlan_id,
                'server_id': self.server_id,
                'hosts.host_dst': local_addr,
            },
            {'$set': {
                'hosts.$.vxlan_mac': self.vxlan_mac,
            }}, new=True)

    if doc:
        # vxlan ids are 1-based array positions.
        for host_vxlan_id, data in enumerate(doc['hosts']):
            if data['host_dst'] == local_addr:
                self.host_vxlan_id = host_vxlan_id + 1

    if not self.host_vxlan_id:
        logger.error(
            'Failed to get host vxlan id', 'vxlan',
            vxlan_id=self.vxlan_id,
            server_id=self.server_id,
            host_id=settings.local.host_id,
            local_addr=local_addr,
        )
        raise ValueError('Failed to get host vxlan id')

    # Announce this host's vxlan membership to peers.
    messenger.publish(
        'vxlan', {
            'vxlan_id': self.vxlan_id,
            'server_id': self.server_id,
            'host_vxlan_id': self.host_vxlan_id,
            'vxlan_mac': self.vxlan_mac,
            'host_dst': local_addr,
        })
def run(self):
    """Execute the queue task state machine.

    PENDING tasks run the main task (with retry/rollback bookkeeping);
    COMMITTED/ROLLBACK states run post/rollback work. Each state
    transition is guarded by claim_commit so only one runner proceeds.
    """
    self.queue_com.state = RUNNING
    try:
        if self.state == PENDING:
            self.attempts += 1
            if self.attempts > 1 and not self.retry:
                # Non-retryable task that already ran once: discard.
                self.remove()
                return
            elif self.attempts > settings.mongo.queue_max_attempts:
                # Too many attempts: flip to rollback.
                self.state = ROLLBACK
                if not self.claim_commit('state'):
                    return
            else:
                if not self.claim_commit('attempts'):
                    return
                self.task()
                if self.attempts > 1:
                    self.repeat_task()
                if self.has_post_work:
                    self.state = COMMITTED
                    if not self.claim_commit('state'):
                        return

        if self.has_post_work or self.state == ROLLBACK:
            # Re-claim if this runner doesn't already hold the queue.
            if not self.claimed and not self.claim_commit():
                return
            if self.state == COMMITTED:
                self.post_task()
            elif self.state == ROLLBACK:
                self.rollback_task()
            if self.has_post_work:
                self.complete_task()

        if self.claimed:
            self.complete()
    except:
        # Suppress errors caused by an intentional stop.
        if self.queue_com.state is not STOPPED:
            logger.exception('Error running task in queue', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            messenger.publish('queue', [ERROR, self.id])
    finally:
        # Always mark the communicator complete, under its lock.
        self.queue_com.state_lock.acquire()
        try:
            self.queue_com.state = COMPLETE
        finally:
            self.queue_com.state_lock.release()
def host_put(hst=None):
    """Flask handler: update a host's editable fields from the JSON
    request body and return the updated host as JSON.

    Only keys present in the request are applied. Returns 400 with a
    specific error code for invalid IPv6 subnet input.
    """
    if settings.app.demo_mode:
        return utils.demo_blocked()

    # `hst` arrives as an id from the route; resolve to the host object.
    hst = host.get_by_id(hst)

    if "name" in flask.request.json:
        # Empty/filtered-out names fall back to a random name.
        hst.name = utils.filter_str(flask.request.json["name"]) or utils.random_name()

    if "public_address" in flask.request.json:
        hst.public_address = utils.filter_str(flask.request.json["public_address"])

    if "public_address6" in flask.request.json:
        hst.public_address6 = utils.filter_str(flask.request.json["public_address6"])

    if "routed_subnet6" in flask.request.json:
        routed_subnet6 = flask.request.json["routed_subnet6"]
        if routed_subnet6:
            try:
                routed_subnet6 = ipaddress.IPv6Network(flask.request.json["routed_subnet6"])
            except (ipaddress.AddressValueError, ValueError):
                return utils.jsonify({"error": IPV6_SUBNET_INVALID, "error_msg": IPV6_SUBNET_INVALID_MSG}, 400)
            # Subnets smaller than /64 are rejected.
            if routed_subnet6.prefixlen > 64:
                return utils.jsonify(
                    {"error": IPV6_SUBNET_SIZE_INVALID, "error_msg": IPV6_SUBNET_SIZE_INVALID_MSG}, 400
                )
            routed_subnet6 = str(routed_subnet6)
        else:
            routed_subnet6 = None

        if hst.routed_subnet6 != routed_subnet6:
            # Changing the routed subnet is disallowed while any IPv6
            # server is online.
            if server.get_online_ipv6_count():
                return utils.jsonify({"error": IPV6_SUBNET_ONLINE, "error_msg": IPV6_SUBNET_ONLINE_MSG}, 400)
            hst.routed_subnet6 = routed_subnet6

    if "local_address" in flask.request.json:
        hst.local_address = utils.filter_str(flask.request.json["local_address"])

    if "local_address6" in flask.request.json:
        hst.local_address6 = utils.filter_str(flask.request.json["local_address6"])

    if "link_address" in flask.request.json:
        hst.link_address = utils.filter_str(flask.request.json["link_address"])

    if "instance_id" in flask.request.json:
        hst.instance_id = utils.filter_str(flask.request.json["instance_id"])

    # Persist only the fields that actually changed.
    hst.commit(hst.changed)
    event.Event(type=HOSTS_UPDATED)
    messenger.publish("hosts", "updated")
    return utils.jsonify(hst.dict())
def clear_auth_cache(self):
    """Drop every cached SSO authentication record for this user and
    tell instances to disconnect the user."""
    spec = {'user_id': self.id}
    cache_collections = (
        self.sso_passcode_cache_collection,
        self.sso_push_cache_collection,
        self.sso_client_cache_collection,
    )
    for coll in cache_collections:
        coll.delete_many(spec)

    messenger.publish('instance', ['user_disconnect', self.id])
def task(self):
    # Periodic generator task: re-advertises stale routes and prunes
    # route docs whose server is gone or no longer advertises them.
    # The bare `yield` is a cooperative checkpoint for the scheduler.
    try:
        # Routes not pinged within route_ping_ttl are considered stale.
        timestamp_spec = utils.now() - datetime.timedelta(
            seconds=settings.vpn.route_ping_ttl)

        docs = self.routes_collection.find({
            'timestamp': {
                '$lt': timestamp_spec
            },
        })

        yield

        for doc in docs:
            server_id = doc['server_id']
            vpc_region = doc['vpc_region']
            vpc_id = doc['vpc_id']
            network = doc['network']

            svr = server.get_by_id(server_id)
            if not svr:
                # Server no longer exists; drop its route doc.
                self.routes_collection.remove({
                    '_id': doc['_id'],
                })
                continue

            # Check whether the server still advertises this network
            # (honoring NAT netmap remapping when present).
            match = False
            for route in svr.get_routes(include_server_links=True):
                route_advertise = route['advertise'] or \
                    (route['vpc_region'] and route['vpc_id'])
                route_network = route['network']
                netmap = route.get('nat_netmap')
                if netmap:
                    route_network = netmap
                if route_advertise and network == route_network:
                    match = True

            if not match:
                # Route is no longer advertised; drop the doc.
                self.routes_collection.remove({
                    '_id': doc['_id'],
                })
                continue

            messenger.publish('instance', [
                'route_advertisement', server_id, vpc_region, vpc_id,
                network
            ])
    except GeneratorExit:
        # Let the scheduler close the generator cleanly.
        raise
    except:
        # Best-effort task: log and continue.
        logger.exception('Error checking route states', 'tasks')
def _init_host(self):
    """Register this host in the vxlan document and derive its vxlan id.

    First tries to push a new hosts entry (only when this host's
    address is absent); otherwise refreshes the existing entry's MAC
    and addresses. The host's vxlan id is its 1-based position in the
    hosts array. IPv6 destination is recorded only when ipv6 is on.
    """
    local_addr = settings.local.host.local_addr
    local_addr6 = None
    if self.ipv6:
        local_addr6 = settings.local.host.local_addr6

    # Attempt to append ourselves; $nin guards against duplicates.
    doc = self.vxlan_collection.find_and_modify({
        '_id': self.vxlan_id,
        'server_id': self.server_id,
        'hosts.host_dst': {'$nin': [local_addr]},
    }, {'$push': {
        'hosts': {
            'vxlan_mac': self.vxlan_mac,
            'host_dst': local_addr,
            'host_dst6': local_addr6,
        },
    }}, new=True)

    if not doc:
        # Already present: refresh MAC and addresses for our entry.
        doc = self.vxlan_collection.find_and_modify({
            '_id': self.vxlan_id,
            'server_id': self.server_id,
            'hosts.host_dst': local_addr,
        }, {'$set': {
            'hosts.$.vxlan_mac': self.vxlan_mac,
            'hosts.$.host_dst': local_addr,
            'hosts.$.host_dst6': local_addr6,
        }}, new=True)

    if doc:
        # vxlan ids are 1-based array positions.
        for host_vxlan_id, data in enumerate(doc['hosts']):
            if data['host_dst'] == local_addr:
                self.host_vxlan_id = host_vxlan_id + 1

    if not self.host_vxlan_id:
        logger.error('Failed to get host vxlan id', 'vxlan',
            vxlan_id=self.vxlan_id,
            server_id=self.server_id,
            host_id=settings.local.host_id,
            local_addr=local_addr,
            local_addr6=local_addr6,
        )
        raise ValueError('Failed to get host vxlan id')

    # Announce this host's vxlan membership to peers.
    messenger.publish('vxlan', {
        'vxlan_id': self.vxlan_id,
        'server_id': self.server_id,
        'host_vxlan_id': self.host_vxlan_id,
        'vxlan_mac': self.vxlan_mac,
        'host_dst': local_addr,
        'host_dst6': local_addr6,
    })
def stop(queue_id=None, spec=None, transaction=None):
    """Request that a queue stop, addressed directly by id or by
    looking up the first queue document matching ``spec``.

    Returns silently when ``spec`` matches nothing; raises ValueError
    when neither ``queue_id`` nor ``spec`` is supplied.
    """
    if queue_id is None:
        if spec is None:
            raise ValueError('Must provide queue_id or spec')
        doc = Queue.collection.find_one(spec, {
            '_id': True,
        })
        if not doc:
            return
        queue_id = doc['_id']

    messenger.publish('queue', [STOP, queue_id], transaction=transaction)
def task(self):
    # Periodic generator task: re-advertises stale routes and prunes
    # route docs whose server is gone or no longer advertises them.
    # The bare `yield` is a cooperative checkpoint for the scheduler.
    try:
        # Routes not pinged within route_ping_ttl are considered stale.
        timestamp_spec = utils.now() - datetime.timedelta(
            seconds=settings.vpn.route_ping_ttl)

        docs = self.routes_collection.find({
            'timestamp': {'$lt': timestamp_spec},
        })

        yield

        for doc in docs:
            server_id = doc['server_id']
            vpc_region = doc['vpc_region']
            vpc_id = doc['vpc_id']
            network = doc['network']

            svr = server.get_by_id(server_id)
            if not svr:
                # Server no longer exists; drop its route doc.
                self.routes_collection.remove({
                    '_id': doc['_id'],
                })
                continue

            # Check whether the server still advertises this network
            # (honoring NAT netmap remapping when present).
            match = False
            for route in svr.get_routes(include_server_links=True):
                route_advertise = route['advertise'] or \
                    (route['vpc_region'] and route['vpc_id'])
                route_network = route['network']
                netmap = route.get('nat_netmap')
                if netmap:
                    route_network = netmap
                if route_advertise and network == route_network:
                    match = True

            if not match:
                # Route is no longer advertised; drop the doc.
                self.routes_collection.remove({
                    '_id': doc['_id'],
                })
                continue

            messenger.publish('instance', ['route_advertisement',
                server_id, vpc_region, vpc_id, network])
    except GeneratorExit:
        # Let the scheduler close the generator cleanly.
        raise
    except:
        # Best-effort task: log and continue.
        logger.exception('Error checking route states', 'tasks')
def _disconnected(self, client):
    # Handle an OpenVPN client disconnect: audit, record monitoring
    # point, fire plugin hook, notify route peers, log, emit event.
    org_id = client['org_id']
    user_id = client['user_id']
    remote_ip = client['real_address']

    org = self.get_org(org_id)
    if org:
        # name is needed for monitoring/plugin payloads below.
        user = org.get_user(user_id, fields=('_id', 'name'))
        if user:
            user.audit_event(
                'user_connection',
                'User disconnected from "%s"' % self.server.name,
                remote_addr=remote_ip,
            )

            monitoring.insert_point('user_disconnections', {
                'host': settings.local.host.name,
                'server': self.server.name,
            }, {
                'user': user.name,
                'remote_ip': remote_ip,
            })

            plugins.event(
                'user_disconnected',
                host_id=settings.local.host_id,
                server_id=self.server.id,
                org_id=org.id,
                user_id=user.id,
                host_name=settings.local.host.name,
                server_name=self.server.name,
                org_name=org.name,
                user_name=user.name,
                remote_ip=remote_ip,
            )

    if self.route_clients:
        # state=False tells other hosts to remove this client's routes.
        messenger.publish(
            'client', {
                'state': False,
                'server_id': self.server.id,
                'virt_address': client['virt_address'],
                'virt_address6': client['virt_address6'],
                'host_address': settings.local.host.local_addr,
                'host_address6': settings.local.host.local_addr6,
            })

    self.instance_com.push_output('User disconnected user_id=%s' %
        client['user_id'])
    self.send_event()
def _disconnected(self, client):
    # Handle an OpenVPN client disconnect: audit, record monitoring
    # point, fire plugin hook, notify route peers, log, emit event.
    org_id = client['org_id']
    user_id = client['user_id']
    remote_ip = client['real_address']

    org = self.get_org(org_id)
    if org:
        # name is needed for monitoring/plugin payloads below.
        user = org.get_user(user_id, fields=('_id', 'name'))
        if user:
            user.audit_event(
                'user_connection',
                'User disconnected from "%s"' % self.server.name,
                remote_addr=remote_ip,
            )

            monitoring.insert_point('user_disconnections', {
                'host': settings.local.host.name,
                'server': self.server.name,
            }, {
                'user': user.name,
                'remote_ip': remote_ip,
            })

            plugins.event(
                'user_disconnected',
                host_id=settings.local.host_id,
                server_id=self.server.id,
                org_id=org.id,
                user_id=user.id,
                host_name=settings.local.host.name,
                server_name=self.server.name,
                org_name=org.name,
                user_name=user.name,
                remote_ip=remote_ip,
            )

    if self.route_clients:
        # state=False tells other hosts to remove this client's routes.
        messenger.publish('client', {
            'state': False,
            'server_id': self.server.id,
            'virt_address': client['virt_address'],
            'virt_address6': client['virt_address6'],
            'host_address': settings.local.host.local_addr,
            'host_address6': settings.local.host.local_addr6,
        })

    self.instance_com.push_output(
        'User disconnected user_id=%s' % client['user_id'])
    self.send_event()
def _server_check_thread():
    # Daemon loop: find servers whose ping timestamp expired, clear
    # their runtime state, and publish a 'start' request so another
    # host restarts them. checked_hosts tracks server ids already
    # handled this sweep so the same doc isn't re-claimed.
    checked_hosts = set()
    collection = mongo.get_collection('servers')

    while True:
        try:
            spec = {
                'ping_timestamp': {
                    '$lt': datetime.datetime.utcnow() -
                        datetime.timedelta(
                            seconds=settings.vpn.server_ping_ttl),
                },
            }
            doc = {
                '$set': {
                    'clients': {},
                },
                '$unset': {
                    'host_id': '',
                    'instance_id': '',
                },
            }
            project = {
                '_id': True,
                'hosts': True,
                'organizations': True,
            }

            if checked_hosts:
                # Skip servers already restarted in this sweep.
                spec['_id'] = {'$nin': list(checked_hosts)}

            # Atomically claim and reset one stale server doc.
            doc = collection.find_and_modify(spec, doc, fields=project)

            if doc:
                checked_hosts.add(doc['_id'])
                messenger.publish('servers', 'start', extra={
                    'server_id': str(doc['_id']),
                    'send_events': True,
                })
                # Immediately look for the next stale server.
                continue
        except:
            logger.exception('Error checking server states.')

        # Sweep finished (or errored): reset and sleep before retrying.
        checked_hosts = set()
        time.sleep(settings.vpn.server_ping)
def _keep_alive_thread(self):
    # Background thread: refresh this runner's TTL reservation on the
    # queue document until the task completes or the reserve is lost.
    while True:
        # Refresh slightly before the TTL expires (5s margin).
        time.sleep(self.ttl - 5)
        if self.queue_com.state in (COMPLETE, STOPPED):
            break

        # Matching on runner_id means only the current reserve holder
        # can extend the TTL.
        response = self.collection.update({
            '_id': bson.ObjectId(self.id),
            'runner_id': self.runner_id,
        }, {'$set': {
            'ttl_timestamp': datetime.datetime.utcnow() + \
                datetime.timedelta(seconds=self.ttl),
        }})
        if response['updatedExisting']:
            logger.debug(
                'Queue keep alive updated', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            messenger.publish('queue', [UPDATE, self.id])
        else:
            # Another runner took the reserve; stop this one under
            # lock and abort the thread with QueueStopped.
            logger.debug(
                'Queue keep alive lost reserve', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            self.queue_com.state_lock.acquire()
            try:
                self.queue_com.state = STOPPED
            finally:
                self.queue_com.state_lock.release()
            raise QueueStopped('Lost reserve, queue stopped', {
                'queue_id': self.id,
                'queue_type': self.type,
            })

    logger.debug(
        'Queue keep alive thread ended', 'queue',
        queue_id=self.id,
        queue_type=self.type,
    )
def _server_check_thread():
    # Daemon loop: find servers whose ping timestamp expired, clear
    # their runtime state, and publish a 'start' request so another
    # host restarts them. checked_hosts tracks server ids already
    # handled this sweep so the same doc isn't re-claimed.
    checked_hosts = set()
    collection = mongo.get_collection('servers')

    while True:
        try:
            spec = {
                'ping_timestamp': {
                    '$lt': datetime.datetime.utcnow() -
                        datetime.timedelta(
                            seconds=settings.vpn.server_ping_ttl),
                },
            }
            doc = {
                '$set': {
                    'clients': {},
                },
                '$unset': {
                    'host_id': '',
                    'instance_id': '',
                },
            }
            project = {
                '_id': True,
                'hosts': True,
                'organizations': True,
            }

            if checked_hosts:
                # Skip servers already restarted in this sweep.
                spec['_id'] = {'$nin': list(checked_hosts)}

            # Atomically claim and reset one stale server doc.
            doc = collection.find_and_modify(spec, doc, fields=project)

            if doc:
                checked_hosts.add(doc['_id'])
                messenger.publish('servers', 'start', extra={
                    'server_id': str(doc['_id']),
                    'send_events': True,
                })
                # Immediately look for the next stale server.
                continue
        except:
            logger.exception('Error checking server states.')

        # Sweep finished (or errored): reset and sleep before retrying.
        checked_hosts = set()
        time.sleep(settings.vpn.server_ping)
def start(self, transaction=None, block=False, block_timeout=30):
    """Commit and announce this queue as PENDING; optionally block
    until it completes.

    When ``block`` is set, subscribes to the 'queue' channel and waits
    for COMPLETE/UPDATE/ERROR messages for this queue id. UPDATE
    messages reset the timeout window. Raises TypeError when blocking
    with a transaction, QueueTaskError on a reported error, and
    QueueTimeout when no progress is seen within ``block_timeout``.
    """
    self.ttl_timestamp = utils.now() + \
        datetime.timedelta(seconds=self.ttl)
    self.commit(transaction=transaction)

    if block:
        if transaction:
            raise TypeError('Cannot use transaction when blocking')
        # Capture the cursor before publishing so no message is missed.
        cursor_id = messenger.get_cursor_id('queue')

    extra = {'queue_doc': self.export()}

    messenger.publish('queue', [PENDING, self.id], extra=extra,
        transaction=transaction)

    if block:
        last_update = time.time()
        while True:
            for msg in messenger.subscribe('queue',
                    cursor_id=cursor_id,
                    timeout=block_timeout):
                cursor_id = msg['_id']
                # The equality checks can raise TypeError for messages
                # whose payload isn't comparable; those are skipped.
                try:
                    if msg['message'] == [COMPLETE, self.id]:
                        return
                    elif msg['message'] == [UPDATE, self.id]:
                        # Keep-alive from the runner; restart timeout.
                        last_update = time.time()
                        break
                    elif msg['message'] == [ERROR, self.id]:
                        raise QueueTaskError(
                            'Error occured running ' +
                            'queue task', {
                                'queue_id': self.id,
                                'queue_type': self.type,
                            })
                except TypeError:
                    pass
            if (time.time() - last_update) >= block_timeout:
                raise QueueTimeout('Blocking queue timed out.', {
                    'queue_id': self.id,
                    'queue_type': self.type,
                })
def start(self, transaction=None, block=False, block_timeout=30):
    """Commit and announce this queue as PENDING; optionally block
    until it completes.

    When ``block`` is set, subscribes to the 'queue' channel and waits
    for COMPLETE/UPDATE/ERROR messages for this queue id. UPDATE
    messages reset the timeout window. Raises TypeError when blocking
    with a transaction, QueueTaskError on a reported error, and
    QueueTimeout when no progress is seen within ``block_timeout``.
    """
    self.ttl_timestamp = utils.now() + \
        datetime.timedelta(seconds=self.ttl)
    self.commit(transaction=transaction)

    if block:
        if transaction:
            raise TypeError('Cannot use transaction when blocking')
        # Capture the cursor before publishing so no message is missed.
        cursor_id = messenger.get_cursor_id('queue')

    extra = {
        'queue_doc': self.export()
    }

    messenger.publish('queue', [PENDING, self.id], extra=extra,
        transaction=transaction)

    if block:
        last_update = time.time()
        while True:
            for msg in messenger.subscribe('queue',
                    cursor_id=cursor_id,
                    timeout=block_timeout):
                cursor_id = msg['_id']
                # The equality checks can raise TypeError for messages
                # whose payload isn't comparable; those are skipped.
                try:
                    if msg['message'] == [COMPLETE, self.id]:
                        return
                    elif msg['message'] == [UPDATE, self.id]:
                        # Keep-alive from the runner; restart timeout.
                        last_update = time.time()
                        break
                    elif msg['message'] == [ERROR, self.id]:
                        raise QueueTaskError('Error occured running ' +
                            'queue task', {
                                'queue_id': self.id,
                                'queue_type': self.type,
                            })
                except TypeError:
                    pass
            if (time.time() - last_update) >= block_timeout:
                raise QueueTimeout('Blocking queue timed out.', {
                    'queue_id': self.id,
                    'queue_type': self.type,
                })
def _keep_alive_thread(self):
    # Background thread: refresh this runner's TTL reservation on the
    # queue document until the task completes or the reserve is lost.
    while True:
        # Refresh slightly before the TTL expires (5s margin).
        time.sleep(self.ttl - 5)
        if self.queue_com.state in (COMPLETE, STOPPED):
            break

        # Matching on runner_id means only the current reserve holder
        # can extend the TTL.
        response = self.collection.update({
            '_id': bson.ObjectId(self.id),
            'runner_id': self.runner_id,
        }, {'$set': {
            'ttl_timestamp': datetime.datetime.utcnow() + \
                datetime.timedelta(seconds=self.ttl),
        }})
        if response['updatedExisting']:
            logger.debug('Queue keep alive updated', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            messenger.publish('queue', [UPDATE, self.id])
        else:
            # Another runner took the reserve; stop this one under
            # lock and abort the thread with QueueStopped.
            logger.debug('Queue keep alive lost reserve', 'queue',
                queue_id=self.id,
                queue_type=self.type,
            )
            self.queue_com.state_lock.acquire()
            try:
                self.queue_com.state = STOPPED
            finally:
                self.queue_com.state_lock.release()
            raise QueueStopped('Lost reserve, queue stopped', {
                'queue_id': self.id,
                'queue_type': self.type,
            })

    logger.debug('Queue keep alive thread ended', 'queue',
        queue_id=self.id,
        queue_type=self.type,
    )
def commit(self, *args, **kwargs):
    """Commit server changes; if the network hash changed, start an IP
    pool reassignment queue inside a transaction and lock the network.

    Raises ServerNetworkLocked when a reassignment is already pending.
    """
    tran = None

    # network_hash covers network plus its start/end range.
    if "network" in self.loaded_fields and self.network_hash != self._orig_network_hash:
        tran = transaction.Transaction()
        if self.network_lock:
            # A previous reassignment queue still owns the network.
            raise ServerNetworkLocked(
                "Server network is locked", {"server_id": self.id, "lock_id": self.network_lock}
            )
        else:
            # Queue the pool move in the same transaction as the commit
            # so both apply atomically.
            queue_ip_pool = queue.start(
                "assign_ip_pool",
                transaction=tran,
                server_id=self.id,
                network=self.network,
                network_start=self.network_start,
                network_end=self.network_end,
                network_hash=self.network_hash,
                old_network=self._orig_network,
                old_network_start=self._orig_network_start,
                old_network_end=self._orig_network_end,
                old_network_hash=self._orig_network_hash,
            )
            self.network_lock = queue_ip_pool.id

    # Org membership changes are applied immediately, outside tran.
    for org_id in self._orgs_added:
        self.ip_pool.assign_ip_pool_org(org_id)

    for org_id in self._orgs_removed:
        self.ip_pool.unassign_ip_pool_org(org_id)

    mongo.MongoObject.commit(self, transaction=tran, *args, **kwargs)

    if tran:
        # Notify runners only after the transaction commits.
        messenger.publish("queue", "queue_updated", transaction=tran)
        tran.commit()
def __init__(self, type, resource_id=None):
    """Publish an event of the given type (with an optional resource
    id) on the 'events' channel."""
    payload = (type, resource_id)
    messenger.publish('events', payload)
def _connected(self, client_id):
    """Finalize an OpenVPN client connection: apply firewall rules,
    insert the client document, notify route peers, and emit events.

    Unknown client ids (not in self.clients) are killed immediately;
    a failed document insert also kills the client.
    """
    client = self.clients.find_id(client_id)
    if not client:
        self.instance_com.push_output(
            'ERROR Unknown client connected client_id=%s' % client_id)
        self.instance_com.client_kill(client_id)
        return

    self.set_iptables_rules(
        client['iptables_rules'],
        client['ip6tables_rules'],
    )

    timestamp = utils.now()
    doc = {
        'user_id': client['user_id'],
        'server_id': self.server.id,
        'host_id': settings.local.host_id,
        'timestamp': timestamp,
        'platform': client['platform'],
        'type': client['user_type'],
        'device_name': client['device_name'],
        'mac_addr': client['mac_addr'],
        'network': self.server.network,
        'real_address': client['real_address'],
        'virt_address': client['virt_address'],
        'virt_address6': client['virt_address6'],
        'host_address': settings.local.host.local_addr,
        'host_address6': settings.local.host.local_addr6,
        'dns_servers': client['dns_servers'],
        'dns_suffix': client['dns_suffix'],
        # Unix timestamp of connection start ('%s' is platform
        # dependent strftime; presumably always available here).
        'connected_since': int(timestamp.strftime('%s')),
    }

    if settings.local.sub_active and \
            settings.local.sub_plan == 'enterprise':
        # Enterprise plans record an anonymized user@org domain hash.
        domain_hash = hashlib.md5()
        domain_hash.update((client['user_name'].split('@')[0] + '.' +
            client['org_name']).lower())
        domain_hash = bson.binary.Binary(domain_hash.digest(),
            subtype=bson.binary.MD5_SUBTYPE)
        doc['domain'] = domain_hash

    try:
        doc_id = self.collection.insert(doc)
        if self.route_clients:
            # state=True tells other hosts to add this client's routes.
            messenger.publish(
                'client', {
                    'state': True,
                    'virt_address': client['virt_address'],
                    'virt_address6': client['virt_address6'],
                    'host_address': settings.local.host.local_addr,
                    'host_address6': settings.local.host.local_addr6,
                })
    except:
        logger.exception(
            'Error adding client', 'server',
            server_id=self.server.id,
        )
        self.instance_com.client_kill(client_id)
        return

    self.clients.update_id(client_id, {
        'doc_id': doc_id,
        'timestamp': time.time(),
    })

    self.clients_queue.append(client_id)

    self.instance_com.push_output('User connected user_id=%s' %
        client['user_id'])
    self.send_event()
def _run_thread(self, send_events):
    """Run the OpenVPN process for this server and supervise it.

    Generates the config, applies forwarding/iptables, spawns openvpn,
    starts sub/status/keep-alive helper threads (synchronized via a
    semaphore), pumps process stdout into the output log, and performs
    state cleanup when the process exits.

    NOTE(review): this definition was garbled across a line boundary in
    the extracted source; the `logger.exception('Failed to push vpn
    output. %r', ...)` call below is a reconstruction — confirm against
    the original file.
    """
    logger.debug('Starting ovpn process. %r' % {
        'server_id': self.id,
    })

    cursor_id = self.get_cursor_id()

    self._interrupt = False
    self._state = True
    self._clients = {}

    try:
        os.makedirs(self._temp_path)
        ovpn_conf_path = self._generate_ovpn_conf()
        self._enable_ip_forwarding()
        self._set_iptables_rules()
        self.output.clear_output()

        ovpn_conf_path = os.path.join(self._temp_path, OVPN_CONF_NAME)
        try:
            process = subprocess.Popen(['openvpn', ovpn_conf_path],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            self.output.push_output(traceback.format_exc())
            logger.exception('Failed to start ovpn process. %r' % {
                'server_id': self.id,
            })
            self.publish('stopped')
            return

        # Pre-acquire all three permits; each helper thread releases
        # one when it has started.
        semaphore = threading.Semaphore(3)
        for _ in xrange(3):
            semaphore.acquire()

        sub_thread = threading.Thread(target=self._sub_thread,
            args=(semaphore, cursor_id, process))
        sub_thread.start()
        status_thread = threading.Thread(target=self._status_thread,
            args=(semaphore,))
        status_thread.start()

        self.status = True
        self.host_id = settings.local.host_id
        self.start_timestamp = datetime.datetime.utcnow()
        self.ping_timestamp = datetime.datetime.utcnow()
        self.commit((
            'status',
            'host_id',
            'start_timestamp',
            'ping_timestamp',
        ))

        keep_alive_thread = threading.Thread(
            target=self._keep_alive_thread, args=(semaphore, process))
        keep_alive_thread.start()

        # Wait for all three threads to start
        for _ in xrange(3):
            semaphore.acquire()

        self.publish('started')

        if send_events:
            event.Event(type=SERVERS_UPDATED)
            event.Event(type=SERVER_HOSTS_UPDATED, resource_id=self.id)
            for org_id in self.organizations:
                event.Event(type=USERS_UPDATED, resource_id=org_id)

        # Pump openvpn stdout into the output log until it exits.
        while True:
            line = process.stdout.readline()
            if not line:
                if process.poll() is not None:
                    break
                else:
                    continue
            try:
                self.output.push_output(line)
            except:
                logger.exception('Failed to push vpn output. %r', {
                    'server_id': self.id,
                })

        self._interrupt = True
        status_thread.join()

        # Another instance superseded this one; skip cleanup.
        if self._instance_id != self.instance_id:
            return

        self.status = False
        self.start_timestamp = None
        self.ping_timestamp = None
        self.unset('host_id')
        self.unset('instance_id')
        self.commit((
            'status',
            'start_timestamp',
            'ping_timestamp',
        ))
        self.update_clients({}, force=True)

        if self._state:
            # Process ended without an explicit stop request.
            event.Event(type=SERVERS_UPDATED)
            logger.LogEntry(message='Server stopped unexpectedly "%s".' % (
                self.name))

        logger.debug('Ovpn process has ended. %r' % {
            'server_id': self.id,
        })
        self.publish('stopped')
    except:
        self._interrupt = True
        logger.exception('Server error occurred while running. %r', {
            'server_id': self.id,
        })
        messenger.publish('server_instance', 'stopped', {
            'server_id': self.id,
        })
def task(self):
    """Periodic server-check task: prune dead instances, then recover
    offline replicas, rebalancing availability groups when needed.

    Implemented as a generator; each bare ``yield`` is a cooperative
    checkpoint between database operations.
    """
    if settings.app.demo_mode:
        return
    try:
        timestamp = utils.now()
        # Instances that have not pinged within server_ping_ttl are
        # considered dead.
        timestamp_spec = timestamp - datetime.timedelta(
            seconds=settings.vpn.server_ping_ttl)
        docs = self.server_collection.find(
            {
                'instances.ping_timestamp': {
                    '$lt': timestamp_spec
                },
            },
            {
                '_id': True,
                'instances': True,
            })
        yield
        for doc in docs:
            for instance in doc['instances']:
                if instance['ping_timestamp'] < timestamp_spec:
                    logger.warning(
                        'Removing instance doc', 'server',
                        server_id=doc['_id'],
                        instance_id=instance['instance_id'],
                        cur_timestamp=timestamp,
                        ttl_timestamp=timestamp_spec,
                        ping_timestamp=instance['ping_timestamp'],
                    )
                    # Atomically pull the stale instance and decrement
                    # the running-instance count.
                    self.server_collection.update(
                        {
                            '_id': doc['_id'],
                            'instances.instance_id':
                                instance['instance_id'],
                        },
                        {
                            '$pull': {
                                'instances': {
                                    'instance_id':
                                        instance['instance_id'],
                                },
                            },
                            '$inc': {
                                'instances_count': -1,
                            },
                        })
        yield
        # Build a map of online host id -> availability group.
        docs = self.host_collection.find({
            'status': ONLINE,
        }, {
            '_id': True,
            'availability_group': True,
        })
        yield
        hosts_group = {}
        for doc in docs:
            hosts_group[doc['_id']] = doc.get(
                'availability_group', DEFAULT)
        yield
        # Online servers running fewer instances than replica_count.
        response = self.server_collection.aggregate([
            {
                '$match': {
                    'status': ONLINE,
                    'start_timestamp': {
                        '$lt': timestamp_spec
                    },
                }
            },
            {
                '$project': {
                    '_id': True,
                    'hosts': True,
                    'instances': True,
                    'replica_count': True,
                    'availability_group': True,
                    'offline_instances_count': {
                        '$subtract': [
                            '$replica_count',
                            '$instances_count',
                        ],
                    }
                }
            },
            {
                '$match': {
                    'offline_instances_count': {
                        '$gt': 0
                    },
                }
            },
        ])
        yield
        recover_count = 0
        for doc in response:
            cur_avail_group = doc.get('availability_group', DEFAULT)
            hosts_set = set(doc['hosts'])
            group_best = None
            group_len_max = 0
            server_groups = collections.defaultdict(set)
            # Pick the availability group holding the most of this
            # server's online hosts; a tie keeps the current group.
            for hst in hosts_set:
                avail_zone = hosts_group.get(hst)
                if not avail_zone:
                    continue
                server_groups[avail_zone].add(hst)
                group_len = len(server_groups[avail_zone])
                if group_len > group_len_max:
                    group_len_max = group_len
                    group_best = avail_zone
                elif group_len == group_len_max and \
                        avail_zone == cur_avail_group:
                    group_best = avail_zone
            # group_best is None when no online host serves this
            # server; in that case never rebalance.
            if group_best and cur_avail_group != group_best:
                logger.info(
                    'Rebalancing server availability group', 'server',
                    server_id=doc['_id'],
                    current_availability_group=cur_avail_group,
                    new_availability_group=group_best,
                )
                # Clear instance state so replicas restart in the new
                # availability group.
                self.server_collection.update(
                    {
                        '_id': doc['_id'],
                        'status': ONLINE,
                    },
                    {
                        '$set': {
                            'instances': [],
                            'instances_count': 0,
                            'availability_group': group_best,
                        }
                    })
                messenger.publish('servers', 'rebalance', extra={
                    'server_id': doc['_id'],
                    'availability_group': group_best,
                })
                prefered_hosts = server_groups[group_best]
            else:
                prefered_hosts = server_groups[cur_avail_group]
            active_hosts = set([x['host_id'] for x in doc['instances']])
            prefered_hosts = list(prefered_hosts - active_hosts)
            if not prefered_hosts:
                continue
            # Cap recoveries per task run to avoid a thundering herd.
            if recover_count >= 3:
                continue
            recover_count += 1
            logger.info(
                'Recovering server state', 'server',
                server_id=doc['_id'],
                prefered_hosts=prefered_hosts,
            )
            messenger.publish('servers', 'start', extra={
                'server_id': doc['_id'],
                'send_events': True,
                'prefered_hosts': host.get_prefered_hosts(
                    prefered_hosts, doc['replica_count'])
            })
    except GeneratorExit:
        raise
    except:
        logger.exception('Error checking server states', 'tasks')
def user_put(org_id, user_id):
    """Update a user from the JSON request body.

    Only keys present in the request are applied. Each change is
    audited; the user is committed, events are published, and the user
    is disconnected when a change requires it. Optionally emails the
    user's key. Returns the updated user as JSON, or a 400 error.
    """
    if settings.app.demo_mode:
        return utils.demo_blocked()
    org = organization.get_by_id(org_id)
    user = org.get_user(user_id)
    reset_user = False  # set when a change requires reconnecting the user
    port_forwarding_event = False
    if 'name' in flask.request.json:
        name = utils.filter_str(flask.request.json['name']) or None
        if name != user.name:
            user.audit_event('user_updated',
                'User name changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.name = name
    if 'email' in flask.request.json:
        email = utils.filter_str(flask.request.json['email']) or None
        if email != user.email:
            user.audit_event('user_updated',
                'User email changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.email = email
    if 'pin' in flask.request.json:
        pin = flask.request.json['pin']
        # A literal true means "keep existing pin"; any other value is
        # validated and set (a falsy value clears the pin).
        if pin != True:
            if pin:
                if settings.user.pin_mode == PIN_DISABLED:
                    return utils.jsonify({
                        'error': PIN_IS_DISABLED,
                        'error_msg': PIN_IS_DISABLED_MSG,
                    }, 400)
                if RADIUS_AUTH in user.auth_type:
                    return utils.jsonify({
                        'error': PIN_RADIUS,
                        'error_msg': PIN_RADIUS_MSG,
                    }, 400)
                if not pin.isdigit():
                    return utils.jsonify({
                        'error': PIN_NOT_DIGITS,
                        'error_msg': PIN_NOT_DIGITS_MSG,
                    }, 400)
                if len(pin) < settings.user.pin_min_length:
                    return utils.jsonify({
                        'error': PIN_TOO_SHORT,
                        'error_msg': PIN_TOO_SHORT_MSG,
                    }, 400)
            if user.set_pin(pin):
                user.audit_event('user_updated',
                    'User pin changed',
                    remote_addr=utils.get_remote_addr(),
                )
    if 'network_links' in flask.request.json:
        network_links_cur = set(user.get_network_links())
        network_links_new = set()
        for network_link in flask.request.json['network_links']:
            try:
                network_link = str(ipaddress.IPNetwork(network_link))
            except (ipaddress.AddressValueError, ValueError):
                return _network_link_invalid()
            network_links_new.add(network_link)
        # Apply only the delta between current and requested links.
        network_links_add = network_links_new - network_links_cur
        network_links_rem = network_links_cur - network_links_new
        if len(network_links_add) or len(network_links_rem):
            reset_user = True
            user.audit_event('user_updated',
                'User network links updated',
                remote_addr=utils.get_remote_addr(),
            )
        try:
            for network_link in network_links_add:
                user.add_network_link(network_link)
        except ServerOnlineError:
            return utils.jsonify({
                'error': NETWORK_LINK_NOT_OFFLINE,
                'error_msg': NETWORK_LINK_NOT_OFFLINE_MSG,
            }, 400)
        for network_link in network_links_rem:
            user.remove_network_link(network_link)
    if 'port_forwarding' in flask.request.json:
        port_forwarding = []
        for data in flask.request.json['port_forwarding'] or []:
            port_forwarding.append({
                'protocol': utils.filter_str(data.get('protocol')),
                'port': utils.filter_str(data.get('port')),
                'dport': utils.filter_str(data.get('dport')),
            })
        if port_forwarding != user.port_forwarding:
            port_forwarding_event = True
            user.audit_event('user_updated',
                'User port forwarding changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.port_forwarding = port_forwarding
    disabled = flask.request.json.get('disabled')
    if disabled is not None:
        if disabled != user.disabled:
            user.audit_event('user_updated',
                'User %s' % ('disabled' if disabled else 'enabled'),
                remote_addr=utils.get_remote_addr(),
            )
        user.disabled = disabled
    bypass_secondary = flask.request.json.get('bypass_secondary')
    if bypass_secondary is not None:
        user.bypass_secondary = True if bypass_secondary else False
    client_to_client = flask.request.json.get('client_to_client')
    if client_to_client is not None:
        user.client_to_client = True if client_to_client else False
    if 'dns_servers' in flask.request.json:
        dns_servers = flask.request.json['dns_servers'] or None
        if user.dns_servers != dns_servers:
            user.audit_event('user_updated',
                'User dns servers changed',
                remote_addr=utils.get_remote_addr(),
            )
            reset_user = True
        user.dns_servers = dns_servers
    if 'dns_suffix' in flask.request.json:
        dns_suffix = utils.filter_str(
            flask.request.json['dns_suffix']) or None
        if user.dns_suffix != dns_suffix:
            user.audit_event('user_updated',
                'User dns suffix changed',
                remote_addr=utils.get_remote_addr(),
            )
            reset_user = True
        user.dns_suffix = dns_suffix
    user.commit()
    event.Event(type=USERS_UPDATED, resource_id=user.org.id)
    if port_forwarding_event:
        messenger.publish('port_forwarding', {
            'org_id': org.id,
            'user_id': user.id,
        })
    # Disconnect so the client picks up changed links/dns/disabled state.
    if reset_user or disabled:
        user.disconnect()
    if disabled:
        if user.type == CERT_CLIENT:
            logger.LogEntry(message='Disabled user "%s".' % user.name)
    elif disabled == False and user.type == CERT_CLIENT:
        logger.LogEntry(message='Enabled user "%s".' % user.name)
    send_key_email = flask.request.json.get('send_key_email')
    if send_key_email and user.email:
        user.audit_event('user_emailed',
            'User key email sent to "%s"' % user.email,
            remote_addr=utils.get_remote_addr(),
        )
        try:
            user.send_key_email(utils.get_url_root())
        except EmailNotConfiguredError:
            return utils.jsonify({
                'error': EMAIL_NOT_CONFIGURED,
                'error_msg': EMAIL_NOT_CONFIGURED_MSG,
            }, 400)
        except EmailFromInvalid:
            return utils.jsonify({
                'error': EMAIL_FROM_INVALID,
                'error_msg': EMAIL_FROM_INVALID_MSG,
            }, 400)
        except EmailAuthInvalid:
            return utils.jsonify({
                'error': EMAIL_AUTH_INVALID,
                'error_msg': EMAIL_AUTH_INVALID_MSG,
            }, 400)
    return utils.jsonify(user.dict())
def disconnect(self): messenger.publish('instance', ['user_disconnect', self.id])
def complete(self): messenger.publish('queue', [COMPLETE, self.id]) self.remove()
def main(default_conf=None):
    """Pritunl command line entry point.

    Dispatches on ``sys.argv[1]`` (defaulting to 'start'). Most
    subcommands perform a one-shot administrative action and exit;
    'start' falls through to launch the server, optionally daemonized.

    :param default_conf: configuration path used when -c/--conf is not
        given.
    """
    if len(sys.argv) > 1:
        cmd = sys.argv[1]
    else:
        cmd = 'start'
    parser = optparse.OptionParser(usage=USAGE)
    # Only 'start' and 'logs' take options; 'set' passes raw args
    # through untouched.
    if cmd == 'start':
        parser.add_option('-d', '--daemon', action='store_true',
            help='Daemonize process')
        parser.add_option('-p', '--pidfile', type='string',
            help='Path to create pid file')
        parser.add_option('-c', '--conf', type='string',
            help='Path to configuration file')
        parser.add_option('-q', '--quiet', action='store_true',
            help='Suppress logging output')
    elif cmd == 'logs':
        parser.add_option('--archive', action='store_true',
            help='Archive log file')
        parser.add_option('--tail', action='store_true',
            help='Tail log file')
        parser.add_option('--limit', type='int',
            help='Limit log lines')
        parser.add_option('--natural', action='store_true',
            help='Natural log sort')
    elif cmd == 'set':
        parser.disable_interspersed_args()
    (options, args) = parser.parse_args()
    if hasattr(options, 'conf') and options.conf:
        conf_path = options.conf
    else:
        conf_path = default_conf
    pritunl.set_conf_path(conf_path)
    if cmd == 'version':
        print('%s v%s' % (pritunl.__title__, pritunl.__version__))
        sys.exit(0)
    elif cmd == 'setup-key':
        from pritunl import setup
        from pritunl import settings
        setup.setup_loc()
        print(settings.local.setup_key)
        sys.exit(0)
    elif cmd == 'reset-version':
        from pritunl.constants import MIN_DATABASE_VER
        from pritunl import setup
        from pritunl import utils
        setup.setup_db()
        utils.set_db_ver(pritunl.__version__, MIN_DATABASE_VER)
        time.sleep(.2)
        print('Database version reset to %s' % pritunl.__version__)
        sys.exit(0)
    elif cmd == 'reset-password':
        from pritunl import setup
        from pritunl import auth
        setup.setup_db()
        username, password = auth.reset_password()
        # NOTE(review): the "******" literals carry no % placeholders,
        # so applying % (username, password) would raise TypeError —
        # looks like redacted "%s" specifiers; confirm against the
        # canonical source before release.
        print('Administrator password successfully reset:\n' + \
            ' username: "******"\n password: "******"' % (
                username, password))
        sys.exit(0)
    elif cmd == 'default-password':
        from pritunl import setup
        from pritunl import auth
        setup.setup_db()
        username, password = auth.get_default_password()
        if not password:
            print('No default password available, use reset-password')
        else:
            # NOTE(review): same redacted format-specifier concern as
            # reset-password above.
            print('Administrator default password:\n' + \
                ' username: "******"\n password: "******"' % (
                    username, password))
        sys.exit(0)
    elif cmd == 'reconfigure':
        from pritunl import setup
        from pritunl import settings
        setup.setup_loc()
        settings.conf.mongodb_uri = None
        settings.conf.commit()
        time.sleep(.2)
        print('Database configuration successfully reset')
        sys.exit(0)
    elif cmd == 'get':
        from pritunl import setup
        from pritunl import settings
        setup.setup_db_host()
        if len(args) != 2:
            raise ValueError('Invalid arguments')
        # Argument is "group" or "group.key"; bare group prints every
        # field in the group.
        split = args[1].split('.')
        key_str = None
        group_str = split[0]
        if len(split) > 1:
            key_str = split[1]
        if group_str == 'host':
            group = settings.local.host
        else:
            group = getattr(settings, group_str)
        if key_str:
            val = getattr(group, key_str)
            print('%s.%s = %s' % (group_str, key_str,
                json.dumps(val, default=lambda x: str(x))))
        else:
            for field in group.fields:
                val = getattr(group, field)
                print('%s.%s = %s' % (group_str, field,
                    json.dumps(val, default=lambda x: str(x))))
        sys.exit(0)
    elif cmd == 'set':
        from pritunl.constants import HOSTS_UPDATED
        from pritunl import setup
        from pritunl import settings
        from pritunl import event
        from pritunl import messenger
        setup.setup_db_host()
        if len(args) != 3:
            raise ValueError('Invalid arguments')
        group_str, key_str = args[1].split('.')
        if group_str == 'host':
            group = settings.local.host
        else:
            group = getattr(settings, group_str)
        val_str = args[2]
        # Try the value as JSON first; otherwise store it as a plain
        # string (round-tripped through the JSON encoder).
        try:
            val = json.loads(val_str)
        except ValueError:
            val = json.loads(json.JSONEncoder().encode(val_str))
        setattr(group, key_str, val)
        if group_str == 'host':
            settings.local.host.commit()
            event.Event(type=HOSTS_UPDATED)
            messenger.publish('hosts', 'updated')
        else:
            settings.commit()
        time.sleep(.2)
        print('%s.%s = %s' % (group_str, key_str,
            json.dumps(getattr(group, key_str),
                default=lambda x: str(x))))
        print('Successfully updated configuration. This change is ' \
            'stored in the database and has been applied to all hosts ' \
            'in the cluster.')
        sys.exit(0)
    elif cmd == 'unset':
        from pritunl import setup
        from pritunl import settings
        setup.setup_db()
        if len(args) != 2:
            raise ValueError('Invalid arguments')
        group_str, key_str = args[1].split('.')
        group = getattr(settings, group_str)
        group.unset(key_str)
        settings.commit()
        time.sleep(.2)
        print('%s.%s = %s' % (group_str, key_str,
            json.dumps(getattr(group, key_str),
                default=lambda x: str(x))))
        print('Successfully updated configuration. This change is ' \
            'stored in the database and has been applied to all hosts ' \
            'in the cluster.')
        sys.exit(0)
    elif cmd == 'set-mongodb':
        from pritunl import setup
        from pritunl import settings
        setup.setup_loc()
        if len(args) > 1:
            mongodb_uri = args[1]
        else:
            mongodb_uri = None
        settings.conf.mongodb_uri = mongodb_uri
        settings.conf.commit()
        time.sleep(.2)
        print('Database configuration successfully set')
        sys.exit(0)
    elif cmd == 'reset-ssl-cert':
        from pritunl import setup
        from pritunl import settings
        setup.setup_db()
        settings.app.server_cert = None
        settings.app.server_key = None
        settings.app.acme_timestamp = None
        settings.app.acme_key = None
        settings.app.acme_domain = None
        settings.commit()
        time.sleep(.2)
        print('Server ssl certificate successfully reset')
        sys.exit(0)
    elif cmd == 'destroy-secondary':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        setup.setup_db()
        print('Destroying secondary database...')
        # Drop all transient/secondary collections, then rebuild
        # indexes and reset server runtime state.
        mongo.get_collection('clients').drop()
        mongo.get_collection('clients_pool').drop()
        mongo.get_collection('transaction').drop()
        mongo.get_collection('queue').drop()
        mongo.get_collection('tasks').drop()
        mongo.get_collection('messages').drop()
        mongo.get_collection('users_key_link').drop()
        mongo.get_collection('auth_sessions').drop()
        mongo.get_collection('auth_csrf_tokens').drop()
        mongo.get_collection('auth_limiter').drop()
        mongo.get_collection('otp').drop()
        mongo.get_collection('otp_cache').drop()
        mongo.get_collection('sso_tokens').drop()
        mongo.get_collection('sso_push_cache').drop()
        mongo.get_collection('sso_client_cache').drop()
        mongo.get_collection('sso_passcode_cache').drop()
        setup.upsert_indexes()
        server_coll = mongo.get_collection('servers')
        server_coll.update_many({}, {
            '$set': {
                'status': 'offline',
                'instances': [],
                'instances_count': 0,
            },
            '$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            },
        })
        print('Secondary database destroyed')
        sys.exit(0)
    elif cmd == 'repair-database':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        setup.setup_db()
        print('Repairing database...')
        # Same drops as destroy-secondary, plus logs and the server ip
        # pool which is resynced below.
        mongo.get_collection('clients').drop()
        mongo.get_collection('clients_pool').drop()
        mongo.get_collection('transaction').drop()
        mongo.get_collection('queue').drop()
        mongo.get_collection('tasks').drop()
        mongo.get_collection('messages').drop()
        mongo.get_collection('users_key_link').drop()
        mongo.get_collection('auth_sessions').drop()
        mongo.get_collection('auth_csrf_tokens').drop()
        mongo.get_collection('auth_limiter').drop()
        mongo.get_collection('otp').drop()
        mongo.get_collection('otp_cache').drop()
        mongo.get_collection('sso_tokens').drop()
        mongo.get_collection('sso_push_cache').drop()
        mongo.get_collection('sso_client_cache').drop()
        mongo.get_collection('sso_passcode_cache').drop()
        mongo.get_collection('logs').drop()
        mongo.get_collection('log_entries').drop()
        mongo.get_collection('servers_ip_pool').drop()
        setup.upsert_indexes()
        server_coll = mongo.get_collection('servers')
        server_coll.update_many({}, {
            '$set': {
                'status': 'offline',
                'instances': [],
                'instances_count': 0,
            },
            '$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            },
        })
        from pritunl import server
        for svr in server.iter_servers():
            try:
                svr.ip_pool.sync_ip_pool()
            except:
                logger.exception(
                    'Failed to sync server IP pool', 'tasks',
                    server_id=svr.id,
                )
        # Reset server state again after the pool sync.
        server_coll.update_many({}, {
            '$set': {
                'status': 'offline',
                'instances': [],
                'instances_count': 0,
            },
            '$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            },
        })
        print('Database repair complete')
        sys.exit(0)
    elif cmd == 'logs':
        from pritunl import setup
        from pritunl import logger
        setup.setup_db()
        log_view = logger.LogView()
        if options.archive:
            if len(args) > 1:
                archive_path = args[1]
            else:
                archive_path = './'
            print('Log archived to: ' + log_view.archive_log(
                archive_path, options.natural, options.limit))
        elif options.tail:
            for msg in log_view.tail_log_lines():
                print(msg)
        else:
            print(
                log_view.get_log_lines(
                    natural=options.natural,
                    limit=options.limit,
                ))
        sys.exit(0)
    elif cmd == 'clear-auth-limit':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        from pritunl import settings
        setup.setup_db()
        mongo.get_collection('auth_limiter').delete_many({})
        print('Auth limiter cleared')
        sys.exit(0)
    elif cmd == 'clear-logs':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        from pritunl import settings
        setup.setup_db()
        mongo.get_collection('logs').drop()
        mongo.get_collection('log_entries').drop()
        # Recreate the capped log collections at their configured
        # sizes.
        prefix = settings.conf.mongodb_collection_prefix or ''
        log_limit = settings.app.log_limit
        mongo.database.create_collection(prefix + 'logs', capped=True,
            size=log_limit * 1024, max=log_limit)
        log_entry_limit = settings.app.log_entry_limit
        mongo.database.create_collection(prefix + 'log_entries',
            capped=True, size=log_entry_limit * 512,
            max=log_entry_limit)
        print('Log entries cleared')
        sys.exit(0)
    elif cmd != 'start':
        raise ValueError('Invalid command')
    # 'start' path: run the server, optionally forking to background.
    from pritunl import settings
    if options.quiet:
        settings.local.quiet = True
    if options.daemon:
        pid = os.fork()
        if pid > 0:
            # Parent: record the child pid and exit.
            if options.pidfile:
                with open(options.pidfile, 'w') as pid_file:
                    pid_file.write('%s' % pid)
            sys.exit(0)
    elif not options.quiet:
        print('##############################################################')
        print('# #')
        print('# /$$ /$$ /$$ #')
        print('# |__/ | $$ | $$ #')
        print('# /$$$$$$ /$$$$$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$ | $$ #')
        print('# /$$__ $$ /$$__ $$| $$|_ $$_/ | $$ | $$| $$__ $$| $$ #')
        print('# | $$ \ $$| $$ \__/| $$ | $$ | $$ | $$| $$ \ $$| $$ #')
        print('# | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$ | $$| $$ #')
        print('# | $$$$$$$/| $$ | $$ | $$$$/| $$$$$$/| $$ | $$| $$ #')
        print('# | $$____/ |__/ |__/ \____/ \______/ |__/ |__/|__/ #')
        print('# | $$ #')
        print('# | $$ #')
        print('# |__/ #')
        print('# #')
        print('##############################################################')
    pritunl.init_server()
def _connected(self, client_id): client = self.clients.find_id(client_id) if not client: self.instance_com.push_output( 'ERROR Unknown client connected client_id=%s' % client_id) self.instance_com.client_kill(client_id) return self.set_iptables_rules( client['iptables_rules'], client['ip6tables_rules'], ) timestamp = utils.now() doc = { 'user_id': client['user_id'], 'server_id': self.server.id, 'host_id': settings.local.host_id, 'timestamp': timestamp, 'platform': client['platform'], 'type': client['user_type'], 'device_name': client['device_name'], 'mac_addr': client['mac_addr'], 'network': self.server.network, 'real_address': client['real_address'], 'virt_address': client['virt_address'], 'virt_address6': client['virt_address6'], 'host_address': settings.local.host.local_addr, 'host_address6': settings.local.host.local_addr6, 'dns_servers': client['dns_servers'], 'dns_suffix': client['dns_suffix'], 'connected_since': int(timestamp.strftime('%s')), } if settings.local.sub_active and \ settings.local.sub_plan == 'enterprise': domain_hash = hashlib.md5() domain_hash.update((client['user_name'].split('@')[0] + '.' + client['org_name']).lower()) domain_hash = bson.binary.Binary(domain_hash.digest(), subtype=bson.binary.MD5_SUBTYPE) doc['domain'] = domain_hash try: doc_id = self.collection.insert(doc) if self.route_clients: messenger.publish('client', { 'state': True, 'server_id': self.server.id, 'virt_address': client['virt_address'], 'virt_address6': client['virt_address6'], 'host_address': settings.local.host.local_addr, 'host_address6': settings.local.host.local_addr6, }) except: logger.exception('Error adding client', 'server', server_id=self.server.id, ) self.instance_com.client_kill(client_id) return self.clients.update_id(client_id, { 'doc_id': doc_id, 'timestamp': time.time(), }) self.clients_queue.append(client_id) self.instance_com.push_output( 'User connected user_id=%s' % client['user_id']) self.send_event()
def allow_client(self, client_data, org, user, reauth=False):
    """Authorize a verified client and push back its OpenVPN config.

    On reauth the previously assigned addresses are reused. Otherwise
    the user's static address is looked up, conflicting sessions are
    killed (or, without multi_device, all of the user's sessions are
    removed cluster-wide), and a dynamic pool address is assigned as a
    fallback. Denies the client when no address can be assigned.

    :param client_data: dict from the auth exchange (client_id, key_id,
        org_id, user_id plus optional device/platform/address fields).
    :param reauth: reuse the existing session's addresses when True.
    """
    client_id = client_data['client_id']
    key_id = client_data['key_id']
    org_id = client_data['org_id']
    user_id = client_data['user_id']
    device_id = client_data.get('device_id')
    device_name = client_data.get('device_name')
    platform = client_data.get('platform')
    mac_addr = client_data.get('mac_addr')
    remote_ip = client_data.get('remote_ip')
    address_dynamic = False
    if reauth:
        doc = self.clients.find_id(client_id)
        if not doc:
            self.instance_com.send_client_deny(client_id, key_id,
                'Client connection info timed out')
            return
        virt_address = doc['virt_address']
        virt_address6 = doc['virt_address6']
    else:
        user.audit_event(
            'user_connection',
            'User connected to "%s"' % self.server.name,
            remote_addr=remote_ip,
        )
        monitoring.insert_point('user_connections', {
            'host': settings.local.host.name,
            'server': self.server.name,
        }, {
            'user': user.name,
            'platform': platform,
            'remote_ip': remote_ip,
        })
        virt_address = self.server.get_ip_addr(org_id, user_id)
        if not virt_address:
            logger.error('User missing ip address', 'clients',
                server_id=self.server.id,
                instance_id=self.instance.id,
                user_id=user.id,
                multi_device=self.server.multi_device,
                network=self.server.network,
                user_count=self.server.user_count,
            )
        if not self.server.multi_device:
            # Single-device mode: tear down every other session this
            # user has, across the whole cluster.
            if self.server.route_clients:
                docs = self.collection.find({
                    'user_id': user_id,
                    'server_id': self.server.id,
                })
                for doc in docs:
                    # Withdraw routes for the sessions being removed.
                    messenger.publish('client', {
                        'state': False,
                        'server_id': self.server.id,
                        'virt_address': doc['virt_address'],
                        'virt_address6': doc['virt_address6'],
                        'host_address': doc['host_address'],
                        'host_address6': doc['host_address6'],
                    })
            self.collection.remove({
                'user_id': user_id,
                'server_id': self.server.id,
            })
            messenger.publish('instance', ['user_reconnect',
                user_id, settings.local.host_id])
            # NOTE(review): sleep appears inside the kill loop here
            # (2s per local session) — confirm this pacing is intended.
            for clnt in self.clients.find({'user_id': user_id}):
                time.sleep(2)
                self.instance_com.client_kill(clnt['id'])
        elif virt_address:
            # Multi-device: only evict sessions from the same device
            # (by mac address), and fall back to a dynamic address if
            # the static one is already in use locally.
            if mac_addr:
                for clnt in self.clients.find({
                            'user_id': user_id,
                            'mac_addr': mac_addr,
                        }):
                    self.instance_com.client_kill(clnt['id'])
            if self.clients.find({'virt_address': virt_address}):
                virt_address = None
        if not virt_address:
            # Pull addresses from the dynamic pool until a free one is
            # found or the pool is exhausted.
            while True:
                try:
                    ip_addr = self.ip_pool.pop()
                except IndexError:
                    break
                ip_addr = '%s/%s' % (
                    ip_addr, self.ip_network.prefixlen)
                if not self.clients.find({'virt_address': ip_addr}):
                    virt_address = ip_addr
                    address_dynamic = True
                    break
            if not virt_address:
                logger.error('Unable to assign ip address, pool full',
                    'clients',
                    server_id=self.server.id,
                    instance_id=self.instance.id,
                    user_id=user.id,
                    multi_device=self.server.multi_device,
                    network=self.server.network,
                    user_count=self.server.user_count,
                )
        if not virt_address:
            self.instance_com.send_client_deny(client_id, key_id,
                'Unable to assign ip address')
            return
        virt_address6 = self.server.ip4to6(virt_address)
    dns_servers = []
    if user.dns_servers:
        for dns_server in user.dns_servers:
            # 127.0.0.1 means "use the client's own vpn address" as the
            # dns server.
            if dns_server == '127.0.0.1':
                dns_server = virt_address
            dns_servers.append(dns_server)
    rules, rules6 = self.generate_iptables_rules(
        user, virt_address, virt_address6)
    self.clients.insert({
        'id': client_id,
        'org_id': org_id,
        'org_name': org.name,
        'user_id': user_id,
        'user_name': user.name,
        'user_type': user.type,
        'dns_servers': dns_servers,
        'dns_suffix': user.dns_suffix,
        'device_id': device_id,
        'device_name': device_name,
        'platform': platform,
        'mac_addr': mac_addr,
        'virt_address': virt_address,
        'virt_address6': virt_address6,
        'real_address': remote_ip,
        'address_dynamic': address_dynamic,
        'iptables_rules': rules,
        'ip6tables_rules': rules6,
    })
    if user.type == CERT_CLIENT:
        plugins.event(
            'user_connected',
            host_id=settings.local.host_id,
            server_id=self.server.id,
            org_id=org.id,
            user_id=user.id,
            host_name=settings.local.host.name,
            server_name=self.server.name,
            org_name=org.name,
            user_name=user.name,
            platform=platform,
            device_id=device_id,
            device_name=device_name,
            virtual_ip=virt_address,
            virtual_ip6=virt_address6,
            remote_ip=remote_ip,
            mac_addr=mac_addr,
        )
    # NOTE(review): placed unconditionally here; the collapsed source
    # is ambiguous about whether this belongs inside the CERT_CLIENT
    # branch above — confirm against the canonical source.
    host.global_clients.insert({
        'instance_id': self.instance.id,
        'client_id': client_id,
    })
    client_conf = self.generate_client_conf(platform, client_id,
        virt_address, user, reauth)
    client_conf += 'ifconfig-push %s %s\n' % utils.parse_network(
        virt_address)
    if self.server.ipv6:
        client_conf += 'ifconfig-ipv6-push %s\n' % virt_address6
    if self.server.debug:
        self.instance_com.push_output('Client conf %s:' % user_id)
        for conf_line in client_conf.split('\n'):
            if conf_line:
                self.instance_com.push_output(' ' + conf_line)
    self.instance_com.send_client_auth(client_id, key_id, client_conf)
def user_put(org_id, user_id):
    """Update a user from the JSON request body.

    Only keys present in the request are applied. Each change is
    audited; the user is committed, events are published, and the user
    is disconnected when a change requires it. Optionally emails the
    user's key. Returns the updated user as JSON, or a 400 error.
    """
    if settings.app.demo_mode:
        return utils.demo_blocked()
    org = organization.get_by_id(org_id)
    user = org.get_user(user_id)
    reset_user = False  # set when a change requires reconnecting the user
    port_forwarding_event = False
    if 'name' in flask.request.json:
        name = utils.filter_str(flask.request.json['name']) or None
        if name != user.name:
            user.audit_event(
                'user_updated',
                'User name changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.name = name
    if 'email' in flask.request.json:
        email = utils.filter_str(flask.request.json['email']) or None
        if email != user.email:
            user.audit_event(
                'user_updated',
                'User email changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.email = email
    if 'groups' in flask.request.json:
        groups = flask.request.json['groups'] or []
        for i, group in enumerate(groups):
            groups[i] = utils.filter_str(group)
        groups = set(groups)
        if groups != set(user.groups or []):
            user.audit_event(
                'user_updated',
                'User groups changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.groups = list(groups)
    if 'pin' in flask.request.json:
        pin = flask.request.json['pin']
        # A literal true means "keep existing pin"; any other value is
        # validated and set (a falsy value clears the pin).
        if pin is not True:
            if pin:
                if settings.user.pin_mode == PIN_DISABLED:
                    return utils.jsonify(
                        {
                            'error': PIN_IS_DISABLED,
                            'error_msg': PIN_IS_DISABLED_MSG,
                        }, 400)
                if RADIUS_AUTH in user.auth_type:
                    return utils.jsonify(
                        {
                            'error': PIN_RADIUS,
                            'error_msg': PIN_RADIUS_MSG,
                        }, 400)
                if not pin.isdigit():
                    return utils.jsonify(
                        {
                            'error': PIN_NOT_DIGITS,
                            'error_msg': PIN_NOT_DIGITS_MSG,
                        }, 400)
                if len(pin) < settings.user.pin_min_length:
                    return utils.jsonify(
                        {
                            'error': PIN_TOO_SHORT,
                            'error_msg': PIN_TOO_SHORT_MSG,
                        }, 400)
            if user.set_pin(pin):
                user.audit_event(
                    'user_updated',
                    'User pin changed',
                    remote_addr=utils.get_remote_addr(),
                )
    if 'network_links' in flask.request.json:
        network_links_cur = set(user.get_network_links())
        network_links_new = set()
        for network_link in flask.request.json['network_links']:
            try:
                network_link = str(ipaddress.IPNetwork(network_link))
            except (ipaddress.AddressValueError, ValueError):
                return _network_link_invalid()
            network_links_new.add(network_link)
        # Apply only the delta between current and requested links.
        network_links_add = network_links_new - network_links_cur
        network_links_rem = network_links_cur - network_links_new
        if len(network_links_add) or len(network_links_rem):
            reset_user = True
            user.audit_event(
                'user_updated',
                'User network links updated',
                remote_addr=utils.get_remote_addr(),
            )
        try:
            for network_link in network_links_add:
                user.add_network_link(network_link)
        except ServerOnlineError:
            return utils.jsonify(
                {
                    'error': NETWORK_LINK_NOT_OFFLINE,
                    'error_msg': NETWORK_LINK_NOT_OFFLINE_MSG,
                }, 400)
        for network_link in network_links_rem:
            user.remove_network_link(network_link)
    if 'port_forwarding' in flask.request.json:
        port_forwarding = []
        for data in flask.request.json['port_forwarding'] or []:
            port_forwarding.append({
                'protocol': utils.filter_str(data.get('protocol')),
                'port': utils.filter_str(data.get('port')),
                'dport': utils.filter_str(data.get('dport')),
            })
        if port_forwarding != user.port_forwarding:
            port_forwarding_event = True
            user.audit_event(
                'user_updated',
                'User port forwarding changed',
                remote_addr=utils.get_remote_addr(),
            )
        user.port_forwarding = port_forwarding
    disabled = flask.request.json.get('disabled')
    if disabled is not None:
        if disabled != user.disabled:
            user.audit_event(
                'user_updated',
                'User %s' % ('disabled' if disabled else 'enabled'),
                remote_addr=utils.get_remote_addr(),
            )
        user.disabled = disabled
    bypass_secondary = flask.request.json.get('bypass_secondary')
    if bypass_secondary is not None:
        user.bypass_secondary = True if bypass_secondary else False
    client_to_client = flask.request.json.get('client_to_client')
    if client_to_client is not None:
        user.client_to_client = True if client_to_client else False
    if 'dns_servers' in flask.request.json:
        dns_servers = flask.request.json['dns_servers'] or None
        if user.dns_servers != dns_servers:
            user.audit_event(
                'user_updated',
                'User dns servers changed',
                remote_addr=utils.get_remote_addr(),
            )
            reset_user = True
        user.dns_servers = dns_servers
    if 'dns_suffix' in flask.request.json:
        dns_suffix = utils.filter_str(
            flask.request.json['dns_suffix']) or None
        if user.dns_suffix != dns_suffix:
            user.audit_event(
                'user_updated',
                'User dns suffix changed',
                remote_addr=utils.get_remote_addr(),
            )
            reset_user = True
        user.dns_suffix = dns_suffix
    user.commit()
    event.Event(type=USERS_UPDATED, resource_id=user.org.id)
    if port_forwarding_event:
        messenger.publish('port_forwarding', {
            'org_id': org.id,
            'user_id': user.id,
        })
    # Disconnect so the client picks up changed links/dns/disabled state.
    if reset_user or disabled:
        user.disconnect()
    if disabled:
        if user.type == CERT_CLIENT:
            logger.LogEntry(message='Disabled user "%s".' % user.name)
    elif disabled == False and user.type == CERT_CLIENT:
        logger.LogEntry(message='Enabled user "%s".' % user.name)
    send_key_email = flask.request.json.get('send_key_email')
    if send_key_email and user.email:
        user.audit_event(
            'user_emailed',
            'User key email sent to "%s"' % user.email,
            remote_addr=utils.get_remote_addr(),
        )
        try:
            user.send_key_email(utils.get_url_root())
        except EmailNotConfiguredError:
            return utils.jsonify(
                {
                    'error': EMAIL_NOT_CONFIGURED,
                    'error_msg': EMAIL_NOT_CONFIGURED_MSG,
                }, 400)
        except EmailFromInvalid:
            return utils.jsonify(
                {
                    'error': EMAIL_FROM_INVALID,
                    'error_msg': EMAIL_FROM_INVALID_MSG,
                }, 400)
        except EmailAuthInvalid:
            return utils.jsonify(
                {
                    'error': EMAIL_AUTH_INVALID,
                    'error_msg': EMAIL_AUTH_INVALID_MSG,
                }, 400)
    return utils.jsonify(user.dict())
def task(self):
    """Server watchdog pass, written as a cooperative generator.

    Each bare ``yield`` hands control back to the caller (presumably the
    task scheduler) between database round trips. The pass:
      1. prunes server instances whose ping is older than the TTL,
      2. snapshots online hosts and their availability groups,
      3. finds online servers running fewer instances than replica_count
         and either rebalances them to a better availability group or
         asks preferred hosts to start new instances.
    """
    try:
        # Anything that has not pinged since this cutoff is considered dead.
        timestamp_spec = utils.now() - datetime.timedelta(
            seconds=settings.vpn.server_ping_ttl)

        docs = self.server_collection.find({
            'instances.ping_timestamp': {'$lt': timestamp_spec},
        }, {
            '_id': True,
            'instances': True,
        })

        yield

        # Pull each stale instance and decrement the server's counter.
        for doc in docs:
            for instance in doc['instances']:
                if instance['ping_timestamp'] < timestamp_spec:
                    self.server_collection.update({
                        '_id': doc['_id'],
                        'instances.instance_id': instance['instance_id'],
                    }, {
                        '$pull': {
                            'instances': {
                                'instance_id': instance['instance_id'],
                            },
                        },
                        '$inc': {
                            'instances_count': -1,
                        },
                    })

        yield

        docs = self.host_collection.find({
            'status': ONLINE,
        }, {
            '_id': True,
            'availability_group': True,
        })

        yield

        # Map of online host id -> availability group (DEFAULT if unset).
        hosts_group = {}
        for doc in docs:
            hosts_group[doc['_id']] = doc.get(
                'availability_group', DEFAULT)

        yield

        # Online servers whose running instance count is below replica_count.
        response = self.server_collection.aggregate([
            {'$match': {
                'status': ONLINE,
                'start_timestamp': {'$lt': timestamp_spec},
            }},
            {'$project': {
                '_id': True,
                'hosts': True,
                'instances': True,
                'replica_count': True,
                'availability_group': True,
                'offline_instances_count': {
                    '$subtract': [
                        '$replica_count',
                        '$instances_count',
                    ],
                }
            }},
            {'$match': {
                'offline_instances_count': {'$gt': 0},
            }},
        ])

        yield

        for doc in response:
            cur_avail_group = doc.get('availability_group', DEFAULT)

            hosts_set = set(doc['hosts'])

            group_best = None
            group_len_max = 0
            server_groups = collections.defaultdict(set)

            # Choose the availability group containing the most of this
            # server's online hosts; ties prefer the current group.
            for hst in hosts_set:
                avail_zone = hosts_group.get(hst)
                if not avail_zone:
                    # Host is offline or unknown; not a candidate.
                    continue

                server_groups[avail_zone].add(hst)
                group_len = len(server_groups[avail_zone])

                if group_len > group_len_max:
                    group_len_max = group_len
                    group_best = avail_zone
                elif group_len == group_len_max and \
                        avail_zone == cur_avail_group:
                    group_best = avail_zone

            if cur_avail_group != group_best:
                logger.info(
                    'Rebalancing server availability group', 'server',
                    server_id=doc['_id'],
                    current_availability_group=cur_avail_group,
                    new_availability_group=group_best,
                )

                # Clear instance state so the server restarts cleanly
                # inside the newly selected availability group.
                self.server_collection.update({
                    '_id': doc['_id'],
                    'status': ONLINE,
                }, {'$set': {
                    'instances': [],
                    'instances_count': 0,
                    'availability_group': group_best,
                }})

                messenger.publish('servers', 'rebalance', extra={
                    'server_id': doc['_id'],
                    'availability_group': group_best,
                })

                prefered_hosts = server_groups[group_best]
            else:
                prefered_hosts = server_groups[cur_avail_group]

            # Only hosts in the chosen group that are not already
            # running an instance of this server.
            active_hosts = set(
                [x['host_id'] for x in doc['instances']])
            prefered_hosts = list(prefered_hosts - active_hosts)
            if not prefered_hosts:
                continue

            logger.info('Recovering server state', 'server',
                server_id=doc['_id'],
                prefered_hosts=prefered_hosts,
            )

            messenger.publish('servers', 'start', extra={
                'server_id': doc['_id'],
                'send_events': True,
                'prefered_hosts': host.get_prefered_hosts(
                    prefered_hosts, doc['replica_count'])
            })
    except GeneratorExit:
        # Propagate generator shutdown untouched.
        raise
    except:
        logger.exception('Error checking server states', 'tasks')
def update_license(license):
    """Store a new license key and propagate the change.

    Clears the cached license plan (presumably recomputed by ``update()``
    -- confirm), persists the settings, then notifies other hosts via the
    messenger. Note: the parameter name shadows the ``license`` builtin
    but is kept for interface compatibility.
    """
    settings.app.license = license
    settings.app.license_plan = None
    settings.commit()
    update()
    messenger.publish('subscription', 'updated')
def _run_thread(self, send_events):
    """Run and supervise the openvpn process for this server instance.

    Generates the configuration, configures networking, spawns the
    ``openvpn`` process, starts the sub/status/keep-alive helper threads,
    then pumps process stdout into the output log until the process
    exits. On exit (or error) the persisted server state is cleared and
    a 'stopped' message is published.
    """
    logger.debug('Starting ovpn process. %r' % {
        'server_id': self.id,
    })

    cursor_id = self.get_cursor_id()

    self._interrupt = False
    self._state = True
    self._clients = {}

    try:
        os.makedirs(self._temp_path)

        # NOTE(review): this return value is immediately overwritten by
        # the os.path.join below -- the call is kept for its side effect
        # of writing the conf file; confirm.
        ovpn_conf_path = self._generate_ovpn_conf()
        self._enable_ip_forwarding()
        self._set_iptables_rules()
        self.output.clear_output()

        ovpn_conf_path = os.path.join(self._temp_path, OVPN_CONF_NAME)
        try:
            process = subprocess.Popen(['openvpn', ovpn_conf_path],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            # openvpn binary missing or not executable; surface the
            # traceback in the server output and bail out.
            self.output.push_output(traceback.format_exc())
            logger.exception('Failed to start ovpn process. %r' % {
                'server_id': self.id,
            })
            self.publish('stopped')
            return

        # Handshake: each helper thread releases the semaphore once it
        # has started; we pre-acquire all three permits here.
        semaphore = threading.Semaphore(3)
        for _ in xrange(3):
            semaphore.acquire()

        sub_thread = threading.Thread(target=self._sub_thread,
            args=(semaphore, cursor_id, process))
        sub_thread.start()
        status_thread = threading.Thread(target=self._status_thread,
            args=(semaphore, ))
        status_thread.start()
        self.status = True
        self.host_id = settings.local.host_id
        self.start_timestamp = datetime.datetime.utcnow()
        self.ping_timestamp = datetime.datetime.utcnow()
        self.commit((
            'status',
            'host_id',
            'start_timestamp',
            'ping_timestamp',
        ))

        keep_alive_thread = threading.Thread(
            target=self._keep_alive_thread, args=(semaphore, process))
        keep_alive_thread.start()

        # Wait for all three threads to start
        for _ in xrange(3):
            semaphore.acquire()

        self.publish('started')

        if send_events:
            event.Event(type=SERVERS_UPDATED)
            event.Event(type=SERVER_HOSTS_UPDATED, resource_id=self.id)
            for org_id in self.organizations:
                event.Event(type=USERS_UPDATED, resource_id=org_id)

        # Pump openvpn stdout until the process terminates.
        while True:
            line = process.stdout.readline()
            if not line:
                if process.poll() is not None:
                    break
                else:
                    continue
            try:
                self.output.push_output(line)
            except:
                logger.exception('Failed to push vpn output. %r', {
                    'server_id': self.id,
                })

        self._interrupt = True
        status_thread.join()

        # Another instance has taken over; do not clear shared state.
        if self._instance_id != self.instance_id:
            return

        self.status = False
        self.start_timestamp = None
        self.ping_timestamp = None
        self.unset('host_id')
        self.unset('instance_id')
        self.commit((
            'status',
            'start_timestamp',
            'ping_timestamp',
        ))

        self.update_clients({}, force=True)

        # _state is still True when the stop was not requested.
        if self._state:
            event.Event(type=SERVERS_UPDATED)
            logger.LogEntry(message='Server stopped unexpectedly "%s".' % (
                self.name))

        logger.debug('Ovpn process has ended. %r' % {
            'server_id': self.id,
        })

        self.publish('stopped')
    except:
        self._interrupt = True
        logger.exception('Server error occurred while running. %r', {
            'server_id': self.id,
        })
        messenger.publish('server_instance', 'stopped', {
            'server_id': self.id,
        })
def user_put(org_id, user_id):
    """Update an existing user from the JSON request body.

    Only fields present in the request are touched; every effective
    change is recorded as an audit event and a journal entry. Returns
    the updated user as JSON, or a 400 error response on invalid input.

    Flags accumulated while applying fields:
      reset_user            -> user.disconnect() at the end
      reset_user_cache      -> user.clear_auth_cache() at the end
      port_forwarding_event -> messenger publish at the end
    """
    if settings.app.demo_mode:
        return utils.demo_blocked()

    org = organization.get_by_id(org_id)
    user = org.get_user(user_id)
    reset_user = False
    reset_user_cache = False
    port_forwarding_event = False
    remote_addr = utils.get_remote_addr()

    if 'name' in flask.request.json:
        name = utils.filter_str(flask.request.json['name']) or 'undefined'

        if name != user.name:
            user.audit_event(
                'user_updated',
                'User name changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User name changed',
                remote_address=remote_addr,
            )

        user.name = name

    if 'email' in flask.request.json:
        email = utils.filter_str(flask.request.json['email']) or None

        if email != user.email:
            user.audit_event(
                'user_updated',
                'User email changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User email changed',
                remote_address=remote_addr,
            )

        user.email = email

    if 'auth_type' in flask.request.json:
        auth_type = utils.filter_str(flask.request.json['auth_type']) or None

        # Silently ignore unknown auth types.
        if auth_type in AUTH_TYPES:
            if auth_type != user.auth_type:
                reset_user = True
                reset_user_cache = True
            user.auth_type = auth_type

    if 'yubico_id' in flask.request.json and user.auth_type == YUBICO_AUTH:
        yubico_id = utils.filter_str(flask.request.json['yubico_id']) or None
        # Only the first 12 characters (the Yubikey public id) are kept.
        yubico_id = yubico_id[:12] if yubico_id else None

        if yubico_id != user.yubico_id:
            reset_user = True
            reset_user_cache = True
        user.yubico_id = yubico_id

    if 'groups' in flask.request.json:
        groups = flask.request.json['groups'] or []
        for i, group in enumerate(groups):
            groups[i] = utils.filter_str(group)
        groups = set(groups)

        if groups != set(user.groups or []):
            user.audit_event(
                'user_updated',
                'User groups changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User groups changed',
                remote_address=remote_addr,
            )

        user.groups = list(groups)

    if 'pin' in flask.request.json:
        pin = flask.request.json['pin'] or None

        # A JSON value of true is a sentinel meaning "leave pin
        # unchanged"; any other value is validated and applied.
        if pin is not True:
            if pin:
                if settings.user.pin_mode == PIN_DISABLED:
                    return utils.jsonify({
                        'error': PIN_IS_DISABLED,
                        'error_msg': PIN_IS_DISABLED_MSG,
                    }, 400)

                if RADIUS_AUTH in user.auth_type:
                    return utils.jsonify({
                        'error': PIN_RADIUS,
                        'error_msg': PIN_RADIUS_MSG,
                    }, 400)

                if settings.user.pin_digits_only and not pin.isdigit():
                    return utils.jsonify({
                        'error': PIN_NOT_DIGITS,
                        'error_msg': PIN_NOT_DIGITS_MSG,
                    }, 400)

                if len(pin) < settings.user.pin_min_length:
                    return utils.jsonify({
                        'error': PIN_TOO_SHORT,
                        'error_msg': PIN_TOO_SHORT_MSG,
                    }, 400)

            # set_pin returns truthy only when the pin actually changed.
            if user.set_pin(pin):
                reset_user = True
                reset_user_cache = True

                user.audit_event(
                    'user_updated',
                    'User pin changed',
                    remote_addr=remote_addr,
                )

                journal.entry(
                    journal.USER_UPDATE,
                    user.journal_data,
                    event_long='User pin changed',
                    remote_address=remote_addr,
                )

    if 'network_links' in flask.request.json:
        network_links_cur = set(user.get_network_links())
        network_links_new = set()

        for network_link in flask.request.json['network_links'] or []:
            try:
                # Normalize to the canonical network string form.
                network_link = str(ipaddress.IPNetwork(network_link))
            except (ipaddress.AddressValueError, ValueError):
                return _network_link_invalid()
            network_links_new.add(network_link)

        network_links_add = network_links_new - network_links_cur
        network_links_rem = network_links_cur - network_links_new

        if len(network_links_add) or len(network_links_rem):
            reset_user = True
            user.audit_event(
                'user_updated',
                'User network links updated',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User network links updated',
                remote_address=remote_addr,
            )

        try:
            for network_link in network_links_add:
                user.add_network_link(network_link)
        except ServerOnlineError:
            return utils.jsonify({
                'error': NETWORK_LINK_NOT_OFFLINE,
                'error_msg': NETWORK_LINK_NOT_OFFLINE_MSG,
            }, 400)

        for network_link in network_links_rem:
            user.remove_network_link(network_link)

    if 'port_forwarding' in flask.request.json:
        port_forwarding = []
        for data in flask.request.json['port_forwarding'] or []:
            port_forwarding.append({
                'protocol': utils.filter_str(data.get('protocol')),
                'port': utils.filter_str(data.get('port')),
                'dport': utils.filter_str(data.get('dport')),
            })

        if port_forwarding != user.port_forwarding:
            port_forwarding_event = True

            user.audit_event(
                'user_updated',
                'User port forwarding changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User port forwarding changed',
                remote_address=remote_addr,
            )

        user.port_forwarding = port_forwarding

    # Missing 'disabled' key is treated the same as false.
    disabled = True if flask.request.json.get('disabled') else False
    if disabled != user.disabled:
        user.audit_event(
            'user_updated',
            'User %s' % ('disabled' if disabled else 'enabled'),
            remote_addr=remote_addr,
        )

        journal.entry(
            journal.USER_UPDATE,
            user.journal_data,
            event_long='User %s' % ('disabled' if disabled else 'enabled'),
            remote_address=remote_addr,
        )

        if disabled:
            reset_user = True
            reset_user_cache = True
    user.disabled = disabled

    user.bypass_secondary = True if flask.request.json.get(
        'bypass_secondary') else False

    user.client_to_client = True if flask.request.json.get(
        'client_to_client') else False

    # Bypassing secondary auth is incompatible with pin/Yubikey factors.
    if user.bypass_secondary:
        if user.pin:
            return utils.jsonify({
                'error': PIN_BYPASS_SECONDARY,
                'error_msg': PIN_BYPASS_SECONDARY_MSG,
            }, 400)
        if user.yubico_id:
            return utils.jsonify({
                'error': YUBIKEY_BYPASS_SECONDARY,
                'error_msg': YUBIKEY_BYPASS_SECONDARY_MSG,
            }, 400)

    if 'mac_addresses' in flask.request.json:
        mac_addresses = flask.request.json['mac_addresses'] or None

        if user.mac_addresses != mac_addresses:
            user.audit_event(
                'user_updated',
                'User mac addresses changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User mac addresses changed',
                remote_address=remote_addr,
            )

            reset_user = True

        user.mac_addresses = mac_addresses

    if 'dns_servers' in flask.request.json:
        dns_servers = flask.request.json['dns_servers'] or None

        if user.dns_servers != dns_servers:
            user.audit_event(
                'user_updated',
                'User dns servers changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User dns servers changed',
                remote_address=remote_addr,
            )

            reset_user = True

        user.dns_servers = dns_servers

    if 'dns_suffix' in flask.request.json:
        dns_suffix = utils.filter_str(flask.request.json['dns_suffix']) or None

        if user.dns_suffix != dns_suffix:
            user.audit_event(
                'user_updated',
                'User dns suffix changed',
                remote_addr=remote_addr,
            )

            journal.entry(
                journal.USER_UPDATE,
                user.journal_data,
                event_long='User dns suffix changed',
                remote_address=remote_addr,
            )

            reset_user = True

        user.dns_suffix = dns_suffix

    user.commit()

    event.Event(type=USERS_UPDATED, resource_id=user.org.id)

    if port_forwarding_event:
        messenger.publish('port_forwarding', {
            'org_id': org.id,
            'user_id': user.id,
        })

    if reset_user_cache:
        user.clear_auth_cache()

    if reset_user:
        user.disconnect()

    send_key_email = flask.request.json.get('send_key_email')
    if send_key_email and user.email:
        user.audit_event(
            'user_emailed',
            'User key email sent to "%s"' % user.email,
            remote_addr=remote_addr,
        )

        journal.entry(
            journal.USER_PROFILE_EMAIL,
            user.journal_data,
            event_long='User key email sent to "%s"' % user.email,
            remote_address=remote_addr,
        )

        try:
            user.send_key_email(utils.get_url_root())
        except EmailNotConfiguredError:
            return utils.jsonify({
                'error': EMAIL_NOT_CONFIGURED,
                'error_msg': EMAIL_NOT_CONFIGURED_MSG,
            }, 400)
        except EmailFromInvalid:
            return utils.jsonify({
                'error': EMAIL_FROM_INVALID,
                'error_msg': EMAIL_FROM_INVALID_MSG,
            }, 400)
        except EmailAuthInvalid:
            return utils.jsonify({
                'error': EMAIL_AUTH_INVALID,
                'error_msg': EMAIL_AUTH_INVALID_MSG,
            }, 400)

    return utils.jsonify(user.dict())
def main(default_conf=None): if len(sys.argv) > 1: cmd = sys.argv[1] else: cmd = 'start' parser = optparse.OptionParser(usage=USAGE) if cmd == 'start': parser.add_option('-d', '--daemon', action='store_true', help='Daemonize process') parser.add_option('-p', '--pidfile', type='string', help='Path to create pid file') parser.add_option('-c', '--conf', type='string', help='Path to configuration file') parser.add_option('-q', '--quiet', action='store_true', help='Suppress logging output') elif cmd == 'logs': parser.add_option('--archive', action='store_true', help='Archive log file') parser.add_option('--tail', action='store_true', help='Tail log file') parser.add_option('--limit', type='int', help='Limit log lines') (options, args) = parser.parse_args() if hasattr(options, 'conf') and options.conf: conf_path = options.conf else: conf_path = default_conf pritunl.set_conf_path(conf_path) if cmd == 'version': print '%s v%s' % (pritunl.__title__, pritunl.__version__) sys.exit(0) elif cmd == 'setup-key': from pritunl import setup from pritunl import settings setup.setup_loc() print settings.local.setup_key sys.exit(0) elif cmd == 'reset-version': from pritunl.constants import MIN_DATABASE_VER from pritunl import setup from pritunl import utils setup.setup_db() utils.set_db_ver(pritunl.__version__, MIN_DATABASE_VER) time.sleep(.3) print 'Database version reset to %s' % pritunl.__version__ sys.exit(0) elif cmd == 'reset-password': from pritunl import setup from pritunl import auth setup.setup_db() username, password = auth.reset_password() time.sleep(.3) print 'Administrator password successfully reset:\n' + \ ' username: "******"\n password: "******"' % (username, password) sys.exit(0) elif cmd == 'reconfigure': from pritunl import setup from pritunl import settings setup.setup_loc() settings.conf.mongodb_uri = None settings.conf.commit() time.sleep(.3) print 'Database configuration successfully reset' sys.exit(0) elif cmd == 'get': from pritunl import setup from pritunl 
import settings setup.setup_db_host() if len(args) != 2: raise ValueError('Invalid arguments') split = args[1].split('.') key_str = None group_str = split[0] if len(split) > 1: key_str = split[1] if group_str == 'host': group = settings.local.host else: group = getattr(settings, group_str) if key_str: val = getattr(group, key_str) print '%s.%s = %s' % (group_str, key_str, json.dumps(val)) else: for field in group.fields: val = getattr(group, field) print '%s.%s = %s' % (group_str, field, json.dumps(val)) sys.exit(0) elif cmd == 'set': from pritunl.constants import HOSTS_UPDATED from pritunl import setup from pritunl import settings from pritunl import event from pritunl import messenger setup.setup_db_host() if len(args) != 3: raise ValueError('Invalid arguments') group_str, key_str = args[1].split('.') if group_str == 'host': group = settings.local.host else: group = getattr(settings, group_str) val_str = args[2] val = json.loads(val_str) setattr(group, key_str, val) if group_str == 'host': settings.local.host.commit() event.Event(type=HOSTS_UPDATED) messenger.publish('hosts', 'updated') else: settings.commit() time.sleep(.3) print '%s.%s = %s' % (group_str, key_str, json.dumps(getattr(group, key_str))) sys.exit(0) elif cmd == 'unset': from pritunl import setup from pritunl import settings setup.setup_db() if len(args) != 2: raise ValueError('Invalid arguments') group_str, key_str = args[1].split('.') group = getattr(settings, group_str) group.unset(key_str) settings.commit() time.sleep(.3) print '%s.%s = %s' % (group_str, key_str, json.dumps(getattr(group, key_str))) sys.exit(0) elif cmd == 'set-mongodb': from pritunl import setup from pritunl import settings setup.setup_loc() if len(args) > 1: mongodb_uri = args[1] else: mongodb_uri = None settings.conf.mongodb_uri = mongodb_uri settings.conf.commit() time.sleep(.3) print 'Database configuration successfully set' sys.exit(0) elif cmd == 'reset-ssl-cert': from pritunl import setup from pritunl import settings 
setup.setup_db() settings.app.server_cert = None settings.app.server_key = None settings.commit() time.sleep(.3) print 'Server ssl certificate successfully reset' sys.exit(0) elif cmd == 'logs': from pritunl import setup from pritunl import logger setup.setup_db() log_view = logger.LogView() if options.archive: if len(args) > 1: archive_path = args[1] else: archive_path = './' print 'Log archived to: ' + log_view.archive_log( archive_path, options.limit) elif options.tail: for msg in log_view.tail_log_lines(): print msg else: print log_view.get_log_lines(options.limit) sys.exit(0) elif cmd != 'start': raise ValueError('Invalid command') from pritunl import settings if options.quiet: settings.local.quiet = True if options.daemon: pid = os.fork() if pid > 0: if options.pidfile: with open(options.pidfile, 'w') as pid_file: pid_file.write('%s' % pid) sys.exit(0) elif not options.quiet: print '##############################################################' print '# #' print '# /$$ /$$ /$$ #' print '# |__/ | $$ | $$ #' print '# /$$$$$$ /$$$$$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$ | $$ #' print '# /$$__ $$ /$$__ $$| $$|_ $$_/ | $$ | $$| $$__ $$| $$ #' print '# | $$ \ $$| $$ \__/| $$ | $$ | $$ | $$| $$ \ $$| $$ #' print '# | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$ | $$| $$ #' print '# | $$$$$$$/| $$ | $$ | $$$$/| $$$$$$/| $$ | $$| $$ #' print '# | $$____/ |__/ |__/ \____/ \______/ |__/ |__/|__/ #' print '# | $$ #' print '# | $$ #' print '# |__/ #' print '# #' print '##############################################################' pritunl.init_server()
def host_put(hst=None):
    """Update an existing host from the JSON request body.

    ``hst`` is the host id from the route and is rebound to the loaded
    host object. Only fields present in the request are modified; the
    changed attribute set is committed at the end. Returns the updated
    host as JSON, or a 400 error response on invalid input.
    """
    if settings.app.demo_mode:
        return utils.demo_blocked()

    hst = host.get_by_id(hst)

    if 'name' in flask.request.json:
        hst.name = utils.filter_str(
            flask.request.json['name']) or utils.random_name()

    if 'public_address' in flask.request.json:
        public_address = utils.filter_str(flask.request.json['public_address'])
        hst.public_address = public_address

    if 'public_address6' in flask.request.json:
        public_address6 = utils.filter_str(
            flask.request.json['public_address6'])
        hst.public_address6 = public_address6

    if 'routed_subnet6' in flask.request.json:
        routed_subnet6 = flask.request.json['routed_subnet6']
        if routed_subnet6:
            try:
                routed_subnet6 = ipaddress.IPv6Network(
                    flask.request.json['routed_subnet6'])
            except (ipaddress.AddressValueError, ValueError):
                return utils.jsonify({
                    'error': IPV6_SUBNET_INVALID,
                    'error_msg': IPV6_SUBNET_INVALID_MSG,
                }, 400)

            # Subnets smaller than /64 are rejected.
            if routed_subnet6.prefixlen > 64:
                return utils.jsonify({
                    'error': IPV6_SUBNET_SIZE_INVALID,
                    'error_msg': IPV6_SUBNET_SIZE_INVALID_MSG,
                }, 400)

            routed_subnet6 = str(routed_subnet6)
        else:
            routed_subnet6 = None

        if hst.routed_subnet6 != routed_subnet6:
            # Cannot change the routed subnet while IPv6 servers are online.
            if server.get_online_ipv6_count():
                return utils.jsonify({
                    'error': IPV6_SUBNET_ONLINE,
                    'error_msg': IPV6_SUBNET_ONLINE_MSG,
                }, 400)
            hst.routed_subnet6 = routed_subnet6

    if 'proxy_ndp' in flask.request.json:
        proxy_ndp = True if flask.request.json['proxy_ndp'] else False
        hst.proxy_ndp = proxy_ndp

    if 'local_address' in flask.request.json:
        local_address = utils.filter_str(flask.request.json['local_address'])
        hst.local_address = local_address

    if 'local_address6' in flask.request.json:
        local_address6 = utils.filter_str(flask.request.json['local_address6'])
        hst.local_address6 = local_address6

    if 'link_address' in flask.request.json:
        link_address = utils.filter_str(flask.request.json['link_address'])
        hst.link_address = link_address

    if 'sync_address' in flask.request.json:
        sync_address = utils.filter_str(flask.request.json['sync_address'])
        hst.sync_address = sync_address

    if 'availability_group' in flask.request.json:
        hst.availability_group = utils.filter_str(
            flask.request.json['availability_group']) or DEFAULT

    if 'instance_id' in flask.request.json:
        instance_id = utils.filter_str(flask.request.json['instance_id'])
        # NOTE(review): compares against hst.aws_id but assigns
        # hst.instance_id -- intentional only if aws_id aliases
        # instance_id on the host model; confirm.
        if instance_id != hst.aws_id:
            hst.instance_id = instance_id

    # Persist only the attributes that were actually modified above.
    hst.commit(hst.changed)
    event.Event(type=HOSTS_UPDATED)
    messenger.publish('hosts', 'updated')

    return utils.jsonify(hst.dict())
def task(self):
    """Server watchdog pass, written as a cooperative generator.

    Each bare ``yield`` hands control back to the caller between
    database round trips. The pass prunes server instances whose ping
    is older than the TTL, then publishes a 'start' message for online
    servers running fewer instances than their replica count.
    """
    try:
        # Anything that has not pinged since this cutoff is stale.
        cutoff = utils.now() - datetime.timedelta(
            seconds=settings.vpn.server_ping_ttl)

        stale_docs = self.server_collection.find({
            'instances.ping_timestamp': {'$lt': cutoff},
        }, {
            '_id': True,
            'instances': True,
        })

        yield

        for server_doc in stale_docs:
            for inst in server_doc['instances']:
                if inst['ping_timestamp'] >= cutoff:
                    continue
                # Pull the dead instance and decrement the counter.
                self.server_collection.update({
                    '_id': server_doc['_id'],
                    'instances.instance_id': inst['instance_id'],
                }, {
                    '$pull': {
                        'instances': {
                            'instance_id': inst['instance_id'],
                        },
                    },
                    '$inc': {
                        'instances_count': -1,
                    },
                })

        yield

        # Online servers running below their replica count.
        degraded = self.server_collection.aggregate([
            {'$match': {
                'status': ONLINE,
                'start_timestamp': {'$lt': cutoff},
            }},
            {'$project': {
                '_id': True,
                'hosts': True,
                'instances': True,
                'replica_count': True,
                'offline_instances_count': {
                    '$subtract': [
                        '$replica_count',
                        '$instances_count',
                    ],
                }
            }},
            {'$match': {
                'offline_instances_count': {'$gt': 0},
            }},
        ])

        yield

        for server_doc in degraded:
            # Candidate hosts are those not already running an instance.
            running_hosts = set(
                inst['host_id'] for inst in server_doc['instances'])
            candidates = list(set(server_doc['hosts']) - running_hosts)
            if not candidates:
                continue

            messenger.publish('servers', 'start', extra={
                'server_id': server_doc['_id'],
                'send_events': True,
                'prefered_hosts': host.get_prefered_hosts(
                    candidates, server_doc['replica_count'])
            })
    except GeneratorExit:
        raise
    except:
        logger.exception('Error checking server states', 'tasks')
def main(default_conf=None): if len(sys.argv) > 1: cmd = sys.argv[1] else: cmd = 'start' parser = optparse.OptionParser(usage=USAGE) if cmd == 'start': parser.add_option('-d', '--daemon', action='store_true', help='Daemonize process') parser.add_option('-p', '--pidfile', type='string', help='Path to create pid file') parser.add_option('-c', '--conf', type='string', help='Path to configuration file') parser.add_option('-q', '--quiet', action='store_true', help='Suppress logging output') elif cmd == 'logs': parser.add_option('--archive', action='store_true', help='Archive log file') parser.add_option('--tail', action='store_true', help='Tail log file') parser.add_option('--limit', type='int', help='Limit log lines') elif cmd == 'set': parser.disable_interspersed_args() (options, args) = parser.parse_args() if hasattr(options, 'conf') and options.conf: conf_path = options.conf else: conf_path = default_conf pritunl.set_conf_path(conf_path) if cmd == 'version': print '%s v%s' % (pritunl.__title__, pritunl.__version__) sys.exit(0) elif cmd == 'setup-key': from pritunl import setup from pritunl import settings setup.setup_loc() print settings.local.setup_key sys.exit(0) elif cmd == 'reset-version': from pritunl.constants import MIN_DATABASE_VER from pritunl import setup from pritunl import utils setup.setup_db() utils.set_db_ver(pritunl.__version__, MIN_DATABASE_VER) time.sleep(.2) print 'Database version reset to %s' % pritunl.__version__ sys.exit(0) elif cmd == 'reset-password': from pritunl import setup from pritunl import auth setup.setup_db() username, password = auth.reset_password() time.sleep(.2) print 'Administrator password successfully reset:\n' + \ ' username: "******"\n password: "******"' % (username, password) sys.exit(0) elif cmd == 'reconfigure': from pritunl import setup from pritunl import settings setup.setup_loc() settings.conf.mongodb_uri = None settings.conf.commit() time.sleep(.2) print 'Database configuration successfully reset' sys.exit(0) elif 
cmd == 'get': from pritunl import setup from pritunl import settings setup.setup_db_host() if len(args) != 2: raise ValueError('Invalid arguments') split = args[1].split('.') key_str = None group_str = split[0] if len(split) > 1: key_str = split[1] if group_str == 'host': group = settings.local.host else: group = getattr(settings, group_str) if key_str: val = getattr(group, key_str) print '%s.%s = %s' % (group_str, key_str, json.dumps(val, default=lambda x: str(x))) else: for field in group.fields: val = getattr(group, field) print '%s.%s = %s' % (group_str, field, json.dumps(val, default=lambda x: str(x))) sys.exit(0) elif cmd == 'set': from pritunl.constants import HOSTS_UPDATED from pritunl import setup from pritunl import settings from pritunl import event from pritunl import messenger setup.setup_db_host() if len(args) != 3: raise ValueError('Invalid arguments') group_str, key_str = args[1].split('.') if group_str == 'host': group = settings.local.host else: group = getattr(settings, group_str) val_str = args[2] try: val = json.loads(val_str) except ValueError: val = json.loads(json.JSONEncoder().encode(val_str)) setattr(group, key_str, val) if group_str == 'host': settings.local.host.commit() event.Event(type=HOSTS_UPDATED) messenger.publish('hosts', 'updated') else: settings.commit() time.sleep(.2) print '%s.%s = %s' % (group_str, key_str, json.dumps(getattr(group, key_str), default=lambda x: str(x))) sys.exit(0) elif cmd == 'unset': from pritunl import setup from pritunl import settings setup.setup_db() if len(args) != 2: raise ValueError('Invalid arguments') group_str, key_str = args[1].split('.') group = getattr(settings, group_str) group.unset(key_str) settings.commit() time.sleep(.2) print '%s.%s = %s' % (group_str, key_str, json.dumps(getattr(group, key_str), default=lambda x: str(x))) sys.exit(0) elif cmd == 'set-mongodb': from pritunl import setup from pritunl import settings setup.setup_loc() if len(args) > 1: mongodb_uri = args[1] else: mongodb_uri 
= None settings.conf.mongodb_uri = mongodb_uri settings.conf.commit() time.sleep(.2) print 'Database configuration successfully set' sys.exit(0) elif cmd == 'reset-ssl-cert': from pritunl import setup from pritunl import settings setup.setup_db() settings.app.server_cert = None settings.app.server_key = None settings.commit() time.sleep(.2) print 'Server ssl certificate successfully reset' sys.exit(0) elif cmd == 'destroy-secondary': from pritunl import setup from pritunl import logger from pritunl import mongo setup.setup_db() mongo.get_collection('clients').drop() mongo.get_collection('clients_pool').drop() mongo.get_collection('transaction').drop() mongo.get_collection('queue').drop() mongo.get_collection('tasks').drop() mongo.get_collection('messages').drop() mongo.get_collection('users_key_link').drop() mongo.get_collection('auth_sessions').drop() mongo.get_collection('auth_csrf_tokens').drop() mongo.get_collection('auth_nonces').drop() mongo.get_collection('auth_limiter').drop() mongo.get_collection('otp').drop() mongo.get_collection('otp_cache').drop() mongo.get_collection('sso_tokens').drop() mongo.get_collection('sso_cache').drop() server_coll = mongo.get_collection('servers') server_coll.update_many({}, { '$set': { 'status': 'offline', 'instances': [], 'instances_count': 0, }, '$unset': { 'network_lock': '', }, }) sys.exit(0) elif cmd == 'logs': from pritunl import setup from pritunl import logger setup.setup_db() log_view = logger.LogView() if options.archive: if len(args) > 1: archive_path = args[1] else: archive_path = './' print 'Log archived to: ' + log_view.archive_log(archive_path, options.limit) elif options.tail: for msg in log_view.tail_log_lines(): print msg else: print log_view.get_log_lines(options.limit) sys.exit(0) elif cmd != 'start': raise ValueError('Invalid command') from pritunl import settings if options.quiet: settings.local.quiet = True if options.daemon: pid = os.fork() if pid > 0: if options.pidfile: with open(options.pidfile, 
'w') as pid_file: pid_file.write('%s' % pid) sys.exit(0) elif not options.quiet: print '##############################################################' print '# #' print '# /$$ /$$ /$$ #' print '# |__/ | $$ | $$ #' print '# /$$$$$$ /$$$$$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$ | $$ #' print '# /$$__ $$ /$$__ $$| $$|_ $$_/ | $$ | $$| $$__ $$| $$ #' print '# | $$ \ $$| $$ \__/| $$ | $$ | $$ | $$| $$ \ $$| $$ #' print '# | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$ | $$| $$ #' print '# | $$$$$$$/| $$ | $$ | $$$$/| $$$$$$/| $$ | $$| $$ #' print '# | $$____/ |__/ |__/ \____/ \______/ |__/ |__/|__/ #' print '# | $$ #' print '# | $$ #' print '# |__/ #' print '# #' print '##############################################################' pritunl.init_server()