from pritunl.constants import *
from pritunl.exceptions import *
from pritunl.descriptors import *
from pritunl import task
from pritunl import logger
from pritunl import server
import time

class TaskSyncIpPool(task.Task):
    """Scheduled task that re-syncs the IP address pool of every server."""

    type = 'sync_ip_pool'

    def _sync_server(self, srv):
        # Sync a single server, logging any failure instead of raising so
        # one bad server does not block the rest.
        try:
            srv.ip_pool.sync_ip_pool()
        except:
            logger.exception('Failed to sync server IP pool. %r' % {
                'server_id': srv.id,
                'task_id': self.id,
            })

    def task(self):
        for srv in server.iter_servers():
            self._sync_server(srv)

task.add_task(TaskSyncIpPool, minutes=7)
from pritunl.helpers import *
from pritunl import mongo
from pritunl import task
from pritunl import utils

class TaskCleanNetworkLock(task.Task):
    """Scheduled task that releases expired server network locks."""

    type = 'clean_network_lock'

    @cached_static_property
    def server_collection(cls):
        return mongo.get_collection('servers')

    def task(self):
        # A lock whose TTL is in the past is stale; clear both the lock
        # and its TTL field in one bulk update.
        expired_spec = {
            'network_lock_ttl': {'$lt': utils.now()},
        }
        clear_lock = {'$unset': {
            'network_lock': '',
            'network_lock_ttl': '',
        }}
        self.server_collection.update_many(expired_spec, clear_lock)

task.add_task(TaskCleanNetworkLock, minutes=xrange(0, 60, 8), run_on_start=True)
# NOTE(review): chunk begins mid-class; the imports and the
# `class TaskCleanNetworkLinks(task.Task):` header are outside this view,
# so the indentation below is reconstructed — confirm against the full file.
    type = 'clean_network_links'

    @cached_static_property
    def user_collection(cls):
        # Collection of all user documents.
        return mongo.get_collection('users')

    @cached_static_property
    def user_net_link_collection(cls):
        # Collection of per-user network link documents.
        return mongo.get_collection('users_net_link')

    def task(self):
        # Remove network-link documents whose owning user no longer exists.
        user_ids_link = set(
            self.user_net_link_collection.find({}, {
                '_id': True,
                'user_id': True,
            }).distinct('user_id'))
        user_ids = set(
            self.user_collection.find({}, {
                '_id': True,
            }).distinct('_id'))

        # Orphans are the link owners that are not present in users.
        self.user_net_link_collection.remove({
            'user_id': {
                '$in': list(user_ids_link - user_ids)
            },
        })

task.add_task(TaskCleanNetworkLinks, hours=5, minutes=47)
from pritunl.constants import *
from pritunl.exceptions import *
from pritunl.descriptors import *
from pritunl import pooler
from pritunl import task
from pritunl import logger

class TaskPooler(task.Task):
    """Scheduled task that tops up the pre-allocated entity pools."""

    type = 'pooler'

    def task(self):
        # Refill each pool type in the same order as before.
        for pool_type in ('org', 'user', 'dh_params'):
            pooler.fill(pool_type)

task.add_task(TaskPooler, minutes=xrange(0, 60, 5))
# NOTE(review): chunk begins mid-way through a cleanup loop (appears to be
# TaskCleanServers.task); the class, method and outer loop headers are
# outside this view, so the indentation below is reconstructed — confirm
# against the full file.
                    if item_id not in item_distinct:
                        missing_items.append(item_id)

                if missing_items:
                    # Pull stale ids out of the server document's array field.
                    self.server_collection.update({
                        '_id': doc['_id'],
                    }, {'$pull': {
                        item_type: {
                            '$in': missing_items
                        },
                    }})

            missing_links = []
            for link_doc in doc['links']:
                if link_doc['server_id'] not in server_ids:
                    missing_links.append(link_doc['server_id'])

            if missing_links:
                # Drop link entries that point at deleted servers.
                self.server_collection.update({
                    '_id': doc['_id'],
                }, {
                    '$pull': {
                        'links': {
                            'server_id': {
                                '$in': missing_links
                            },
                        },
                    }
                })

task.add_task(TaskCleanServers, hours=5, minutes=27)
from pritunl.helpers import *
from pritunl import mongo
from pritunl import task

class TaskCleanIpPool(task.Task):
    """Scheduled task that drops IP pool entries of deleted servers."""

    type = 'clean_ip_pool'

    @cached_static_property
    def pool_collection(cls):
        return mongo.get_collection('servers_ip_pool')

    @cached_static_property
    def server_collection(cls):
        return mongo.get_collection('servers')

    def task(self):
        # Ids of every server that still exists.
        server_ids = self.server_collection.find({}, {
            '_id': True,
        }).distinct('_id')

        # Remove pool entries whose server is no longer present.
        self.pool_collection.remove({
            'server_id': {'$nin': server_ids},
        })

task.add_task(TaskCleanIpPool, hours=5, minutes=23)
# NOTE(review): chunk begins mid-way through TaskHost's generator task; the
# class, method, try and loop headers are outside this view, so the
# indentation below is reconstructed — confirm against the full file.
                server_count += doc.get('server_count') or 0
                device_count += doc.get('device_count') or 0

                if doc.get('ping_timestamp') and \
                        now - doc['ping_timestamp'] > ttl:
                    # Conditional update: the filter also matches on
                    # ping_timestamp (presumably to avoid clobbering a
                    # concurrent refresh — verify against the full file).
                    response = self.hosts_collection.update({
                        '_id': doc['_id'],
                        'ping_timestamp': ttl_timestamp,
                    }, {'$set': {
                        'status': OFFLINE,
                        'ping_timestamp': None,
                    }})
                    yield

                    if response['updatedExisting']:
                        # The host actually transitioned; notify listeners.
                        event.Event(type=HOSTS_UPDATED)
                    yield

            monitoring.insert_point('cluster', {}, {
                'server_count': server_count,
                'device_count': device_count,
            })
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking host status', 'runners')

task.add_task(TaskHost, seconds=xrange(0, 60, settings.app.host_ping))
from pritunl import pooler
from pritunl import task

class TaskPooler(task.Task):
    """Scheduled task keeping the org, user and dh_params pools filled."""

    type = 'pooler'

    def task(self):
        for kind in ('org', 'user', 'dh_params'):
            pooler.fill(kind)

task.add_task(TaskPooler, minutes=xrange(0, 60, 5))
# NOTE(review): chunk begins mid-class (TaskLink); the imports and the
# class header are outside this view, so the indentation below is
# reconstructed — confirm against the full file.
    type = 'link'

    def task(self):
        # Skip entirely when running in demo mode.
        if settings.app.demo_mode:
            return

        # Pick the best host per location: highest priority wins, with a
        # tie broken in favor of the host that is already active.
        best_hosts = {}
        for hst in link.iter_hosts():
            if not hst.check_available():
                continue

            cur_hst = best_hosts.get(hst.location_id)
            if not cur_hst:
                best_hosts[hst.location_id] = hst
                continue

            if hst.priority > cur_hst.priority:
                best_hosts[hst.location_id] = hst
                continue

            if hst.priority == cur_hst.priority and \
                    hst.active and not cur_hst.active:
                best_hosts[hst.location_id] = hst
                continue

        # Activate each location's winner if it is not already active.
        for hst in best_hosts.values():
            if not hst.active:
                hst.set_active()

task.add_task(TaskLink, seconds=xrange(0, 60, 3))
from pritunl.helpers import *
from pritunl import mongo
from pritunl import task
from pritunl import utils

class TaskCleanNetworkLock(task.Task):
    """Scheduled task that clears network locks whose TTL has expired."""

    type = 'clean_network_lock'

    @cached_static_property
    def server_collection(cls):
        return mongo.get_collection('servers')

    def task(self):
        now = utils.now()
        # Unset both the lock and its TTL on every stale document.
        self.server_collection.update_many(
            {'network_lock_ttl': {'$lt': now}},
            {'$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            }},
        )

task.add_task(TaskCleanNetworkLock, minutes=xrange(0, 60, 8), run_on_start=True)
# NOTE(review): chunk begins mid-method in the acme update task; the method
# header and the condition guarding this first logger.exception call are
# outside this view, so indentation is reconstructed — confirm against the
# full file.
            logger.exception(
                'Failed to update acme certificate. Timestamp not set',
                'tasks',
                acme_domain=acme_domain,
            )
            return

        if not settings.app.acme_key:
            logger.exception(
                'Failed to update acme certificate. Account key not set',
                'tasks',
                acme_domain=acme_domain,
            )
            return

        # Skip renewal until the certificate is older than the renew window.
        if utils.time_now() - settings.app.acme_timestamp < \
                settings.app.acme_renew:
            return

        logger.info(
            'Updating acme certificate', 'tasks',
            acme_domain=acme_domain,
        )

        acme.update_acme_cert()
        # Restart/update the web server so the new certificate is served.
        app.update_server()

task.add_task(AcmeUpdate, hours=4, minutes=35, run_on_start=True)
# NOTE(review): chunk begins mid-loop in TaskRoute; the class, method, try
# and loop headers are outside this view, so indentation is reconstructed —
# confirm against the full file.
                if not svr:
                    # Server is gone; drop its reserved route document.
                    self.routes_collection.remove({
                        '_id': doc['_id'],
                    })
                    continue

                match = False
                for route in svr.get_routes(include_server_links=True):
                    if vpc_region == route['vpc_region'] and \
                            vpc_id == route['vpc_id'] and \
                            network == route['network']:
                        match = True

                if not match:
                    # No server route matches this reservation any more.
                    self.routes_collection.remove({
                        '_id': doc['_id'],
                    })
                    continue

                # Re-advertise the still-valid route.
                messenger.publish('instance', [
                    'route_advertisement',
                    server_id,
                    vpc_region,
                    vpc_id,
                    network
                ])
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking route states', 'tasks')

task.add_task(TaskRoute, seconds=xrange(0, 60, settings.vpn.server_ping_ttl))
# NOTE(review): chunk begins mid-statement (the closing of a remove() call)
# in TaskRoute; everything above this point is outside this view, so the
# indentation below is reconstructed — confirm against the full file.
                    })
                    continue

                match = False
                for route in svr.get_routes(include_server_links=True):
                    # A route counts as advertised either explicitly or by
                    # having both a VPC region and id configured.
                    route_advertise = route['advertise'] or \
                        (route['vpc_region'] and route['vpc_id'])

                    # NAT netmap, when present, replaces the route network.
                    route_network = route['network']
                    netmap = route.get('nat_netmap')
                    if netmap:
                        route_network = netmap

                    if route_advertise and network == route_network:
                        match = True

                if not match:
                    self.routes_collection.remove({
                        '_id': doc['_id'],
                    })
                    continue

                messenger.publish('instance', ['route_advertisement',
                    server_id, vpc_region, vpc_id, network])
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking route states', 'tasks')

task.add_task(TaskRoute, seconds=xrange(0, 60, 30))
# NOTE(review): chunk begins inside an aggregation pipeline literal in
# TaskCleanIpPool; the pipeline's opening stages and the surrounding call
# are outside this view, so indentation is reconstructed — confirm against
# the full file.
        }}, {'$group': {
            # Group pool entries by (network, user) to find duplicates.
            '_id': {
                'network': '$network',
                'user_id': '$user_id',
            },
            'docs': {'$addToSet': '$_id'},
            'count': {'$sum': 1},
        }}, {'$match': {
            # Keep only groups holding more than one document.
            'count': {'$gt': 1},
        }},
        ])

        for doc in response:
            user_id = doc['_id']['user_id']
            network = doc['_id']['network']
            # Release all but one of the duplicates ($addToSet order is
            # unspecified, so which entry survives is arbitrary).
            doc_ids = doc['docs'][1:]

            for doc_id in doc_ids:
                self.pool_collection.update({
                    '_id': doc_id,
                    'network': network,
                    'user_id': user_id,
                }, {'$unset': {
                    'org_id': '',
                    'user_id': '',
                }})

task.add_task(TaskCleanIpPool, hours=5, minutes=23, run_on_start=True)
# NOTE(review): chunk begins mid-loop in TaskRoute; the class, method, try
# and loop headers are outside this view, so indentation is reconstructed —
# confirm against the full file.
                if not svr:
                    # Server is gone; drop its reserved route document.
                    self.routes_collection.remove({
                        '_id': doc['_id'],
                    })
                    continue

                match = False
                for route in svr.get_routes(include_server_links=True):
                    # Advertised either explicitly or via VPC settings.
                    route_advertise = route['advertise'] or \
                        (route['vpc_region'] and route['vpc_id'])
                    if route_advertise and network == route['network']:
                        match = True

                if not match:
                    self.routes_collection.remove({
                        '_id': doc['_id'],
                    })
                    continue

                # Re-advertise the still-valid route.
                messenger.publish('instance', [
                    'route_advertisement',
                    server_id,
                    vpc_region,
                    vpc_id,
                    network
                ])
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking route states', 'tasks')

task.add_task(TaskRoute, seconds=xrange(0, 60, 30))
from pritunl import task
from pritunl import logger
from pritunl import server

class TaskSyncIpPool(task.Task):
    """Scheduled task that re-syncs every server's IP address pool."""

    type = 'sync_ip_pool'

    def task(self):
        for srv in server.iter_servers():
            # Log failures per-server and continue with the rest.
            try:
                srv.ip_pool.sync_ip_pool()
            except:
                logger.exception(
                    'Failed to sync server IP pool', 'tasks',
                    server_id=srv.id,
                    task_id=self.id,
                )

task.add_task(TaskSyncIpPool, minutes=7)
import time

class TaskCleanUsers(task.Task):
    """Scheduled task that removes users whose organization was deleted.

    The set of org ids is sampled twice, thirty seconds apart, and only
    users whose org is missing from BOTH samples are removed — this lowers
    the risk of deleting a CA user for an org that is mid-creation.
    """

    type = 'clean_users'

    @cached_static_property
    def collection(cls):
        return mongo.get_collection('users')

    @cached_static_property
    def org_collection(cls):
        return mongo.get_collection('organizations')

    def _get_org_ids(self):
        # Snapshot of every organization id currently in the database.
        docs = self.org_collection.find({}, {
            '_id': True,
        })
        return set(docs.distinct('_id'))

    def task(self):
        first_sample = self._get_org_ids()
        time.sleep(30)
        second_sample = self._get_org_ids()

        # Only orgs present in both samples are considered alive.
        self.collection.remove({
            'org_id': {'$nin': list(first_sample & second_sample)},
        })

task.add_task(TaskCleanUsers, hours=5, minutes=17)
from pritunl import task
from pritunl import logger
from pritunl import server

class TaskSyncIpPool(task.Task):
    """Scheduled task that synchronizes each server's IP pool."""

    type = 'sync_ip_pool'

    def task(self):
        for srv in server.iter_servers():
            try:
                srv.ip_pool.sync_ip_pool()
            except:
                # Keep iterating; one failing server must not stop the rest.
                logger.exception('Failed to sync server IP pool', 'tasks',
                    server_id=srv.id,
                    task_id=self.id,
                )

task.add_task(TaskSyncIpPool, hours=4, minutes=7)
from pritunl.helpers import *
from pritunl import mongo
from pritunl import task

class TaskCleanVxlans(task.Task):
    """Scheduled task that deletes vxlan records for removed servers."""

    type = 'clean_vxlan'

    @cached_static_property
    def server_collection(cls):
        return mongo.get_collection('servers')

    @cached_static_property
    def vxlan_collection(cls):
        return mongo.get_collection('vxlans')

    def task(self):
        existing = set(self.server_collection.find().distinct('_id'))
        referenced = set(self.vxlan_collection.find().distinct('server_id'))

        # Orphans reference a server id that no longer exists.
        orphans = referenced - existing
        self.vxlan_collection.remove({
            'server_id': {'$in': list(orphans)}
        })

task.add_task(TaskCleanVxlans, minutes=52)
import time

class TaskCleanUsers(task.Task):
    """Scheduled task that removes users belonging to deleted organizations.

    The org id set is sampled twice, thirty seconds apart, and only users
    whose org is absent from BOTH samples are removed, reducing the chance
    of deleting a CA user for an organization that is mid-creation.
    """

    type = 'clean_users'

    @cached_static_property
    def collection(cls):
        return mongo.get_collection('users')

    @cached_static_property
    def org_collection(cls):
        return mongo.get_collection('organizations')

    def _get_org_ids(self):
        # Return the set of all existing organization ids.
        # BUG FIX: the projection was written as the Python set literal
        # {'_id',}; pymongo requires a dict (or list) projection. Use the
        # {'_id': True} form that the sibling cleanup tasks use.
        return set(self.org_collection.find({}, {
            '_id': True,
        }).distinct('_id'))

    def task(self):
        # Remove users from orgs that don't exist; check twice to reduce
        # the possibility of deleting a CA user during org creation.
        org_ids = self._get_org_ids()
        time.sleep(30)
        org_ids2 = self._get_org_ids()

        self.collection.remove({
            'org_id': {'$nin': list(org_ids & org_ids2)},
        })

task.add_task(TaskCleanUsers, hours=5, minutes=17)
from pritunl.helpers import *
from pritunl import mongo
from pritunl import task

class TaskCleanNetworkLinks(task.Task):
    """Scheduled task that deletes network links owned by removed users."""

    type = 'clean_network_links'

    @cached_static_property
    def user_collection(cls):
        return mongo.get_collection('users')

    @cached_static_property
    def user_net_link_collection(cls):
        return mongo.get_collection('users_net_link')

    def task(self):
        linked_user_ids = set(self.user_net_link_collection.find({}, {
            '_id': True,
            'user_id': True,
        }).distinct('user_id'))
        existing_user_ids = set(self.user_collection.find({}, {
            '_id': True,
        }).distinct('_id'))

        # Links whose owner is gone are orphans and can be removed.
        orphaned = linked_user_ids - existing_user_ids
        self.user_net_link_collection.remove({
            'user_id': {'$in': list(orphaned)},
        })

task.add_task(TaskCleanNetworkLinks, hours=5, minutes=47)
# NOTE(review): chunk begins mid-loop in TaskServer; the class, method, try
# and loop headers are outside this view, so indentation is reconstructed —
# confirm against the full file.
                if recover_count >= 3:
                    # Stop retrying this server after three recovery attempts.
                    continue

                recover_count += 1

                logger.info(
                    'Recovering server state', 'server',
                    server_id=doc['_id'],
                    prefered_hosts=prefered_hosts,
                )

                # Ask the cluster to start the server on preferred hosts.
                messenger.publish('servers', 'start', extra={
                    'server_id': doc['_id'],
                    'send_events': True,
                    'prefered_hosts': host.get_prefered_hosts(
                        prefered_hosts, doc['replica_count'])
                })
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking server states', 'tasks')

task.add_task(TaskServer, seconds=xrange(0, 60, settings.vpn.server_ping))
# NOTE(review): chunk begins mid-way through TaskCleanServers.task; the
# loop producing `doc` and the id snapshots (org_ids, host_ids, server_ids)
# are outside this view, so indentation is reconstructed — confirm against
# the full file.
            for item_type, item_distinct in (
                        ('organizations', org_ids),
                        ('hosts', host_ids),
                    ):
                missing_items = []
                for item_id in doc[item_type]:
                    if item_id not in item_distinct:
                        missing_items.append(item_id)

                if missing_items:
                    # Pull ids whose referenced document no longer exists.
                    self.server_collection.update({
                        '_id': doc['_id'],
                    }, {'$pull': {
                        item_type: {'$in': missing_items},
                    }})

            missing_links = []
            for link_doc in doc['links']:
                if link_doc['server_id'] not in server_ids:
                    missing_links.append(link_doc['server_id'])

            if missing_links:
                # Drop links that point at deleted servers.
                self.server_collection.update({
                    '_id': doc['_id'],
                }, {'$pull': {
                    'links': {
                        'server_id': {'$in': missing_links},
                    },
                }})

task.add_task(TaskCleanServers, hours=5, minutes=27)
# NOTE(review): chunk begins mid-way through TaskLink.task; the method
# header and the initialization of `hosts`, `best_hosts` and
# `location_available_hosts` are outside this view, so indentation is
# reconstructed — confirm against the full file.
        for hst in link.iter_hosts():
            hosts.append(hst)

            if not hst.is_available:
                continue

            location_available_hosts[hst.location_id].append(hst)

            # Track the best host per location: higher priority wins and,
            # on a tie, the host that is already active is preferred.
            cur_hst = best_hosts.get(hst.location_id)
            if not cur_hst:
                best_hosts[hst.location_id] = hst
                continue

            if hst.priority > cur_hst.priority:
                best_hosts[hst.location_id] = hst
                continue

            if hst.priority == cur_hst.priority and \
                    hst.active and not cur_hst.active:
                best_hosts[hst.location_id] = hst
                continue

        # Activate each location's winner if it is not already active.
        for hst in list(best_hosts.values()):
            if not hst.active:
                hst.set_active()

        # Inform every host of the available hosts in its location.
        for hst in hosts:
            hst.update_available(location_available_hosts[hst.location_id])

task.add_task(TaskLink, seconds=range(0, 60, 3))
from pritunl.constants import *
from pritunl.exceptions import *
from pritunl.helpers import *
from pritunl import mongo
from pritunl import task
from pritunl import logger

class TaskCleanIpPool(task.Task):
    """Scheduled task that removes IP pool entries of deleted servers."""

    type = 'clean_ip_pool'

    @cached_static_property
    def collection(cls):
        return mongo.get_collection('servers_ip_pool')

    @cached_static_property
    def server_collection(cls):
        return mongo.get_collection('servers')

    def task(self):
        # BUG FIX: the projection was written as the Python set literal
        # {'_id',}; pymongo requires a dict (or list) projection — use the
        # {'_id': True} form used elsewhere in this codebase. The local is
        # also renamed from org_ids: these are server ids, not org ids.
        server_ids = self.server_collection.find({}, {
            '_id': True,
        }).distinct('_id')

        # Drop pool entries whose server no longer exists.
        self.collection.remove({
            'server_id': {'$nin': server_ids},
        })

task.add_task(TaskCleanIpPool, hours=5, minutes=23)
# NOTE(review): chunk begins mid-method in the acme update task; the method
# header and the acme_domain assignment are outside this view, so the
# indentation below is reconstructed — confirm against the full file.
        if not settings.app.acme_timestamp:
            logger.exception(
                'Failed to update acme certificate. Timestamp not set',
                'tasks',
                acme_domain=acme_domain,
            )
            return

        if not settings.app.acme_key:
            logger.exception(
                'Failed to update acme certificate. Account key not set',
                'tasks',
                acme_domain=acme_domain,
            )
            return

        # Only renew once the certificate is older than the renew window.
        if utils.time_now() - settings.app.acme_timestamp < \
                settings.app.acme_renew:
            return

        logger.info(
            'Updating acme certificate', 'tasks',
            acme_domain=acme_domain,
        )

        acme.update_acme_cert()
        # Reload the web server so the renewed certificate is served.
        app.update_server()

task.add_task(AcmeUpdate, hours=4, minutes=35, run_on_start=True)
# NOTE(review): chunk begins mid-condition — the `if` line opening this
# comparison is outside this view — inside TaskHost's generator task, so
# the indentation below is reconstructed; confirm against the full file.
                        now - doc['ping_timestamp'] > ttl:
                    # Conditional update: the filter also matches on
                    # ping_timestamp (presumably to avoid racing a
                    # concurrent refresh — verify against the full file).
                    response = self.hosts_collection.update(
                        {
                            '_id': doc['_id'],
                            'ping_timestamp': ttl_timestamp,
                        },
                        {
                            '$set': {
                                'status': OFFLINE,
                                'ping_timestamp': None,
                            }
                        })
                    yield

                    if response['updatedExisting']:
                        # The host actually transitioned; notify listeners.
                        event.Event(type=HOSTS_UPDATED)
                    yield

            monitoring.insert_point('cluster', {}, {
                'server_count': server_count,
                'device_count': device_count,
            })
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking host status', 'runners')

task.add_task(TaskHost, seconds=xrange(0, 60, settings.app.host_ping))
# NOTE(review): chunk begins inside a property returning the routes_reserve
# collection; the property decorator and class header are outside this
# view, so the indentation below is reconstructed — confirm against the
# full file.
        return mongo.get_collection('routes_reserve')

    @interrupter
    def task(self):
        try:
            # Routes not refreshed within the ping TTL are considered
            # stale and get re-advertised.
            timestamp_spec = utils.now() - datetime.timedelta(
                seconds=settings.vpn.route_ping_ttl)
            docs = self.routes_collection.find({
                'timestamp': {'$lt': timestamp_spec},
            })
            yield

            for doc in docs:
                server_id = doc['server_id']
                vpc_region = doc['vpc_region']
                vpc_id = doc['vpc_id']
                network = doc['network']

                messenger.publish('instance', ['route_advertisement',
                    server_id, vpc_region, vpc_id, network])
        except GeneratorExit:
            raise
        except:
            logger.exception('Error checking route states', 'tasks')

        yield interrupter_sleep(settings.vpn.server_ping)

task.add_task(TaskRoute, seconds=xrange(0, 60, settings.vpn.server_ping))
from pritunl import task
from pritunl import logger
from pritunl import server

class TaskSyncIpPool(task.Task):
    """Scheduled task that keeps each server's IP pool synchronized."""

    type = 'sync_ip_pool'

    def task(self):
        # Iterate all servers; a failure on one is logged and skipped so
        # the remaining servers are still synced.
        for srv in server.iter_servers():
            try:
                srv.ip_pool.sync_ip_pool()
            except:
                logger.exception(
                    'Failed to sync server IP pool', 'tasks',
                    server_id=srv.id,
                    task_id=self.id,
                )

task.add_task(TaskSyncIpPool, hours=4, minutes=7)