def server_link_get(server_id):
    """Return the link status list for a server as a JSON response.

    For each server linked to ``server_id`` the entry contains the linked
    server's id, name and a combined status: ONLINE only when both servers
    are ONLINE and neither is missing replica instances; None when this
    server itself is offline.
    """
    links = []

    svr = server.get_by_id(server_id,
        fields=['_id', 'status', 'links', 'replica_count', 'instances'])
    if not svr:
        # Unknown server id: get_by_id returns None and the attribute
        # access below would raise AttributeError (HTTP 500). Return a
        # 404 instead, matching the other server link handlers.
        return flask.abort(404)

    # True when fewer instances are running than replicas configured.
    hosts_offline = svr.replica_count - len(svr.instances) > 0

    if svr.links:
        spec = {
            '_id': {'$in': [x['server_id'] for x in svr.links]},
        }

        for link_svr in server.iter_servers(spec=spec, fields=[
                '_id', 'status', 'name', 'replica_count', 'instances']):
            link_hosts_offline = link_svr.replica_count - len(
                link_svr.instances) > 0

            if svr.status == ONLINE:
                if hosts_offline or link_hosts_offline:
                    status = OFFLINE
                elif link_svr.status == ONLINE:
                    status = ONLINE
                else:
                    status = OFFLINE
            else:
                # Link status is undefined while this server is offline.
                status = None

            links.append({
                'id': link_svr.id,
                'server': svr.id,
                'status': status,
                'name': link_svr.name,
                'address': None,
            })

    return utils.jsonify(links)
def task(self):
    """Synchronize the IP pool of every server.

    Failures are logged per server and never abort the loop, so one
    broken server cannot prevent the others from syncing.
    """
    for svr in server.iter_servers():
        try:
            svr.ip_pool.sync_ip_pool()
        except Exception:
            # Was a bare except; catching Exception keeps SystemExit and
            # KeyboardInterrupt propagating while preserving the
            # best-effort logging behavior.
            logger.exception('Failed to sync server IP pool. %r' % {
                'server_id': svr.id,
                'task_id': self.id,
            })
def task(self):
    """Synchronize the IP pool of every server.

    Each server is handled independently; a failure is logged and the
    loop continues with the next server.
    """
    for svr in server.iter_servers():
        try:
            svr.ip_pool.sync_ip_pool()
        except Exception:
            # Was a bare except; Exception avoids swallowing SystemExit
            # and KeyboardInterrupt while keeping best-effort semantics.
            logger.exception('Failed to sync server IP pool', 'tasks',
                server_id=svr.id,
                task_id=self.id,
            )
def server_get(server_id=None):
    """Return one server document, or every server document, as JSON.

    With ``server_id`` the single matching server is returned; without
    it the full list of server dicts is returned.
    """
    if server_id:
        svr = server.get_server(server_id)
        return utils.jsonify(svr.dict())

    return utils.jsonify([svr.dict() for svr in server.iter_servers()])
def server_link_get(server_id):
    """Return JSON link status entries for the given server.

    Serves from the demo cache when demo mode is active; aborts with 404
    for unknown server ids.
    """
    if settings.app.demo_mode:
        cached = utils.demo_get_cache()
        if cached:
            return utils.jsonify(cached)

    svr = server.get_by_id(server_id,
        fields=('_id', 'status', 'links', 'replica_count', 'instances'))
    if not svr:
        return flask.abort(404)

    # True when this server has fewer running instances than replicas.
    hosts_offline = svr.replica_count - len(svr.instances) > 0

    links = []
    if svr.links:
        # Map linked server id -> whether the link uses local addresses.
        link_use_local = {
            lnk['server_id']: lnk['use_local_address']
            for lnk in svr.links
        }

        spec = {
            '_id': {
                '$in': list(link_use_local.keys()),
            },
        }

        for link_svr in server.iter_servers(spec=spec, fields=(
                '_id', 'status', 'name', 'replica_count', 'instances')):
            link_offline = link_svr.replica_count - \
                len(link_svr.instances) > 0

            if svr.status != ONLINE:
                # Undefined while this server is offline.
                status = None
            elif hosts_offline or link_offline:
                status = OFFLINE
            elif link_svr.status != ONLINE:
                status = OFFLINE
            else:
                status = ONLINE

            links.append({
                'id': link_svr.id,
                'server': svr.id,
                'status': status,
                'name': link_svr.name,
                'address': None,
                'use_local_address': link_use_local[link_svr.id],
            })

    if settings.app.demo_mode:
        utils.demo_set_cache(links)
    return utils.jsonify(links)
def setup_host_fix():
    """Pin every server's host list to this host when unlicensed.

    Licensed installations are left untouched; otherwise each server
    whose host list differs is rewritten to contain only this host.
    """
    if settings.app.license:
        return

    from pritunl import server

    this_host = settings.local.host.id
    for svr in server.iter_servers(fields=['hosts']):
        if svr.hosts == [this_host]:
            continue
        svr.hosts = [this_host]
        svr.commit('hosts')
def task(self):
    """Synchronize the IP pool of every server.

    Per-server failures are logged and skipped so a single failing
    server does not block the rest.
    """
    for svr in server.iter_servers():
        try:
            svr.ip_pool.sync_ip_pool()
        except Exception:
            # Was a bare except; Exception keeps SystemExit and
            # KeyboardInterrupt propagating while still logging errors.
            logger.exception(
                'Failed to sync server IP pool', 'tasks',
                server_id=svr.id,
                task_id=self.id,
            )
def setup_host_fix():
    """Pin every server's host list to this host on non-premium plans.

    Refreshes the subscription first; premium-licensed installations are
    left untouched. Otherwise each server whose host list differs is
    rewritten to contain only this host.
    """
    subscription.update()

    if settings.app.license and settings.app.license_plan != 'premium':
        return

    from pritunl import server

    this_host = settings.local.host.id
    for svr in server.iter_servers(fields=['hosts']):
        if svr.hosts == [this_host]:
            continue
        svr.hosts = [this_host]
        svr.commit('hosts')
def setup_host_init_server():
    """Assign this host to every server that has no hosts yet.

    Refreshes the subscription first; premium-licensed installations are
    skipped. Only servers whose host list is exactly empty are updated.
    """
    subscription.update()

    if settings.app.license and settings.app.license_plan != 'premium':
        return

    from pritunl import server

    this_host = settings.local.host_id
    for svr in server.iter_servers(fields=['hosts']):
        # Only claim servers with an exactly-empty host list.
        if svr.hosts == []:
            svr.hosts = [this_host]
            svr.commit('hosts')
def server_link_get(server_id):
    """Return the link status list for a server as a JSON response."""
    if settings.app.demo_mode:
        cached_resp = utils.demo_get_cache()
        if cached_resp:
            return utils.jsonify(cached_resp)

    svr = server.get_by_id(server_id, fields=(
        '_id', 'status', 'links', 'replica_count', 'instances'))
    if not svr:
        return flask.abort(404)

    # True when this server is running fewer instances than replicas.
    hosts_offline = svr.replica_count - len(svr.instances) > 0

    links = []
    if svr.links:
        # Linked server id -> use_local_address flag for that link.
        use_local_by_id = {}
        for lnk in svr.links:
            use_local_by_id[lnk['server_id']] = lnk['use_local_address']

        link_query = {
            '_id': {'$in': list(use_local_by_id)},
        }

        for linked in server.iter_servers(spec=link_query, fields=(
                '_id', 'status', 'name', 'replica_count', 'instances')):
            linked_offline = linked.replica_count - \
                len(linked.instances) > 0

            if svr.status != ONLINE:
                # Undefined while this server is offline.
                link_status = None
            elif hosts_offline or linked_offline:
                link_status = OFFLINE
            elif linked.status == ONLINE:
                link_status = ONLINE
            else:
                link_status = OFFLINE

            links.append({
                'id': linked.id,
                'server': svr.id,
                'status': link_status,
                'name': linked.name,
                'address': None,
                'use_local_address': use_local_by_id[linked.id],
            })

    if settings.app.demo_mode:
        utils.demo_set_cache(links)
    return utils.jsonify(links)
def status_get():
    """Return a JSON summary of overall system status.

    Counts servers and online servers, collects the distinct set of
    connected client ids across all servers, and includes host, network
    and notification details.
    """
    orgs_count = 0  # Not computed here; reported as 0.
    servers_count = 0
    servers_online_count = 0
    clients_count = 0
    clients = set()

    for svr in server.iter_servers():
        servers_count += 1
        if svr.status:
            servers_online_count += 1
        # MongoDict doesnt support set(svr.clients)
        clients = clients | set(svr.clients.keys())
    clients_count = len(clients)

    user_count = organization.get_user_count_multi()
    local_networks = utils.get_local_networks()

    if settings.local.openssl_heartbleed:
        # Fixed typo in user-facing message: "containting" -> "containing".
        notification = 'You are running an outdated version of openssl ' + \
            'containing the heartbleed bug. This could allow an attacker ' + \
            'to compromise your server. Please upgrade your openssl ' + \
            'package and restart the pritunl service.'
    else:
        notification = settings.local.notification

    return utils.jsonify({
        'org_count': orgs_count,
        'users_online': clients_count,
        'user_count': user_count,
        'servers_online': servers_online_count,
        'server_count': servers_count,
        'server_version': __version__,
        'current_host': settings.local.host_id,
        'public_ip': settings.local.public_ip,
        'local_networks': local_networks,
        'notification': notification,
    })
def status_get():
    """Return a JSON summary of overall system status.

    Counts servers and online servers, collects the distinct set of
    connected client ids across all servers, and includes network and
    notification details.
    """
    orgs_count = 0  # Not computed here; reported as 0.
    servers_count = 0
    servers_online_count = 0
    clients_count = 0
    clients = set()

    for svr in server.iter_servers():
        servers_count += 1
        if svr.status:
            servers_online_count += 1
        # MongoDict doesnt support set(svr.clients)
        clients = clients | set(svr.clients.keys())
    clients_count = len(clients)

    user_count = organization.get_user_count_multi()
    local_networks = utils.get_local_networks()

    if settings.local.openssl_heartbleed:
        # Fixed typo in user-facing message: "containting" -> "containing".
        notification = 'You are running an outdated version of openssl ' + \
            'containing the heartbleed bug. This could allow an attacker ' + \
            'to compromise your server. Please upgrade your openssl ' + \
            'package and restart the pritunl service.'
    else:
        notification = settings.local.notification

    return utils.jsonify({
        'org_count': orgs_count,
        'users_online': clients_count,
        'user_count': user_count,
        'servers_online': servers_online_count,
        'server_count': servers_count,
        'server_version': __version__,
        'public_ip': settings.local.public_ip,
        'local_networks': local_networks,
        'notification': notification,
    })
def export_get():
    """Build a tar archive of all application data and return it.

    Archives the auth log, database file, server cert/key, version file,
    every organization (with its users and CA cert files) and every
    server, then streams the tar back as an attachment download. The
    archive is always deleted from disk before returning.
    """
    data_path = app_server.data_path
    temp_path = os.path.join(data_path, TEMP_DIR)
    empty_temp_path = os.path.join(temp_path, EMPTY_TEMP_DIR)
    # Archive name embeds the current local time for uniqueness.
    data_archive_name = '%s_%s.tar' % (
        APP_NAME, time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()))
    data_archive_path = os.path.join(temp_path, data_archive_name)

    # Create empty temp directory to recreate temp dirs in tarfile
    if not os.path.exists(empty_temp_path):
        os.makedirs(empty_temp_path)

    tar_file = tarfile.open(data_archive_path, 'w')
    try:
        # Top-level data files.
        tar_add(tar_file, os.path.join(data_path, AUTH_LOG_NAME))
        tar_add(tar_file, os.path.join(data_path, 'pritunl.db'))
        tar_add(tar_file, os.path.join(data_path, SERVER_CERT_NAME))
        tar_add(tar_file, os.path.join(data_path, SERVER_KEY_NAME))
        tar_add(tar_file, os.path.join(data_path, VERSION_NAME))

        for org in organization.iter_orgs():
            tar_add(tar_file, org.get_path())
            # Add the empty dir so extraction recreates the org temp dir.
            tar_file.add(empty_temp_path, arcname=os.path.relpath(
                os.path.join(org.path, TEMP_DIR), data_path))
            for user in org.iter_users():
                tar_add(tar_file, user.reqs_path)
                tar_add(tar_file, user.key_path)
                tar_add(tar_file, user.cert_path)
                tar_add(tar_file, user.get_path())
            tar_add(tar_file, org.ca_cert.reqs_path)
            tar_add(tar_file, org.ca_cert.key_path)
            tar_add(tar_file, org.ca_cert.cert_path)
            tar_add(tar_file, org.ca_cert.get_path())

        for svr in server.iter_servers():
            tar_add(tar_file, svr.dh_param_path)
            tar_add(tar_file, svr.ip_pool_path)
            tar_add(tar_file, svr.get_path())
            tar_add(tar_file, os.path.join(svr.path, NODE_SERVER))
            # Add the empty dir so extraction recreates the server temp dir.
            tar_file.add(empty_temp_path, arcname=os.path.relpath(
                os.path.join(svr.path, TEMP_DIR), data_path))

        tar_file.close()

        # NOTE(review): the archive is opened in text mode ('r'); this is
        # Python 2 era code — binary mode would be required on Python 3.
        with open(data_archive_path, 'r') as archive_file:
            response = flask.Response(response=archive_file.read(),
                mimetype='application/octet-stream')
            response.headers.add('Content-Disposition',
                'attachment; filename="%s"' % data_archive_name)
            return response
    finally:
        # Best-effort cleanup; close may be a double-close on success.
        try:
            tar_file.close()
        except OSError:
            pass
        try:
            os.remove(data_archive_path)
        except OSError:
            pass
def main(default_conf=None):
    """Command line entry point for pritunl.

    Reads the sub-command from ``sys.argv`` (defaulting to ``start``),
    handles the administrative one-shot commands (each exits via
    ``sys.exit``), and otherwise optionally daemonizes and starts the
    server with ``pritunl.init_server()``.
    """
    # First positional argument selects the sub-command.
    if len(sys.argv) > 1:
        cmd = sys.argv[1]
    else:
        cmd = 'start'

    parser = optparse.OptionParser(usage=USAGE)

    # Only register the options relevant to the chosen sub-command.
    if cmd == 'start':
        parser.add_option('-d', '--daemon', action='store_true',
            help='Daemonize process')
        parser.add_option('-p', '--pidfile', type='string',
            help='Path to create pid file')
        parser.add_option('-c', '--conf', type='string',
            help='Path to configuration file')
        parser.add_option('-q', '--quiet', action='store_true',
            help='Suppress logging output')
    elif cmd == 'logs':
        parser.add_option('--archive', action='store_true',
            help='Archive log file')
        parser.add_option('--tail', action='store_true',
            help='Tail log file')
        parser.add_option('--limit', type='int',
            help='Limit log lines')
        parser.add_option('--natural', action='store_true',
            help='Natural log sort')
    elif cmd == 'set':
        # "set" takes free-form group.key/value positionals.
        parser.disable_interspersed_args()
    (options, args) = parser.parse_args()

    # Resolve the configuration path before any setup runs.
    if hasattr(options, 'conf') and options.conf:
        conf_path = options.conf
    else:
        conf_path = default_conf
    pritunl.set_conf_path(conf_path)

    if cmd == 'version':
        print('%s v%s' % (pritunl.__title__, pritunl.__version__))
        sys.exit(0)
    elif cmd == 'setup-key':
        from pritunl import setup
        from pritunl import settings
        setup.setup_loc()
        print(settings.local.setup_key)
        sys.exit(0)
    elif cmd == 'reset-version':
        from pritunl.constants import MIN_DATABASE_VER
        from pritunl import setup
        from pritunl import utils
        setup.setup_db()
        utils.set_db_ver(pritunl.__version__, MIN_DATABASE_VER)
        time.sleep(.2)
        print('Database version reset to %s' % pritunl.__version__)
        sys.exit(0)
    elif cmd == 'reset-password':
        from pritunl import setup
        from pritunl import auth
        setup.setup_db()
        username, password = auth.reset_password()
        # NOTE(review): the "******" literals carry no %s placeholders for
        # the % arguments — this looks like a redacted format string;
        # confirm against the upstream source.
        print('Administrator password successfully reset:\n' + \
            ' username: "******"\n password: "******"' % (username, password))
        sys.exit(0)
    elif cmd == 'default-password':
        from pritunl import setup
        from pritunl import auth
        setup.setup_db()
        username, password = auth.get_default_password()
        if not password:
            print('No default password available, use reset-password')
        else:
            # NOTE(review): same apparent redaction as reset-password above.
            print('Administrator default password:\n' + \
                ' username: "******"\n password: "******"' % (username, password))
        sys.exit(0)
    elif cmd == 'reconfigure':
        from pritunl import setup
        from pritunl import settings
        setup.setup_loc()
        # Clearing the URI forces the setup wizard on next start.
        settings.conf.mongodb_uri = None
        settings.conf.commit()
        time.sleep(.2)
        print('Database configuration successfully reset')
        sys.exit(0)
    elif cmd == 'get':
        from pritunl import setup
        from pritunl import settings
        setup.setup_db_host()
        if len(args) != 2:
            raise ValueError('Invalid arguments')
        # Argument is "group" or "group.key".
        split = args[1].split('.')
        key_str = None
        group_str = split[0]
        if len(split) > 1:
            key_str = split[1]
        if group_str == 'host':
            group = settings.local.host
        else:
            group = getattr(settings, group_str)
        if key_str:
            val = getattr(group, key_str)
            print('%s.%s = %s' % (group_str, key_str,
                json.dumps(val, default=lambda x: str(x))))
        else:
            # No key: dump every field in the group.
            for field in group.fields:
                val = getattr(group, field)
                print('%s.%s = %s' % (group_str, field,
                    json.dumps(val, default=lambda x: str(x))))
        sys.exit(0)
    elif cmd == 'set':
        from pritunl.constants import HOSTS_UPDATED
        from pritunl import setup
        from pritunl import settings
        from pritunl import event
        from pritunl import messenger
        setup.setup_db_host()
        if len(args) != 3:
            raise ValueError('Invalid arguments')
        group_str, key_str = args[1].split('.')
        if group_str == 'host':
            group = settings.local.host
        else:
            group = getattr(settings, group_str)
        val_str = args[2]
        try:
            val = json.loads(val_str)
        except ValueError:
            # Not valid JSON; treat the raw argument as a string value.
            val = json.loads(json.JSONEncoder().encode(val_str))
        setattr(group, key_str, val)
        if group_str == 'host':
            # Host settings commit to the host doc and notify the cluster.
            settings.local.host.commit()
            event.Event(type=HOSTS_UPDATED)
            messenger.publish('hosts', 'updated')
        else:
            settings.commit()
        time.sleep(.2)
        print('%s.%s = %s' % (group_str, key_str,
            json.dumps(getattr(group, key_str),
                default=lambda x: str(x))))
        print('Successfully updated configuration. This change is ' \
            'stored in the database and has been applied to all hosts ' \
            'in the cluster.')
        sys.exit(0)
    elif cmd == 'unset':
        from pritunl import setup
        from pritunl import settings
        setup.setup_db()
        if len(args) != 2:
            raise ValueError('Invalid arguments')
        group_str, key_str = args[1].split('.')
        group = getattr(settings, group_str)
        group.unset(key_str)
        settings.commit()
        time.sleep(.2)
        print('%s.%s = %s' % (group_str, key_str,
            json.dumps(getattr(group, key_str),
                default=lambda x: str(x))))
        print('Successfully updated configuration. This change is ' \
            'stored in the database and has been applied to all hosts ' \
            'in the cluster.')
        sys.exit(0)
    elif cmd == 'set-mongodb':
        from pritunl import setup
        from pritunl import settings
        setup.setup_loc()
        if len(args) > 1:
            mongodb_uri = args[1]
        else:
            mongodb_uri = None
        settings.conf.mongodb_uri = mongodb_uri
        settings.conf.commit()
        time.sleep(.2)
        print('Database configuration successfully set')
        sys.exit(0)
    elif cmd == 'reset-ssl-cert':
        from pritunl import setup
        from pritunl import settings
        setup.setup_db()
        # Clear both the static cert/key and all ACME (Let's Encrypt) state.
        settings.app.server_cert = None
        settings.app.server_key = None
        settings.app.acme_timestamp = None
        settings.app.acme_key = None
        settings.app.acme_domain = None
        settings.commit()
        time.sleep(.2)
        print('Server ssl certificate successfully reset')
        sys.exit(0)
    elif cmd == 'destroy-secondary':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        setup.setup_db()
        print('Destroying secondary database...')
        # Drop ephemeral/derived collections only; primary data is kept.
        mongo.get_collection('clients').drop()
        mongo.get_collection('clients_pool').drop()
        mongo.get_collection('transaction').drop()
        mongo.get_collection('queue').drop()
        mongo.get_collection('tasks').drop()
        mongo.get_collection('messages').drop()
        mongo.get_collection('users_key_link').drop()
        mongo.get_collection('auth_sessions').drop()
        mongo.get_collection('auth_csrf_tokens').drop()
        mongo.get_collection('auth_limiter').drop()
        mongo.get_collection('otp').drop()
        mongo.get_collection('otp_cache').drop()
        mongo.get_collection('sso_tokens').drop()
        mongo.get_collection('sso_push_cache').drop()
        mongo.get_collection('sso_client_cache').drop()
        mongo.get_collection('sso_passcode_cache').drop()
        setup.upsert_indexes()
        # Mark every server offline and clear runtime state and locks.
        server_coll = mongo.get_collection('servers')
        server_coll.update_many({}, {
            '$set': {
                'status': 'offline',
                'instances': [],
                'instances_count': 0,
            },
            '$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            },
        })
        print('Secondary database destroyed')
        sys.exit(0)
    elif cmd == 'repair-database':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        setup.setup_db()
        print('Repairing database...')
        # Same drops as destroy-secondary plus logs and the IP pool.
        mongo.get_collection('clients').drop()
        mongo.get_collection('clients_pool').drop()
        mongo.get_collection('transaction').drop()
        mongo.get_collection('queue').drop()
        mongo.get_collection('tasks').drop()
        mongo.get_collection('messages').drop()
        mongo.get_collection('users_key_link').drop()
        mongo.get_collection('auth_sessions').drop()
        mongo.get_collection('auth_csrf_tokens').drop()
        mongo.get_collection('auth_limiter').drop()
        mongo.get_collection('otp').drop()
        mongo.get_collection('otp_cache').drop()
        mongo.get_collection('sso_tokens').drop()
        mongo.get_collection('sso_push_cache').drop()
        mongo.get_collection('sso_client_cache').drop()
        mongo.get_collection('sso_passcode_cache').drop()
        mongo.get_collection('logs').drop()
        mongo.get_collection('log_entries').drop()
        mongo.get_collection('servers_ip_pool').drop()
        setup.upsert_indexes()
        server_coll = mongo.get_collection('servers')
        server_coll.update_many({}, {
            '$set': {
                'status': 'offline',
                'instances': [],
                'instances_count': 0,
            },
            '$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            },
        })
        # Rebuild the dropped IP pool for every server (best effort).
        from pritunl import server
        for svr in server.iter_servers():
            try:
                svr.ip_pool.sync_ip_pool()
            except:
                # NOTE(review): bare except also swallows SystemExit and
                # KeyboardInterrupt; appears intended as best-effort.
                logger.exception(
                    'Failed to sync server IP pool', 'tasks',
                    server_id=svr.id,
                )
        # Reset server state again after the pool sync.
        server_coll.update_many({}, {
            '$set': {
                'status': 'offline',
                'instances': [],
                'instances_count': 0,
            },
            '$unset': {
                'network_lock': '',
                'network_lock_ttl': '',
            },
        })
        print('Database repair complete')
        sys.exit(0)
    elif cmd == 'logs':
        from pritunl import setup
        from pritunl import logger
        setup.setup_db()
        log_view = logger.LogView()
        if options.archive:
            # Optional positional argument is the archive destination.
            if len(args) > 1:
                archive_path = args[1]
            else:
                archive_path = './'
            print('Log archived to: ' + log_view.archive_log(
                archive_path, options.natural, options.limit))
        elif options.tail:
            for msg in log_view.tail_log_lines():
                print(msg)
        else:
            print(
                log_view.get_log_lines(
                    natural=options.natural,
                    limit=options.limit,
                ))
        sys.exit(0)
    elif cmd == 'clear-auth-limit':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        from pritunl import settings
        setup.setup_db()
        mongo.get_collection('auth_limiter').delete_many({})
        print('Auth limiter cleared')
        sys.exit(0)
    elif cmd == 'clear-logs':
        from pritunl import setup
        from pritunl import logger
        from pritunl import mongo
        from pritunl import settings
        setup.setup_db()
        mongo.get_collection('logs').drop()
        mongo.get_collection('log_entries').drop()
        # Recreate the capped log collections at their configured sizes.
        prefix = settings.conf.mongodb_collection_prefix or ''
        log_limit = settings.app.log_limit
        mongo.database.create_collection(prefix + 'logs', capped=True,
            size=log_limit * 1024, max=log_limit)
        log_entry_limit = settings.app.log_entry_limit
        mongo.database.create_collection(prefix + 'log_entries',
            capped=True, size=log_entry_limit * 512, max=log_entry_limit)
        print('Log entries cleared')
        sys.exit(0)
    elif cmd != 'start':
        raise ValueError('Invalid command')

    from pritunl import settings

    if options.quiet:
        settings.local.quiet = True

    if options.daemon:
        pid = os.fork()
        if pid > 0:
            # Parent: optionally record the child's pid, then exit.
            if options.pidfile:
                with open(options.pidfile, 'w') as pid_file:
                    pid_file.write('%s' % pid)
            sys.exit(0)
    elif not options.quiet:
        # Foreground start: show the startup banner.
        print('##############################################################')
        print('# #')
        print('# /$$ /$$ /$$ #')
        print('# |__/ | $$ | $$ #')
        print('# /$$$$$$ /$$$$$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$ | $$ #')
        print('# /$$__ $$ /$$__ $$| $$|_ $$_/ | $$ | $$| $$__ $$| $$ #')
        print('# | $$ \ $$| $$ \__/| $$ | $$ | $$ | $$| $$ \ $$| $$ #')
        print('# | $$ | $$| $$ | $$ | $$ /$$| $$ | $$| $$ | $$| $$ #')
        print('# | $$$$$$$/| $$ | $$ | $$$$/| $$$$$$/| $$ | $$| $$ #')
        print('# | $$____/ |__/ |__/ \____/ \______/ |__/ |__/|__/ #')
        print('# | $$ #')
        print('# | $$ #')
        print('# |__/ #')
        print('# #')
        print('##############################################################')

    pritunl.init_server()
def thread():
    """Populate the database with simulated demo data.

    Marks every host and server online with fabricated metrics and
    instances, inserts a fake connected client for each certificate
    client user, and brings all links and their locations online.
    """
    platforms = list(DESKTOP_PLATFORMS)
    # Fixed historical timestamp used for all demo records.
    start_timestamp = datetime.datetime(2015, 12, 28, 4, 1, 0)

    hosts_collection = mongo.get_collection('hosts')
    servers_collection = mongo.get_collection('servers')
    clients_collection = mongo.get_collection('clients')

    # remove()/update()/insert() were removed in pymongo 4; use the
    # explicit one/many methods, matching update_many usage elsewhere.
    clients_collection.delete_many({})

    for hst in host.iter_hosts():
        hosts_collection.update_one({
            '_id': hst.id,
        }, {'$set': {
            'server_count': 0,
            'device_count': 0,
            'cpu_usage': 0,
            'mem_usage': 0,
            'thread_count': 0,
            'open_file_count': 0,
            'status': ONLINE,
            'start_timestamp': start_timestamp,
            'ping_timestamp': start_timestamp,
            'auto_public_address': None,
            'auto_public_address6': None,
            'auto_public_host': hst.name + '.pritunl.com',
            'auto_public_host6': hst.name + '.pritunl.com',
        }})

    for svr in server.iter_servers():
        prefered_hosts = host.get_prefered_hosts(
            svr.hosts, svr.replica_count)

        instances = []
        for hst in prefered_hosts:
            instances.append({
                'instance_id': utils.ObjectId(),
                'host_id': hst,
                'ping_timestamp': utils.now(),
            })

        servers_collection.update_one({
            '_id': svr.id,
        }, {'$set': {
            'status': ONLINE,
            'pool_cursor': None,
            'start_timestamp': start_timestamp,
            'availability_group': DEFAULT,
            'instances': instances,
            'instances_count': len(instances),
        }})

        for org in svr.iter_orgs():
            for usr in org.iter_users():
                if usr.type != CERT_CLIENT:
                    continue

                virt_address = svr.get_ip_addr(org.id, usr.id)
                virt_address6 = svr.ip4to6(virt_address)

                doc = {
                    '_id': utils.ObjectId(),
                    'user_id': usr.id,
                    'server_id': svr.id,
                    'host_id': settings.local.host_id,
                    'timestamp': start_timestamp,
                    'platform': random.choice(platforms),
                    'type': CERT_CLIENT,
                    'device_name': utils.random_name(),
                    'mac_addr': utils.rand_str(16),
                    'network': svr.network,
                    # Random plausible public address for display.
                    'real_address': str(
                        ipaddress.IPAddress(100000000 + random.randint(
                            0, 1000000000))),
                    'virt_address': virt_address,
                    'virt_address6': virt_address6,
                    'host_address': settings.local.host.local_addr,
                    'host_address6': settings.local.host.local_addr6,
                    'dns_servers': [],
                    'dns_suffix': None,
                    'connected_since':
                        int(start_timestamp.strftime('%s')),
                }
                clients_collection.insert_one(doc)

    for lnk in link.iter_links():
        lnk.status = ONLINE
        lnk.commit()

        for location in lnk.iter_locations():
            active = False
            for hst in location.iter_hosts():
                if not active:
                    # First host in each location becomes the active one.
                    hst.active = True
                    active = True
                hst.status = AVAILABLE
                hst.commit(('active', 'status'))

    logger.info('Demo initiated', 'demo')
def thread():
    """Populate the database with simulated demo data.

    Marks every host and server online with fabricated metrics and
    instances, inserts a fake connected client for each certificate
    client user, and brings all links and their locations online.
    """
    platforms = list(DESKTOP_PLATFORMS)
    # Fixed historical timestamp used for all demo records.
    start_timestamp = datetime.datetime(2015, 12, 28, 4, 1, 0)

    hosts_collection = mongo.get_collection('hosts')
    servers_collection = mongo.get_collection('servers')
    clients_collection = mongo.get_collection('clients')

    # remove()/update()/insert() were removed in pymongo 4; use the
    # explicit one/many methods, matching update_many usage elsewhere.
    clients_collection.delete_many({})

    for hst in host.iter_hosts():
        hosts_collection.update_one({
            '_id': hst.id,
        }, {
            '$set': {
                'server_count': 0,
                'device_count': 0,
                'cpu_usage': 0,
                'mem_usage': 0,
                'thread_count': 0,
                'open_file_count': 0,
                'status': ONLINE,
                'start_timestamp': start_timestamp,
                'ping_timestamp': start_timestamp,
                'auto_public_address': None,
                'auto_public_address6': None,
                'auto_public_host': hst.name + '.pritunl.com',
                'auto_public_host6': hst.name + '.pritunl.com',
            }
        })

    for svr in server.iter_servers():
        prefered_hosts = host.get_prefered_hosts(svr.hosts,
            svr.replica_count)

        instances = []
        for hst in prefered_hosts:
            instances.append({
                'instance_id': utils.ObjectId(),
                'host_id': hst,
                'ping_timestamp': utils.now(),
            })

        servers_collection.update_one({
            '_id': svr.id,
        }, {
            '$set': {
                'status': ONLINE,
                'pool_cursor': None,
                'start_timestamp': start_timestamp,
                'availability_group': DEFAULT,
                'instances': instances,
                'instances_count': len(instances),
            }
        })

        for org in svr.iter_orgs():
            for usr in org.iter_users():
                if usr.type != CERT_CLIENT:
                    continue

                virt_address = svr.get_ip_addr(org.id, usr.id)
                virt_address6 = svr.ip4to6(virt_address)

                doc = {
                    '_id': utils.ObjectId(),
                    'user_id': usr.id,
                    'server_id': svr.id,
                    'host_id': settings.local.host_id,
                    'timestamp': start_timestamp,
                    'platform': random.choice(platforms),
                    'type': CERT_CLIENT,
                    'device_name': utils.random_name(),
                    'mac_addr': utils.rand_str(16),
                    'network': svr.network,
                    # Random plausible public address for display.
                    'real_address': str(
                        ipaddress.IPAddress(
                            100000000 + random.randint(0, 1000000000))),
                    'virt_address': virt_address,
                    'virt_address6': virt_address6,
                    'host_address': settings.local.host.local_addr,
                    'host_address6': settings.local.host.local_addr6,
                    'dns_servers': [],
                    'dns_suffix': None,
                    'connected_since':
                        int(start_timestamp.strftime('%s')),
                }
                clients_collection.insert_one(doc)

    for lnk in link.iter_links():
        lnk.status = ONLINE
        lnk.commit()

        for location in lnk.iter_locations():
            active = False
            for hst in location.iter_hosts():
                if not active:
                    # First host in each location becomes the active one.
                    hst.active = True
                    active = True
                hst.status = AVAILABLE
                hst.commit(('active', 'status'))

    logger.info('Demo initiated', 'demo')
def export_get():
    """Build a tar archive of all application data and return it.

    Archives the auth log, database file, server cert/key, version file,
    every organization (including its users and CA cert files) and every
    server, then streams the archive back as an attachment download.
    The on-disk archive is always removed before returning.
    """
    data_path = app_server.data_path
    temp_path = os.path.join(data_path, TEMP_DIR)
    empty_temp_path = os.path.join(temp_path, EMPTY_TEMP_DIR)
    # Archive name embeds the current local time for uniqueness.
    data_archive_name = '%s_%s.tar' % (APP_NAME,
        time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime()))
    data_archive_path = os.path.join(temp_path, data_archive_name)

    # Create empty temp directory to recreate temp dirs in tarfile
    if not os.path.exists(empty_temp_path):
        os.makedirs(empty_temp_path)

    tar_file = tarfile.open(data_archive_path, 'w')
    try:
        # Top-level data files.
        tar_add(tar_file, os.path.join(data_path, AUTH_LOG_NAME))
        tar_add(tar_file, os.path.join(data_path, 'pritunl.db'))
        tar_add(tar_file, os.path.join(data_path, SERVER_CERT_NAME))
        tar_add(tar_file, os.path.join(data_path, SERVER_KEY_NAME))
        tar_add(tar_file, os.path.join(data_path, VERSION_NAME))

        for org in organization.iter_orgs():
            tar_add(tar_file, org.get_path())
            # Empty dir member so extraction recreates the org temp dir.
            tar_file.add(empty_temp_path,
                arcname=os.path.relpath(os.path.join(org.path, TEMP_DIR),
                    data_path))
            for user in org.iter_users():
                tar_add(tar_file, user.reqs_path)
                tar_add(tar_file, user.key_path)
                tar_add(tar_file, user.cert_path)
                tar_add(tar_file, user.get_path())
            tar_add(tar_file, org.ca_cert.reqs_path)
            tar_add(tar_file, org.ca_cert.key_path)
            tar_add(tar_file, org.ca_cert.cert_path)
            tar_add(tar_file, org.ca_cert.get_path())

        for svr in server.iter_servers():
            tar_add(tar_file, svr.dh_param_path)
            tar_add(tar_file, svr.ip_pool_path)
            tar_add(tar_file, svr.get_path())
            tar_add(tar_file, os.path.join(svr.path, NODE_SERVER))
            # Empty dir member so extraction recreates the server temp dir.
            tar_file.add(empty_temp_path,
                arcname=os.path.relpath(os.path.join(svr.path, TEMP_DIR),
                    data_path))

        tar_file.close()

        # NOTE(review): archive opened in text mode ('r'); this is
        # Python 2 era code — binary mode would be needed on Python 3.
        with open(data_archive_path, 'r') as archive_file:
            response = flask.Response(response=archive_file.read(),
                mimetype='application/octet-stream')
            response.headers.add('Content-Disposition',
                'attachment; filename="%s"' % data_archive_name)
            return response
    finally:
        # Best-effort cleanup; close may be a double-close on success.
        try:
            tar_file.close()
        except OSError:
            pass
        try:
            os.remove(data_archive_path)
        except OSError:
            pass