def force_stop(self, silent=False):
    """Forcefully stop this server and wait for the thread to confirm.

    Publishes a 'force_stop' message to the server thread and blocks
    until a 'stopped' response arrives or the subscription times out.

    :param silent: when True, suppress the UI event and log entry.
    :raises ServerStopError: if no 'stopped' response is received.
    """
    op_lock_key = self.get_cache_key('op_lock')
    cache_db.lock_acquire(op_lock_key)
    try:
        if not self.status:
            # Server is not running, nothing to stop.
            return
        logger.debug('Forcing stop server. %r' % {
            'server_id': self.id,
        })

        self.publish('force_stop')
        # any() short-circuits on the first matching message, exactly
        # like a break out of the subscribe loop.
        got_stop_event = any(
            message == 'stopped'
            for message in cache_db.subscribe(self.get_cache_key(),
                SUB_RESPONSE_TIMEOUT))

        if not got_stop_event:
            raise ServerStopError(
                'Server thread failed to return stop event', {
                    'server_id': self.id,
                })

        if not silent:
            Event(type=SERVERS_UPDATED)
            LogEntry(message='Stopped server "%s".' % self.name)
    finally:
        cache_db.lock_release(op_lock_key)
def start(self, silent=False):
    """Start this server on its remote node.

    Generates the ovpn config and posts it to the node server's HTTP
    api; on success marks the server running and spawns the local
    monitoring threads.

    :param silent: when True, suppress the UI event and log entry.
    :raises ServerMissingOrg: if no organizations are attached.
    :raises NodeConnectionError: if the node cannot be reached.
    :raises InvalidNodeAPIKey: if the node rejects the api key (401).
    :raises ServerStartError: on any other non-200 node response.
    """
    op_lock_key = self.get_cache_key('op_lock')
    cache_db.lock_acquire(op_lock_key)
    try:
        if self.status:
            # Already running.
            return
        if not self.org_count:
            raise ServerMissingOrg(
                'Server cannot be started without any organizations', {
                    'server_id': self.id,
                })
        logger.debug('Starting node server. %r' % {
            'server_id': self.id,
        })

        ovpn_conf = self._generate_ovpn_conf()
        try:
            resp = self._request('post', json_data={
                'interface': self.interface,
                'network': self.network,
                'local_networks': self.local_networks,
                'ovpn_conf': ovpn_conf,
                'server_ver': NODE_SERVER_VER,
            })
        except httplib.HTTPException:
            raise NodeConnectionError('Failed to connect to node server', {
                'server_id': self.id,
            })

        if resp.status_code == 401:
            raise InvalidNodeAPIKey('Invalid node server api key', {
                'server_id': self.id,
                'status_code': resp.status_code,
                'reason': resp.reason,
            })
        if resp.status_code != 200:
            raise ServerStartError('Failed to start node server', {
                'server_id': self.id,
                'status_code': resp.status_code,
                'reason': resp.reason,
            })

        # Start time is recorded one second in the past -- presumably so
        # uptime reads as nonzero immediately; confirm with consumers.
        cache_db.dict_set(self.get_cache_key(), 'start_time',
            str(int(time.time() - 1)))
        self.clear_output()
        self._interrupt = False
        self.status = True
        self._start_server_threads()

        if not silent:
            Event(type=SERVERS_UPDATED)
            LogEntry(message='Started server "%s".' % self.name)
    finally:
        cache_db.lock_release(op_lock_key)
def sort_users_cache(self):
    """Rebuild this server's sorted-users cache and page index.

    Collects all users attached to the server, writes the client-user
    count, then builds a name-sorted id list plus a page-number ->
    list-offset index for client users. Both structures are built under
    temporary uuid-suffixed keys and atomically renamed into place under
    the 'sort' lock so concurrent sorts cannot clobber each other.

    :raises: re-raises any error after removing the temp keys.
    """
    user_count = 0
    users_dict = {}
    users_sort = []
    # Create temp uuid key to prevent multiple threads modifying same key
    temp_suffix = 'temp_' + uuid.uuid4().hex
    temp_users_sorted_key = 'users_sorted_' + temp_suffix
    users_page_index_key = 'users_page_index_' + temp_suffix
    try:
        for user_id in cache_db.set_elements(self.get_cache_key('users')):
            user = User.get_user(self, id=user_id)
            if not user:
                # User id present in the set but no longer loadable.
                continue
            name_id = '%s_%s' % (user.name, user_id)
            if user.type == CERT_CLIENT:
                user_count += 1
            users_dict[name_id] = (user_id, user.type)
            users_sort.append(name_id)
        cache_db.set(self.get_cache_key('user_count'), str(user_count))

        cur_page = 0
        user_count = 0
        client_count = 0
        for name_id in sorted(users_sort):
            if users_dict[name_id][1] == CERT_CLIENT:
                # Floor division: "/" was correct only under Python 2 int
                # division; "//" keeps paging integral under Python 3 or
                # "from __future__ import division" as well.
                page = client_count // USER_PAGE_COUNT
                if page != cur_page:
                    # Record the list offset where this page begins.
                    cur_page = page
                    cache_db.dict_set(
                        self.get_cache_key(users_page_index_key),
                        str(cur_page), str(user_count))
                client_count += 1
            user_count += 1
            cache_db.list_rpush(self.get_cache_key(temp_users_sorted_key),
                users_dict[name_id][0])

        cache_db.lock_acquire(self.get_cache_key('sort'))
        try:
            # Atomically publish the freshly built structures.
            cache_db.rename(self.get_cache_key(users_page_index_key),
                self.get_cache_key('users_page_index'))
            cache_db.rename(self.get_cache_key(temp_users_sorted_key),
                self.get_cache_key('users_sorted'))
            cache_db.set(self.get_cache_key('users_page_total'),
                str(cur_page))
        finally:
            cache_db.lock_release(self.get_cache_key('sort'))
    except:
        # Bare except is deliberate: remove the temp keys on any failure
        # (including KeyboardInterrupt) before re-raising.
        cache_db.remove(self.get_cache_key(users_page_index_key))
        cache_db.remove(self.get_cache_key(temp_users_sorted_key))
        raise
def update_ip_pool(self):
    """Synchronize the cached ip pool with the server's client users.

    Under the pool lock: frees the local/remote address pair of any
    cached user that no longer exists, then allocates a fresh pair for
    every client user that has none. Commits the pool and fires a
    USERS_UPDATED event per org even if allocation fails part-way.
    """
    cache_key = self.get_cache_key('ip_pool')
    set_cache_key = self.get_cache_key('ip_pool_set')
    cache_db.lock_acquire(cache_key)
    try:
        # Host iterator over the server network. The first host is
        # skipped -- presumably reserved for the server itself; confirm.
        ip_pool = ipaddress.IPv4Network(self.network).iterhosts()
        ip_pool.next()
        # Build the set of "orgid-userid" keys for client-type users.
        users = set()
        for org in self.iter_orgs():
            for user in org.iter_users():
                if user.type == CERT_CLIENT:
                    users.add(org.id + '-' + user.id)
        # Release address pairs held by users that no longer exist.
        # Cached values have the form "local-remote".
        for user_id in cache_db.dict_keys(cache_key) - users:
            ip_set = cache_db.dict_get(cache_key, user_id)
            local_ip_addr, remote_ip_addr = ip_set.split('-')
            cache_db.set_remove(set_cache_key, local_ip_addr)
            cache_db.set_remove(set_cache_key, remote_ip_addr)
            cache_db.dict_remove(cache_key, user_id)
        try:
            # Allocate a pair for each client user without one.
            for user_id in users - cache_db.dict_keys(cache_key):
                while True:
                    remote_ip_addr = str(ip_pool.next())
                    ip_addr_endpoint = remote_ip_addr.split('.')[-1]
                    # Only the remote address's final octet is checked
                    # against VALID_IP_ENDPOINTS; the local address is
                    # taken unconditionally from the next host.
                    if ip_addr_endpoint not in VALID_IP_ENDPOINTS:
                        continue
                    local_ip_addr = str(ip_pool.next())
                    # Accept the pair only if neither address is in use.
                    if not cache_db.set_exists(set_cache_key,
                            local_ip_addr) and not cache_db.set_exists(
                            set_cache_key, remote_ip_addr):
                        cache_db.set_add(set_cache_key, local_ip_addr)
                        cache_db.set_add(set_cache_key, remote_ip_addr)
                        break
                cache_db.dict_set(cache_key, user_id,
                    local_ip_addr + '-' + remote_ip_addr)
        except StopIteration:
            # Network exhausted; remaining users stay unassigned.
            pass
        finally:
            # Commit whatever was assigned and notify all orgs, even on
            # pool exhaustion or an unexpected error.
            self._commit_ip_pool()
            for org in self.iter_orgs():
                Event(type=USERS_UPDATED, resource_id=org.id)
    finally:
        cache_db.lock_release(cache_key)
def start(self, silent=False):
    """Start this server's local openvpn process.

    Writes the ovpn config, prepares networking (ip forwarding and
    iptables), launches the server thread, then waits for the thread
    to publish a 'started' confirmation.

    :param silent: when True, suppress the UI event and log entry.
    :raises ServerMissingOrg: if no organizations are attached.
    :raises ServerStartError: if the thread reports 'stopped' or
        never confirms the start.
    """
    op_lock_key = self.get_cache_key('op_lock')
    cache_db.lock_acquire(op_lock_key)
    try:
        if self.status:
            # Already running.
            return
        if not self.org_count:
            raise ServerMissingOrg(
                'Server cannot be started without any organizations', {
                    'server_id': self.id,
                })
        logger.debug('Starting server. %r' % {
            'server_id': self.id,
        })

        self._generate_ovpn_conf()
        self._enable_ip_forwarding()
        self._set_iptables_rules()
        self.clear_output()

        threading.Thread(target=self._run_thread).start()

        started = False
        for event_msg in cache_db.subscribe(self.get_cache_key(),
                SUB_RESPONSE_TIMEOUT):
            if event_msg == 'started':
                started = True
                break
            if event_msg == 'stopped':
                # Thread came up and immediately died.
                raise ServerStartError('Server failed to start', {
                    'server_id': self.id,
                })
        if not started:
            raise ServerStartError(
                'Server thread failed to return start event', {
                    'server_id': self.id,
                })

        if not silent:
            Event(type=SERVERS_UPDATED)
            LogEntry(message='Started server "%s".' % self.name)
    finally:
        cache_db.lock_release(op_lock_key)
def stop(self):
    """Stop this server and wait for the thread to confirm.

    Publishes a 'stop' message and blocks until the server thread
    responds 'stopped' or the subscription times out.

    :raises ServerStopError: if no 'stopped' response is received.
    """
    lock_key = self.get_cache_key('op_lock')
    cache_db.lock_acquire(lock_key)
    try:
        if not self.status:
            # Not running, nothing to do.
            return
        logger.debug('Stopping server. %r' % {
            'server_id': self.id,
        })

        self.publish('stop')
        # for/else: the else clause fires only when the loop exhausts
        # the subscription without seeing a 'stopped' message.
        for message in cache_db.subscribe(self.get_cache_key(),
                SUB_RESPONSE_TIMEOUT):
            if message == 'stopped':
                break
        else:
            raise ServerStopError(
                'Server thread failed to return stop event', {
                    'server_id': self.id,
                })
    finally:
        cache_db.lock_release(lock_key)