def remove_host(self, host_id):
    """Detach host_id from this server, stopping any instance it runs.

    When an instance was running on the host, a 'start' event is
    published so remaining hosts can take over. Marks the server
    OFFLINE when no hosts remain.
    """
    if host_id not in self.hosts:
        logger.warning(
            "Attempted to remove host that does not exists",
            "server",
            server_id=self.id,
            host_id=host_id
        )
        return
    logger.debug("Removing host from server", "server",
        server_id=self.id,
        host_id=host_id)
    self.hosts.remove(host_id)
    # Atomically pull the host and its instance; matches only when an
    # instance was actually running on that host.
    response = self.collection.update(
        {"_id": self.id, "instances.host_id": host_id},
        {"$pull": {"hosts": host_id, "instances": {"host_id": host_id}},
            "$inc": {"instances_count": -1}},
    )
    if response["updatedExisting"]:
        # An instance was stopped; ask preferred remaining hosts to
        # start a replacement. NOTE(review): `host` is presumably the
        # host module — confirm it is imported at file level.
        self.publish("start", extra={"prefered_hosts":
            host.get_prefered_hosts(self.hosts, self.replica_count)})
    # Ensure the host is pulled even when no instance matched above;
    # new=True returns the post-update hosts list.
    doc = self.collection.find_and_modify(
        {"_id": self.id},
        {"$pull": {"hosts": host_id}},
        {"hosts": True},
        new=True
    )
    if doc and not doc["hosts"]:
        # Last host removed; the server can no longer run.
        self.status = OFFLINE
        self.commit("status")
def stop(self, timeout=VPN_OP_TIMEOUT, force=False):
    """Stop the server and wait up to timeout for a confirmation event.

    Raises ServerStopError when neither a 'started' nor 'stopped'
    message arrives before the subscription times out.
    """
    # Capture the event cursor before publishing so no message is missed.
    cursor_id = self.get_cursor_id()
    if not self.status:
        return
    logger.debug('Stopping server. %r' % {
        'server_id': self.id,
    })
    if force:
        self.publish('force_stop')
    else:
        self.publish('stop')
    for msg in self.subscribe(cursor_id=cursor_id, timeout=timeout):
        message = msg['message']
        if message == 'started':
            # Server came (back) up while we waited; record running
            # state and stop waiting.
            self.status = True
            self.host_id = None
            self.instance_id = None
            return
        elif message == 'stopped':
            self.status = False
            self.host_id = None
            self.instance_id = None
            return
    raise ServerStopError('Server stop timed out', {
        'server_id': self.id,
    })
def restart(self):
    """Restart the server; a server that is not online is just started."""
    if self.status != ONLINE:
        self.start()
        return
    logger.debug("Restarting server", "server", server_id=self.id)
    self.stop()
    self.start()
def remove(self):
    """Delete this organization, stopping and unlinking its servers.

    Returns the ids of the servers that referenced the organization.
    """
    users_coll = mongo.get_collection('users')
    servers_coll = mongo.get_collection('servers')
    logger.debug('Remove org', 'organization',
        org_id=self.id,
    )
    server_ids = []
    for svr in self.iter_servers():
        server_ids.append(svr.id)
        if svr.status == ONLINE:
            svr.stop()
    # Detach the org from every server document in one update.
    servers_coll.update({
        'organizations': self.id,
    }, {'$pull': {
        'organizations': self.id,
    }})
    mongo.MongoObject.remove(self)
    users_coll.remove({
        'org_id': self.id,
    })
    return server_ids
def _check_updates():
    """Background loop polling the notification server for status info."""
    while True:
        if not settings.app.update_check_rate:
            # Checking disabled; re-poll the setting in 30 seconds.
            time.sleep(30)
            continue
        logger.debug('Checking notifications...')
        try:
            request = urllib2.Request(
                settings.app.notification_server + '/%s' % settings.local.version_int)
            response = urllib2.urlopen(request, timeout=60)
            data = json.load(response)
            settings.local.notification = data.get('message', '')
            settings.local.www_state = data.get('www', OK)
            settings.local.vpn_state = data.get('vpn', OK)
        except:
            # Best effort; never let a network failure kill the loop.
            logger.exception('Failed to check notifications.')
        logger.debug('Checking subscription status...')
        try:
            pass # TODO
            #self.subscription_update()
        except:
            logger.exception('Failed to check subscription status.')
        time.sleep(settings.app.update_check_rate)
def claim_commit(self, fields=None):
    """Atomically claim this queue item for the current runner and commit."""
    doc = self.get_commit_doc(fields=fields)
    doc['runner_id'] = self.runner_id
    doc['ttl_timestamp'] = utils.now() + datetime.timedelta(
        seconds=self.ttl)
    # Match only items that are unclaimed or already ours.
    spec = {
        '_id': self.id,
        '$or': [
            {'runner_id': self.runner_id},
            {'runner_id': {'$exists': False}},
        ],
    }
    response = self.collection.update(spec, {'$set': doc})
    claimed = response['updatedExisting']
    self.claimed = claimed
    if claimed:
        self.keep_alive()
        logger.debug('Queue claimed', 'queue',
            queue_id=self.id,
            queue_type=self.type,
        )
    return claimed
def load_public_ip(attempts=1, timeout=5):
    """Populate settings.local.public_ip/public_ip6 from lookup servers.

    Retries the IPv4 lookup up to `attempts` times with a short backoff;
    the IPv6 lookup is a single best-effort attempt.
    """
    for i in xrange(attempts):
        if settings.local.public_ip:
            return
        if i:
            # Back off between retries.
            time.sleep(3)
            logger.info('Retrying get public ip address', 'setup')
        logger.debug('Getting public ip address', 'setup')
        try:
            request = urllib2.Request(
                settings.app.public_ip_server)
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = json.load(response)['ip']
            break
        except:
            # Best effort; fall through to the next retry.
            pass
    logger.debug('Getting public ipv6 address', 'setup')
    try:
        request = urllib2.Request(
            settings.app.public_ip6_server)
        response = urllib2.urlopen(request, timeout=timeout)
        settings.local.public_ip6 = json.load(response)['ip']
    except:
        pass
    if not settings.local.public_ip:
        logger.warning('Failed to get public ip address', 'setup')
def add_queue_item(queue_item):
    """Enqueue an item for running, pausing lower-priority running items.

    Items already registered in running_queues are ignored. Higher
    priority items preempt strictly lower priority running items,
    returning each paused item to the runner queue and releasing its
    thread slot.
    """
    if queue_item.id in running_queues:
        return
    running_queues[queue_item.id] = queue_item
    logger.debug('Add queue item for run', 'queue',
        queue_id=queue_item.id,
        queue_type=queue_item.type,
        queue_priority=queue_item.priority,
        queue_cpu_type=queue_item.cpu_type,
    )
    # Lower tuple value = higher scheduling priority.
    runner_queues[queue_item.cpu_type].put((
        abs(queue_item.priority - 4),
        queue_item,
    ))
    if queue_item.priority >= NORMAL:
        for running_queue in running_queues.values():
            if running_queue.priority >= queue_item.priority:
                continue
            if running_queue.pause():
                # Fixed log message typo ('Puase' -> 'Pause').
                logger.debug('Pause queue item', 'queue',
                    queue_id=running_queue.id,
                    queue_type=running_queue.type,
                    queue_priority=running_queue.priority,
                    queue_cpu_type=running_queue.cpu_type,
                )
                runner_queues[running_queue.cpu_type].put((
                    abs(running_queue.priority - 4),
                    running_queue,
                ))
                thread_limits[running_queue.cpu_type].release()
def stop_task(self):
    """Stop dh param generation for this queue item. Always returns True."""
    logger.debug("Stopping queued dh params", "server",
        queue_id=self.id,
        dh_param_bits=self.dh_param_bits)
    com = self.queue_com
    com.running.clear()
    com.popen_kill_all()
    return True
def add_host(self, host_id):
    """Add a host to the server, rejecting hosts shared with linked servers."""
    logger.debug('Adding host to server', 'server',
        server_id=self.id,
        host_id=host_id,
    )
    if host_id in self.hosts:
        logger.debug('Host already on server, skipping', 'server',
            server_id=self.id,
            host_id=host_id,
        )
        return
    if self.links:
        # Linked servers may not share any host with this server.
        candidate_hosts = set(self.hosts)
        candidate_hosts.add(host_id)
        linked_ids = [link['server_id'] for link in self.links]
        docs = self.collection.find(
            {'_id': {'$in': linked_ids}},
            {'_id': True, 'hosts': True},
        )
        for doc in docs:
            if candidate_hosts & set(doc['hosts']):
                raise ServerLinkCommonHostError(
                    'Servers have a common host')
    self.hosts.append(host_id)
    self.changed.add('hosts')
def stop(self, force=False):
    """Mark the server offline in the database and publish a stop event.

    Raises ServerStopError when the database says it was not running.
    """
    logger.debug('Stopping server', 'server',
        server_id=self.id,
    )
    if self.status != ONLINE:
        return
    update_doc = {'$set': {
        'status': OFFLINE,
        'start_timestamp': None,
        'instances': [],
        'instances_count': 0,
    }}
    response = self.collection.update({
        '_id': self.id,
        'status': ONLINE,
    }, update_doc)
    if not response['updatedExisting']:
        raise ServerStopError('Server not running', {
            'server_id': self.id,
        })
    self.status = OFFLINE
    self.publish('force_stop' if force else 'stop')
def remove_primary_user(self):
    """Delete the server's primary user and clear the cached references."""
    logger.debug("Removing primary user", "server", server_id=self.id)
    spec = {"resource_id": self.id}
    self.user_collection.remove(spec)
    self.primary_organization = None
    self.primary_user = None
def remove_link_user(self):
    """Delete the link user associated with this host."""
    logger.debug('Removing host link user. %r' % {
        'host_id': self.id,
    })
    spec = {'resource_id': self.id}
    self.user_collection.remove(spec)
def _generate_iptables_rules(self):
    """Build the iptables rule set for this server's VPN interface.

    Returns a list of iptables argument lists (without the iptables
    binary itself). Raises IptablesError when no default route can be
    found in the `route -n` output.
    """
    rules = []
    try:
        routes_output = subprocess.check_output(['route', '-n'],
            stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        logger.exception('Failed to get IP routes. %r' % {
            'server_id': self.id,
        })
        raise
    # Map destination network -> interface name from `route -n` output;
    # column 0 is the destination, column 7 the interface.
    routes = {}
    for line in routes_output.splitlines():
        line_split = line.split()
        if len(line_split) < 8 or not re.match(IP_REGEX, line_split[0]):
            continue
        routes[line_split[0]] = line_split[7]
    if '0.0.0.0' not in routes:
        raise IptablesError('Failed to find default network interface', {
            'server_id': self.id,
        })
    default_interface = routes['0.0.0.0']
    # Accept all traffic arriving on or forwarded from the VPN interface.
    rules.append(['INPUT', '-i', self.interface, '-j', 'ACCEPT'])
    rules.append(['FORWARD', '-i', self.interface, '-j', 'ACCEPT'])
    interfaces = set()
    # NAT client traffic out of the interface that routes each local
    # network, falling back to the default route interface.
    for network_address in self.local_networks or ['0.0.0.0/0']:
        args = ['POSTROUTING', '-t', 'nat']
        network = self._parse_network(network_address)[0]
        if network not in routes:
            logger.debug('Failed to find interface for local network ' + \
                'route, using default route. %r' % {
                    'server_id': self.id,
                })
            interface = default_interface
        else:
            interface = routes[network]
        interfaces.add(interface)
        if network != '0.0.0.0':
            args += ['-d', network_address]
        args += ['-s', self.network, '-o', interface, '-j', 'MASQUERADE']
        rules.append(args)
    # Allow established/related traffic in both directions for every
    # interface used above.
    for interface in interfaces:
        rules.append(['FORWARD', '-i', interface, '-o', self.interface,
            '-m', 'state', '--state', 'ESTABLISHED,RELATED',
            '-j', 'ACCEPT'])
        rules.append(['FORWARD', '-i', self.interface, '-o', interface,
            '-m', 'state', '--state', 'ESTABLISHED,RELATED',
            '-j', 'ACCEPT'])
    return rules
def restart(self):
    """Restart the server, or simply start it when it is not online."""
    if self.status != ONLINE:
        self.start()
        return
    logger.debug('Restarting server', 'server',
        server_id=self.id,
    )
    self.stop()
    self.start()
def restart(self):
    """Restart the server; non-online servers are started directly."""
    if self.status != ONLINE:
        self.start()
        return
    logger.debug('Restarting server. %r' % {
        'server_id': self.id,
    })
    self.stop()
    self.start()
def new_pooled():
    """Create a pooled organization asynchronously on a daemon thread."""
    worker = threading.Thread(target=new_org, kwargs={
        'type': ORG_POOL,
        'block': False,
    })
    worker.daemon = True
    worker.start()
    logger.debug('Queued pooled org', 'organization')
def set_iptables_rules(self):
    """Apply iptables/ip6tables rules, inserting any that do not exist.

    Rule-existence checks run concurrently: one check subprocess per
    rule, with an epoll loop waiting on stdout hangup. When a check
    exits nonzero (rule missing) the insert command is spawned and
    tracked the same way.
    """
    logger.debug('Setting iptables rules', 'server',
        server_id=self.server.id,
    )
    # fd -> (cmd, process, follow-up insert command or None)
    processes = {}
    poller = select.epoll()
    self.iptables_rules, self.ip6tables_rules = \
        self.generate_iptables_rules()
    for rule in self.iptables_rules:
        cmd, process = self.exists_iptables_rules(rule)
        fileno = process.stdout.fileno()
        processes[fileno] = (cmd, process, ['iptables', '-I'] + rule)
        poller.register(fileno, select.EPOLLHUP)
    for rule in self.ip6tables_rules:
        cmd, process = self.exists_ip6tables_rules(rule)
        fileno = process.stdout.fileno()
        processes[fileno] = (cmd, process, ['ip6tables', '-I'] + rule)
        poller.register(fileno, select.EPOLLHUP)
    try:
        while True:
            for fd, event in poller.poll(timeout=8):
                cmd, process, next_cmd = processes.pop(fd)
                poller.unregister(fd)
                if next_cmd:
                    # Check stage done; nonzero exit means the rule is
                    # missing, so spawn the insert command.
                    if process.poll():
                        process = subprocess.Popen(next_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                        )
                        fileno = process.stdout.fileno()
                        processes[fileno] = (next_cmd, process, None)
                        poller.register(fileno, select.EPOLLHUP)
                else:
                    # Insert stage done; nonzero exit is a hard failure.
                    retcode = process.poll()
                    if retcode:
                        std_out, err_out = process.communicate()
                        raise subprocess.CalledProcessError(
                            retcode, cmd, output=err_out)
                if not processes:
                    return
    except subprocess.CalledProcessError as error:
        logger.exception('Failed to apply iptables ' + \
            'routing rule', 'server',
            server_id=self.server.id,
            output=error.output,
        )
        raise
def stop(self, timeout=VPN_OP_TIMEOUT, force=False):
    """Stop every instance of the server, waiting for stop confirmations.

    Waits in two passes, reloading database state between them in case
    instances stopped without publishing an event. Raises
    ServerStopError when not all instances confirm within the timeout.
    """
    cursor_id = self.get_cursor_id()
    logger.debug('Stopping server. %r' % {
        'server_id': self.id,
    })
    if not self.status:
        return
    if force:
        self.publish('force_stop')
    else:
        self.publish('stop')
    stopped = 0
    instances_count = self.instances_count
    for _ in xrange(2):
        for msg in self.subscribe(cursor_id=cursor_id,
                timeout=(timeout / 2)):
            message = msg['message']
            if message == 'stopped':
                stopped += 1
                if stopped >= instances_count:
                    break
        if stopped >= instances_count:
            break
        # Recompute from the database how many instances remain.
        self.load()
        stopped = self.replica_count - self.instances_count
        instances_count = self.instances_count
        if stopped >= instances_count:
            break
    if stopped >= instances_count:
        self.status = False
        response = self.collection.update({
            '_id': bson.ObjectId(self.id),
            'status': True,
        }, {'$set': {
            'status': False,
            'start_timestamp': None,
        }})
        if not response['updatedExisting']:
            # Database already shows the server stopped; just sync the
            # local fields.
            self.status = False
            self.start_timestamp = None
    else:
        raise ServerStopError('Server stop timed out', {
            'server_id': self.id,
        })
def _exists_iptables_rules(self, rule):
    """Return True when the given iptables rule is already present."""
    logger.debug('Checking for iptables rule. %r' % {
        'server_id': self.id,
        'rule': rule,
    })
    try:
        # iptables -C exits nonzero when the rule does not exist.
        subprocess.check_call(['iptables', '-C'] + rule,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return True
    except subprocess.CalledProcessError:
        return False
def remove_primary_user(self):
    """Remove the server's primary user and reset the cached ids."""
    logger.debug('Removing primary user', 'server',
        server_id=self.id,
    )
    spec = {'resource_id': self.id}
    self.user_collection.remove(spec)
    self.primary_organization = None
    self.primary_user = None
def enable_ip_forwarding(self):
    """Enable IPv4 forwarding via sysctl; log and re-raise on failure."""
    logger.debug('Enabling ip forwarding. %r' % {
        'server_id': self.server.id,
    })
    cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
    try:
        subprocess.check_call(cmd,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        logger.exception('Failed to enable IP forwarding. %r' % {
            'server_id': self.server.id,
        })
        raise
def enable_ip_forwarding(self):
    """Enable IPv4 forwarding via sysctl; log and re-raise on failure."""
    logger.debug('Enabling ip forwarding', 'server',
        server_id=self.server.id,
    )
    cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
    try:
        utils.check_output_logged(cmd)
    except subprocess.CalledProcessError:
        logger.exception('Failed to enable IP forwarding', 'server',
            server_id=self.server.id,
        )
        raise
def pause_task(self):
    """Pause dh param generation unless the queue item has been reserved."""
    if self.reserve_data:
        return False
    # Re-check against the latest database state before pausing.
    self.load()
    if self.reserve_data:
        return False
    logger.debug("Pausing queued dh params", "server",
        queue_id=self.id,
        dh_param_bits=self.dh_param_bits)
    com = self.queue_com
    com.running.clear()
    com.popen_kill_all()
    return True
def reserve_queued_dh_params(svr, block=False):
    """Reserve a queued dh params item for the server.

    Returns True when a reservation was made. With block=True the
    server document is reloaded afterwards to pick up the params.
    """
    reserve_id = svr.dh_param_bits
    reserve_data = {"server_id": svr.id}
    doc = QueueDhParams.reserve(reserve_id, reserve_data, block=block)
    if not doc:
        return False
    # Log on the success path only; previously this logged 'Reserved
    # queued dh params' right before returning False, claiming a
    # reservation that never happened.
    logger.debug("Reserved queued dh params", "server",
        server_id=svr.id, dh_param_bits=svr.dh_param_bits)
    if block:
        svr.load()
    return True
def add_host(self, host_id):
    """Append a host to the server's host list, skipping duplicates."""
    logger.debug('Adding host to server. %r' % {
        'server_id': self.id,
        'host_id': host_id,
    })
    if host_id in self.hosts:
        logger.debug('Host already on server, skipping. %r' % {
            'server_id': self.id,
            'host_id': host_id,
        })
        return
    self.hosts.append(host_id)
    # Mark the hosts field dirty for the next commit.
    self.changed.add('hosts')
def remove_primary_user(self):
    """Delete the server's primary user and drop the cached references."""
    logger.debug(
        'Removing primary user', 'server',
        server_id=self.id,
    )
    self.user_collection.remove({'resource_id': self.id})
    self.primary_organization = None
    self.primary_user = None
def remove_host(self, host_id):
    """Remove a host from the server, restarting coverage on other hosts.

    Marks the server OFFLINE when the last host is removed.
    """
    if host_id not in self.hosts:
        logger.warning('Attempted to remove host that does not exists',
            'server',
            server_id=self.id,
            host_id=host_id,
        )
        return
    logger.debug('Removing host from server', 'server',
        server_id=self.id,
        host_id=host_id,
    )
    self.hosts.remove(host_id)
    # Atomically pull the host and any instance running on it; matches
    # only when such an instance existed.
    response = self.collection.update({
        '_id': self.id,
        'instances.host_id': host_id,
    }, {
        '$pull': {
            'hosts': host_id,
            'instances': {
                'host_id': host_id,
            },
        },
        '$inc': {
            'instances_count': -1,
        },
    })
    if response['updatedExisting']:
        # An instance was removed; pick a random replica-sized subset
        # of the remaining hosts as preferred replacements.
        prefered_host = random.sample(self.hosts,
            min(self.replica_count, len(self.hosts)))
        self.publish('start', extra={
            'prefered_hosts': prefered_host,
        })
    # Ensure the host is pulled even when no instance matched above;
    # without new=True this returns the pre-update hosts list.
    doc = self.collection.find_and_modify({
        '_id': self.id,
    }, {
        '$pull': {
            'hosts': host_id,
        },
    }, {
        'hosts': True,
    })
    if doc and not doc['hosts']:
        # Last host removed; the server can no longer run.
        self.status = OFFLINE
        self.commit('status')
def reserve_pooled_dh_params(svr):
    """Claim pooled dh params for the server. Returns True on success."""
    pool = QueueDhParams.dh_params_collection
    # Null out dh_param_bits atomically so no other claimant matches.
    doc = pool.find_and_modify(
        {"dh_param_bits": svr.dh_param_bits},
        {"$set": {"dh_param_bits": None}},
        new=True
    )
    if not doc:
        return False
    pool.remove(doc["_id"])
    logger.debug("Reserved pooled dh params", "server",
        server_id=svr.id,
        dh_param_bits=svr.dh_param_bits)
    svr.dh_params = doc["dh_params"]
    return True
def set_iptables_rules(self):
    """Apply iptables rules, appending any that are not already present.

    One check subprocess is spawned per rule; an epoll loop waits on
    stdout hangup, and a failed check (rule missing) triggers the
    append command, tracked the same way.
    """
    logger.debug('Setting iptables rules', 'server',
        server_id=self.server.id,
    )
    # fd -> (cmd, process, follow-up append command or None)
    processes = {}
    poller = select.epoll()
    self.iptables_rules = self.generate_iptables_rules()
    for rule in self.iptables_rules:
        cmd, process = self.exists_iptables_rules(rule)
        fileno = process.stdout.fileno()
        processes[fileno] = (cmd, process, ['iptables', '-A'] + rule)
        poller.register(fileno, select.EPOLLHUP)
    try:
        while True:
            for fd, event in poller.poll(timeout=8):
                cmd, process, next_cmd = processes.pop(fd)
                poller.unregister(fd)
                if next_cmd:
                    # Check stage done; nonzero exit means the rule is
                    # missing, so spawn the append command.
                    if process.poll():
                        process = subprocess.Popen(next_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                        )
                        fileno = process.stdout.fileno()
                        processes[fileno] = (next_cmd, process, None)
                        poller.register(fileno, select.EPOLLHUP)
                else:
                    # Append stage done; nonzero exit is a hard failure.
                    retcode = process.poll()
                    if retcode:
                        std_out, err_out = process.communicate()
                        raise subprocess.CalledProcessError(
                            retcode, cmd, output=err_out)
                if not processes:
                    return
    except subprocess.CalledProcessError as error:
        # NOTE(review): `rule` here is whatever value the setup loop
        # left behind, not necessarily the failing rule.
        logger.exception('Failed to apply iptables ' + \
            'routing rule', 'server',
            server_id=self.server.id,
            rule=rule,
            output=error.output,
        )
        raise
def remove(self):
    """Delete the organization, detaching it from servers and its users."""
    logger.debug('Remove org', 'organization',
        org_id=self.id,
    )
    for svr in self.iter_servers():
        if svr.status:
            svr.stop()
        svr.remove_org(self)
        svr.commit()
    mongo.MongoObject.remove(self)
    user.User.collection.remove({
        'org_id': self.id,
    })
def remove_host(self, host_id):
    """Remove a host (id string or host object) from the server's list."""
    if not isinstance(host_id, basestring):
        host_id = host_id.id
    if host_id not in self.hosts:
        return
    logger.debug('Removing host from server. %r' % {
        'server_id': self.id,
        'host_id': host_id,
    })
    # Tolerate a concurrent removal between the check and the remove.
    try:
        self.hosts.remove(host_id)
    except ValueError:
        pass
    self.changed.add('hosts')
def add_host(self, host_id):
    """Add a host (id string or host object) to the server's host list."""
    if not isinstance(host_id, basestring):
        host_id = host_id.id
    logger.debug('Adding host to server. %r' % {
        'server_id': self.id,
        'host_id': host_id,
    })
    if host_id in self.hosts:
        logger.debug('Host already on server, skipping. %r' % {
            'server_id': self.id,
            'host_id': host_id,
        })
        return
    self.hosts.append(host_id)
    # Mark the hosts field dirty for the next commit.
    self.changed.add('hosts')
def add_host(self, host_id):
    """Append a host id to this server, ignoring hosts already present."""
    logger.debug('Adding host to server', 'server',
        server_id=self.id,
        host_id=host_id,
    )
    if host_id in self.hosts:
        logger.debug('Host already on server, skipping', 'server',
            server_id=self.id,
            host_id=host_id,
        )
        return
    self.hosts.append(host_id)
    # Flag the hosts field for the next commit.
    self.changed.add('hosts')
def pause_task(self):
    """Pause dh param generation unless the queue item has been reserved."""
    if self.reserve_data:
        return False
    # Reload to catch a reservation made since this object was loaded.
    self.load()
    if self.reserve_data:
        return False
    logger.debug('Pausing queued dh params', 'server',
        queue_id=self.id,
        dh_param_bits=self.dh_param_bits,
    )
    com = self.queue_com
    com.running.clear()
    com.popen_kill_all()
    return True
def _remove_primary_user(self):
    """Delete the primary user object and clear the cached references."""
    logger.debug('Removing primary user. %r' % {
        'server_id': self.id,
    })
    if not self.primary_organization or not self.primary_user:
        return
    org = organization.get_org(id=self.primary_organization)
    if org:
        # Renamed local from `user` to avoid shadowing the user module.
        usr = org.get_user(id=self.primary_user)
        if usr:
            usr.remove()
    self.primary_organization = None
    self.primary_user = None
def remove(self):
    """Delete the organization after detaching it from all servers."""
    logger.debug(
        'Remove org', 'organization',
        org_id=self.id,
    )
    for server in self.iter_servers():
        # Stop running servers before unlinking the org.
        if server.status:
            server.stop()
        server.remove_org(self)
        server.commit()
    mongo.MongoObject.remove(self)
    user.User.collection.remove({'org_id': self.id})
def clear_iptables_rules(self):
    """Delete all applied iptables rules, waiting for every subprocess."""
    logger.debug('Clearing iptables rules', 'server',
        server_id=self.server.id,
    )
    # Launch all deletes concurrently, then wait for each to finish.
    procs = [
        subprocess.Popen(['iptables', '-D'] + rule,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        for rule in self.iptables_rules
    ]
    for proc in procs:
        proc.wait()
def load_public_ip(attempts=1, timeout=5):
    """Fetch and cache the public ip, retrying up to `attempts` times."""
    for i in xrange(attempts):
        if settings.local.public_ip:
            return
        if i:
            # Back off between retries.
            time.sleep(3)
            logger.debug('Retrying get public ip address...')
        logger.debug('Getting public ip address...')
        try:
            request = urllib2.Request(settings.app.public_ip_server)
            response = urllib2.urlopen(request, timeout=timeout)
            settings.local.public_ip = json.load(response)['ip']
            break
        except:
            # Best effort; retry on the next iteration.
            pass
    if not settings.local.public_ip:
        # Was logger.exception, but there is no active exception here
        # (outside any except block), which logs a bogus
        # "NoneType: None" traceback; warning matches the structured
        # variant of this function.
        logger.warning('Failed to get public ip address...')
def add_org(self, org_id):
    """Attach an organization (id string or object) to the server."""
    if not isinstance(org_id, basestring):
        org_id = org_id.id
    logger.debug('Adding organization to server. %r' % {
        'server_id': self.id,
        'org_id': org_id,
    })
    if org_id in self.organizations:
        logger.debug('Organization already on server, skipping. %r' % {
            'server_id': self.id,
            'org_id': org_id,
        })
        return
    self.organizations.append(org_id)
    self.changed.add('organizations')
    # The server CA bundle must be rebuilt to include the new org.
    self.generate_ca_cert()
    self._orgs_changed = True
def _clear_iptables_rules(self):
    """Remove every generated iptables rule that is currently applied."""
    logger.debug('Clearing iptables rules. %r' % {
        'server_id': self.id,
    })
    for rule in self._generate_iptables_rules():
        if not self._exists_iptables_rules(rule):
            continue
        try:
            subprocess.check_call(['iptables', '-D'] + rule,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError:
            logger.exception('Failed to clear iptables ' + \
                'routing rule. %r' % {
                    'server_id': self.id,
                    'rule': rule,
                })
            raise
def initialize(self, queue_user_init=True):
    """Create this org's CA user and store its private key and cert."""
    ca_user = user.User(org=self, type=CERT_CA)
    if queue_user_init:
        # Default orgs get high priority in the init queue.
        priority = HIGH if self.type == ORG_DEFAULT else None
        ca_user.queue_initialize(block=True, priority=priority)
    else:
        ca_user.initialize()
        ca_user.commit()
    logger.debug('Init ca_user', 'organization',
        org_id=self.id,
        user_id=ca_user.id,
    )
    self.ca_private_key = ca_user.private_key
    self.ca_certificate = ca_user.certificate
def reserve_queued_dh_params(svr, block=False):
    """Reserve a queued dh params item for the server; True on success.

    With block=True the server document is reloaded afterwards to pick
    up the generated params.
    """
    reserve_id = svr.dh_param_bits
    reserve_data = {
        'server_id': svr.id,
    }
    doc = QueueDhParams.reserve(reserve_id, reserve_data, block=block)
    if not doc:
        return False
    # Log on the success path only; previously this logged 'Reserved
    # queued dh params' before returning False, claiming a reservation
    # that never happened.
    logger.debug('Reserved queued dh params', 'server',
        server_id=svr.id,
        dh_param_bits=svr.dh_param_bits,
    )
    if block:
        svr.load()
    return True
def clear_iptables_rules(self):
    """Remove all tracked iptables/ip6tables rules under the rules lock.

    The tracked rule lists are always reset to None, even when a
    removal fails.
    """
    logger.debug('Clearing iptables rules', 'server',
        server_id=self.server.id,
    )
    self.iptables_lock.acquire()
    try:
        v4_rules = self.iptables_rules
        if v4_rules is not None:
            for rule in v4_rules:
                self.remove_iptables_rule(rule)
        v6_rules = self.ip6tables_rules
        if v6_rules is not None:
            for rule in v6_rules:
                self.remove_ip6tables_rule(rule)
    finally:
        self.iptables_rules = None
        self.ip6tables_rules = None
        self.iptables_lock.release()
def remove_org(self, org_id):
    """Detach an organization (id string or object) from the server."""
    if not isinstance(org_id, basestring):
        org_id = org_id.id
    if org_id not in self.organizations:
        return
    logger.debug('Removing organization from server. %r' % {
        'server_id': self.id,
        'org_id': org_id,
    })
    if self.primary_organization == org_id:
        # The primary user belongs to this org; drop it too.
        self._remove_primary_user()
    # Tolerate a concurrent removal between the check and the remove.
    try:
        self.organizations.remove(org_id)
    except ValueError:
        pass
    self.changed.add('organizations')
    # Rebuild the CA bundle without the removed org.
    self.generate_ca_cert()
    self._orgs_changed = True
def commit(self, *args, **kwargs):
    """Commit the org; on first commit, start filling its user pool."""
    was_existing = self.exists
    mongo.MongoObject.commit(self, *args, **kwargs)
    if was_existing:
        return
    logger.debug('Fill new org pool', 'organization',
        org_id=self.id,
    )
    filler = threading.Thread(
        target=pooler.fill,
        args=(
            'new_user',
            self,
        ),
    )
    filler.daemon = True
    filler.start()
def run_queue_item(queue_item, thread_limit):
    """Run or resume a queue item, releasing the thread slot when done.

    A paused item being resumed keeps its original thread slot, so the
    limit is not released in that case.
    """
    release = True
    try:
        # Idiom fix: compare against None with `is`, not `==`.
        if queue_item.queue_com.state is None:
            logger.debug(
                'Run queue item', 'queue_runner',
                queue_id=queue_item.id,
                queue_type=queue_item.type,
            )
            queue_item.run()
        elif queue_item.queue_com.state == PAUSED:
            release = False
            queue_item.resume()
    finally:
        running_queues.pop(queue_item.id, None)
        if release:
            thread_limit.release()
def stop(self, force=False):
    """Mark the server offline, clearing instance, pool and vxlan state.

    Raises ServerStopError when the database shows the server was not
    running.
    """
    logger.debug(
        'Stopping server', 'server',
        server_id=self.id,
    )
    if self.status != ONLINE:
        return
    response = self.collection.update({
        '_id': self.id,
        'status': ONLINE,
    }, {
        '$set': {
            'status': OFFLINE,
            'start_timestamp': None,
            'pool_cursor': None,
            'instances': [],
            'instances_count': 0,
            'availability_group': None,
        }
    })
    # Clear vxlan host assignments and pooled clients regardless of
    # whether the status update matched.
    self.vxlan_collection.update({
        'server_id': self.id,
    }, {'$set': {
        'hosts': [],
    }})
    self.clients_pool_collection.remove({
        'server_id': self.id,
    })
    if not response['updatedExisting']:
        raise ServerStopError('Server not running', {
            'server_id': self.id,
        })
    self.status = OFFLINE
    if force:
        self.publish('force_stop')
    else:
        self.publish('stop')
def reserve_pooled_dh_params(svr):
    """Claim pooled dh params for the server; True when params were taken."""
    pool = QueueDhParams.dh_params_collection
    # Null out dh_param_bits atomically so no other claimant matches.
    doc = pool.find_and_modify({
        'dh_param_bits': svr.dh_param_bits,
    }, {'$set': {
        'dh_param_bits': None,
    }})
    if not doc:
        return False
    pool.remove(doc['_id'])
    logger.debug('Reserved pooled dh params', 'server',
        server_id=svr.id,
        dh_param_bits=svr.dh_param_bits,
    )
    svr.dh_params = doc['dh_params']
    return True
def create_primary_user(self):
    """Create the server certificate user in the first attached org.

    Raises ServerMissingOrg when the server has no organizations.
    """
    logger.debug('Creating primary user', 'server',
        server_id=self.id,
    )
    try:
        org = self.iter_orgs().next()
    except StopIteration:
        raise ServerMissingOrg('Primary user cannot be created ' + \
            'without any organizations', {
                'server_id': self.id,
            })
    # Renamed local from `user` to avoid shadowing the user module.
    usr = org.new_user(name=SERVER_USER_PREFIX + str(self.id),
        type=CERT_SERVER, resource_id=self.id)
    self.primary_organization = org.id
    self.primary_user = usr.id
    self.commit(('primary_organization', 'primary_user'))
def _check_updates():
    """Interruptible runner loop polling the notification server.

    Yields interrupter_sleep values so the hosting runner can sleep
    and be interrupted between polls.
    """
    while True:
        if not settings.app.update_check_rate:
            # Checking disabled; yield a short sleep and re-check.
            yield interrupter_sleep(30)
            continue
        try:
            logger.debug('Checking notifications...', 'runners')
            request = urllib2.Request(settings.app.notification_server +
                '/%s' % settings.local.version_int)
            response = urllib2.urlopen(request, timeout=60)
            data = json.load(response)
            settings.local.notification = data.get('message', '')
            settings.local.www_state = data.get('www', OK)
            settings.local.vpn_state = data.get('vpn', OK)
        except:
            # Best effort; keep the runner alive on any failure.
            logger.exception('Failed to check notifications', 'runners')
        yield interrupter_sleep(settings.app.update_check_rate)
def set_iptables_rules(self, log=False):
    """Ensure all tracked iptables/ip6tables rules are applied.

    With log=True, unexpectedly missing rules are reported before
    being re-added, unless the instance is being interrupted.
    """
    logger.debug(
        'Setting iptables rules', 'server',
        server_id=self.server.id,
    )
    self.iptables_lock.acquire()
    try:
        if self.iptables_rules is not None:
            for rule in self.iptables_rules:
                if not self.exists_iptables_rule(rule):
                    if log and not self.interrupt:
                        logger.error(
                            'Unexpected loss of iptables rule, ' +
                            'adding again...', 'instance',
                            rule=rule,
                        )
                    self.set_iptables_rule(rule)
        if self.ip6tables_rules is not None:
            for rule in self.ip6tables_rules:
                if not self.exists_ip6tables_rule(rule):
                    if log and not self.interrupt:
                        logger.error(
                            'Unexpected loss of ip6tables rule, ' +
                            'adding again...', 'instance',
                            rule=rule,
                        )
                    self.set_ip6tables_rule(rule)
    except subprocess.CalledProcessError as error:
        logger.exception('Failed to apply iptables ' + \
            'routing rule', 'server',
            server_id=self.server.id,
            output=error.output,
        )
        raise
    finally:
        self.iptables_lock.release()
def new_user(self, type=CERT_CLIENT, block=True, **kwargs):
    """Create a user, preferring pooled then queued users over a fresh init.

    Returns the user object; a fresh user is returned immediately with
    its initialization queued.
    """
    # First attempt to get user from pool then attempt to get
    # unfinished queued user in pool then queue a new user init
    if type in (CERT_SERVER, CERT_CLIENT):
        usr = user.reserve_pooled_user(org=self, type=type, **kwargs)
        if not usr:
            usr = queue.reserve('queued_user', org=self, type=type,
                block=block, **kwargs)
            if usr:
                logger.debug('Reserved queued user', 'organization',
                    org_id=self.id,
                    user_id=usr.id,
                )
        else:
            logger.debug('Reserved pooled user', 'organization',
                org_id=self.id,
                user_id=usr.id,
            )
        if usr:
            # Backfill the pool to replace the user just taken.
            user.new_pooled_user(org=self, type=type)
            return usr
    usr = user.User(org=self, type=type, **kwargs)
    usr.queue_initialize(block=block,
        priority=HIGH if type in (CERT_SERVER, CERT_CLIENT) else None)
    logger.debug('Queued user init', 'organization',
        org_id=self.id,
        user_id=usr.id,
    )
    return usr
def set_iptables_rules(self):
    """Add any missing iptables and ip6tables rules for this instance."""
    logger.debug(
        'Setting iptables rules', 'server',
        server_id=self.server.id,
    )
    try:
        for rule in self.iptables_rules:
            if self.exists_iptables_rule(rule):
                continue
            self.set_iptables_rule(rule)
        for rule in self.ip6tables_rules:
            if self.exists_ip6tables_rule(rule):
                continue
            self.set_ip6tables_rule(rule)
    except subprocess.CalledProcessError as error:
        logger.exception('Failed to apply iptables ' + \
            'routing rule', 'server',
            server_id=self.server.id,
            output=error.output,
        )
        raise
def add_queue_item(queue_item):
    """Enqueue an item for running, pausing lower-priority running items.

    Items already registered in running_queues are ignored. Higher
    priority items preempt strictly lower priority running items,
    returning each paused item to the runner queue and releasing its
    thread slot.
    """
    if queue_item.id in running_queues:
        return
    running_queues[queue_item.id] = queue_item
    logger.debug(
        'Add queue item for run', 'queue',
        queue_id=queue_item.id,
        queue_type=queue_item.type,
        queue_priority=queue_item.priority,
        queue_cpu_type=queue_item.cpu_type,
    )
    # Lower tuple value = higher scheduling priority.
    runner_queues[queue_item.cpu_type].put((
        abs(queue_item.priority - 4),
        queue_item,
    ))
    if queue_item.priority >= NORMAL:
        for running_queue in running_queues.values():
            if running_queue.priority >= queue_item.priority:
                continue
            if running_queue.pause():
                # Fixed log message typo ('Puase' -> 'Pause').
                logger.debug(
                    'Pause queue item', 'queue',
                    queue_id=running_queue.id,
                    queue_type=running_queue.type,
                    queue_priority=running_queue.priority,
                    queue_cpu_type=running_queue.cpu_type,
                )
                runner_queues[running_queue.cpu_type].put((
                    abs(running_queue.priority - 4),
                    running_queue,
                ))
                thread_limits[running_queue.cpu_type].release()
def claim_commit(self, fields=None):
    """Atomically claim the queue item for this runner and commit fields."""
    doc = self.get_commit_doc(fields=fields)
    doc['runner_id'] = self.runner_id
    doc['ttl_timestamp'] = utils.now() + datetime.timedelta(
        seconds=self.ttl)
    # Match only items that are unclaimed or already claimed by us.
    spec = {
        '_id': self.id,
        '$or': [
            {'runner_id': self.runner_id},
            {'runner_id': {'$exists': False}},
        ],
    }
    response = self.collection.update(spec, {'$set': doc})
    claimed = response['updatedExisting']
    self.claimed = claimed
    if claimed:
        self.keep_alive()
        logger.debug(
            'Queue claimed', 'queue',
            queue_id=self.id,
            queue_type=self.type,
        )
    return claimed
def add_host(self, host_id):
    """Add a host, ensuring no linked server shares any host with us."""
    logger.debug(
        'Adding host to server', 'server',
        server_id=self.id,
        host_id=host_id,
    )
    if host_id in self.hosts:
        logger.debug(
            'Host already on server, skipping', 'server',
            server_id=self.id,
            host_id=host_id,
        )
        return
    if self.links:
        # Proposed host set including the new host.
        proposed = set(self.hosts)
        proposed.add(host_id)
        link_ids = [lnk['server_id'] for lnk in self.links]
        cursor = self.collection.find(
            {'_id': {'$in': link_ids}},
            {'_id': True, 'hosts': True},
        )
        for linked_doc in cursor:
            if proposed & set(linked_doc['hosts']):
                raise ServerLinkCommonHostError(
                    'Servers have a common host')
    self.hosts.append(host_id)
    self.changed.add('hosts')