def main_process(self):
    """
    Main process for zfssnapd
    """
    if (settings['rpdb2_wait']):
        # a wait to attach with rpdb2...
        log_info('Waiting for rpdb2 to attach.')
        time.sleep(float(settings['rpdb2_wait']))
    log_info('program starting.')
    log_debug("The daemon_canary is: '{0}'".format(
        settings['daemon_canary']))
    # Do a nice output message to the log
    pwnam = pwd.getpwnam(settings['run_as_user'])
    if setproctitle_support:
        gpt_output = getproctitle()
    else:
        gpt_output = "no getproctitle()"
    log_debug(
        "PID: {0} process name: '{1}' daemon: '{2}' User: '******' UID: {4} GID {5}"
        .format(os.getpid(), gpt_output, self.i_am_daemon(),
                pwnam.pw_name, os.getuid(), os.getgid()))
    if (settings['memory_debug']):
        # Turn on memory debugging
        log_info('Turning on GC memory debugging.')
        gc.set_debug(gc.DEBUG_LEAK)
    # Create a Process object so that we can check in on ourself resource
    # wise
    self.proc_monitor = psutil.Process(pid=os.getpid())
    # Initialise a few nice things for the loop
    debug_mark = get_boolean_setting('debug_mark')
    sleep_time = int(get_numeric_setting('sleep_time', float))
    debug_sleep_time = int(get_numeric_setting('debug_sleep_time', float))
    sleep_time = debug_sleep_time if debug() else sleep_time
    # Initialise Manager stuff
    ds_settings = Config.read_ds_config()
    # Process Main Loop
    while (self.check_signals()):
        try:
            Manager.run(ds_settings, sleep_time)
        except Exception as ex:
            log_error('Exception: {0}'.format(str(ex)))
        if debug_mark:
            log_debug(
                "----MARK---- sleep({0}) seconds ----".format(sleep_time))
        self.main_sleep(sleep_time)
    log_info('Exited main loop - process terminating normally.')
    sys.exit(os.EX_OK)
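# get_numeric_setting() and get_boolean_setting() are used throughout this
# code but defined elsewhere.  A minimal sketch of plausible behaviour,
# assuming the module-level settings dict seen above - an illustration of
# the calling convention, not the project's actual implementation:
def get_numeric_setting(key, type_):
    """Return settings[key] coerced to type_ (int or float)."""
    return type_(settings[key])

def get_boolean_setting(key):
    """Interpret common truthy strings in settings[key] as True."""
    value = settings.get(key)
    if isinstance(value, str):
        return value.strip().lower() in ('1', 'true', 'yes', 'on')
    return bool(value)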
def vacuum_zones(self, age_days=None):
    """
    Destroy zones older than age_days
    """
    self._begin_op()
    db_session = self.db_session
    db_query_slice = get_numeric_setting('db_query_slice', int)
    age_days_from_config = float(zone_cfg.get_row_exc(db_session,
                                                      key='zone_del_age'))
    if age_days_from_config <= 0 and age_days is None:
        age_days = get_numeric_setting('zone_del_off_age', float)
    elif age_days is None:
        age_days = age_days_from_config
    age_days = timedelta(days=age_days)
    count = 0
    # Clear old and nuked zones one by one
    id_query = db_session.query(ZoneSM.id_)\
        .filter(ZoneSM.state == ZSTATE_DELETED)\
        .filter(or_(ZoneSM.deleted_start == None,
                    (func.now() - ZoneSM.deleted_start) > age_days))\
        .filter(ZoneSM.zone_files == False)\
        .yield_per(db_query_slice)
    id_results = []
    for zone_id, in id_query:
        id_results.append(zone_id)
    for zone_id in id_results:
        try:
            zone_sm = db_session.query(ZoneSM)\
                .filter(ZoneSM.id_ == zone_id).one()
        except NoResultFound:
            continue
        if zone_sm.state != ZSTATE_DELETED:
            # Skip this if a customer has undeleted the zone in the meantime
            continue
        db_session.delete(zone_sm)
        db_session.commit()
        count += 1
    # Finally, run the zone_sm destroy operation on the remaining zones
    query = db_session.query(ZoneSM)\
        .filter(ZoneSM.state == ZSTATE_DELETED)\
        .filter(or_(ZoneSM.deleted_start == None,
                    (func.now() - ZoneSM.deleted_start) > age_days))
    for zone_sm in query:
        if zone_sm.state != ZSTATE_DELETED:
            # Skip this if a customer has undeleted the zone in the meantime
            continue
        try:
            exec_zonesm(zone_sm, ZoneSMDoDestroy)
        except ZoneSmFailure:
            continue
        count += 1
    result = {'num_deleted': count}
    self._finish_op()
    return result
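# The age_days precedence above: an explicit argument wins; otherwise the
# zone_del_age config row is used; and a non-positive config value falls
# back to the zone_del_off_age setting.  A pure-Python restatement (helper
# name hypothetical) that can be unit-tested without a database:
from datetime import timedelta

def resolve_del_age(arg_days, cfg_days, off_age_days):
    """Return the vacuum age as a timedelta, applying the precedence above."""
    if arg_days is None:
        arg_days = off_age_days if cfg_days <= 0 else cfg_days
    return timedelta(days=arg_days)

# resolve_del_age(None, 0, 90)  -> timedelta(days=90)
# resolve_del_age(7, 30, 90)    -> timedelta(days=7)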
def do_garbage_collect(self):
    """
    Do Resource Release exercise at low memory threshold, blow up over max
    """
    error_str = ''
    try:
        rss_mem_usage = (float(self.proc_monitor.get_memory_info().rss)
                         / 1024 / 1024)
    except Exception as exc:
        error_str = str(exc)
    # Process above error...
    if (error_str):
        log_error("Error obtaining resource usage - %s" % error_str)
        sys.exit(os.EX_SOFTWARE)
    memory_exec_threshold = get_numeric_setting('memory_exec_threshold',
                                                float)
    if (rss_mem_usage > memory_exec_threshold):
        log_warning(
            'Memory exec threshold %s MB reached, actual %s MB'
            ' - execve() to reclaim.'
            % (memory_exec_threshold, rss_mem_usage))
        file_path = os.path.join(sys.path[0], sys.argv[0])
        file_path = os.path.normpath(file_path)
        os.execve(file_path, sys.argv, os.environ)
    else:
        # Spend idle time being RAM thrifty...
        gc.collect()
    return
def configure_file_logging(self):
    """
    Configure file logging

    This is typically done after we have syslog running or stderr,
    and after processing the configuration file in a daemon process
    """
    if (hasattr(self, 'logfile_handler') and self.logfile_handler):
        logging.root.removeHandler(self.logfile_handler)
    if (settings['log_file']):
        try:
            maxBytes = 1024 * get_numeric_setting(
                'log_file_max_size_kbytes', int)
            logfile_handler = logging.handlers.RotatingFileHandler(
                filename=settings['log_file'],
                maxBytes=maxBytes,
                backupCount=settings['log_file_backup_count'])
        except (IOError, OSError) as e:
            if (e.filename):
                log_error("%s - %s." % (e.filename, e.strerror))
            else:
                log_error("%s." % e.strerror)
            return
        logfile_handler.setFormatter(self.log_formatter)
        logging.root.addHandler(logfile_handler)
        self.logfile_handler = logfile_handler
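# RotatingFileHandler rolls the log over once it reaches maxBytes, keeping
# backupCount rotated files (log.1 ... log.N).  A standalone sketch of the
# same setup with literal values (path and format hypothetical):
import logging
import logging.handlers

handler = logging.handlers.RotatingFileHandler(
    filename='/tmp/example-daemon.log',      # hypothetical path
    maxBytes=1024 * 2048,                    # 2048 kbytes
    backupCount=7)
handler.setFormatter(
    logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
logging.root.addHandler(handler)
logging.root.setLevel(logging.INFO)
logging.info('file logging configured')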
def init_master_dns_address(self):
    """
    Resolve the master DNS server setting to an IP address.

    Results determined by getaddrinfo(3) and thus by /etc/hosts
    contents, or else DNS if hostname not in /etc/hosts!
    """
    test_hostname = settings['master_dns_server']
    if not test_hostname:
        test_hostname = socket.getfqdn()
    connect_retry_wait = get_numeric_setting('connect_retry_wait', float)
    exc_msg = ''
    for t in range(3):
        try:
            # Transform any hostname to an IP address
            settings['master_dns_server'] = connect_test_address(
                test_hostname, port=settings['master_dns_port'])
            break
        except (IOError, OSError) as exc:
            exc_msg = str(exc)
            time.sleep(connect_retry_wait)
            continue
    else:
        log_error("Testing master DNS server IP address '%s:%s' - %s"
                  % (test_hostname, settings['master_dns_port'], exc_msg))
        systemd_exit(os.EX_NOHOST, SDEX_CONFIG)
    # If we get here without raising an exception, we can talk to
    # the server address (mostly)
    return
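# The for/else retry idiom above is easy to misread: the else: suite runs
# only when the loop finishes without break, i.e. when every attempt
# failed.  A standalone restatement using only the standard library
# (connect_test_address itself is defined elsewhere in this project):
import socket
import time

def resolve_with_retry(host, port, attempts=3, retry_wait=2.0):
    """Return the first IP address host:port resolves to, with retries."""
    last_exc = None
    for _ in range(attempts):
        try:
            addr = socket.getaddrinfo(host, port,
                                      proto=socket.IPPROTO_TCP)[0][4][0]
            break
        except OSError as exc:
            last_exc = exc
            time.sleep(retry_wait)
    else:
        # Only reached if no attempt hit break
        raise OSError("could not resolve %s:%s - %s" % (host, port, last_exc))
    return addr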
def do_garbage_collect(self):
    """
    Do Resource Release exercise at low memory threshold, blow up over max
    """
    error_str = ''
    try:
        rss_mem_usage = (float(self.proc_monitor.memory_info().rss)
                         / 1024 / 1024)
    except AttributeError:
        # Deal with a change in name of get_memory_info() method
        rss_mem_usage = (float(self.proc_monitor.get_memory_info().rss)
                         / 1024 / 1024)
    except Exception as exc:
        error_str = str(exc)
    # Process above error...
    if (error_str):
        log_error("Error obtaining resource usage - %s" % error_str)
        systemd_exit(os.EX_SOFTWARE, SDEX_NOTRUNNING)
    memory_exec_threshold = get_numeric_setting('memory_exec_threshold',
                                                float)
    if (rss_mem_usage > memory_exec_threshold):
        log_warning(
            'Memory exec threshold %s MB reached, actual %s MB'
            ' - execve() to reclaim.'
            % (memory_exec_threshold, rss_mem_usage))
        file_path = os.path.join(sys.path[0], sys.argv[0])
        file_path = os.path.normpath(file_path)
        os.execve(file_path, sys.argv, os.environ)
    else:
        # Spend idle time being RAM thrifty...
        gc.collect()
    return
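# os.execve() replaces the running process image in place (same PID), which
# hands all heap pages back to the kernel - memory the Python allocator
# would otherwise never return to the OS.  A condensed standalone sketch of
# the check-and-reexec idiom above (threshold value illustrative):
import os
import sys

import psutil

def reexec_if_bloated(threshold_mb=200.0):
    """execve() this script afresh once RSS exceeds threshold_mb."""
    rss_mb = psutil.Process().memory_info().rss / 1024 / 1024
    if rss_mb > threshold_mb:
        file_path = os.path.normpath(os.path.join(sys.path[0], sys.argv[0]))
        os.execve(file_path, sys.argv, os.environ)   # never returns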
def __init__(self):
    """
    Special init to set queue size
    """
    self.processing = {}
    self.queue_reset = False
    super().__init__(
        maxsize=get_numeric_setting('event_queue_maxsize', int))
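# queue.Queue takes its size limit via __init__(maxsize=...), so a subclass
# that adds bookkeeping attributes just forwards maxsize through super(),
# exactly as above.  A standalone sketch with a literal limit:
import queue

class BoundedQueue(queue.Queue):
    """FIFO queue that refuses to grow past a configured maximum."""

    def __init__(self, maxsize=100):
        self.processing = {}
        self.queue_reset = False
        super().__init__(maxsize=maxsize)

# q = BoundedQueue(maxsize=2)
# q.put('a'); q.put('b')
# q.put('c', block=False)   # raises queue.Full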
def list_events(self, last_limit=None):
    """
    List failed events
    """
    self._begin_op()
    if not last_limit:
        last_limit = get_numeric_setting('list_events_last_limit', float)
    db_query_slice = get_numeric_setting('db_query_slice', int)
    db_session = self.db_session
    query = db_session.query(Event)\
        .order_by(desc(Event.id_)).limit(last_limit)\
        .yield_per(db_query_slice)
    results = []
    for event in query:
        json_event = event.to_engine_brief(time_format=self.time_format)
        results.append(json_event)
    self._finish_op()
    return results
def _thread_top_up(self):
    if (debug()):
        self.event_queue_threads = 1
    else:
        self.event_queue_threads = get_numeric_setting(
            'event_queue_threads', int)
    # Check what threads are active, and remove those that are not
    self._threads = [thread for thread in self._threads
                     if thread.is_alive()]
    while (len(self._threads) < self.event_queue_threads):
        log_debug("_thread_top_up() - topping up threads")
        self._thread_start()
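# The top-up pattern - prune threads that have died, then start new ones
# until the pool is back at strength - can be shown standalone with
# threading (the worker body here is hypothetical):
import threading
import time

def _worker():
    time.sleep(0.1)    # stand-in for pulling events off the queue

def top_up(threads, wanted):
    """Prune finished threads, then start new ones up to wanted."""
    threads[:] = [t for t in threads if t.is_alive()]
    while len(threads) < wanted:
        thread = threading.Thread(target=_worker, daemon=True)
        thread.start()
        threads.append(thread)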
def __init__(self):
    """
    Sets up queue and Threads
    """
    # This is a FIFO queue
    self._queue = _EQueue()
    self._threads = []
    self._event_count = 0
    self.event_queue_session_transactions \
        = get_numeric_setting('event_queue_session_transactions', int)
    self._thread_top_up()
    # Pause to let threads get established.
    time.sleep(3)
def __init__(self, dataset='', time_spec='', mountpoint=''):
    """
    Initialise class
    """
    hysteresis_time = int(
        get_numeric_setting('startup_hysteresis_time', float))
    self.prev_secs = int(time.time()) - hysteresis_time
    self.dataset = dataset
    self.mountpoint = mountpoint
    self.time_spec = time_spec
    self.date = self._midnight_date()
    # Do this before calling _parse_timespec(), as that routine sets it!
    self.trigger_flag = False
    self.time_list = self._parse_timespec(
        self.time_spec) if self.time_spec else []
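# Back-dating prev_secs by startup_hysteresis_time means a snapshot time
# that fell just before the daemon (re)started still fires on the first
# scheduling pass instead of being skipped.  A worked illustration,
# assuming the trigger test is "prev_secs < scheduled <= now" (the actual
# test lives in code not shown in this section):
import time

hysteresis_time = 300                 # e.g. 5 minutes
now = int(time.time())
prev_secs = now - hysteresis_time     # as in __init__ above
scheduled = now - 120                 # fell while the daemon was down
assert prev_secs < scheduled <= now   # still caught on the first pass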
def vacuum_pare_deleted_zone_zis(self, age_days=None):
    """
    Pare ZIs on deleted zones older than age_days
    """
    self._begin_op()
    db_session = self.db_session
    db_query_slice = get_numeric_setting('db_query_slice', int)
    age_days_from_config = float(zone_cfg.get_row_exc(db_session,
                                                      key='zone_del_pare_age'))
    if age_days_from_config <= 0 and age_days is None:
        self._finish_op()
        return {'num_deleted': 0}
    if age_days is None:
        age_days = age_days_from_config
    age_days = timedelta(days=age_days)
    stmt = db_session.query(ZoneInstance.zone_id,
                            func.count(ZoneInstance.id_).label('zi_count'))\
        .group_by(ZoneInstance.zone_id).subquery()
    zone_sm_query = db_session.query(ZoneSM)\
        .filter(ZoneSM.state == ZSTATE_DELETED)\
        .outerjoin(stmt, ZoneSM.id_ == stmt.c.zone_id)\
        .filter(stmt.c.zi_count > 1)\
        .filter(and_(ZoneSM.deleted_start != None,
                     (func.now() - ZoneSM.deleted_start) > age_days))\
        .yield_per(db_query_slice)
    count = 0
    for zone_sm in zone_sm_query:
        zi_query = db_session.query(ZoneInstance)\
            .filter(ZoneInstance.zone_id == zone_sm.id_)\
            .filter(ZoneInstance.id_ != zone_sm.zi_id)
        if zone_sm.state != ZSTATE_DELETED:
            # Skip this if a customer has undeleted the zone in the meantime
            continue
        for zi in zi_query:
            if (zi.id_ == zone_sm.zi_id
                    or zi.id_ == zone_sm.zi_candidate_id):
                # Skip if this ZI is published or selected to be published
                continue
            db_session.delete(zi)
            count += 1
    result = {'num_deleted': count}
    self._finish_op()
    return result
def vacuum_zis(self, age_days=None, zi_max_num=None):
    """
    Age ZIs according to age_days and zi_max_num
    """
    self._begin_op()
    db_session = self.db_session
    db_query_slice = get_numeric_setting('db_query_slice', int)
    if age_days is None:
        age_days = float(zone_cfg.get_row_exc(db_session,
                                              key='zi_max_age'))
    age_days = timedelta(days=age_days)
    if zi_max_num is None:
        zi_max_num = int(zone_cfg.get_row_exc(db_session,
                                              key='zi_max_num'))
    stmt = db_session.query(ZoneInstance.zone_id,
                            func.count(ZoneInstance.id_).label('zi_count'))\
        .group_by(ZoneInstance.zone_id).subquery()
    zone_sm_query = db_session.query(ZoneSM)\
        .filter(ZoneSM.state != ZSTATE_DELETED)\
        .outerjoin(stmt, ZoneSM.id_ == stmt.c.zone_id)\
        .filter(stmt.c.zi_count > zi_max_num)\
        .yield_per(db_query_slice)
    count = 0
    for zone_sm in zone_sm_query:
        zi_keep = db_session.query(ZoneInstance.id_)\
            .filter(ZoneInstance.zone_id == zone_sm.id_)\
            .order_by(desc(ZoneInstance.mtime))\
            .limit(zi_max_num)
        zi_query = db_session.query(ZoneInstance)\
            .filter(ZoneInstance.zone_id == zone_sm.id_)\
            .filter(ZoneInstance.id_ != zone_sm.zi_id)\
            .filter(not_(ZoneInstance.id_.in_(zi_keep)))\
            .filter(ZoneInstance.mtime < (func.now() - age_days))
        for zi in zi_query:
            if (zi.id_ == zone_sm.zi_id
                    or zi.id_ == zone_sm.zi_candidate_id):
                # Skip if this ZI has been selected for republishing in
                # the meantime
                continue
            db_session.delete(zi)
            count += 1
    result = {'num_deleted': count}
    self._finish_op()
    return result
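# The zi_keep subquery implements "keep the newest zi_max_num per zone":
# deletion candidates are rows NOT IN the most recent N by mtime.  A
# self-contained sketch of the same ORDER BY ... LIMIT / NOT IN shape
# against a toy table (model, engine and values are illustrative only;
# the source passes the Query straight to in_(), older SQLAlchemy style,
# where this sketch uses scalar_subquery()):
from datetime import datetime, timedelta

from sqlalchemy import create_engine, Column, Integer, DateTime, desc, not_
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Snapshot(Base):
    __tablename__ = 'snapshot'
    id_ = Column('id', Integer, primary_key=True)
    mtime = Column(DateTime)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    start = datetime(2024, 1, 1)
    session.add_all([Snapshot(mtime=start + timedelta(days=i))
                     for i in range(5)])
    session.commit()
    keep = session.query(Snapshot.id_)\
        .order_by(desc(Snapshot.mtime)).limit(3)
    doomed = session.query(Snapshot)\
        .filter(not_(Snapshot.id_.in_(keep.scalar_subquery())))
    for row in doomed.all():
        session.delete(row)
    session.commit()
    assert session.query(Snapshot).count() == 3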
def _test_connected(self, host, port):
    connect_retry_wait = get_numeric_setting('connect_retry_wait', float)
    exc_msg = ''
    for t in range(3):
        try:
            # Transform any hostname to an IP address
            connect_test_address(host, port)
            break
        except (IOError, OSError) as exc:
            exc_msg = str(exc)
            time.sleep(connect_retry_wait)
            continue
    else:
        if self.local_dataset:
            log_info("[{0}] - Can't reach endpoint '{1}:{2}' - {3}".format(
                self.local_dataset, host, port, exc_msg))
        else:
            log_error("Can't reach endpoint '{0}:{1}' - {2}".format(
                host, port, exc_msg))
        return False
    return True
def init_update_engines(self):
    """
    Initialise the update engines used
    """
    connect_retry_wait = get_numeric_setting('connect_retry_wait', float)
    error_str = ''
    for t in range(3):
        try:
            dyndns_engine = DynDNSUpdate(settings['dns_server'],
                                         settings['dyndns_key_file'],
                                         settings['dyndns_key_name'])
            break
        except (DynDNSCantReadKeyError, IOError, OSError) as exc:
            error_str = ("Can't connect to named for dynamic updates - %s"
                         % str(exc))
            time.sleep(connect_retry_wait)
            continue
    # Process above error...
    else:
        log_error("%s" % error_str)
        systemd_exit(os.EX_NOHOST, SDEX_CONFIG)
    update_engine['dyndns'] = dyndns_engine
def nuke_zones(self, *names, include_deleted=False, toggle_deleted=False,
               sg_name=None, reference=None):
    """
    Destroy multiple zones.

    Multiple names may be given. Wildcards can be used for partial
    matches. This is mainly a command for testing, or cleaning up after
    a large batch zone load goes awry. Zones being nuked have their
    deleted_start set to 1/1/1970, midnight. This means they will be
    immediately reaped by the next vacuum_zones command.
    """
    # Deal with SA auto-BEGIN - want fresh transaction to see fresh data
    self._begin_op()
    if not names:
        # No arguments
        self._finish_op()
        raise NoZonesFound('')
    db_session = self.db_session
    db_query_slice = get_numeric_setting('db_query_slice', int)
    # We were given some arguments
    zones = []
    # We keep domains and labels in database lowercase
    names = [x.lower() for x in names]
    name_pattern = ' '.join(names)
    names = [x.replace('*', '%') for x in names]
    names = [x.replace('?', '_') for x in names]
    for name in names:
        if not name.endswith('.') and not name.endswith('%'):
            name += '.'
        query = db_session.query(ZoneSM)\
            .filter(ZoneSM.name.like(name))
        # Don't delete any reverse zones with this command
        query = query.filter(not_(ZoneSM.name.like('%.in-addr.arpa.')))\
            .filter(not_(ZoneSM.name.like('%.ip6.arpa.')))
        if reference:
            query = query.join(Reference)\
                .filter(Reference.reference.ilike(reference))
        if sg_name and self.sectag.sectag == settings['admin_sectag']:
            if sg_name not in list_all_sgs(self.db_session):
                raise NoSgFound(sg_name)
            query = query.join(ServerGroup,
                               ZoneSM.sg_id == ServerGroup.id_)\
                .filter(ServerGroup.name == sg_name)
        if include_deleted:
            pass
        elif toggle_deleted:
            query = query.filter(ZoneSM.state == ZSTATE_DELETED)
        else:
            query = query.filter(ZoneSM.state != ZSTATE_DELETED)
        query = query.yield_per(db_query_slice)
        # The following gives less RAM piggery even though it is slower
        for z in query:
            zones.append(z)
    # Take note of security tags
    if self.sectag.sectag != settings['admin_sectag']:
        zones = [x for x in zones if self.sectag in x.sectags]
    if not zones:
        if len(name_pattern) > 240:
            name_pattern = '* - %s names' % len(names)
        raise NoZonesFound(name_pattern)
    # Mark them all as deleted.
    for zone in zones:
        exec_zonesm(zone, ZoneSMNukeStart)
    self._finish_op()
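# The wildcards accepted above map shell globbing onto SQL LIKE:
# '*' -> '%' (any run of characters) and '?' -> '_' (one character), with
# a trailing dot appended to make names absolute.  A tiny standalone
# restatement (helper name hypothetical):
def wildcard_to_like(name):
    """Translate a shell-style zone pattern into a SQL LIKE pattern."""
    pattern = name.lower().replace('*', '%').replace('?', '_')
    if not pattern.endswith('.') and not pattern.endswith('%'):
        pattern += '.'
    return pattern

# wildcard_to_like('example.*')          -> 'example.%'
# wildcard_to_like('host?.example.org')  -> 'host_.example.org.'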
def main_process(self):
    """Main process for dmsdmd"""
    if (settings['rpdb2_wait']):
        # a wait to attach with rpdb2...
        log_info('Waiting for rpdb2 to attach.')
        time.sleep(float(settings['rpdb2_wait']))
    log_info('program starting.')
    log_debug("The daemon_canary is: '%s'" % settings['daemon_canary'])
    # Do a nice output message to the log
    pwnam = pwd.getpwnam(settings['run_as_user'])
    log_debug("PID: %s daemon: '%s' User: '******' UID: %d GID %d"
              % (os.getpid(), self.i_am_daemon(), pwnam.pw_name,
                 os.getuid(), os.getgid()))
    # Check we can reach DNS server
    self.init_update_engines()
    # Initialise ServerSM rcodes from settings
    init_soaquery_rcodes()
    # Initialise master DNS address if required
    self.init_master_dns_address()
    # Connect to database, initialise SQLAlchemy
    setup_sqlalchemy()
    # Initialise master DNS server data
    self.init_master_dns_server_data()
    # Create a queue
    event_queue = EventQueue()
    # Create a Process object so that we can check in on ourself resource
    # wise
    self.proc_monitor = psutil.Process(pid=os.getpid())
    # Initialise a few nice things for the loop
    debug_mark = get_boolean_setting('debug_mark')
    sleep_time = get_numeric_setting('sleep_time', float)
    # test Read this value...
    master_hold_timeout = get_numeric_setting('master_hold_timeout', float)
    if (settings['memory_debug']):
        # Turn on memory debugging
        log_info('Turning on GC memory debugging.')
        gc.set_debug(gc.DEBUG_LEAK)
    # Process Main Loop
    while (self.check_signals()):
        event_queue.process_queue()
        if event_queue.queue_empty():
            self.do_garbage_collect()
        if debug_mark:
            log_debug("----MARK---- sleep(%s) seconds ----" % sleep_time)
        time.sleep(sleep_time)
    log_info('Exited main loop - process terminating normally.')
    sys.exit(os.EX_OK)
def write_config(self, db_session, op_exc):
    """
    Write out all configuration files needed for a server group.
    """
    def write_zone_include(zone_sm):
        # Remove dot at end of zone name as this gives more
        # human-readable filenames
        filler_name = zone_sm.name[:-1] \
            if zone_sm.name.endswith('.') \
            else zone_sm.name
        filler = {'name': filler_name, 'master': master_address}
        tmp_file.write(template % filler)

    replica_sg = (True if hasattr(self, 'master_sm') and self.master_sm
                  else False)
    # Calculate master addresses
    if (self.master_address
            and self.master_address in settings['this_servers_addresses']):
        master_address = self.master_address
    elif (self.master_alt_address
            and self.master_alt_address
            in settings['this_servers_addresses']):
        master_address = self.master_alt_address
    else:
        master_address = settings['master_dns_server']
    # Get list of server types in SG
    ServerSM = sql_types['ServerSM']
    server_types = [s.server_type for s in self.servers]
    # sort|uniq the types list
    server_types = sorted(set(server_types))
    if replica_sg:
        server_types = [st + settings['server_replica_suffix']
                        for st in server_types]
    db_query_slice = get_numeric_setting('db_query_slice', int)
    for server_type in server_types:
        include_dir = self.get_include_dir()
        include_file = self.get_include_file(server_type)
        if self.config_dir:
            # This allows us to override default template configuration
            # for, say, internal domains which use IPv6 ULA /
            # IPv4 RFC 1918 addressing
            template_file = (self.config_dir + '/'
                             + server_type + '.conf')
        else:
            template_file = (settings['server_config_dir'] + '/'
                             + server_type + '.conf')
        try:
            # Make directory if it does not already exist
            # This is in here to avoid try: verbosity
            if not os.path.isdir(include_dir):
                os.mkdir(include_dir)
            template = read_template(template_file)
            (fd, tmp_filename) = tempfile.mkstemp(
                dir=include_dir,
                prefix='.' + basename(include_file) + '-')
            tmp_file = io.open(fd, mode='wt')
            zone_sm_type = sql_types['ZoneSM']
            zone_count = 0
            if replica_sg:
                # Master SG - include all zones
                query = db_session.query(zone_sm_type)
                query = zone_sm_type.query_sg_is_configured(query)\
                    .yield_per(db_query_slice)
                for zone_sm in query:
                    write_zone_include(zone_sm)
                    zone_count += 1
                    # Prevent Memory leaks...
                    del zone_sm
            else:
                query = zone_sm_type.query_sg_is_configured(self.zones)\
                    .yield_per(db_query_slice)
                for zone_sm in query:
                    write_zone_include(zone_sm)
                    zone_count += 1
                    # Prevent Memory leaks...
                    del zone_sm
                query = zone_sm_type.query_sg_is_configured(
                    self.alt_zones)\
                    .yield_per(db_query_slice)
                for zone_sm in query:
                    write_zone_include(zone_sm)
                    zone_count += 1
                    # Prevent Memory leaks...
                    del zone_sm
            tmp_file.close()
            # Rename tmp file into place so that replacement is atomic
            os.chmod(tmp_filename, int(settings['config_file_mode'], 8))
            os.rename(tmp_filename, include_file)
            # Store zone_count for monitoring data input
            self.zone_count = zone_count
        except (IOError, OSError) as exc:
            msg = ("SG %s - '%s' - %s."
                   % (self.name, exc.filename, exc.strerror))
            if exc.errno in (errno.ENOENT, errno.EPERM, errno.EACCES):
                raise op_exc(msg)
            else:
                raise exc
        except KeyError as exc:
            msg = ("SG %s - Invalid template key in template file %s - %s"
                   % (self.name, template_file, str(exc)))
            raise op_exc(msg)
        finally:
            # clean up if possible
            try:
                os.unlink(tmp_filename)
            except:
                pass
    return
def write_named_conf_includes(self, db_session, op_exc):
    """
    Write the bits of named configuration.

    Separated so that it is callable from the recovery script
    """
    def open_tmp_file(prefix):
        (fd, tmp_filename) = mkstemp(dir=tmp_dir, prefix=prefix)
        include_file = io.open(fd, mode='wt')
        return (include_file, tmp_filename)

    def clean_up_rename(include_file, tmp_filename, config_file_name):
        include_file.close()
        # Rename tmp file into place so that replacement is atomic
        run_as_user = settings['run_as_user']
        try:
            run_as_user_pwd = pwd.getpwnam(run_as_user)
        except KeyError as exc:
            msg = ("Could not find user '%s' in passwd database - %s"
                   % (run_as_user, str(exc)))
            raise op_exc(msg)
        uid = run_as_user_pwd.pw_uid
        gid = run_as_user_pwd.pw_gid
        os.chown(tmp_filename, uid, gid)
        os.chmod(tmp_filename, int(settings['config_file_mode'], 8))
        os.rename(tmp_filename, config_file_name)

    db_query_slice = get_numeric_setting('db_query_slice', int)
    # Clear template cache. This forces a re-read of all templates
    clear_template_cache()
    # Rewrite include and global server ACL file if required.
    # Trap file IO errors as event queue can't handle them.
    try:
        tmp_dir = settings['master_config_dir']
        # master server ACL file
        acl_prefix = ('.' + basename(settings['master_server_acl_file'])
                      + '-')
        acl_file, tmp_filename = open_tmp_file(acl_prefix)
        # Create Master ACL file
        server_acl_template = read_template(
            settings['master_template_dir'] + '/'
            + settings[MASTER_SERVER_ACL_TEMPLATE])
        server_acls = {}
        ServerGroup = sql_types['ServerGroup']
        query = db_session.query(ServerGroup)
        for sg in query:
            # Each SG gets its own ACL to prevent cross-SG
            # domain discovery if a server is compromised.
            sg_acl_name = sg.name + settings['acl_name_extension']
            server_acls[sg.name] = {'acl_name': sg_acl_name,
                                    'servers': ''}
            for server_sm in sg.servers:
                # Include disabled servers, as access can be shut off
                # in IPSEC and firewall!
                server_acls[sg.name]['servers'] += ("%s;\n"
                                                    % server_sm.address)
                del server_sm
            if not server_acls[sg.name]['servers']:
                server_acls[sg.name]['servers'] = 'none;\n'
            # Stop memory leaks
            del sg
        for sg_name in server_acls:
            acl_file.write(server_acl_template % server_acls[sg_name])
        clean_up_rename(acl_file, tmp_filename,
                        settings['master_server_acl_file'])
        # include file
        include_prefix = ('.' + basename(settings['master_include_file'])
                          + '-')
        include_file, tmp_filename = open_tmp_file(include_prefix)
        # Get list of zones from zone_sm, and write out each
        # config file section
        ZoneSM = sql_types['ZoneSM']
        query = ZoneSM.query_is_configured(
            db_session.query(ZoneSM)).yield_per(db_query_slice)
        for zone_sm in query:
            zone_sm.write_config(include_file, db_session, server_acls,
                                 self.replica_sg)
            del zone_sm
        clean_up_rename(include_file, tmp_filename,
                        settings['master_include_file'])
    except (IOError, OSError) as exc:
        msg = ("Could not access/write file '%s' - %s."
               % (exc.filename, exc.strerror))
        raise op_exc(msg)
    except KeyError as exc:
        msg = ("Invalid template key in template dir %s - %s"
               % (settings['master_template_dir'], str(exc)))
        raise op_exc(msg)
    finally:
        # clean up if possible
        try:
            os.unlink(tmp_filename)
        except:
            pass
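# write_config() and write_named_conf_includes() share one idiom: write to
# a mkstemp() file in the destination directory, set ownership and mode,
# then os.rename() over the target so readers only ever see a complete
# file (rename within one filesystem is atomic).  A condensed standalone
# sketch (path and mode illustrative):
import io
import os
from os.path import basename, dirname
from tempfile import mkstemp

def atomic_write(config_file_name, text, mode=0o644):
    """Atomically replace config_file_name with text."""
    tmp_dir = dirname(config_file_name) or '.'
    (fd, tmp_filename) = mkstemp(
        dir=tmp_dir, prefix='.' + basename(config_file_name) + '-')
    try:
        with io.open(fd, mode='wt') as tmp_file:
            tmp_file.write(text)
        os.chmod(tmp_filename, mode)
        os.rename(tmp_filename, config_file_name)
    except Exception:
        os.unlink(tmp_filename)
        raise

# atomic_write('/tmp/named-include.conf', 'include "x.conf";\n')  # example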