def __init__(self, config_path):
    """Set up the pghoard daemon: config, queues, workers and signal handlers.

    :param config_path: path to the JSON configuration file; it is loaded
        immediately via ``self.load_config()`` and re-loaded on SIGHUP.
    """
    self.stats = None  # stats client; None until configured elsewhere
    self.log = logging.getLogger("pghoard")
    self.log_level = None
    self.running = True
    self.config_path = config_path
    # WAL/basebackup files flow: inotify/webserver -> compression_queue ->
    # compressors -> transfer_queue -> transfer agents -> object storage.
    self.compression_queue = Queue()
    self.transfer_queue = Queue()
    self.syslog_handler = None
    self.config = {}
    self.site_transfers = {}
    self.state = {
        "backup_sites": {},
        "startup_time": datetime.datetime.utcnow().isoformat(),
    }
    self.load_config()
    # FIX: use exist_ok=True instead of exists()+makedirs() to avoid the
    # TOCTOU race where the directory is created between check and call.
    os.makedirs(self.config["backup_location"], exist_ok=True)
    # SIGHUP reloads configuration; SIGINT/SIGTERM request clean shutdown.
    signal.signal(signal.SIGHUP, self.load_config)
    signal.signal(signal.SIGINT, self.quit)
    signal.signal(signal.SIGTERM, self.quit)
    self.time_of_last_backup = {}
    self.time_of_last_backup_check = {}
    self.basebackups = {}
    self.basebackups_callbacks = {}
    self.receivexlogs = {}
    self.compressors = []
    self.walreceivers = {}
    self.transfer_agents = []
    self.requested_basebackup_sites = set()
    self.inotify = InotifyWatcher(self.compression_queue)
    self.webserver = WebServer(
        self.config,
        self.requested_basebackup_sites,
        self.compression_queue,
        self.transfer_queue)
    for _ in range(self.config["compression"]["thread_count"]):
        compressor = CompressorThread(
            config_dict=self.config,
            compression_queue=self.compression_queue,
            transfer_queue=self.transfer_queue,
            stats=self.stats)
        self.compressors.append(compressor)
    # FIX (naming): this dict is shared among the *transfer agents*, as the
    # original comment itself said; the old name "compressor_state" was
    # misleading. Purely a local rename, no behavior change.
    transfer_agent_state = {}  # shared among transfer agents
    for _ in range(self.config["transfer"]["thread_count"]):
        ta = TransferAgent(
            config=self.config,
            compression_queue=self.compression_queue,
            transfer_queue=self.transfer_queue,
            stats=self.stats,
            shared_state_dict=transfer_agent_state)
        self.transfer_agents.append(ta)
    logutil.notify_systemd("READY=1")
    self.log.info("pghoard initialized, own_hostname: %r, cwd: %r",
                  socket.gethostname(), os.getcwd())
def __init__(self, config_path):
    """Set up the pghoard daemon: config, queues, workers and signal handlers.

    :param config_path: path to the JSON configuration file; it is loaded
        immediately via ``self.load_config()`` and re-loaded on SIGHUP.
    """
    self.metrics = None  # metrics client; None until configured elsewhere
    self.log = logging.getLogger("pghoard")
    self.log_level = None
    self.running = True
    self.config_path = config_path
    # WAL/basebackup files flow: inotify/webserver -> compression_queue ->
    # compressors -> transfer_queue -> transfer agents -> object storage.
    self.compression_queue = Queue()
    self.transfer_queue = Queue()
    self.syslog_handler = None
    self.basebackups = {}
    self.basebackups_callbacks = {}
    self.receivexlogs = {}
    self.compressors = []
    self.walreceivers = {}
    self.transfer_agents = []
    self.config = {}
    self.mp_manager = None
    self.site_transfers = {}
    self.state = {
        "backup_sites": {},
        "startup_time": datetime.datetime.utcnow().isoformat(),
    }
    self.transfer_agent_state = {}  # shared among transfer agents
    self.load_config()
    # A multiprocessing manager is only needed when several transfer agent
    # processes must share state.
    if self.config["transfer"]["thread_count"] > 1:
        self.mp_manager = multiprocessing.Manager()
    # FIX: use exist_ok=True instead of exists()+makedirs() to avoid the
    # TOCTOU race where the directory is created between check and call.
    os.makedirs(self.config["backup_location"], exist_ok=True)
    # Read transfer_agent_state from state file if available so that there's
    # no disruption in the metrics we send out as a result of process restart.
    # FIX: EAFP try/open instead of exists()+open() removes the race where
    # the file disappears between the check and the open; invalid JSON still
    # raises exactly as before.
    state_file_path = self.config["json_state_file_path"]
    try:
        with open(state_file_path, "r") as fp:
            state = json.load(fp)
    except FileNotFoundError:
        pass
    else:
        self.transfer_agent_state = state.get("transfer_agent_state") or {}
    # SIGHUP reloads configuration; SIGINT/SIGTERM request clean shutdown.
    signal.signal(signal.SIGHUP, self.load_config)
    signal.signal(signal.SIGINT, self.quit)
    signal.signal(signal.SIGTERM, self.quit)
    self.time_of_last_backup_check = {}
    self.requested_basebackup_sites = set()
    self.inotify = InotifyWatcher(self.compression_queue)
    self.webserver = WebServer(
        self.config,
        self.requested_basebackup_sites,
        self.compression_queue,
        self.transfer_queue,
        self.metrics)
    for _ in range(self.config["compression"]["thread_count"]):
        compressor = CompressorThread(
            config_dict=self.config,
            compression_queue=self.compression_queue,
            transfer_queue=self.transfer_queue,
            metrics=self.metrics)
        self.compressors.append(compressor)
    for _ in range(self.config["transfer"]["thread_count"]):
        ta = TransferAgent(
            config=self.config,
            compression_queue=self.compression_queue,
            mp_manager=self.mp_manager,
            transfer_queue=self.transfer_queue,
            metrics=self.metrics,
            shared_state_dict=self.transfer_agent_state)
        self.transfer_agents.append(ta)
    logutil.notify_systemd("READY=1")
    self.log.info("pghoard initialized, own_hostname: %r, cwd: %r",
                  socket.gethostname(), os.getcwd())