Code example #1
    def __init__(self, config_path):
        self.stats = None
        self.log = logging.getLogger("pghoard")
        self.log_level = None
        self.running = True
        self.config_path = config_path
        self.compression_queue = Queue()
        self.transfer_queue = Queue()
        self.syslog_handler = None
        self.config = {}
        self.site_transfers = {}
        self.state = {
            "backup_sites": {},
            "startup_time": datetime.datetime.utcnow().isoformat(),
        }
        self.load_config()

        if not os.path.exists(self.config["backup_location"]):
            os.makedirs(self.config["backup_location"])

        signal.signal(signal.SIGHUP, self.load_config)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)
        self.time_of_last_backup = {}
        self.time_of_last_backup_check = {}
        self.basebackups = {}
        self.basebackups_callbacks = {}
        self.receivexlogs = {}
        self.compressors = []
        self.walreceivers = {}
        self.transfer_agents = []
        self.requested_basebackup_sites = set()

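        # The inotify watcher and the web server feed work into the same queues
        # that the compressor and transfer threads below consume.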
        self.inotify = InotifyWatcher(self.compression_queue)
        self.webserver = WebServer(
            self.config,
            self.requested_basebackup_sites,
            self.compression_queue,
            self.transfer_queue)

        for _ in range(self.config["compression"]["thread_count"]):
            compressor = CompressorThread(
                config_dict=self.config,
                compression_queue=self.compression_queue,
                transfer_queue=self.transfer_queue,
                stats=self.stats)
            self.compressors.append(compressor)

        compressor_state = {}  # shared among transfer agents
        for _ in range(self.config["transfer"]["thread_count"]):
            ta = TransferAgent(
                config=self.config,
                compression_queue=self.compression_queue,
                transfer_queue=self.transfer_queue,
                stats=self.stats,
                shared_state_dict=compressor_state)
            self.transfer_agents.append(ta)

        logutil.notify_systemd("READY=1")
        self.log.info("pghoard initialized, own_hostname: %r, cwd: %r", socket.gethostname(), os.getcwd())
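The constructor above only builds the worker objects; it does not start them. Below is a minimal sketch of the startup step, assuming CompressorThread and TransferAgent are thread-like objects with a start() method (code example #2 does call compressor.start()); the helper name and body are illustrative, not verbatim pghoard code.

    def start_threads_on_startup(self):
        # Illustrative sketch: start the long-running workers created in __init__.
        # All of them communicate through the shared compression and transfer queues.
        workers = [self.inotify, self.webserver] + self.compressors + self.transfer_agents
        for worker in workers:
            worker.start()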
Code example #2
File: test_compressor.py  Project: zvolsky/pghoard
    def setup_method(self, method):
        super().setup_method(method)
        self.log = logging.getLogger(str(method))
        self.config = self.config_template()
        self.config["backup_sites"][self.test_site] = {
            "encryption_key_id": None,
            "encryption_keys": {
                "testkey": {
                    "public": CONSTANT_TEST_RSA_PUBLIC_KEY,
                    "private": CONSTANT_TEST_RSA_PRIVATE_KEY
                },
            },
            "object_storage": {
                "storage_type": "s3",
            },
            "pg_version": 90500,
        }
        self.config["compression"]["algorithm"] = self.algorithm
        self.compression_queue = Queue()
        self.transfer_queue = Queue()
        self.incoming_path = os.path.join(self.temp_dir, self.test_site,
                                          "xlog")
        os.makedirs(self.incoming_path)
        self.handled_path = os.path.join(self.config["backup_location"],
                                         self.test_site, "xlog")
        os.makedirs(self.handled_path)

        self.compressor = CompressorThread(
            config_dict=self.config,
            compression_queue=self.compression_queue,
            transfer_queue=self.transfer_queue,
            stats=statsd.StatsClient(host=None),
        )
        self.compressor.start()
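A test built on this setup would typically drive the compressor through the two queues it was constructed with. The sketch below shows the general shape only; the event dictionary keys ("type", "src_path", "full_path") are assumptions about pghoard's internal queue protocol, not a verified API.

    def test_compress_wal_segment_sketch(self):
        # Hypothetical test body; the event keys are assumptions, not verbatim pghoard code.
        wal_path = os.path.join(self.incoming_path, "000000010000000000000001")
        with open(wal_path, "wb") as out:
            out.write(os.urandom(1024))
        self.compression_queue.put({
            "type": "MOVE",
            "src_path": wal_path + ".partial",
            "full_path": wal_path,
        })
        # The CompressorThread started in setup_method should compress the file
        # and hand the result to the transfer agents via transfer_queue.
        transfer_event = self.transfer_queue.get(timeout=5.0)
        assert transfer_event is not None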
Code example #3
File: test_compressor.py  Project: devopsbox/pghoard
    def setup_method(self, method):
        super().setup_method(method)
        self.config = self.config_template()
        self.config["backup_sites"][self.test_site] = {
            "encryption_key_id": None,
            "encryption_keys": {
                "testkey": {
                    "public": CONSTANT_TEST_RSA_PUBLIC_KEY,
                    "private": CONSTANT_TEST_RSA_PRIVATE_KEY
                },
            },
            "object_storage": {
                "storage_type": "s3",
            },
            "pg_version": 90500,
        }
        self.config["compression"]["algorithm"] = self.algorithm
        self.compression_queue = Queue()
        self.transfer_queue = Queue()
        self.incoming_path = os.path.join(self.temp_dir, self.test_site,
                                          "xlog")
        os.makedirs(self.incoming_path)
        self.handled_path = os.path.join(self.config["backup_location"],
                                         self.test_site, "xlog")
        os.makedirs(self.handled_path)
        self.random_file_path = os.path.join(self.incoming_path,
                                             "00000001000000000000000C")
        self.random_file_path_partial = os.path.join(
            self.incoming_path, "00000001000000000000000C.partial")

        # Create a completely random file, bigger than the block size; random data is
        # incompressible, so the compressed output ends up slightly longer than the input
        self.random_file_contents = os.urandom(IO_BLOCK_SIZE * 2)
        with open(self.random_file_path, "wb") as out:
            out.write(self.random_file_contents)
            self.random_file_size = out.tell()

        # Create an easily compressible test file, too (with random prefix and suffix)
        self.zero_file_path = os.path.join(self.incoming_path,
                                           "00000001000000000000000D")
        self.zero_file_path_partial = os.path.join(
            self.incoming_path, "00000001000000000000000D.partial")

        # Ensure the plaintext file is bigger than the block size and almost entirely zeros,
        # so its compressed output is smaller than the input
        zeros = (IO_BLOCK_SIZE * 2 - 32) * b"\x00"
        self.zero_file_contents = os.urandom(16) + zeros + os.urandom(16)
        with open(self.zero_file_path, "wb") as out:
            out.write(self.zero_file_contents)
            self.zero_file_size = out.tell()

        self.compressor = CompressorThread(
            config=self.config,
            compression_queue=self.compression_queue,
            transfer_queue=self.transfer_queue)
        self.compressor.start()
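The two test files prepared above exercise opposite ends of the same property: purely random bytes are essentially incompressible (the output grows slightly because of format overhead), while a mostly-zero buffer shrinks dramatically. A standalone check with the standard-library lzma module, one of the algorithms pghoard supports, illustrates the expectation; the IO_BLOCK_SIZE value below is illustrative, not pghoard's own constant.

import lzma
import os

IO_BLOCK_SIZE = 1024 * 1024  # illustrative block size, not pghoard's constant

random_data = os.urandom(IO_BLOCK_SIZE * 2)
zero_heavy = os.urandom(16) + b"\x00" * (IO_BLOCK_SIZE * 2 - 32) + os.urandom(16)

# Random data cannot be compressed, so container overhead makes the output larger.
assert len(lzma.compress(random_data)) > len(random_data)
# The long run of zeros compresses to a tiny fraction of the input.
assert len(lzma.compress(zero_heavy)) < len(zero_heavy)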
Code example #4
File: pghoard.py  Project: mvandermeulen/pghoard
    def __init__(self, config_path):
        self.metrics = None
        self.log = logging.getLogger("pghoard")
        self.log_level = None
        self.running = True
        self.config_path = config_path
        self.compression_queue = Queue()
        self.transfer_queue = Queue()
        self.syslog_handler = None
        self.basebackups = {}
        self.basebackups_callbacks = {}
        self.receivexlogs = {}
        self.compressors = []
        self.walreceivers = {}
        self.transfer_agents = []
        self.config = {}
        self.mp_manager = None
        self.site_transfers = {}
        self.state = {
            "backup_sites": {},
            "startup_time": datetime.datetime.utcnow().isoformat(),
        }
        self.transfer_agent_state = {}  # shared among transfer agents
        self.load_config()
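        # Set up a multiprocessing manager only when more than one transfer thread is configured.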
        if self.config["transfer"]["thread_count"] > 1:
            self.mp_manager = multiprocessing.Manager()

        if not os.path.exists(self.config["backup_location"]):
            os.makedirs(self.config["backup_location"])

        # Read transfer_agent_state from state file if available so that there's no disruption
        # in the metrics we send out as a result of process restart
        state_file_path = self.config["json_state_file_path"]
        if os.path.exists(state_file_path):
            with open(state_file_path, "r") as fp:
                state = json.load(fp)
                self.transfer_agent_state = state.get(
                    "transfer_agent_state") or {}

        signal.signal(signal.SIGHUP, self.load_config)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)
        self.time_of_last_backup_check = {}
        self.requested_basebackup_sites = set()

        self.inotify = InotifyWatcher(self.compression_queue)
        self.webserver = WebServer(self.config,
                                   self.requested_basebackup_sites,
                                   self.compression_queue, self.transfer_queue,
                                   self.metrics)

        for _ in range(self.config["compression"]["thread_count"]):
            compressor = CompressorThread(
                config_dict=self.config,
                compression_queue=self.compression_queue,
                transfer_queue=self.transfer_queue,
                metrics=self.metrics)
            self.compressors.append(compressor)

        for _ in range(self.config["transfer"]["thread_count"]):
            ta = TransferAgent(config=self.config,
                               compression_queue=self.compression_queue,
                               mp_manager=self.mp_manager,
                               transfer_queue=self.transfer_queue,
                               metrics=self.metrics,
                               shared_state_dict=self.transfer_agent_state)
            self.transfer_agents.append(ta)

        logutil.notify_systemd("READY=1")
        self.log.info("pghoard initialized, own_hostname: %r, cwd: %r",
                      socket.gethostname(), os.getcwd())
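The state-file read above has a natural counterpart at shutdown: writing transfer_agent_state back into json_state_file_path so the metrics pick up where they left off after a restart. pghoard's real persistence code is not shown in these examples; the snippet below is only a sketch of what such a write could look like, reusing self.state and self.transfer_agent_state as defined above.

        # Hypothetical shutdown-side persistence, mirroring the read in __init__.
        state = dict(self.state)
        state["transfer_agent_state"] = self.transfer_agent_state
        with open(self.config["json_state_file_path"], "w") as fp:
            json.dump(state, fp)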