Example #1
def _create_db_node_state(pg_last_xlog_receive_location=None, pg_is_in_recovery=True,
                          connection=True, replication_time_lag=None, fetch_time=None,
                          db_time=None):
    return {
        "connection": connection,
        "db_time": get_iso_timestamp(db_time),
        "fetch_time": get_iso_timestamp(fetch_time),
        "pg_is_in_recovery": pg_is_in_recovery,
        "pg_last_xact_replay_timestamp": None,
        "pg_last_xlog_receive_location": pg_last_xlog_receive_location,
        "pg_last_xlog_replay_location": None,
        "replication_time_lag": replication_time_lag,
    }
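
This helper builds the per-node state dictionaries that the rest of the suite feeds into cluster and observer state. A minimal usage sketch, assuming an illustrative lag value; the resulting dict has the same shape as the entries used in the cluster_state tests below:

standby_state = _create_db_node_state(
    pg_last_xlog_receive_location="0/9000090",
    replication_time_lag=42.0,
)
# One entry per node, keyed by address, as in pgl.cluster_state below.
cluster_state = {"10.255.255.10": standby_state}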
Example #2
def test_replication_positions(pgl):
    standby_nodes = {
        "10.255.255.10": {
            "connection": True,
            "db_time": "2014-08-28T14:09:57.919301+00:00Z",
            "fetch_time": "2014-08-28T14:09:57.918753Z",
            "pg_is_in_recovery": True,
            "pg_last_xlog_receive_location": "0/9000090",
            "pg_last_xlog_replay_location": "0/9000090",
            "pg_last_xact_replay_timestamp":
            "2014-08-28T14:05:43.577357+00:00Z",
            "replication_time_lag": 254.341944,
        },
    }
    # the above node shouldn't show up as its fetch_time is (way) older than 20 seconds
    positions = {}
    assert pgl.get_replication_positions(standby_nodes) == positions
    standby_nodes["10.255.255.10"]["fetch_time"] = get_iso_timestamp()
    positions[0x9000090] = set(["10.255.255.10"])
    assert pgl.get_replication_positions(standby_nodes) == positions
    # add another standby, further ahead
    standby_nodes["10.255.255.11"] = dict(
        standby_nodes["10.255.255.10"],
        pg_last_xlog_receive_location="1/0000AAAA")
    positions[1 << 32 | 0xAAAA] = set(["10.255.255.11"])
    assert pgl.get_replication_positions(standby_nodes) == positions
    # add another standby which hasn't received anything
    standby_nodes["10.255.255.12"] = dict(standby_nodes["10.255.255.10"],
                                          pg_last_xlog_receive_location=None)
    positions[0x9000090].add("10.255.255.12")
    assert pgl.get_replication_positions(standby_nodes) == positions
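
The assertions above encode the textual WAL locations as 64-bit integers: "0/9000090" becomes 0x9000090 and "1/0000AAAA" becomes 1 << 32 | 0xAAAA. A minimal sketch of that conversion; wal_location_to_int is a hypothetical name used for illustration, not pglookout's API:

def wal_location_to_int(location):
    # "X/Y" is a pair of hex numbers; X is the upper 32 bits of the 64-bit
    # WAL position, so the absolute offset is (X << 32) | Y.
    hi, lo = location.split("/")
    return (int(hi, 16) << 32) | int(lo, 16)

assert wal_location_to_int("0/9000090") == 0x9000090
assert wal_location_to_int("1/0000AAAA") == (1 << 32) | 0xAAAA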
Example #3
def test_replication_positions(self):
    standby_nodes = {
        '10.255.255.10': {
            'connection': True,
            'db_time': '2014-08-28T14:09:57.919301+00:00Z',
            'fetch_time': '2014-08-28T14:09:57.918753Z',
            'pg_is_in_recovery': True,
            'pg_last_xlog_receive_location': '0/9000090',
            'pg_last_xlog_replay_location': '0/9000090',
            'pg_last_xact_replay_timestamp': '2014-08-28T14:05:43.577357+00:00Z',
            'replication_time_lag': 254.341944,
        },
    }
    # the above node shouldn't show up as its fetch_time is (way) older than 20 seconds
    positions = {}
    assert self.pglookout.get_replication_positions(standby_nodes) == positions
    standby_nodes['10.255.255.10']['fetch_time'] = get_iso_timestamp()
    positions[0x9000090] = set(['10.255.255.10'])
    assert self.pglookout.get_replication_positions(standby_nodes) == positions
    # add another standby, further ahead
    standby_nodes['10.255.255.11'] = dict(standby_nodes['10.255.255.10'], pg_last_xlog_receive_location='1/0000AAAA')
    positions[1 << 32 | 0xAAAA] = set(['10.255.255.11'])
    assert self.pglookout.get_replication_positions(standby_nodes) == positions
    # add another standby which hasn't received anything
    standby_nodes['10.255.255.12'] = dict(standby_nodes['10.255.255.10'], pg_last_xlog_receive_location=None)
    positions[0x9000090].add('10.255.255.12')
    assert self.pglookout.get_replication_positions(standby_nodes) == positions
Example #4
def _create_db_node_state(pg_last_xlog_receive_location=None,
                          pg_is_in_recovery=True,
                          connection=True,
                          replication_time_lag=None,
                          fetch_time=None,
                          db_time=None):
    return {
        "connection": connection,
        "db_time": get_iso_timestamp(db_time),
        "fetch_time": get_iso_timestamp(fetch_time),
        "pg_is_in_recovery": pg_is_in_recovery,
        "pg_last_xact_replay_timestamp": None,
        "pg_last_xlog_receive_location": pg_last_xlog_receive_location,
        "pg_last_xlog_replay_location": None,
        "replication_time_lag": replication_time_lag,
        "min_replication_time_lag": 0,  # simulate that we've been in sync once
    }
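
The only difference from the earlier version of this helper is the min_replication_time_lag key; per the comment, a value of 0 simulates a standby that has been fully caught up at least once. A hedged sketch of the kind of eligibility check such a field enables; the function name and threshold are assumptions for illustration, not pglookout's actual failover logic:

MAX_FAILOVER_LAG = 30.0  # hypothetical threshold, in seconds

def has_ever_been_in_sync(node_state, max_lag=MAX_FAILOVER_LAG):
    # A node whose best-ever observed lag stayed under the threshold has been
    # in sync at some point and is a plausible failover candidate.
    min_lag = node_state.get("min_replication_time_lag")
    return min_lag is not None and min_lag <= max_lag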
Example #5
def _add_to_observer_state(self, observer_name, db_name, pg_last_xlog_receive_location=None,
                           pg_is_in_recovery=True, connection=True, replication_time_lag=None,
                           fetch_time=None, db_time=None):
    db_node_state = _create_db_node_state(pg_last_xlog_receive_location, pg_is_in_recovery,
                                          connection, replication_time_lag, fetch_time=fetch_time,
                                          db_time=db_time)
    update_dict = {"fetch_time": get_iso_timestamp(),
                   "connection": True, db_name: db_node_state}
    if observer_name in self.pglookout.observer_state:
        self.pglookout.observer_state[observer_name].update(update_dict)
    else:
        self.pglookout.observer_state[observer_name] = update_dict
Example #6
def test_standbys_failover_equal_replication_positions(pgl):
    now = get_iso_timestamp(datetime.datetime.utcnow())
    pgl.cluster_state = {
        "192.168.54.183": {
            "connection": True,
            "db_time": now,
            "fetch_time": now,
            "pg_is_in_recovery": True,
            "pg_last_xact_replay_timestamp":
            "2015-04-28T11:21:56.098946+00:00Z",
            "pg_last_xlog_receive_location": "0/70004D8",
            "pg_last_xlog_replay_location": "0/70004D8",
            "replication_time_lag": 400.435871,
            "min_replication_time_lag":
            0,  # simulate that we've been in sync once
        },
        "192.168.57.180": {
            "connection": False,
            "db_time": "2015-04-28T11:21:55.830432Z",
            "fetch_time": now,
            "pg_is_in_recovery": False,
            "pg_last_xact_replay_timestamp": None,
            "pg_last_xlog_receive_location": None,
            "pg_last_xlog_replay_location": None,
            "replication_time_lag": 0.0,
            "min_replication_time_lag":
            0,  # simulate that we've been in sync once
        },
        "192.168.63.4": {
            "connection": True,
            "db_time": now,
            "fetch_time": now,
            "pg_is_in_recovery": True,
            "pg_last_xact_replay_timestamp":
            "2015-04-28T11:21:56.098946+00:00Z",
            "pg_last_xlog_receive_location": "0/70004D8",
            "pg_last_xlog_replay_location": "0/70004D8",
            "replication_time_lag": 401.104655,
            "min_replication_time_lag":
            0,  # simulate that we've been in sync once
        },
    }
    pgl.current_master = "192.168.57.180"
    # We select the node with the "highest" identifier so call_count should stay zero if we're not the
    # highest standby currently.
    pgl.own_db = "192.168.54.183"
    pgl.check_cluster_state()
    assert pgl.execute_external_command.call_count == 0
    # If we're the highest we should see call_count increment
    pgl.own_db = "192.168.63.4"
    pgl.check_cluster_state()
    assert pgl.execute_external_command.call_count == 1
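
The test pins down the tie-breaking rule mentioned in the comment: when several standbys share the furthest replication position, only the one with the greatest identifier runs the failover command. A minimal sketch of such a rule under that assumption; should_i_failover is an illustrative name, not pglookout's code:

def should_i_failover(own_db, positions):
    # positions maps a WAL position integer to the set of standbys at that
    # position, as built by get_replication_positions() above.
    furthest_standbys = positions[max(positions)]
    return own_db == max(furthest_standbys)

positions = {0x70004D8: {"192.168.54.183", "192.168.63.4"}}
assert not should_i_failover("192.168.54.183", positions)
assert should_i_failover("192.168.63.4", positions)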
Example #7
def test_standbys_failover_equal_replication_positions(self):
    now = get_iso_timestamp(datetime.datetime.utcnow())
    self.pglookout.cluster_state = {
        "192.168.54.183": {
            "connection": True,
            "db_time": now,
            "fetch_time": now,
            "pg_is_in_recovery": True,
            "pg_last_xact_replay_timestamp": "2015-04-28T11:21:56.098946+00:00Z",
            "pg_last_xlog_receive_location": "0/70004D8",
            "pg_last_xlog_replay_location": "0/70004D8",
            "replication_time_lag": 400.435871,
        },
        "192.168.57.180": {
            "connection": False,
            "db_time": "2015-04-28T11:21:55.830432Z",
            "fetch_time": now,
            "pg_is_in_recovery": False,
            "pg_last_xact_replay_timestamp": None,
            "pg_last_xlog_receive_location": None,
            "pg_last_xlog_replay_location": None,
            "replication_time_lag": 0.0,
        },
        "192.168.63.4": {
            "connection": True,
            "db_time": now,
            "fetch_time": now,
            "pg_is_in_recovery": True,
            "pg_last_xact_replay_timestamp": "2015-04-28T11:21:56.098946+00:00Z",
            "pg_last_xlog_receive_location": "0/70004D8",
            "pg_last_xlog_replay_location": "0/70004D8",
            "replication_time_lag": 401.104655,
        },
    }
    self.pglookout.current_master = "192.168.57.180"
    # We select the node with the "highest" identifier so call_count should stay zero if we're not the
    # highest standby currently.
    self.pglookout.own_db = "192.168.54.183"
    self.pglookout.check_cluster_state()
    self.assertEqual(self.pglookout.execute_external_command.call_count, 0)
    # If we're the highest we should see call_count increment
    self.pglookout.own_db = "192.168.63.4"
    self.pglookout.check_cluster_state()
    self.assertEqual(self.pglookout.execute_external_command.call_count, 1)
Example #8
def _add_to_observer_state(pgl,
                           observer_name,
                           db_name,
                           pg_last_xlog_receive_location=None,
                           pg_is_in_recovery=True,
                           connection=True,
                           replication_time_lag=None,
                           fetch_time=None,
                           db_time=None):
    db_node_state = _create_db_node_state(pg_last_xlog_receive_location,
                                          pg_is_in_recovery,
                                          connection,
                                          replication_time_lag,
                                          fetch_time=fetch_time,
                                          db_time=db_time)
    update_dict = {
        "fetch_time": get_iso_timestamp(),
        "connection": True,
        db_name: db_node_state,
    }
    if observer_name in pgl.observer_state:
        pgl.observer_state[observer_name].update(update_dict)
    else:
        pgl.observer_state[observer_name] = update_dict
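
A hypothetical call showing how a test might seed observer state with this helper; the observer name and the lag value are illustrative, not taken from the suite above:

_add_to_observer_state(pgl, "observer_a", "10.255.255.10",
                       pg_last_xlog_receive_location="0/9000090",
                       replication_time_lag=0.5)
# The observer entry carries its own fetch_time/connection plus the
# per-database node state keyed by db_name.
assert "10.255.255.10" in pgl.observer_state["observer_a"]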
Example #9
def test_get_iso_timestamp():
    v = get_iso_timestamp()
    assert ISO_EXT_RE.match(v)
    ts = datetime.datetime.now()
    v = get_iso_timestamp(ts)
    assert parse_iso_datetime(v) == ts
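
The test asserts that the default output matches ISO_EXT_RE and that parse_iso_datetime() inverts get_iso_timestamp(). A minimal sketch of helpers that would satisfy this round trip, assuming naive datetimes and a trailing "Z"; these are stand-ins for illustration, not the actual pglookout.common implementations:

import datetime
import re

ISO_EXT_RE = re.compile(r"^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$")

def get_iso_timestamp(ts=None):
    # Default to the current UTC time, rendered as extended ISO 8601 plus "Z".
    return (ts or datetime.datetime.utcnow()).isoformat() + "Z"

def parse_iso_datetime(value):
    # Strip the "Z" suffix and parse, with or without fractional seconds.
    value = value.rstrip("Z")
    fmt = "%Y-%m-%dT%H:%M:%S.%f" if "." in value else "%Y-%m-%dT%H:%M:%S"
    return datetime.datetime.strptime(value, fmt)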