def _get_machines_cache_from_dns(self, host, port):
    """Resolve *host* and return the deduplicated list of member URLs.

    One host name might be resolved into multiple ip addresses, so a single
    (host, port) pair can expand into several URLs.  If resolution yields
    nothing (or the protocol is https), fall back to a single URL built
    from the literal host and port.
    """
    if self.protocol == 'http':
        # NOTE: a bare map() object is always truthy on Python 3, which would
        # make the fallback below unreachable even for an empty resolution
        # result — materialize the list before testing it.
        ret = [uri(self.protocol, res[-1][:2]) for res in self._dns_resolver.resolve(host, port)]
        if ret:
            return list(set(ret))
    return [uri(self.protocol, (host, port))]
def __init__(self, host='127.0.0.1', port=8500, token=None, scheme='http', verify=True, cert=None, ca_cert=None):
    """Set up the HTTP client and its urllib3 connection pool.

    ``cert`` may be either a single combined PEM file or a ``(cert, key)``
    tuple of separate files; ``verify`` and/or ``ca_cert`` turn on server
    certificate validation.  (Default port 8500 suggests a Consul agent —
    presumably, based on the default alone.)
    """
    self.token = token
    self._read_timeout = 10
    self.base_uri = uri(scheme, (host, port))

    pool_kwargs = {}
    if cert:
        if isinstance(cert, tuple):
            # client certificate and private key live in separate files
            pool_kwargs['cert_file'] = cert[0]
            pool_kwargs['key_file'] = cert[1]
        else:
            # one combined certificate+key file
            pool_kwargs['cert_file'] = cert
    if ca_cert:
        pool_kwargs['ca_certs'] = ca_cert
    if verify or ca_cert:
        pool_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

    self.http = urllib3.PoolManager(num_pools=10, **pool_kwargs)
    self._ttl = None
def _get_machines_cache_from_srv(self, srv):
    """Fetch list of etcd-cluster members by resolving the ``_etcd-server._tcp.`` SRV record.

    This record should contain a list of hosts and peer ports which could be used to run
    a ``GET http://{host}:{port}/members`` request (peer protocol).

    Returns a deduplicated list of client URLs (may be empty if nothing resolved).
    """
    ret = []
    # SRV name suffixes are probed in priority order: the first suffix that
    # yields any result wins (see the `break` at the bottom of the loop).
    for r in ['-client-ssl', '-client', '-ssl', '', '-server-ssl', '-server']:
        protocol = 'https' if '-ssl' in r else 'http'
        # '-server' records point at the peer port, so member client URLs
        # must be fetched from the /members endpoint instead of used directly.
        endpoint = '/members' if '-server' in r else ''
        for host, port in self.get_srv_record('_etcd{0}._tcp.{1}'.format(r, srv)):
            url = uri(protocol, (host, port), endpoint)
            if endpoint:
                try:
                    # NOTE(review): verify=False disables TLS validation here —
                    # presumably deliberate for peer-port discovery; confirm.
                    response = requests.get(url, timeout=self.read_timeout, verify=False)
                    if response.ok:
                        for member in response.json():
                            ret.extend(member['clientURLs'])
                        # one successful /members response describes the whole
                        # cluster; no need to query the remaining SRV targets
                        break
                except RequestException:
                    logger.exception('GET %s', url)
            else:
                ret.append(url)
        if ret:
            # remember which protocol the winning record family uses
            self._protocol = protocol
            break
    else:
        # loop completed without break: no SRV suffix produced any member
        logger.warning('Can not resolve SRV for %s', srv)
    return list(set(ret))
def _query_exhibitors(self, exhibitors):
    """Query exhibitor hosts in random order; return the first JSON payload, or None."""
    random.shuffle(exhibitors)
    for host in exhibitors:
        url = uri('http', (host, self._exhibitor_port), self._uri_path)
        try:
            return requests.get(url, timeout=self.TIMEOUT).json()
        except RequestException:
            # this host is unreachable or slow — try the next one
            continue
    return None
def conn_url(self):
    """Return the member's connection URL, deriving and caching it from
    ``conn_kwargs`` when no explicit ``conn_url`` is stored.

    Returns None implicitly when neither value is available.
    """
    url = self.data.get('conn_url')
    if url:
        return url
    kwargs = self.data.get('conn_kwargs')
    if kwargs:
        url = uri('postgresql', (kwargs.get('host'), kwargs.get('port', 5432)))
        # memoize so subsequent lookups skip the rebuild
        self.data['conn_url'] = url
        return url
def _query_exhibitors(self, exhibitors):
    """Poll exhibitor hosts in random order and return the decoded JSON
    body of the first successful response, or None if all hosts fail."""
    random.shuffle(exhibitors)
    for host in exhibitors:
        url = uri('http', (host, self._exhibitor_port), self._uri_path)
        try:
            response = requests_get(url, timeout=self.TIMEOUT)
            return json.loads(response.data.decode('utf-8'))
        except Exception:
            # best-effort: log at debug level and move on to the next host
            logging.debug('Request to %s failed', host)
    return None
def reload_config(self, config):
    """Apply a new REST API configuration section at runtime.

    Re-binds the listener only when the listen address or SSL options
    actually changed, then refreshes the auth key and the advertised
    connection string.

    :raises ValueError: if the mandatory ``listen`` key is missing.
    """
    if 'listen' not in config:  # changing config in runtime
        raise ValueError('Can not find "restapi.listen" config')

    ssl_options = {name: config[name] for name in ('certfile', 'keyfile', 'cafile') if name in config}

    verify_client = config.get('verify_client')
    if isinstance(verify_client, six.string_types):
        ssl_options['verify_client'] = verify_client.lower()

    # only re-initialize the listener when something relevant changed
    if self.__listen != config['listen'] or self.__ssl_options != ssl_options:
        self.__initialize(config['listen'], ssl_options)

    if 'auth' in config:
        self.__auth_key = base64.b64encode(config['auth'].encode('utf-8')).decode('utf-8')
    else:
        self.__auth_key = None

    self.connection_string = uri(self.__protocol, config.get('connect_address') or self.__listen, 'patroni')
def call_post_bootstrap(self, config):
    """Run a script after initdb or a custom bootstrap script is called and
    wait until completion.

    Accepts either ``post_bootstrap`` or the legacy ``post_init`` config key.
    Returns True on success (or when no script is configured), False when the
    script could not be started or exited with a non-zero code.
    """
    cmd = config.get('post_bootstrap') or config.get('post_init')
    if cmd:
        r = self._postgresql.config.local_connect_kwargs
        if 'host' in r:
            # '/tmp' => '%2Ftmp' for unix socket path
            host = quote_plus(r['host']) if r['host'].startswith('/') else r['host']
        else:
            host = ''
            # https://www.postgresql.org/docs/current/static/libpq-pgpass.html
            # A host name of localhost matches both TCP (host name localhost) and Unix domain socket
            # (pghost empty or the default socket directory) connections coming from the local machine.
            r['host'] = 'localhost'  # set it to localhost to write into pgpass
        if 'user' in r:
            user = r['user']
        else:
            user = ''
            if 'password' in r:
                # pgpass needs a concrete user name; default to PGUSER or the OS user
                import getpass
                r.setdefault('user', os.environ.get('PGUSER', getpass.getuser()))

        connstring = uri('postgres', (host, r['port']), r['database'], user)
        # write credentials to a pgpass file so the script can connect non-interactively
        env = self._postgresql.write_pgpass(r) if 'password' in r else None

        try:
            ret = self._postgresql.cancellable.call(shlex.split(cmd) + [connstring], env=env)
        except OSError:
            logger.error('post_init script %s failed', cmd)
            return False
        if ret != 0:
            logger.error('post_init script %s returned non-zero code %d', cmd, ret)
            return False
    return True
def _get_machines_cache_from_config(self):
    """Build the initial list of member URLs from static configuration.

    Resolution order: an explicit proxy wins outright, then SRV discovery,
    then a literal ``hosts`` list, and finally single host/port DNS lookup.
    """
    if 'proxy' in self._config:
        # everything goes through the proxy; no discovery needed
        return [uri(self.protocol, (self._config['host'], self._config['port']))]

    cache = []
    if 'srv' in self._config:
        cache = self._get_machines_cache_from_srv(self._config['srv'])
    if not cache and 'hosts' in self._config:
        cache = list(self._config['hosts'])
    if not cache and 'host' in self._config:
        cache = self._get_machines_cache_from_dns(self._config['host'], self._config['port'])
    return cache
def __set_config_parameters(self, config):
    """Refresh the basic-auth key and the advertised connection string from *config*."""
    if 'auth' in config:
        self.__auth_key = base64.b64encode(config['auth'].encode('utf-8')).decode('utf-8')
    else:
        self.__auth_key = None
    advertised = config.get('connect_address') or self.__listen
    self.connection_string = uri(self.__protocol, advertised, 'patroni')
def get_etcd_client(config):
    """Normalize the etcd configuration dict, patch urllib3 to use a caching
    DNS resolver, and keep retrying until an etcd client can be created.

    NOTE: mutates *config* in place and monkeypatches
    ``urllib3.util.connection.create_connection`` process-wide.
    Blocks (with 5-second sleeps) until the cluster is reachable.
    """
    # a proxy is treated as the single 'url' endpoint
    if 'proxy' in config:
        config['use_proxies'] = True
        config['url'] = config['proxy']

    if 'url' in config:
        # explode a full URL into its individual connection parameters
        r = urlparse(config['url'])
        config.update({'protocol': r.scheme, 'host': r.hostname, 'port': r.port or 2379,
                       'username': r.username, 'password': r.password})
    elif 'hosts' in config:
        # 'hosts' may be a comma-separated string or a list; rebuild it as URLs
        hosts = config.pop('hosts')
        default_port = config.pop('port', 2379)
        protocol = config.get('protocol', 'http')
        if isinstance(hosts, six.string_types):
            hosts = hosts.split(',')
        config['hosts'] = []
        for value in hosts:
            if isinstance(value, six.string_types):
                config['hosts'].append(uri(protocol, split_host_port(value, default_port)))
    elif 'host' in config:
        host, port = split_host_port(config['host'], 2379)
        config['host'] = host
        if 'port' not in config:
            config['port'] = int(port)

    # map alternate spellings onto the canonical keys
    if config.get('cacert'):
        config['ca_cert'] = config.pop('cacert')
    if config.get('key') and config.get('cert'):
        config['cert'] = (config['cert'], config['key'])
    for p in ('discovery_srv', 'srv_domain'):
        if p in config:
            config['srv'] = config.pop(p)

    dns_resolver = DnsCachingResolver()

    def create_connection_patched(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                                  source_address=None, socket_options=None):
        # drop-in replacement for urllib3's create_connection that resolves
        # through dns_resolver instead of a fresh getaddrinfo() per call
        host, port = address
        if host.startswith('['):
            # bracketed IPv6 literal
            host = host.strip('[]')
        err = None
        for af, socktype, proto, _, sa in dns_resolver.resolve(host, port):
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                if socket_options:
                    for opt in socket_options:
                        sock.setsockopt(*opt)
                if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                    sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            except socket.error as e:
                # remember the last failure and try the next resolved address
                err = e
                if sock is not None:
                    sock.close()
                    sock = None
        if err is not None:
            raise err
        raise socket.error("getaddrinfo returns an empty list")

    urllib3.util.connection.create_connection = create_connection_patched

    client = None
    while not client:
        try:
            client = Client(config, dns_resolver)
            # when going through proxies, an empty machine list means the
            # proxy could not reach the cluster — treat it as a failure
            if 'use_proxies' in config and not client.machines:
                raise etcd.EtcdException
        except etcd.EtcdException:
            logger.info('waiting on etcd')
            time.sleep(5)
    return client
def create_replica(self, clone_member):
    """Create the replica according to the replica_method defined by the user.

    This is a list, so we need to loop through all methods the user supplies,
    in priority order, until one succeeds.

    :param clone_member: the member (possibly a RemoteMember) to clone from;
        may be falsy, in which case only methods that can work without a
        replication connection are attempted.
    :returns: exit code of the last attempted method (0 on success).
    """
    self._postgresql.set_state('creating replica')
    self._postgresql.schedule_sanity_checks_after_pause()

    is_remote_master = isinstance(clone_member, RemoteMember)

    # get list of replica methods either from clone member or from
    # the config. If there is no configuration key, or no value is
    # specified, use basebackup
    replica_methods = (clone_member.create_replica_methods if is_remote_master
                       else self._postgresql.create_replica_methods) or ['basebackup']

    if clone_member and clone_member.conn_url:
        r = clone_member.conn_kwargs(self._postgresql.config.replication)
        connstring = uri('postgres', (r['host'], r['port']), r['database'], r['user'])
        # add the credentials to connect to the replica origin to pgpass.
        env = self._postgresql.write_pgpass(r)
    else:
        connstring = ''
        env = os.environ.copy()
        # if we don't have any source, leave only replica methods that work without it
        replica_methods = [r for r in replica_methods
                           if self._postgresql.replica_method_can_work_without_replication_connection(r)]

    # go through them in priority order
    ret = 1
    for replica_method in replica_methods:
        if self._postgresql.cancellable.is_cancelled:
            break

        method_config = self._postgresql.replica_method_options(replica_method)

        # if the method is basebackup, then use the built-in
        if replica_method == "basebackup":
            ret = self.basebackup(connstring, env, method_config)
            if ret == 0:
                logger.info("replica has been created using basebackup")
                # if basebackup succeeds, exit with success
                break
        else:
            if not self._postgresql.data_directory_empty():
                if method_config.get('keep_data', False):
                    logger.info('Leaving data directory uncleaned')
                else:
                    self._postgresql.remove_data_directory()

            cmd = replica_method
            # user-defined method; check for configuration
            # not required, actually
            if method_config:
                # look to see if the user has supplied a full command path
                # if not, use the method name as the command
                cmd = method_config.pop('command', cmd)

            # add the default parameters
            if not method_config.get('no_params', False):
                method_config.update({"scope": self._postgresql.scope,
                                      "role": "replica",
                                      "datadir": self._postgresql.data_dir,
                                      "connstring": connstring})
            else:
                # flags meant for Patroni itself must not leak to the script
                for param in ('no_params', 'no_master', 'keep_data'):
                    method_config.pop(param, None)
            params = ["--{0}={1}".format(arg, val) for arg, val in method_config.items()]
            try:
                # call script with the full set of parameters
                ret = self._postgresql.cancellable.call(shlex.split(cmd) + params, env=env)
                # if we succeeded, stop
                if ret == 0:
                    logger.info('replica has been created using %s', replica_method)
                    break
                else:
                    logger.error('Error creating replica using method %s: %s exited with code=%s',
                                 replica_method, cmd, ret)
            except Exception:
                logger.exception('Error creating replica using method %s', replica_method)
                ret = 1

    self._postgresql.set_state('stopped')
    return ret