def _dict_row_factory(cursor, row):
    """Convert a raw sqlite row into a dict keyed by column name.

    Columns stored in special formats ('auth' as JSON, 'port' as text,
    'alive'/'reserved' as 0/1 integers) are decoded via dedicated
    parser callables; every other column is copied through untouched.
    """
    def _normalize_port(value):
        # Ports are persisted as text; prefer an int, fall back to str.
        utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                           "Starting...")
        try:
            return int(value)
        except ValueError:
            utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                               "In ValueError")
            if isinstance(value, unicode):
                utils.write_to_log(
                    'sqllite._dict_row_factory._normalize_port',
                    "Returning str value {0}".format(str(value)))
                return str(value)
            if isinstance(value, str):
                utils.write_to_log(
                    'sqllite._dict_row_factory._normalize_port',
                    "Returning value {0}".format(value))
                return value
            # Not int-convertible and not a string: propagate the error.
            raise

    custom_parsers = {
        'auth': json.loads,
        'port': _normalize_port,
        'alive': lambda v: v != 0,
        'reserved': lambda v: v != 0,
    }

    result = {}
    for position, column in enumerate(cursor.description):
        name = column[0]
        raw = row[position]
        parser = custom_parsers.get(name)
        result[name] = parser(raw) if parser is not None else raw
        utils.write_to_log(
            'sqllite._dict_row_factory',
            "result[{0}] is {1}".format(name, str(result[name])))
    return result
def get_host(self, host_id):
    """Return the single host matching *host_id*, with its keyfile
    contents loaded in place of the keyfile path.

    Raises HostNotFoundException when no host carries that id.
    """
    hosts = self.storage.get_hosts(host_id=host_id)
    if not hosts:
        utils.write_to_log('backend.get_host',
                           "hostnotfound len(hosts) is Zero")
        raise exceptions.HostNotFoundException(host_id)
    match = hosts[0]
    self._load_keyfile(match)
    return match
def _load_keyfile(host):
    # Replace the on-disk keyfile path in host['auth'] with the file's
    # contents, so callers receive the key material directly instead of
    # a path that may not exist on their machine.
    # NOTE(review): call sites invoke this as self._load_keyfile(host),
    # but the signature has no `self` and no @staticmethod decorator is
    # visible here — confirm a decorator exists or that `self` was
    # dropped by mistake.
    if host['auth'].get('keyfile'):
        keyfile = host['auth']['keyfile']
        with open(keyfile) as f:
            utils.write_to_log('backend._load_keyfile',
                               "'reading keyfile {0}".format(keyfile))
            content = f.read()
        host['auth']['keyfile'] = content
def _validate(config):
    """Fail fast when the application configuration lacks 'pool'."""
    if 'pool' in config:
        return
    utils.write_to_log(
        'config._validate',
        "'pool' property is missing from the configuration")
    raise RuntimeError(
        "'pool' property is missing from the configuration")
def _create_table(self):
    """Create the backing table from the schema if it does not exist."""
    with self.connect() as cursor:
        ddl = 'CREATE TABLE IF NOT EXISTS {0} {1}'.format(
            self.TABLE_NAME, self._schema.create())
        utils.write_to_log('_create_table', "sql is {0}".format(ddl))
        cursor.execute(ddl)
        utils.write_to_log('_create_table',
                           "after cursor.execute create_table")
def wrapper(*args, **kwargs):
    """Retry *func* for as long as sqlite reports the database locked.

    Any other OperationalError is wrapped in a StorageException and
    raised immediately; lock conflicts trigger a short sleep and retry.
    """
    while True:
        try:
            return func(*args, **kwargs)
        except sqlite3.OperationalError as e:
            if e.message != 'database is locked':
                # Bug fix: the old log said "database is locked" on the
                # branch taken precisely when the error is NOT a lock
                # conflict; log the real failure instead.
                utils.write_to_log(
                    'wrapper',
                    "unrecoverable sqlite error: {0}".format(e.message))
                raise exceptions.StorageException(e.message)
            # Locked by another writer: back off briefly and retry.
            time.sleep(0.1)
def _get_subnet_and_mask(ip_range):
    """Split a CIDR string such as '10.0.0.0/24' into (subnet, mask).

    Raises ConfigurationError when *ip_range* is not legal CIDR
    notation according to CIDR_REGEX.
    """
    matches = re.compile(CIDR_REGEX).findall(ip_range)
    if len(matches) != 1:
        utils.write_to_log(
            '_get_subnet_and_mask',
            '{0} is not a legal CIDR notation'.format(ip_range))
        raise exceptions.ConfigurationError(
            '{0} is not a legal CIDR notation'.format(ip_range))
    subnet, _, mask = ip_range.partition('/')
    return subnet, mask
def __init__(self, pool):
    """Load, validate and unpack a pool configuration source.

    pool -- either a path to a YAML file or an already-parsed dict.
    """
    utils.write_to_log('YAMLPoolLoader.init', "Starting...")
    config = self._load(pool)
    utils.write_to_log('YAMLPoolLoader.init', "after load")
    self._validate(config)
    utils.write_to_log('YAMLPoolLoader.init', "After validate")
    # Pool-wide defaults merged into each host later on.
    self.default = config.get('default', {})
    utils.write_to_log('YAMLPoolLoader.init', "after config.get default")
    self.hosts = config['hosts']
    utils.write_to_log('YAMLPoolLoader.init', "End")
def list_hosts():
    """List allocated hosts.

    When the request carries the query arg all=yes/true, every host in
    the pool is returned; otherwise only allocated ones.
    """
    value_of_arg_all_key = utils.get_arg_value('all', arg_value='')
    get_all_hosts = value_of_arg_all_key.lower() in ('yes', 'true')
    hosts = backend.list_hosts(get_all_hosts)
    # Bug fix: the log tag previously read 'service.setup' although this
    # is the list_hosts endpoint.
    utils.write_to_log('service.list_hosts',
                       "list_hosts is {0}".format(str(hosts)))
    return jsonify(hosts=hosts), httplib.OK
def load(self):
    """Yield one normalized host dict per configured address.

    Each entry in self.hosts supplies either an explicit 'host' address
    or an 'ip_range' CIDR, which is expanded to one dict per address.
    """
    def _create_host(_host):
        # Closes over `auth`, `port` and `public_address` from the loop
        # below; they are read at call time, within the same iteration
        # that set them, so late binding is safe here.
        if not auth:
            utils.write_to_log(
                '_create_host',
                'Authentication not provided for host: {0}'.format(_host))
            raise exceptions.ConfigurationError(
                'Authentication not provided '
                'for host: {0}'.format(_host))
        if not port:
            utils.write_to_log(
                '_create_host',
                'Port not provided for host: {0}'.format(_host))
            raise exceptions.ConfigurationError(
                'Port not provided for host: {0}'
                .format(_host))
        utils.write_to_log('_create_host', " auth {0}".format(str(auth)))
        utils.write_to_log('_create_host', " port {0}".format(port))
        utils.write_to_log('_create_host', " _host {0}".format(str(_host)))
        utils.write_to_log('_create_host',
                           " public address {0}".format(public_address))
        return {
            'auth': auth,
            'port': port,
            'host': _host,
            'public_address': public_address
        }

    for host in self.hosts:
        port = self._get_port(host)
        auth = self._get_auth(host)
        public_address = host.get('public_address')
        if 'host' in host:
            # NOTE(review): the '_create_host' log tag is reused for
            # logs emitted from `load` itself — presumably copy/paste;
            # confirm intent before relying on tags for filtering.
            utils.write_to_log('_create_host', "'host' is in host")
            # an explicit address is configured for this host
            yield _create_host(host['host'])
        elif 'ip_range' in host:
            # ip range was specified. in this case we create a host
            # dictionary for each ip address separately.
            subnet, mask = _get_subnet_and_mask(host['ip_range'])
            for host_ip in _get_subnet_hosts(subnet, mask):
                yield _create_host(host_ip)
        else:
            utils.write_to_log(
                '_create_host',
                "A host must define either the 'host' or the 'ip_range' key")
            raise exceptions.ConfigurationError(
                "A host must define either the "
                "'host' or the 'ip_range' key")
def __init__(self, storage=None):
    """Open (or create) the sqlite file backing the pool storage.

    storage -- optional path to the database file; defaults to
    'host-pool-data.sqlite' in the current working directory.
    """
    if storage is None:
        utils.write_to_log('sqllite.__init__', "storage is None")
        storage = 'host-pool-data.sqlite'
    self._filename = os.path.abspath(storage)
    utils.write_to_log('sqllite.__init__',
                       "_filename is {0}".format(self._filename))
    self._schema = _create_schema()
    utils.write_to_log('sqllite.__init__', "after create_schema")
    self._create_table()
    utils.write_to_log('sqllite.__init__', "after _create_table")
def acquire_host(self):
    """Reserve, liveness-check and allocate one free host.

    Each free candidate is reserved with a compare-and-set update so
    concurrent threads cannot grab the same host; the first reserved
    host that proves alive gets a fresh host_id and is returned.

    Raises NoHostAvailableException when every candidate is taken or
    dead.
    """
    for host in self._get_free_hosts():
        # Try reserving the host: only rows still carrying
        # reserved=False are updated, in case another thread managed to
        # reserve this host before us.
        _, reserved = self.storage.update_host(
            host['global_id'],
            {'reserved': True},
            {'reserved': False, 'host_id': None})
        # Someone else won the race — try the next candidate.
        if not reserved:
            continue
        # We hold the reservation; verify the host is actually usable.
        host_alive = self._is_alive(host)
        if not host_alive:
            # Dead host: release the reservation and record the state.
            self.storage.update_host(
                host['global_id'],
                {'reserved': False, 'alive': False})
            continue
        # Bug fix: the old `if host_alive:` re-check at this point was
        # always true (the dead case already hit `continue`), so the
        # redundant branch is removed.
        hst, _ = self.storage.update_host(
            host['global_id'],
            {'reserved': False,
             'host_id': str(uuid.uuid4()),
             'alive': True})
        self._load_keyfile(hst)
        return hst
    # We didn't manage to acquire any host.
    utils.write_to_log('backend.acquire_host',
                       "This service didn't manage to acquire any host")
    raise exceptions.NoHostAvailableException()
def update_host(self, global_id, new_values, old_values=None):
    """Compare-and-set update of the row identified by *global_id*.

    new_values -- column/value pairs to write.
    old_values -- optional expected current values; the UPDATE applies
                  only while they still match.

    Returns (row, changed): the row re-read after the update, and a
    bool saying whether the UPDATE modified it.
    """
    if old_values is None:
        old_values = {}
    # Bug fix: copy before injecting global_id so the caller's dict is
    # not mutated as a side effect.
    old_values = dict(old_values)
    old_values['global_id'] = global_id
    with self.connect(exclusive=True) as cursor:
        sql_set = _construct_set_values_sql(new_values)
        sql_con = _construct_and_query_sql(old_values)
        new = _construct_values_tuple(new_values)
        old = _construct_values_tuple(old_values)
        cursor.execute('UPDATE {0} SET {1} WHERE {2}'.format(
            self.TABLE_NAME, sql_set, sql_con), new + old)
        # Bug fix: connection.total_changes counts every change over the
        # connection's lifetime, so `total_changes == 1` is only correct
        # for the first-ever change. cursor.rowcount reports the rows
        # affected by THIS statement.
        changed = cursor.rowcount == 1
        cursor.execute('SELECT * FROM {0} WHERE {1}=?'
                       .format(self.TABLE_NAME,
                               self._schema.primary_key_name),
                       (global_id, ))
        utils.write_to_log('update_host',
                           "after cursor.execute update_host")
        return cursor.fetchone(), changed
def __init__(self, pool, storage=None):
    """Initialize backend storage and perform the one-time pool load.

    A process-level file lock (FLock) plus an on-disk indicator file
    guarantee that only the first process imports the pool.
    """
    utils.write_to_log('RestBackend._init__', "Starting...")
    self.storage = sqlite.SQLiteStorage(storage)
    utils.write_to_log('RestBackend._init__', "After SQLiteStorage")

    def _create_indicator():
        # O_EXCL makes creation fail if the indicator already exists;
        # owner-only permissions. Bug fix for forward compatibility:
        # the literal was the Python-2-only octal form 0600, rewritten
        # as 0o600 (valid since Python 2.6 and required by Python 3).
        fd = os.open(INDICATOR, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
        os.close(fd)

    # allow only one process to do the initial load
    with FLock:
        if not os.path.exists(INDICATOR):
            utils.write_to_log('backend.__init__',
                               "path doesn't exist {0}".format(INDICATOR))
            self._load_pool(pool)
            utils.write_to_log('backend.__init__', "After _load_pool")
            _create_indicator()
            utils.write_to_log('backend.__init__',
                               "After _create_indicator")
def acquire_host(self):
    """Find a free host, reserve it atomically and hand it out.

    Walks the free hosts; reservation uses a conditional update (only
    rows still unreserved change) so two threads cannot acquire the
    same host. A reserved host that fails the liveness check is
    released and marked dead; otherwise it receives a new host_id and
    is returned with its keyfile loaded.

    Raises NoHostAvailableException when nothing could be acquired.
    """
    for host in self._get_free_hosts():
        # Conditional reservation: no-op if another thread already
        # flipped reserved to True.
        _, reserved = self.storage.update_host(
            host['global_id'],
            {'reserved': True},
            {'reserved': False, 'host_id': None})
        if not reserved:
            # Lost the race for this host; keep scanning.
            continue
        # Reservation held — check the host actually responds.
        host_alive = self._is_alive(host)
        if not host_alive:
            # Release the reservation and remember the host is dead.
            self.storage.update_host(
                host['global_id'],
                {'reserved': False, 'alive': False})
            continue
        # Bug fix: dropped the follow-up `if host_alive:` test — after
        # the `continue` above it could never be false.
        hst, _ = self.storage.update_host(
            host['global_id'],
            {'reserved': False,
             'host_id': str(uuid.uuid4()),
             'alive': True})
        self._load_keyfile(hst)
        return hst
    # Every candidate was taken or dead.
    utils.write_to_log('backend.acquire_host',
                       "This service didn't manage to acquire any host")
    raise exceptions.NoHostAvailableException()
def update_host(self, global_id, new_values, old_values=None):
    """Conditionally update one row and return its current contents.

    new_values -- columns to set.
    old_values -- optional guard values that must still match for the
                  UPDATE to apply (compare-and-set semantics).

    Returns (row, changed) where `changed` reflects whether this UPDATE
    statement modified the row.
    """
    if old_values is None:
        old_values = {}
    # Bug fix: work on a copy so the caller's old_values dict does not
    # silently gain a 'global_id' key.
    old_values = dict(old_values)
    old_values['global_id'] = global_id
    with self.connect(exclusive=True) as cursor:
        sql_set = _construct_set_values_sql(new_values)
        sql_con = _construct_and_query_sql(old_values)
        new = _construct_values_tuple(new_values)
        old = _construct_values_tuple(old_values)
        cursor.execute(
            'UPDATE {0} SET {1} WHERE {2}'.format(self.TABLE_NAME,
                                                  sql_set,
                                                  sql_con), new + old)
        # Bug fix: `connection.total_changes == 1` compares against a
        # connection-lifetime counter and is wrong after the first
        # change; cursor.rowcount is scoped to this statement.
        changed = cursor.rowcount == 1
        cursor.execute(
            'SELECT * FROM {0} WHERE {1}=?'.format(
                self.TABLE_NAME, self._schema.primary_key_name),
            (global_id, ))
        utils.write_to_log('update_host',
                           "after cursor.execute update_host")
        return cursor.fetchone(), changed
def setup():
    """Create the flask application and the REST backend.

    The configuration path comes from the HOST_POOL_SERVICE_CONFIG_PATH
    environment variable; a missing variable is a fatal
    ConfigurationError.
    """
    global app, backend
    # initialize flask application
    app = Flask(__name__)
    Api(app)
    # load the application configuration file, if one is configured
    config_file_path = os.environ.get('HOST_POOL_SERVICE_CONFIG_PATH')
    if not config_file_path:
        utils.write_to_log('service.setup', "Failed loading application: " \
            "HOST_POOL_SERVICE_CONFIG_PATH environment variable is not defined. " \
            "Use this variable to point to the application configuration file ")
        raise exceptions.ConfigurationError(
            'Failed loading application: '
            'HOST_POOL_SERVICE_CONFIG_PATH environment '
            'variable is not defined. Use this variable to '
            'point to the application configuration file ')
    utils.write_to_log('service.setup',
                       "config_file_path {0}".format(config_file_path))
    with open(config_file_path) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags — prefer yaml.safe_load unless the config file
        # is fully trusted.
        yaml_conf = yaml.load(f.read())
    config.configure(yaml_conf)
    utils.write_to_log(
        'service.setup',
        "config_file_path {0} after configure".format(config_file_path))
    # initialize application backend
    backend = rest_backend.RestBackend(pool=config.get().pool)
def _load(pool):
    """Return the pool configuration as a dict.

    Accepts either an already-parsed dict (returned as-is) or a file
    path whose YAML contents are parsed; anything else raises
    ConfigurationError.
    """
    if isinstance(pool, dict):
        utils.write_to_log('_load', "isinstance pool dict")
        return pool
    if isinstance(pool, str):
        utils.write_to_log('_load', "isinstance")
        with open(pool, 'r') as config_file:
            utils.write_to_log('_load',
                               "open pool {0} config_file".format(pool))
            # NOTE(review): yaml.load without a Loader is unsafe for
            # untrusted files; consider yaml.safe_load.
            return yaml.load(config_file)
    utils.write_to_log(
        '_load',
        "Unexpected pool configuration type: '{0}'".format(type(pool)))
    raise exceptions.ConfigurationError(
        'Unexpected pool configuration '
        'type: {0}'.format(type(pool)))
def _dict_row_factory(cursor, row):
    """sqlite row factory: map column names to decoded values.

    'auth' is JSON-decoded, 'port' normalized to int when possible,
    'alive'/'reserved' converted from 0/1 to bool; other columns pass
    through unchanged.
    """
    def _normalize_port(value):
        # Stored as text; return an int when the text is numeric.
        utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                           "Starting...")
        try:
            return int(value)
        except ValueError:
            utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                               "In ValueError")
            if isinstance(value, unicode):
                utils.write_to_log(
                    'sqllite._dict_row_factory._normalize_port',
                    "Returning str value {0}".format(str(value)))
                return str(value)
            if isinstance(value, str):
                utils.write_to_log(
                    'sqllite._dict_row_factory._normalize_port',
                    "Returning value {0}".format(value))
                return value
            raise

    parsers = {'auth': json.loads,
               'port': _normalize_port,
               'alive': lambda flag: flag != 0,
               'reserved': lambda flag: flag != 0}
    decoded = {}
    for index, column in enumerate(cursor.description):
        name = column[0]
        content = row[index]
        if name in parsers:
            decoded[name] = parsers[name](content)
        else:
            decoded[name] = content
        utils.write_to_log(
            'sqllite._dict_row_factory',
            "result[{0}] is {1}".format(name, str(decoded[name])))
    return decoded
def _normalize_port(value):
    """Coerce a stored port value to int when possible, else return it
    as a plain (byte) string; re-raise for non-string non-numerics."""
    utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                       "Starting...")
    try:
        return int(value)
    except ValueError:
        utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                           "In ValueError")
        # str and unicode are disjoint types here, so check order is
        # immaterial.
        if isinstance(value, str):
            utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                               "Returning value {0}".format(value))
            return value
        if isinstance(value, unicode):
            utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                               "Returning str value {0}".format(str(value)))
            return str(value)
        raise
def _check_connection(sock):
    """Return True when *sock* completed its non-blocking connect.

    ENOTCONN from getpeername() means the connect failed (False); any
    other socket error propagates. The socket is always closed.
    """
    try:
        utils.write_to_log('_check_connection', "Start")
        sock.getpeername()
        utils.write_to_log('_check_connection', "After getpeername")
    except socket.error as err:
        utils.write_to_log('_check_connection', "socket.error")
        if err.errno != errno.ENOTCONN:
            raise
        connected = False
    else:
        connected = True
    finally:
        sock.close()
    utils.write_to_log('_check_connection', "End")
    return connected
def _normalize_port(value):
    """Best-effort int conversion for a stored port value.

    Falls back to the string itself (unicode down-cast to str) and
    re-raises the ValueError for anything else.
    """
    utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                       "Starting...")
    try:
        return int(value)
    except ValueError:
        utils.write_to_log('sqllite._dict_row_factory._normalize_port',
                           "In ValueError")
        if isinstance(value, unicode):
            utils.write_to_log(
                'sqllite._dict_row_factory._normalize_port',
                "Returning str value {0}".format(str(value)))
            return str(value)
        if isinstance(value, str):
            utils.write_to_log(
                'sqllite._dict_row_factory._normalize_port',
                "Returning value {0}".format(value))
            return value
        # Neither numeric text nor a string — surface the original error.
        raise
def _get_auth(self, host):
    """Merge pool-default auth with the host's own auth section.

    Host values win over defaults. A configured keyfile must exist and
    be readable, otherwise ConfigurationError is raised.
    """
    utils.write_to_log('get_auth', "Starting...")
    merged = copy.deepcopy(self.default.get('auth', {}))
    merged.update(host.get('auth', {}))
    keyfile = merged.get('keyfile')
    utils.write_to_log('get_auth', "keyfile is {0}".format(keyfile))
    if keyfile and not os.access(keyfile, os.R_OK):
        utils.write_to_log('get_auth',
                           "keyfile {0} no access".format(keyfile))
        raise exceptions.ConfigurationError(
            'Key file {0} does not exist or does not have '
            'the proper permissions'.format(keyfile))
    return merged
def _create_schema():
    """Build the host-pool table schema: integer global_id primary key
    plus the per-host columns."""
    schema = SQLiteSchema(primary_key_name='global_id',
                          primary_key_type='integer')
    utils.write_to_log('sqllite._create_schema', "Starting...")
    utils.write_to_log('sqllite._create_schema', "B4 add_column...")
    # Booleans (alive/reserved) are stored as integers; port is text
    # and normalized on read.
    for column_name, column_type in (('host_id', 'text'),
                                     ('host', 'text'),
                                     ('public_address', 'text'),
                                     ('auth', 'text'),
                                     ('port', 'text'),
                                     ('alive', 'integer'),
                                     ('reserved', 'integer')):
        schema.add_column(column_name, column_type)
    utils.write_to_log('sqllite._create_schema', "B4 return schema")
    return schema
def add_host(self, host):
    """Insert *host* as a new row and write the generated primary key
    back into the dict under the schema's primary-key name."""
    with self.connect() as cursor:
        column_names = host.keys()
        values = _construct_values_tuple(host)
        insert_sql = 'INSERT INTO {0} ({1}) VALUES({2})'.format(
            self.TABLE_NAME,
            ', '.join(column_names),
            self._schema.wilds)
        utils.write_to_log('add_host', "sql is {0}".format(insert_sql))
        cursor.execute(insert_sql, values)
        utils.write_to_log('add_host', "after cursor.execute add_host")
        host[self._schema.primary_key_name] = cursor.lastrowid
        utils.write_to_log(
            'add_host',
            "key {0} is {1}".format(self._schema.primary_key_name,
                                    cursor.lastrowid))
def add_host(self, host):
    """Persist a new host row; mutates *host* by storing the new
    rowid under the primary-key column name."""
    with self.connect() as cursor:
        column_names = host.keys()
        values = _construct_values_tuple(host)
        values_wild = self._schema.wilds
        statement = 'INSERT INTO {0} ({1}) VALUES({2})'.format(
            self.TABLE_NAME, ', '.join(column_names), values_wild)
        utils.write_to_log('add_host', "sql is {0}".format(statement))
        cursor.execute(statement, values)
        utils.write_to_log('add_host', "after cursor.execute add_host")
        # Hand the auto-generated rowid back to the caller.
        pk_name = self._schema.primary_key_name
        host[pk_name] = cursor.lastrowid
        utils.write_to_log(
            'add_host',
            "key {0} is {1}".format(pk_name, cursor.lastrowid))
def _create_schema():
    """Return the SQLiteSchema describing the host-pool table."""
    schema = SQLiteSchema(
        primary_key_name='global_id',
        primary_key_type='integer')
    utils.write_to_log('sqllite._create_schema', "Starting...")
    utils.write_to_log('sqllite._create_schema', "B4 add_column...")
    # Text columns first, then the two integer-backed boolean flags.
    schema.add_column('host_id', 'text')
    schema.add_column('host', 'text')
    schema.add_column('public_address', 'text')
    schema.add_column('auth', 'text')
    schema.add_column('port', 'text')
    schema.add_column('alive', 'integer')
    schema.add_column('reserved', 'integer')
    utils.write_to_log('sqllite._create_schema', "B4 return schema")
    return schema
def _load_pool(self, pool):
    """Parse the pool configuration and seed storage with every host."""
    utils.write_to_log('backend._load_pool', "Starting...")
    config_loader = yaml_pool.YAMLPoolLoader(pool)
    utils.write_to_log('backend._load_pool', "after config_loader")
    hosts = config_loader.load()
    utils.write_to_log('backend._load_pool', "after config_loader.load()")
    for host in hosts:
        utils.write_to_log('backend._load_pool', "host ...")
        # Every host starts unallocated; liveness and reservation state
        # are updated over time at runtime.
        host.update({'alive': False,
                     'reserved': False,
                     'host_id': None})
        utils.write_to_log('backend._load_pool', "b4 add_host ...")
        self.storage.add_host(host)
        utils.write_to_log('backend._load_pool', "after add_host")
def _scan(endpoints):
    """Probe (host, port) endpoints for TCP connectability.

    Returns a dict mapping (host, port) -> bool. Connections are opened
    non-blocking via _init_connection; pending ones are resolved through
    _wait_for_any_change / _check_connection.
    """
    # results = a dict indexed with a tuple (host, port) and containing
    # a bool indicating if the endpoint is connectible.
    results = {}
    # sockets = a dict indexed with a file descriptor and containing
    # *nested* tuples (socket object, (host, port)).
    sockets = {}
    utils.write_to_log('scan._scan', "Starting... ")
    try:
        utils.write_to_log('scan._scan', "Iterating endpoints")
        for host, port in endpoints:
            gai_args = [host, port,
                        socket.AF_UNSPEC,  # Using AF_UNSPEC will allow
                                           # scanning both IPv4 and
                                           # IPv6 endpoints.
                        socket.SOCK_STREAM]
            try:
                utils.write_to_log('scan._scan', "b4 getaddrinfo...")
                gai_res = socket.getaddrinfo(*gai_args)
                utils.write_to_log('scan._scan', "after getaddrinfo...")
            except socket.gaierror:
                utils.write_to_log('scan._scan', "gaierror")
                results[host, port] = False
                # NOTE(review): this `break` abandons ALL remaining
                # endpoints after one unresolvable name — confirm it
                # should not be `continue`.
                break
            for r in gai_res:
                sock, is_open = _init_connection(r)
                if is_open:
                    # Connected immediately.
                    results[host, port] = True
                    break
                elif sock:
                    # Connect still in progress: park the socket for the
                    # wait loop below. Yes, a nested tuple.
                    sockets[sock.fileno()] = sock, (host, port)
                    break
            # for/else: no addrinfo produced a socket at all.
            else:
                results[host, port] = False
        while sockets:
            utils.write_to_log('scan._scan', "b4 _wait_for_any_change")
            fds = _wait_for_any_change(sockets.keys())
            utils.write_to_log('scan._scan', "after _wait_for_any_change")
            for fd in fds:
                sock, host_and_port = sockets[fd]
                results[host_and_port] = _check_connection(sock)
                del sockets[fd]
    finally:
        # Close anything still pending when we exit (incl. on error).
        for s, _ in sockets.itervalues():
            s.close()
    utils.write_to_log('scan._scan', "End b4 result")
    return results
def _load_pool(self, pool):
    """Import every configured host into storage with a clean initial
    (unallocated, not-yet-checked) state."""
    utils.write_to_log('backend._load_pool', "Starting...")
    loader = yaml_pool.YAMLPoolLoader(pool)
    utils.write_to_log('backend._load_pool', "after config_loader")
    loaded_hosts = loader.load()
    utils.write_to_log('backend._load_pool', "after config_loader.load()")
    for entry in loaded_hosts:
        utils.write_to_log('backend._load_pool', "host ...")
        # Initial values for the host; these are updated over time.
        entry['alive'] = False
        entry['reserved'] = False
        entry['host_id'] = None
        utils.write_to_log('backend._load_pool', "b4 add_host ...")
        self.storage.add_host(entry)
        utils.write_to_log('backend._load_pool', "after add_host")
def _init_connection_2(address_family, socket_type, protocol, address_tuple):
    """Start a non-blocking TCP connect to *address_tuple*.

    Returns:
      (socket, False) -- connect still in progress (EINPROGRESS);
      (None, True)    -- connect completed immediately;
      (None, False)   -- socket creation or connect failed outright.
    """
    try:
        utils.write_to_log('_init_connection_2', "starting ...")
        sock = socket.socket(address_family, socket_type, protocol)
        utils.write_to_log('_init_connection_2', "after socket.socket")
    except socket.error:
        return None, False
    # Flip the descriptor to non-blocking before connecting.
    descriptor = sock.fileno()
    flags = fcntl.fcntl(descriptor, fcntl.F_GETFL) | os.O_NONBLOCK
    fcntl.fcntl(descriptor, fcntl.F_SETFL, flags)
    try:
        utils.write_to_log('_init_connection_2', "b4 connect")
        sock.connect(address_tuple)
        utils.write_to_log('_init_connection_2', "after connect")
    except socket.error as err:
        utils.write_to_log('_init_connection_2', "in socket error")
        if err.errno == errno.EINPROGRESS:
            # Expected for non-blocking connect: caller must wait on it.
            return sock, False
        utils.write_to_log('_init_connection_2',
                           "b4 sock close in socket.error #1")
        sock.close()
        utils.write_to_log('_init_connection_2',
                           "after sock close in socket.error #1")
        return None, False
    # Connect finished synchronously (e.g. loopback).
    utils.write_to_log('_init_connection_2', "b4 sock close")
    sock.close()
    utils.write_to_log('_init_connection_2', "after sock close")
    return None, True
def list_hosts(self, all_hosts=False):
    """Return pool hosts; only allocated ones unless *all_hosts*."""
    utils.write_to_log('backend.list_hosts', "Starting...")
    hosts = self.storage.get_hosts()
    utils.write_to_log('backend.list_hosts',
                       "hosts is empty :{0}".format(hosts is None))
    # Debug dump of every host and each of its fields.
    if hosts is not None:
        for curr_host in hosts:
            utils.write_to_log(
                'backend.list_hosts',
                "Looping over host {0}".format(str(curr_host)))
            for curr_key in curr_host:
                utils.write_to_log(
                    'backend.list_hosts',
                    "host['{0}'] is {1}".format(curr_key,
                                                str(curr_host[curr_key])))
        utils.write_to_log('backend.list_hosts', "End loop")
    if all_hosts:
        utils.write_to_log('backend.list_hosts', "Returning all hosts")
        return hosts
    utils.write_to_log('backend.list_hosts',
                       "Returning only allocated hosts")
    # Allocated hosts are the ones carrying a host_id.
    return filter(lambda host: host['host_id'], hosts)
def _validate(config):
    """Raise RuntimeError when the configuration has no 'pool' entry."""
    if 'pool' not in config:
        utils.write_to_log(
            'config._validate',
            "'pool' property is missing from the configuration")
        raise RuntimeError(
            "'pool' property is missing from the configuration")
def _scan(endpoints):
    """Check each (host, port) endpoint for TCP connectability.

    Opens non-blocking connections and waits for the pending ones,
    returning a dict of (host, port) -> bool.
    """
    # results = a dict indexed with a tuple (host, port) and containing
    # a bool indicating if the endpoint is connectible.
    results = {}
    # sockets = a dict indexed with a file descriptor and containing
    # *nested* tuples (socket object, (host, port)).
    sockets = {}
    utils.write_to_log('scan._scan', "Starting... ")
    try:
        utils.write_to_log('scan._scan', "Iterating endpoints")
        for host, port in endpoints:
            gai_args = [
                host, port,
                socket.AF_UNSPEC,  # Using AF_UNSPEC will allow
                                   # scanning both IPv4 and
                                   # IPv6 endpoints.
                socket.SOCK_STREAM
            ]
            try:
                utils.write_to_log('scan._scan', "b4 getaddrinfo...")
                gai_res = socket.getaddrinfo(*gai_args)
                utils.write_to_log('scan._scan', "after getaddrinfo...")
            except socket.gaierror:
                utils.write_to_log('scan._scan', "gaierror")
                results[host, port] = False
                # NOTE(review): `break` stops the whole endpoint scan on
                # one name-resolution failure; verify `continue` was not
                # intended here.
                break
            for r in gai_res:
                sock, is_open = _init_connection(r)
                if is_open:
                    # Immediate success for this endpoint.
                    results[host, port] = True
                    break
                elif sock:
                    # Pending connect; resolved by the loop below.
                    # Yes, a nested tuple.
                    sockets[sock.fileno()] = sock, (host, port)
                    break
            # for/else runs when no break fired: all addresses failed.
            else:
                results[host, port] = False
        while sockets:
            utils.write_to_log('scan._scan', "b4 _wait_for_any_change")
            fds = _wait_for_any_change(sockets.keys())
            utils.write_to_log('scan._scan', "after _wait_for_any_change")
            for fd in fds:
                sock, host_and_port = sockets[fd]
                results[host_and_port] = _check_connection(sock)
                del sockets[fd]
    finally:
        # Ensure leftover pending sockets are closed on every exit path.
        for s, _ in sockets.itervalues():
            s.close()
    utils.write_to_log('scan._scan', "End b4 result")
    return results
def get_hosts(self, **filters):
    """Return all host rows, optionally filtered.

    filters -- column/value pairs ANDed together into the WHERE clause.
    """
    utils.write_to_log('sqllite.get_hosts', "Starting...")
    with self.connect() as cursor:
        if not filters:
            utils.write_to_log('sqllite.get_hosts',
                               "Filter is None or false")
            cursor.execute('SELECT * FROM {0}'.format(self.TABLE_NAME))
            utils.write_to_log('sqllite.get_hosts', "After cursor.execute")
        else:
            sql_cond = _construct_and_query_sql(filters)
            utils.write_to_log('sqllite.get_hosts',
                               "sql_cond is {0}".format(sql_cond))
            values = _construct_values_tuple(filters)
            cursor.execute(
                'SELECT * FROM {0} WHERE {1}'.format(
                    self.TABLE_NAME, sql_cond), values)
            utils.write_to_log(
                'sqllite.get_hosts',
                "After cursor.execute with sql_cond {0}".format(sql_cond))
        # Bug fix: the "End" log used to sit after the return statement,
        # making it unreachable dead code; it now runs before returning.
        utils.write_to_log('sqllite.get_hosts', "End")
        return list(cursor.fetchall())
def list_hosts(self, all_hosts=False):
    """List hosts in the pool.

    all_hosts -- when True return every host; otherwise only hosts that
    currently hold an allocation (a non-empty host_id).
    """
    utils.write_to_log('backend.list_hosts', "Starting...")
    hosts = self.storage.get_hosts()
    utils.write_to_log('backend.list_hosts',
                       "hosts is empty :{0}".format(hosts is None))
    if hosts is not None:
        # Verbose debug trace of each host and its fields.
        for curr_host in hosts:
            utils.write_to_log('backend.list_hosts',
                               "Looping over host {0}".format(str(curr_host)))
            for curr_key in curr_host:
                utils.write_to_log('backend.list_hosts',
                                   "host['{0}'] is {1}".
                                   format(curr_key,
                                          str(curr_host[curr_key])))
        utils.write_to_log('backend.list_hosts', "End loop")
    if all_hosts:
        utils.write_to_log('backend.list_hosts', "Returning all hosts")
        return hosts
    utils.write_to_log('backend.list_hosts',
                       "Returning only allocated hosts")
    return filter(lambda host: host['host_id'], hosts)
def _create_host(_host):
    """Build the host dict for address *_host*.

    Reads `auth`, `port` and `public_address` from the enclosing scope;
    raises ConfigurationError when auth or port is missing.
    """
    if not auth:
        utils.write_to_log(
            '_create_host',
            'Authentication not provided for host: {0}'.format(_host))
        raise exceptions.ConfigurationError(
            'Authentication not provided '
            'for host: {0}'.format(_host))
    if not port:
        utils.write_to_log(
            '_create_host',
            'Port not provided for host: {0}'.format(_host))
        raise exceptions.ConfigurationError(
            'Port not provided for host: {0}'
            .format(_host))
    utils.write_to_log('_create_host', " auth {0}".format(str(auth)))
    utils.write_to_log('_create_host', " port {0}".format(port))
    utils.write_to_log('_create_host', " _host {0}".format(str(_host)))
    utils.write_to_log('_create_host',
                       " public address {0}".format(public_address))
    return {'auth': auth,
            'port': port,
            'host': _host,
            'public_address': public_address}
def get_hosts(self, **filters):
    """Fetch host rows, filtered by equality on the given columns.

    With no filters, every row is returned.
    """
    utils.write_to_log('sqllite.get_hosts', "Starting...")
    with self.connect() as cursor:
        if not filters:
            utils.write_to_log('sqllite.get_hosts',
                               "Filter is None or false")
            cursor.execute('SELECT * FROM {0}'.format(self.TABLE_NAME))
            utils.write_to_log('sqllite.get_hosts', "After cursor.execute")
        else:
            sql_cond = _construct_and_query_sql(filters)
            utils.write_to_log('sqllite.get_hosts',
                               "sql_cond is {0}".format(sql_cond))
            values = _construct_values_tuple(filters)
            cursor.execute('SELECT * FROM {0} WHERE {1}'
                           .format(self.TABLE_NAME, sql_cond), values)
            utils.write_to_log('sqllite.get_hosts',
                               "After cursor.execute with sql_cond "
                               "{0}".format(sql_cond))
        # Bug fix: the "End" log was placed after `return` and could
        # never execute; emit it before handing the rows back.
        utils.write_to_log('sqllite.get_hosts', "End")
        return list(cursor.fetchall())
def _validate(config):
    """Reject pool configurations that lack a 'hosts' section."""
    if 'hosts' in config:
        return
    utils.write_to_log('_validate',
                       "Pool configuration is missing a hosts section")
    raise exceptions.ConfigurationError(
        'Pool configuration '
        'is missing a hosts section')