def setup():
    """Authenticate to Rackspace and resolve the list of regions to query.

    Credentials come from ansible.cfg ([rax] section) or the RAX_USERNAME /
    RAX_API_KEY environment variables.  Returns a list of upper-cased region
    names; the special value 'ALL' expands to every region pyrax knows about.
    Exits the process with status 1 on an unsupported region name.
    """
    rax_username = get_config(p, 'rax', 'username', 'RAX_USERNAME', None)
    rax_api_key = get_config(p, 'rax', 'api_key', 'RAX_API_KEY', None)

    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credentials(rax_username, rax_api_key)

    region = pyrax.get_setting('region')

    regions = []
    if region:
        regions.append(region)
    else:
        region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                 islist=True)
        for region in region_list:
            region = region.strip().upper()
            if region == 'ALL':
                # Fix: normalize to a list so the return type is consistent
                # with the append() branch (pyrax.regions is not a list).
                regions = list(pyrax.regions)
                break
            elif region not in pyrax.regions:
                # Fix: terminate the diagnostic with a newline so it is not
                # glued to subsequent terminal output.
                sys.stderr.write('Unsupported region %s\n' % region)
                sys.exit(1)
            elif region not in regions:
                regions.append(region)

    return regions
def get_paths(self, vars):
    """Build the ordered list of candidate directories for file lookups.

    Order matters: the task basedir first, then the directory of the file
    that included this one, then the playbook dir, then any configured
    ``lookup_file_paths`` that exist on disk.  Returns the list with
    duplicates removed, keeping first occurrences.
    """
    paths = []
    basedir = self.get_basedir(vars)
    paths.append(self._loader.path_dwim(basedir))
    if '_original_file' in vars:
        paths.append(self._loader.path_dwim_relative(basedir, '', vars['_original_file']))
    if 'playbook_dir' in vars:
        paths.append(vars['playbook_dir'])
    try:
        # Ansible 2.4
        lookupPaths = C.config.get_config_value('lookup_file_paths', None, 'lookup', 'available_files')
    except AttributeError:
        # Ansible 2.3
        # NOTE(review): if this call raises TypeError on older Ansible,
        # the sibling `except TypeError` below does NOT catch it (it only
        # guards the first call) -- the 2.2 fallback may be unreachable;
        # verify against the supported Ansible versions.
        lookupPaths = C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], value_type='list')
    except TypeError:
        # Ansible 2.2.x and below
        lookupPaths = C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], islist=True)
    for path in lookupPaths:
        path = utils.path.unfrackpath(path)
        if os.path.exists(path):
            paths.append(path)
    # Order-preserving de-duplication; the comprehension is used purely for
    # its side effect of appending unseen items.
    unq = []
    [unq.append(i) for i in paths if not unq.count(i)]
    return unq
def test_value_type_float(self, cfgparser):
    """get_config coerces both string and numeric inputs to float."""
    cases = [('10', 10.0), (10, 10.0), ('11.5', 11.5), (11.5, 11.5)]
    for raw, expected in cases:
        result = constants.get_config(cfgparser, 'defaults', 'unknown',
                                      'ANSIBLE_TEST_VAR', raw,
                                      value_type='float')
        assert result == expected
def ara_config(key, env_var, default, section='ara', value_type=None): """ Wrapper around Ansible's get_config backward/forward compatibility """ # Bootstrap Ansible configuration # Ansible >=2.4 takes care of loading the configuration file itself if LooseVersion(ansible_version) < LooseVersion('2.4.0'): config, path = load_config_file() else: path = find_ini_config_file() config = configparser.ConfigParser() if path is not None: config.read(path) # >= 2.3.0.0 (NOTE: Ansible trunk versioning scheme has 3 digits, not 4) if LooseVersion(ansible_version) >= LooseVersion('2.3.0'): return get_config(config, section, key, env_var, default, value_type=value_type) # < 2.3.0.0 compatibility if value_type is None: return get_config(config, section, key, env_var, default) args = { 'boolean': dict(boolean=True), 'integer': dict(integer=True), 'list': dict(islist=True), 'tmppath': dict(istmppath=True) } return get_config(config, section, key, env_var, default, **args[value_type])
def setup():
    """Authenticate pyrax and return the list of regions to inventory.

    Credential sources, in order: RAX_CREDS_FILE / [rax] creds_file, the
    default ~/.rackspace_cloud_credentials file, or a pyrax keyring
    username.  Returns upper-cased region names; 'ALL' expands to
    pyrax.regions.  Exits with status 1 on auth or region errors.
    """
    default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')

    env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
    if env:
        pyrax.set_environment(env)

    keyring_username = pyrax.get_setting('keyring_username')

    # Attempt to grab credentials from environment first
    creds_file = get_config(p, 'rax', 'creds_file', 'RAX_CREDS_FILE', None)
    if creds_file is not None:
        creds_file = os.path.expanduser(creds_file)
    else:
        # But if that fails, use the default location of
        # ~/.rackspace_cloud_credentials
        if os.path.isfile(default_creds_file):
            creds_file = default_creds_file
        elif not keyring_username:
            sys.stderr.write('No value in environment variable %s and/or no '
                             'credentials file at %s\n'
                             % ('RAX_CREDS_FILE', default_creds_file))
            sys.exit(1)

    identity_type = pyrax.get_setting('identity_type')
    pyrax.set_setting('identity_type', identity_type or 'rackspace')

    region = pyrax.get_setting('region')

    try:
        if keyring_username:
            pyrax.keyring_auth(keyring_username, region=region)
        else:
            pyrax.set_credential_file(creds_file, region=region)
    except Exception as e:
        # NOTE(review): e.message is deprecated/absent on Python 3.
        sys.stderr.write("%s: %s\n" % (e, e.message))
        sys.exit(1)

    regions = []
    if region:
        regions.append(region)
    else:
        region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                 islist=True)
        for region in region_list:
            region = region.strip().upper()
            if region == 'ALL':
                regions = pyrax.regions
                break
            elif region not in pyrax.regions:
                # NOTE(review): message lacks a trailing newline.
                sys.stderr.write('Unsupported region %s' % region)
                sys.exit(1)
            elif region not in regions:
                regions.append(region)

    return regions
def __init__(self):
    """Read the Opsmatic endpoint and token from ansible.cfg or the env.

    Warns (without failing) when no integration token is configured, since
    the callback is then a no-op.
    """
    self.opsmatic_http = get_config(
        p, "opsmatic", "opsmatic_http", "OPSMATIC_API_HTTP",
        "https://api.opsmatic.com")
    self.token = get_config(
        p, "opsmatic", "integration_token", "OPSMATIC_INTEGRATION_TOKEN", "")
    self.have_creds = self.token != ""
    if not self.have_creds:
        # Fix: the original implicitly-concatenated string had no separating
        # spaces between its parts and misspelled "variable", producing a
        # garbled warning message.
        utils.warning(
            "Opsmatic token is not set, so no events will be sent. "
            "It can be set via the `integration_token` variable in the "
            "[opsmatic] section of ansible.cfg OR via the "
            "OPSMATIC_INTEGRATION_TOKEN environment variable")
def test_value_type_integer(self, cfgparser):
    """get_config coerces both string and int inputs to int."""
    for raw in ('10', 10):
        assert constants.get_config(cfgparser, 'defaults', 'unknown',
                                    'ANSIBLE_TEST_VAR', raw,
                                    value_type='integer') == 10
def test_value_type_none(self, cfgparser):
    """Both the string 'None' and an actual None map to None."""
    for raw in ('None', None):
        assert constants.get_config(cfgparser, 'defaults', 'unknown',
                                    'ANSIBLE_TEST_VAR', raw,
                                    value_type='none') is None
def test_from_default(self, cfgparser):
    """An unknown key or section falls back to the supplied default."""
    lookups = [('defaults', 'unknown'), ('unknown', 'defaults_one')]
    for section, key in lookups:
        assert constants.get_config(cfgparser, section, key,
                                    'ANSIBLE_TEST_VAR', 'foo',
                                    value_type=None) == u'foo'
def test_from_config_file(self, cfgparser):
    """Values present in the ini file win over the supplied default."""
    expectations = [
        ('defaults', 'defaults_one', 'data_defaults_one'),
        ('level1', 'level1_one', 'data_level1_one'),
    ]
    for section, key, expected in expectations:
        assert constants.get_config(cfgparser, section, key,
                                    'ANSIBLE_TEST_VAR', 'foo',
                                    value_type=None) == expected
def __init__(self, inventory):
    """Capture inventory context and load var-plugin settings from ansible.cfg."""
    self.inventory = inventory
    self.inventory_basedir = inventory.basedir()
    config, _cfg_path = load_config_file()
    # Whether vars should be templated before merging.
    self.pre_template_enabled = get_config(
        config, DEFAULTS, 'var_pre_template', 'ANSIBLE_VAR_PRE_TEMPLATE',
        False, boolean=True)
    # Optional glob pointing at defaults files.
    self.defaults_glob = get_config(
        config, DEFAULTS, 'var_defaults_glob', 'ANSIBLE_VAR_DEFAULTS_GLOB',
        None)
    self._templar = None
def test_value_type_list(self, cfgparser):
    """A comma-separated string and a real list both yield the same list."""
    expected = ['one', 'two', 'three']
    for raw in ('one,two,three', ['one', 'two', 'three']):
        result = constants.get_config(cfgparser, 'defaults', 'unknown',
                                      'ANSIBLE_TEST_VAR', raw,
                                      value_type='list')
        assert result == expected
def test_value_type_path(self, cfgparser, user, cfg_file):
    """'path' expands ~; bare relatives stay as-is unless expansion is asked for."""
    def lookup(value, **extra):
        return constants.get_config(cfgparser, 'defaults', 'unknown',
                                    'ANSIBLE_TEST_VAR', value,
                                    value_type='path', **extra)

    assert lookup('~/local') == os.path.join(user['home'], 'local')
    assert lookup('local') == 'local'
    assert lookup('local', expand_relative_paths=True) == \
        os.path.join(cfg_file, 'local')
def get_paths(self, inject):
    """Assemble lookup search paths: configured extras first, then role
    template/vars dirs, the playbook dir, and finally the task basedir.
    Returns the list de-duplicated, first occurrence kept."""
    paths = []
    for path in C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], islist=True):
        path = utils.unfrackpath(path)
        if os.path.exists(path):
            paths.append(path)
    if '_original_file' in inject:
        # check the templates and vars directories too,
        # if they exist
        for roledir in ('templates', 'vars'):
            path = utils.path_dwim(self.basedir, os.path.join(self.basedir, '..', roledir))
            if os.path.exists(path):
                paths.append(path)
    if 'playbook_dir' in inject:
        paths.append(inject['playbook_dir'])
    paths.append(utils.path_dwim(self.basedir, ''))
    # Order-preserving de-duplication; the comprehension is used purely for
    # its side effect of appending unseen items.
    unq = []
    [unq.append(i) for i in paths if not unq.count(i)]
    return unq
def test_from_env_var(self, cfgparser):
    """The environment variable wins over both the config file and default."""
    os.environ['ANSIBLE_TEST_VAR'] = 'bar'
    try:
        assert constants.get_config(cfgparser, 'defaults', 'unknown',
                                    'ANSIBLE_TEST_VAR', 'foo',
                                    value_type=None) == 'bar'
        assert constants.get_config(cfgparser, 'unknown', 'defaults_one',
                                    'ANSIBLE_TEST_VAR', 'foo',
                                    value_type=None) == 'bar'
    finally:
        # Fix: always scrub the env var, even when an assertion fails, so a
        # failure here cannot leak state into later tests.
        del os.environ['ANSIBLE_TEST_VAR']
def test_configfile_not_set_env_not_set(self):
    """With neither a config key nor an env var present, the default wins."""
    suffix = random_string(6)
    env_var = 'ANSIBLE_TEST_%s' % suffix
    assert env_var not in os.environ
    assert get_config(p, 'defaults', 'doesnt_exist', env_var,
                      'default') == 'default'
def test_configfile_and_env_both_set(self):
    """The environment variable takes precedence over the config file."""
    r = random_string(6)
    env_var = 'ANSIBLE_TEST_%s' % r
    os.environ[env_var] = r
    try:
        res = get_config(p, 'defaults', 'test_key', env_var, 'default')
    finally:
        # Fix: clean up even if get_config raises, so the variable cannot
        # contaminate later tests.
        del os.environ[env_var]
    assert res == r
def test_configfile_not_set_env_set(self):
    """A missing config key falls through to the environment variable."""
    r = random_string(6)
    env_var = 'ANSIBLE_TEST_%s' % r
    os.environ[env_var] = r
    try:
        res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')
    finally:
        # Fix: clean up even if get_config raises, so the variable cannot
        # contaminate later tests.
        del os.environ[env_var]
    assert res == r
def test_configfile_set_env_not_set(self):
    """With no env override, the value from the config file wins."""
    r = random_string(6)
    env_var = 'ANSIBLE_TEST_%s' % r
    assert env_var not in os.environ
    res = get_config(p, 'defaults', 'test_key', env_var, 'default')
    # Fix: removed the stray Python-2-only `print res` debug statement,
    # which is a SyntaxError under Python 3 and noise under Python 2.
    assert res == 'test_value'
def test_value_type_boolean(self, cfgparser):
    """'on'/True are truthy; any unrecognized string is False."""
    for raw, expected in (('on', True), (True, True), ('other', False)):
        assert constants.get_config(cfgparser, 'defaults', 'unknown',
                                    'ANSIBLE_TEST_VAR', raw,
                                    value_type='boolean') is expected
def _list(regions, refresh_cache=True):
    """Print the inventory JSON for *regions*, refreshing the on-disk cache
    when it is missing, stale, or a refresh was explicitly requested."""
    max_age = int(get_config(p, 'rax', 'cache_max_age',
                             'RAX_CACHE_MAX_AGE', 600))
    cache_path = get_cache_file_path(regions)

    # Missing file short-circuits before os.stat can raise.
    needs_rebuild = (not os.path.exists(cache_path) or refresh_cache or
                     (time() - os.stat(cache_path)[-1]) > max_age)
    if needs_rebuild:
        # Cache file doesn't exist, is older than max_age, or refresh asked.
        _list_into_cache(regions)

    with open(cache_path, 'r') as fh:
        inventory = json.load(fh)
    print(json.dumps(inventory, sort_keys=True, indent=4))
def _list(regions, refresh_cache=True):
    """Print the inventory JSON for *regions*, refreshing the on-disk cache
    when it is missing, stale, or a refresh was explicitly requested."""
    cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
                                   'RAX_CACHE_MAX_AGE', 600))

    # Short-circuit order matters: os.stat is only reached when the file
    # exists.  NOTE(review): os.stat(...)[-1] is st_ctime, the LAST tuple
    # field -- st_mtime (.st_mtime) was probably intended; confirm.
    if (not os.path.exists(get_cache_file_path(regions)) or
            refresh_cache or
            (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
        # Cache file doesn't exist or older than 10m or refresh cache requested
        _list_into_cache(regions)

    with open(get_cache_file_path(regions), 'r') as cache_file:
        groups = json.load(cache_file)
    print(json.dumps(groups, sort_keys=True, indent=4))
def _get_defaults(self):
    """Load the YAML defaults file named in ansible.cfg, or return None.

    The configured path is resolved relative to $URSULA_ENV when that
    environment variable is set.
    """
    config, _cfg_path = load_config_file()
    defaults_file = get_config(config, DEFAULTS, 'var_defaults_file',
                               'ANSIBLE_VAR_DEFAULTS_FILE', None)
    if not defaults_file:
        return None

    defaults_path = os.path.join(os.environ.get('URSULA_ENV', ''),
                                 defaults_file)
    if not os.path.exists(defaults_path):
        return None

    with open(defaults_path) as fh:
        return yaml.safe_load(fh)
def setup():
    """Authenticate pyrax from env / creds-file / keyring configuration.

    NOTE(review): this chunk appears truncated -- the region-list handling
    and return statement present in the sibling setup() are missing here.
    """
    default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')

    env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
    if env:
        pyrax.set_environment(env)

    keyring_username = pyrax.get_setting('keyring_username')

    # Attempt to grab credentials from environment first
    creds_file = get_config(p, 'rax', 'creds_file', 'RAX_CREDS_FILE', None)
    if creds_file is not None:
        creds_file = os.path.expanduser(creds_file)
    else:
        # But if that fails, use the default location of
        # ~/.rackspace_cloud_credentials
        if os.path.isfile(default_creds_file):
            creds_file = default_creds_file
        elif not keyring_username:
            sys.stderr.write('No value in environment variable %s and/or no '
                             'credentials file at %s\n'
                             % ('RAX_CREDS_FILE', default_creds_file))
            sys.exit(1)

    identity_type = pyrax.get_setting('identity_type')
    pyrax.set_setting('identity_type', identity_type or 'rackspace')

    region = pyrax.get_setting('region')

    try:
        if keyring_username:
            pyrax.keyring_auth(keyring_username, region=region)
        else:
            pyrax.set_credential_file(creds_file, region=region)
    # Python 2-only except syntax; would be `except Exception as e` on Py3.
    except Exception, e:
        sys.stderr.write("%s: %s\n" % (e, e.message))
        sys.exit(1)
def _ara_config(config, key, env_var, default=None, section='ara',
                value_type=None):
    """ Wrapper around Ansible's get_config backward/forward compatibility """
    if default is None:
        try:
            # We're using env_var as keys in the DEFAULTS dict.
            # Fix: dict.get() never raises KeyError (it silently returns
            # None), which made the handler below unreachable and let a
            # missing default slip through as None.  Subscript instead so
            # the intended error is actually raised.
            default = DEFAULTS[env_var]
        except KeyError as e:
            msg = 'There is no default value for {0}: {1}'.format(key, str(e))
            raise KeyError(msg)

    # >= 2.3.0.0 (NOTE: Ansible trunk versioning scheme has 3 digits, not 4)
    if LooseVersion(ansible_version) >= LooseVersion('2.3.0'):
        return get_config(config, section, key, env_var, default,
                          value_type=value_type)

    # < 2.3.0.0 compatibility
    if value_type is None:
        return get_config(config, section, key, env_var, default)

    # Map 2.3-style value_type names onto the old boolean keyword flags.
    args = {
        'boolean': dict(boolean=True),
        'integer': dict(integer=True),
        'list': dict(islist=True),
        'tmppath': dict(istmppath=True)
    }
    return get_config(config, section, key, env_var, default,
                      **args[value_type])
def ara_config(key, env_var, default, section='ara', value_type=None):
    """Read an ARA setting through Ansible's get_config.

    Ansible >= 2.4 no longer pre-loads ansible.cfg for us, so locate and
    parse the ini file here before delegating the lookup.
    """
    parser = configparser.ConfigParser()
    ini_path = find_ini_config_file()
    if ini_path is not None:
        parser.read(ini_path)
    return get_config(parser, section, key, env_var, default,
                      value_type=value_type)
def get_paths(self, inject):
    """Candidate directories for file lookups, in priority order: the task
    basedir, the including file's directory, the playbook dir (when it
    differs from the basedir), then configured lookup_file_paths that
    exist on disk."""
    paths = []
    paths.append(utils.path_dwim(self.basedir, ''))
    if '_original_file' in inject:
        paths.append(utils.path_dwim_relative(inject['_original_file'], '', '', self.basedir, check=False))
    if 'playbook_dir' in inject and paths[0] != inject['playbook_dir']:
        paths.append(inject['playbook_dir'])
    for path in C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], islist=True):
        path = utils.unfrackpath(path)
        if os.path.exists(path):
            paths.append(path)
    return paths
def ara_config(key, env_var, default, section='ara', value_type=None): """ Wrapper around Ansible's get_config backward/forward compatibility """ # Bootstrap Ansible configuration # Ansible >=2.4 takes care of loading the configuration file itself path = find_ini_config_file() config = configparser.ConfigParser() if path is not None: config.read(path) return get_config(config, section, key, env_var, default, value_type=value_type)
def exec_command(self, *args, **kwargs):
    """
    Wrapper around _exec_command to retry in the case of an ssh failure

    Will retry if:
    * an exception is caught
    * ssh returns 255
    Will not retry if
    * remaining_tries is <2
    * retries limit reached
    """
    # Configurable retry budget; +1 because the first attempt counts too.
    remaining_tries = C.get_config(
        C.p, 'ssh_retry', 'retries', 'ANSIBLE_SSH_RETRY_RETRIES',
        3, integer=True) + 1
    cmd_summary = "%s %s..." % (args[0], str(kwargs)[:200])
    for attempt in xrange(remaining_tries):
        # Exponential backoff capped at 30s; sleeps 0s before attempt 0.
        pause = 2 ** attempt - 1
        if pause > 30:
            pause = 30
        time.sleep(pause)
        try:
            return_tuple = super(Connection, self).exec_command(*args, **kwargs)
        except Exception as e:
            msg = ("ssh_retry: attempt: %d, caught exception(%s) from cmd "
                   "(%s).") % (attempt, e, cmd_summary)
            display(msg, color='blue')
            if attempt == remaining_tries - 1:
                # Budget exhausted: surface the last failure to the caller.
                raise e
            else:
                continue
        # 0 = success
        # 1-254 = remote command return code
        # 255 = failure from the ssh command itself
        if return_tuple[0] != 255:
            break
        else:
            msg = ('ssh_retry: attempt: %d, ssh return code is 255. cmd '
                   '(%s).') % (attempt, cmd_summary)
            display(msg, color='blue')
    return return_tuple
def get_paths(self, vars):
    """Search paths for file lookups: configured lookup_file_paths first,
    then the including file's directory, the playbook dir, and finally the
    task basedir.  De-duplicated in order, first occurrence kept."""
    paths = []
    basedir = self.get_basedir(vars)
    for path in C.get_config(C.p, C.DEFAULTS, 'lookup_file_paths', None, [], islist=True):
        path = utils.path.unfrackpath(path)
        if os.path.exists(path):
            paths.append(path)
    if '_original_file' in vars:
        paths.append(self._loader.path_dwim_relative(basedir, '', vars['_original_file']))
    if 'playbook_dir' in vars:
        paths.append(vars['playbook_dir'])
    paths.append(self._loader.path_dwim(basedir))
    # Order-preserving de-duplication; the comprehension is used purely for
    # its side effect of appending unseen items.
    unq = []
    [unq.append(i) for i in paths if not unq.count(i)]
    return unq
def get_paths(self, vars):
    """Search paths for file lookups: configured lookup_file_paths, role
    templates/vars dirs, the playbook dir, then the task basedir.
    De-duplicated in order, first occurrence kept."""
    paths = []
    basedir = self.get_basedir(vars)
    for path in C.get_config(C.p, C.DEFAULTS, "lookup_file_paths", None, [], islist=True):
        path = utils.path.unfrackpath(path)
        if os.path.exists(path):
            paths.append(path)
    if "_original_file" in vars:
        for roledir in ("templates", "vars"):
            # NOTE(review): this uses self.basedir while the rest of the
            # method uses the local `basedir` from get_basedir(vars) --
            # confirm the mix is intentional.
            path = utils.path.path_dwim(self.basedir, os.path.join(self.basedir, "..", roledir))
            if os.path.exists(path):
                paths.append(path)
    if "playbook_dir" in vars:
        paths.append(vars["playbook_dir"])
    paths.append(self._loader.path_dwim(basedir))
    # Order-preserving de-duplication; the comprehension is used purely for
    # its side effect of appending unseen items.
    unq = []
    [unq.append(i) for i in paths if not unq.count(i)]
    return unq
#!/usr/bin/python # Copyright 2015, Jonathan A. Sternberg <*****@*****.**> # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import requests from StringIO import StringIO from ansible import errors, utils from ansible.callbacks import vvv from ansible.constants import p, get_config DEFAULT_USE_SSL = get_config(p, 'agent', 'use_ssl', None, False, boolean=True) CERTIFICATE = get_config(p, 'agent', 'certificate', None, None) class Connection(object): def __init__(self, runner, host, port, user, password, *args, **kwargs): self.runner = runner self.host = host self.port = port or 8700 self.user = user self.password = password self.proto = 'http' if DEFAULT_USE_SSL: self.proto = 'https' self.has_pipelining = True
# We need to monkeypatch ssh for Vagrant because of this bug in Ansible:
# https://github.com/ansible/ansible/pull/5732
import ansible.constants as constants
from ansible.runner.connection_plugins.ssh import Connection as SSHConnection


def monkeypatch_get_config(p, section, key, env_var, default,
                           boolean=False, integer=False, floating=False):
    ''' return a configuration variable with casting '''
    value = constants._get_config(p, section, key, env_var, default)
    if boolean:
        # Fix: was `contants.mk_boolean(value)` -- a misspelled module name
        # that raised NameError on any boolean-typed lookup.
        return constants.mk_boolean(value)
    if value and integer:
        return int(value)
    if value and floating:
        return float(value)
    return value


# Install the patched lookup and re-resolve the remote port with it.
constants.get_config = monkeypatch_get_config
constants.DEFAULT_REMOTE_PORT = constants.get_config(
    constants.p, constants.DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT',
    None, integer=True)


class Connection(SSHConnection):
    """SSH connection plugin; all behavior comes from SSHConnection."""
    pass
def _get_defaults(self):
    """Load the YAML defaults file configured in ansible.cfg, or None.

    Returns the parsed document when `var_defaults_file` (or the
    ANSIBLE_VAR_DEFAULTS_FILE env var) names a file; None otherwise.
    """
    # NOTE(review): sibling call sites unpack load_config_file() as a
    # (parser, path) tuple -- confirm which Ansible version this targets.
    p = load_config_file()
    defaults_file = get_config(p, DEFAULTS, 'var_defaults_file',
                               'ANSIBLE_VAR_DEFAULTS_FILE', None)
    if defaults_file:
        # Fix: close the file deterministically and use safe_load so YAML
        # cannot instantiate arbitrary Python objects.
        with open(defaults_file) as fh:
            return yaml.safe_load(fh)
from pysnmp.entity.rfc3413 import mibvar
from pysnmp.entity import engine
from pysnmp.proto import rfc1902
from pysnmp.proto import rfc1905
from pyasn1.type import univ
from pysnmp.carrier.asynsock.dgram import udp

__all__ = ['Connection', 'SnmpValue', 'OctetString', 'ObjectIdentifier',
           'Integer32', 'Counter32', 'IpAddress', 'Gauge32', 'TimeTicks',
           'Opaque', 'Counter64', 'SnmpClient', 'SnmpError']

# Module-level state shared across connections.
_cache = dict()
_snmp_engine = None

# SNMP settings from ansible.cfg ([snmp]) with env-var overrides.
p = constants.load_config_file()
SNMP_AUTH_PROTOCOL = constants.get_config(p, 'snmp', 'auth_protocol', 'SNMP_AUTH_PROTOCOL', 'none').lower()
SNMP_PRIV_PROTOCOL = constants.get_config(p, 'snmp', 'priv_protocol', 'SNMP_PRIV_PROTOCOL', 'none').lower()
SNMP_ENGINE_ID = constants.get_config(p, 'snmp', 'engine_id', 'SNMP_ENGINE_ID', None)
SNMP_COMMUNITY = constants.get_config(p, 'snmp', 'community', 'SNMP_COMMUNITY', None)
SNMP_AUTH_KEY = constants.get_config(p, 'snmp', 'auth_key', 'SNMP_AUTH_KEY', None)
SNMP_PRIV_KEY = constants.get_config(p, 'snmp', 'priv_key', 'SNMP_PRIV_KEY', None)


class Connection(object):
    """ SNMP based connections """

    def __init__(self, runner, host, port, *args, **kwargs):
        # SNMP defaults to port 161 when the runner supplies none.
        self.runner = runner
        self.host = host
        self.port = port if port else 161
        self.has_pipelining = False
from ansible.constants import get_config, load_config_file DEFAULT_ARA_DIR = os.path.expanduser('~/.ara') DEFAULT_DATABASE_PATH = os.path.join(DEFAULT_ARA_DIR, 'ansible.sqlite') DEFAULT_DATABASE = 'sqlite:///{}'.format(DEFAULT_DATABASE_PATH) DEFAULT_ARA_LOGFILE = os.path.join(DEFAULT_ARA_DIR, 'ara.log') DEFAULT_ARA_LOG_LEVEL = 'INFO' DEFAULT_ARA_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' DEFAULT_ARA_SQL_DEBUG = False DEFAULT_ARA_PATH_MAX = 30 config, path = load_config_file() ARA_DIR = get_config( config, 'ara', 'dir', 'ARA_DIR', DEFAULT_ARA_DIR) ARA_LOG_FILE = get_config( config, 'ara', 'logfile', 'ARA_LOG_FILE', DEFAULT_ARA_LOGFILE) ARA_LOG_LEVEL = get_config( config, 'ara', 'loglevel', 'ARA_LOG_LEVEL', DEFAULT_ARA_LOG_LEVEL).upper() ARA_LOG_FORMAT = get_config( config, 'ara', 'logformat', 'ARA_LOG_FORMAT', DEFAULT_ARA_LOG_FORMAT) ARA_PATH_MAX = get_config( config, 'ara', 'path_max', 'ARA_PATH_MAX', DEFAULT_ARA_PATH_MAX) ARA_ENABLE_DEBUG_VIEW = get_config( config, 'ara', 'enable_debug_view', 'ARA_ENABLE_DEBUG_VIEW',
return map(header._asdict, map(header._make, f(*args, **kwargs))) return wrapper return outer def database(name): def query(f): @wraps(f) def wrapper(*args, **kwargs): cursor = cursors[name] cursor.execute(f(*args, **kwargs)) return cursor return wrapper return query class Cursors(object): def __init__(self, option_file): self.option_file = option_file self.cursors = {} def __getitem__(self, database): try: return self.cursors[database] except KeyError: self.cursors[database] = result = MySQLdb.connect(db=database, read_default_file=self.option_file, charset='utf8').cursor() return result MYSQL_OPTION_FILE = get_config(p, 'python_vars', 'mysql_option_file', 'MYSQL_OPTION_FILE', '~/.my.cnf') cursors = Cursors(MYSQL_OPTION_FILE)
def ans_config(self, section, name, default):
    """Look up *name* in *section* of the repository's ansible.cfg, if present."""
    # Environment override follows Ansible's ANSIBLE_<NAME> convention.
    env_override = "ANSIBLE_" + name.upper()
    return C.get_config(self._p, section, name, env_override, default)
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ansible.constants import get_config, load_config_file import os from flask import Flask from flask_sqlalchemy import SQLAlchemy DEFAULT_DATABASE = os.path.expanduser('~/.ara/ansible.sqlite') config, file = load_config_file() DATABASE = get_config(config, 'ara', 'database', 'ARA_DATABASE', DEFAULT_DATABASE) # TODO (dmsimard): Figure out the best place and way to initialize the # database if it hasn't been created yet. try: if not os.path.exists(os.path.dirname(DATABASE)): os.makedirs(os.path.dirname(DATABASE)) except Exception as e: raise IOError("Unable to ensure database directory exists. " + str(e)) app = Flask(__name__) app.config['DATABASE'] = DATABASE app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///{0}".format(DATABASE) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app)
region = pyrax.get_setting('region') try: if keyring_username: pyrax.keyring_auth(keyring_username, region=region) else: pyrax.set_credential_file(creds_file, region=region) except Exception, e: sys.stderr.write("%s: %s\n" % (e, e.message)) sys.exit(1) regions = [] if region: regions.append(region) else: region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', islist=True) for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions break elif region not in pyrax.regions: sys.stderr.write('Unsupported region %s' % region) sys.exit(1) elif region not in regions: regions.append(region) return regions def main():
def get_config_value(key, env_var, default):
    """
    Look up key in ansible.cfg

    This uses load_config_file() and get_config() from ansible.constants
    """
    # NOTE(review): some Ansible versions return a (parser, path) tuple from
    # load_config_file() -- confirm against the pinned version.
    cfg = AC.load_config_file()
    return AC.get_config(cfg, DEFAULT_SECTION, key, env_var, default)
def _list(regions):
    """Build and print the Ansible dynamic-inventory JSON for *regions*.

    Groups servers by region, by the 'group'/'groups' metadata keys, by
    instance id, flavor and image, and records per-host vars including the
    chosen SSH address.  Boot-from-volume servers get their image resolved
    from cloud-block-storage attachments.
    """
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    cbs_attachments = collections.defaultdict(dict)

    # Prefix used for metadata-derived group names.
    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')

    networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK',
                          'public', islist=True)
    try:
        ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                          'RAX_ACCESS_IP_VERSION', 4,
                                          islist=True))
    except:
        # Any parse failure falls back to IPv4 only.
        ip_versions = [4]
    else:
        # Keep only valid IP versions; default to IPv4 when none survive.
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]

    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return a NoneType. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)

            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)

            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)

            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value

            hostvars[server.name]['rax_region'] = region

            # Python 2-only dict iteration (iteritems).
            for key, value in server.metadata.iteritems():
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)

            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)

            # Handle boot from volume: resolve the image from the region's
            # bootable CBS volume attachments (fetched lazily, once).
            if not server.image:
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if mk_boolean(vol.bootable):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
            else:
                hostvars[server.name]['rax_boot_source'] = 'local'

            # Image groups: prefer the cached human-readable name; fall back
            # to querying the API, and finally to the raw image id.
            try:
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)

            # And finally, add an IP address
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host

    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}

    print(json.dumps(groups, sort_keys=True, indent=4))
def _list_into_cache(regions):
    """Build the inventory for *regions* and write it to the JSON cache file.

    Same grouping scheme as the printing variant: region, metadata
    'group'/'groups' keys, instance id, flavor and image groups, plus
    per-host vars including the chosen SSH address.
    """
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    cbs_attachments = collections.defaultdict(dict)

    # Prefix used for metadata-derived group names.
    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')

    try:
        # Ansible 2.3+
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public',
                              value_type='list')
    except TypeError:
        # Ansible 2.2.x and below
        # pylint: disable=unexpected-keyword-arg
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', islist=True)
    try:
        try:
            # Ansible 2.3+
            ip_versions = map(
                int, get_config(p, 'rax', 'access_ip_version',
                                'RAX_ACCESS_IP_VERSION', 4,
                                value_type='list'))
        except TypeError:
            # Ansible 2.2.x and below
            # pylint: disable=unexpected-keyword-arg
            ip_versions = map(
                int, get_config(p, 'rax', 'access_ip_version',
                                'RAX_ACCESS_IP_VERSION', 4, islist=True))
    except:
        # Any parse failure falls back to IPv4 only.
        ip_versions = [4]
    else:
        # Keep only valid IP versions; default to IPv4 when none survive.
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]

    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return None. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)

            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)

            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)

            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value

            hostvars[server.name]['rax_region'] = region

            for key, value in iteritems(server.metadata):
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)

            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)

            # Handle boot from volume: resolve the image from the region's
            # bootable CBS volume attachments (fetched lazily, once).
            if not server.image:
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if boolean(vol.bootable, strict=False):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
            else:
                hostvars[server.name]['rax_boot_source'] = 'local'

            # Image groups: prefer the cached human-readable name; fall back
            # to querying the API, and finally to the raw image id.
            try:
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)

            # And finally, add an IP address
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host

    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}

    with open(get_cache_file_path(regions), 'w') as cache_file:
        json.dump(groups, cache_file)
try: if keyring_username: pyrax.keyring_auth(keyring_username, region=region) else: pyrax.set_credential_file(creds_file, region=region) except Exception, e: sys.stderr.write("%s: %s\n" % (e, e.message)) sys.exit(1) regions = [] if region: regions.append(region) else: region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', islist=True) for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions break elif region not in pyrax.regions: sys.stderr.write('Unsupported region %s' % region) sys.exit(1) elif region not in regions: regions.append(region) return regions
import sys
import getopt

import requests  # Fixed: requests.get() is called below but requests was never imported

from ansible.constants import p, get_config
from ansible import utils

# First CLI argument selects the mode: '--list' or '--host'.
operation = str(sys.argv[1])

# If running multiple armrest instances, use this to separate them.
# For instance, if armrest_instance is set to "prod", the corresponding
# configuration section in ansible.cfg should be "armrest_prod".
armrest_instance = ""
if armrest_instance != "":
    armrest_config = "armrest_" + armrest_instance
else:
    armrest_config = "armrest"

# Endpoint and cache settings, read from ansible.cfg or the environment.
armrest_uri = get_config(p, armrest_config, "armrest_uri", "ARMREST_URI", "")
#print(armrest_uri)
armrest_use_cache = get_config(p, armrest_config, "armrest_use_cache", "ARMREST_USE_CACHE", "")
armrest_cache_lifetime_seconds = get_config(p, armrest_config, "armrest_cache_lifetime_seconds", "ARMREST_CACHE_LIFETIME_SECONDS", "")

# NOTE(review): get_config() is called without a boolean flag, so the value is
# presumably a string -- `== True` then never matches; confirm how this flag is
# meant to be set before relying on the cache ever being enabled.
if (armrest_use_cache == True):
    import requests_cache
    # Fixed: install_cache() lives on the requests_cache module, not on
    # requests.cache (which does not exist and raised AttributeError).
    requests_cache.install_cache('armrest_cache', backend='sqlite', expire_after=armrest_cache_lifetime_seconds)

if (operation == '--list'):
    armrest_list_uri = armrest_uri + "/api/listhosts"
    r = requests.get(armrest_list_uri)
    print(r.text)

if (operation == '--host'):
    armrest_host = str(sys.argv[2])
import os
from ansible.constants import get_config, load_config_file

# Built-in defaults for ARA; each can be overridden via ansible.cfg or the
# matching environment variable through get_config() below.
DEFAULT_ARA_DIR = os.path.expanduser('~/.ara')
DEFAULT_ARA_TMPDIR = os.path.expanduser('~/.ansible/tmp')
DEFAULT_ARA_LOG_LEVEL = 'INFO'
DEFAULT_ARA_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
DEFAULT_ARA_SQL_DEBUG = False
DEFAULT_ARA_PATH_MAX = 40
DEFAULT_ARA_IGNORE_MIMETYPE_WARNINGS = True

# Load ansible.cfg once; the ARA directory must be resolved first because the
# log file and database defaults live inside it.
config, path = load_config_file()
ARA_DIR = get_config(config, 'ara', 'dir', 'ARA_DIR', DEFAULT_ARA_DIR)

# Log/database location default to the ARA directory once we know where it is.
DEFAULT_ARA_LOGFILE = os.path.join(ARA_DIR, 'ara.log')
DEFAULT_DATABASE_PATH = os.path.join(ARA_DIR, 'ansible.sqlite')
DEFAULT_DATABASE = 'sqlite:///{}'.format(DEFAULT_DATABASE_PATH)

# Remaining resolved settings.
ARA_TMP_DIR = get_config(config, 'defaults', 'local_tmp', 'ANSIBLE_LOCAL_TEMP', DEFAULT_ARA_TMPDIR, istmppath=True)
ARA_LOG_FILE = get_config(config, 'ara', 'logfile', 'ARA_LOG_FILE', DEFAULT_ARA_LOGFILE)
ARA_LOG_LEVEL = get_config(config, 'ara', 'loglevel', 'ARA_LOG_LEVEL', DEFAULT_ARA_LOG_LEVEL).upper()
#!/usr/bin/env python
import requests
import sys
import getopt

from ansible.constants import p, get_config
from ansible import utils

# First CLI argument selects the mode (e.g. '--list').
operation = str(sys.argv[1])

# Endpoint and cache settings, read from ansible.cfg or the environment.
vmminventory_uri = get_config(p, "vmminventory", "vmminventory_uri", "VMMINVENTORY_URI", "")
# print(armrest_uri)
vmminventory_use_cache = get_config(p, "vmminventory", "vmminventory_use_cache", "VMMINVENTORY_USE_CACHE", "")
vmminventory_cache_lifetime_seconds = get_config(
    p, "vmminventory", "vmminventory_cache_lifetime_seconds", "VMMINVENTORY_CACHE_LIFETIME_SECONDS", ""
)

if vmminventory_use_cache == True:
    import requests_cache
    # Fixed: install_cache() is provided by the requests_cache module, not by
    # requests.cache (AttributeError), and expire_after must be the configured
    # lifetime -- the original passed the use_cache flag by mistake.
    requests_cache.install_cache(
        "vmminventory_cache", backend="sqlite", expire_after=vmminventory_cache_lifetime_seconds
    )

if operation == "--list":
    vmminventory_list_uri = vmminventory_uri + "/api/listhosts"
    r = requests.get(vmminventory_list_uri)
    print(r.text)
from pysnmp.proto import rfc1905 from pyasn1.type import univ from pysnmp.carrier.asynsock.dgram import udp __all__ = [ 'Connection', 'SnmpValue', 'OctetString', 'ObjectIdentifier', 'Integer32', 'Counter32', 'IpAddress', 'Gauge32', 'TimeTicks', 'Opaque', 'Counter64', 'SnmpClient', 'SnmpError' ] _cache = dict() _snmp_engine = None p = constants.load_config_file() SNMP_AUTH_PROTOCOL = constants.get_config(p, 'snmp', 'auth_protocol', 'SNMP_AUTH_PROTOCOL', 'none').lower() SNMP_PRIV_PROTOCOL = constants.get_config(p, 'snmp', 'priv_protocol', 'SNMP_PRIV_PROTOCOL', 'none').lower() SNMP_ENGINE_ID = constants.get_config(p, 'snmp', 'engine_id', 'SNMP_ENGINE_ID', None) SNMP_COMMUNITY = constants.get_config(p, 'snmp', 'community', 'SNMP_COMMUNITY', None) SNMP_AUTH_KEY = constants.get_config(p, 'snmp', 'auth_key', 'SNMP_AUTH_KEY', None) SNMP_PRIV_KEY = constants.get_config(p, 'snmp', 'priv_key', 'SNMP_PRIV_KEY', None) class Connection(object):
import os  # Fixed: os.path.abspath is used below but os was never imported (NameError)
import sys
import argparse
import base64
import getpass
import binascii

import ansible.constants as C
from ansible import errors
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

# section in config file
FILTERS = 'filters'

# Vault filter settings, read from the [filters] section of ansible.cfg or the
# corresponding environment variables.
VAULT_FILTER_KEY = C.get_config(C.p, FILTERS, 'vault_filter_key', 'ANSIBLE_VAULT_FILTER_KEY', 'vault.key', ispath=True)
VAULT_FILTER_SALT = C.get_config(C.p, FILTERS, 'vault_filter_salt', 'ANSIBLE_VAULT_FILTER_SALT', None)
VAULT_FILTER_ITERATIONS = C.get_config(C.p, FILTERS, 'vault_filter_iterations', 'ANSIBLE_VAULT_FILTER_ITERATIONS', 1000000, integer=True)
VAULT_FILTER_GENERATE_KEY = C.get_config(C.p, FILTERS, 'vault_filter_generate_key', 'ANSIBLE_VAULT_GENERATE_KEY', False, boolean=True)

vault_filter_key = os.path.abspath(VAULT_FILTER_KEY)
verbose = True


def vault(cipher):
    """Decrypt *cipher* with the Fernet key loaded from the vault key file.

    Raises AnsibleFilterError when the key file cannot be opened or when the
    ciphertext cannot be decrypted with the configured key.
    """
    try:
        # fernet() is defined elsewhere in this file; presumably it builds a
        # Fernet instance from vault_filter_key -- confirm against full file.
        f = fernet()
        # NOTE(review): bytes(cipher) only works for str input on Python 2;
        # on Python 3 it would raise TypeError without an encoding -- verify
        # the intended interpreter before changing.
        return f.decrypt(bytes(cipher))
    except IOError:
        raise errors.AnsibleFilterError("vault: could not open key file: {}. Please run 'vault.py' filter file with --key option first.".format(vault_filter_key))
    except InvalidToken:
        raise errors.AnsibleFilterError("vault: could not decrypt variable. Invalid vault key.")
#!/usr/bin/env python
import requests
import sys, getopt
from ansible.constants import p, get_config
from ansible import utils

# First CLI argument selects the mode (e.g. '--list').
operation = str(sys.argv[1])

# Endpoint and cache settings, read from ansible.cfg or the environment.
vmminventory_uri = get_config(p, "vmminventory", "vmminventory_uri", "VMMINVENTORY_URI", "")
#print(armrest_uri)
vmminventory_use_cache = get_config(p, "vmminventory", "vmminventory_use_cache", "VMMINVENTORY_USE_CACHE", "")
vmminventory_cache_lifetime_seconds = get_config(
    p, "vmminventory", "vmminventory_cache_lifetime_seconds", "VMMINVENTORY_CACHE_LIFETIME_SECONDS", "")

if (vmminventory_use_cache == True):
    import requests_cache
    # Fixed: install_cache() is provided by the requests_cache module, not by
    # requests.cache (AttributeError), and expire_after must be the configured
    # lifetime -- the original passed the use_cache flag by mistake.
    requests_cache.install_cache('vmminventory_cache', backend='sqlite', expire_after=vmminventory_cache_lifetime_seconds)

if (operation == '--list'):
    vmminventory_list_uri = vmminventory_uri + "/api/listhosts"
    r = requests.get(vmminventory_list_uri)
    print(r.text)