def parse_aws_credentials():
    """Read the AWS key pair for the module-level ``access_key_id`` from the
    shared credentials file.

    Returns an ``(access_key_id, aws_secret_access_key)`` tuple, or ``None``
    when the credentials file does not exist.  The ``[default]`` section is
    used when its access key matches ``access_key_id``; otherwise the section
    named after ``access_key_id`` is consulted.
    """
    path = os.path.expanduser(
        os.getenv('AWS_SHARED_CREDENTIALS_FILE', "~/.aws/credentials"))
    # Guard against a missing credentials file instead of crashing inside
    # the config parser (matches the sibling implementations in this file).
    if not os.path.exists(path):
        return None
    conf = Config(path)
    if access_key_id == conf.get('default', 'aws_access_key_id'):
        return (access_key_id, conf.get('default', 'aws_secret_access_key'))
    return (conf.get(access_key_id, 'aws_access_key_id'),
            conf.get(access_key_id, 'aws_secret_access_key'))
def parse_aws_credentials():
    """Read the AWS key pair for the module-level ``access_key_id`` from the
    shared credentials file.

    Returns an ``(access_key_id, aws_secret_access_key)`` tuple, or ``None``
    when the credentials file does not exist.
    """
    # Expand the user path once instead of twice (original called
    # os.path.expanduser for both the existence check and the parse).
    path = os.path.expanduser(
        os.getenv('AWS_SHARED_CREDENTIALS_FILE', "~/.aws/credentials"))
    if not os.path.exists(path):
        return None
    conf = Config(path)
    # Prefer the [default] section when it holds the key we are looking for.
    if access_key_id == conf.get('default', 'aws_access_key_id'):
        return (access_key_id, conf.get('default', 'aws_secret_access_key'))
    # Otherwise fall back to the profile section named after the access key.
    return (conf.get(access_key_id, 'aws_access_key_id'),
            conf.get(access_key_id, 'aws_secret_access_key'))
def parse_aws_credentials():
    """Resolve the (access key, secret key) pair from the shared credentials file.

    Returns None when the credentials file is absent.  The [default] profile
    wins when it matches the module-level ``access_key_id``; otherwise the
    section named after the key is used.
    """
    credentials_file = os.getenv("AWS_SHARED_CREDENTIALS_FILE", "~/.aws/credentials")
    if not os.path.exists(os.path.expanduser(credentials_file)):
        return None
    conf = Config(os.path.expanduser(credentials_file))
    default_key = conf.get("default", "aws_access_key_id")
    if default_key == access_key_id:
        secret = conf.get("default", "aws_secret_access_key")
        return (access_key_id, secret)
    key = conf.get(access_key_id, "aws_access_key_id")
    secret = conf.get(access_key_id, "aws_secret_access_key")
    return (key, secret)
def getint(self, option, default=0):
    """Return *option* from this script's own config section as an int.

    Falls back to ``int(default)`` when the option is missing or its value
    is not a valid integer.
    """
    try:
        val = Config.get(self, self.name, option)
        val = int(val)
    except Exception:
        # Narrowed from a bare except: still covers a missing option or an
        # unparsable value, without swallowing SystemExit/KeyboardInterrupt.
        val = int(default)
    return val
def copy_aws_credentials(src_fname, dst_fname, region):
    """
    Opens a Boto file, changes the region and saves it to a new file,
    changing the ec2 region.
    """
    log("copy_aws_credentials from src[%s] to [%s] in region [%s]" % (
        src_fname, dst_fname, region))
    # Map each supported region to its EC2 API endpoint.  An unknown region
    # raises KeyError, which fails fast before any file is written.
    ec2_region_endpoint = {"us-east-1": "ec2.us-east-1.amazonaws.com",
                           "us-west-2": "ec2.us-west-2.amazonaws.com",
                           "us-west-1": "ec2.us-west-1.amazonaws.com",
                           "eu-west-1": "ec2.eu-west-1.amazonaws.com",
                           "ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com",
                           "ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com",
                           "ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com",
                           "sa-east-1": "ec2.sa-east-1.amazonaws.com"}[region]
    creds = BotoConfig(src_fname)
    # check for AZ override in the CloudSim section
    az = creds.get('CloudSim', region)
    if az in ['any', None]:
        az = region  # use region without a specific AZ
    # (Removed a leftover debug print of src/dst/region; the log() call
    # above already records the same information.)
    creds.set('Boto', 'ec2_region_name', az)
    log("copy_aws_credentials: using Availability Zone: %s" % az)
    creds.set('Boto', 'ec2_region_endpoint', ec2_region_endpoint)
    with open(dst_fname, 'w') as f:
        creds.write(f)
def create(cls, config_file=None, logical_volume=None, cfg=None, **params):
    """Launch one or more EC2 instances described by a boto config.

    Reads launch options from *config_file* (or the pre-built *cfg*), lets
    CommandLineGetter prompt for anything still missing from *params*, runs
    the AMI, optionally binds an elastic IP to the first instance, and
    persists one ``cls`` record per launched instance.

    Returns the list of created ``cls`` objects.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume != None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    # The full config is shipped to the new instance as EC2 user-data.
    cfg_fp = StringIO.StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1,
                          max_count=params.get('quantity', 1),
                          key_name=kp,
                          security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement=zone,
                          user_data=cfg_fp.getvalue())
    l = []
    i = 0
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip != None and instances.__len__() > 0:
        # An elastic IP can only be attached once the instance is running.
        instance = instances[0]
        print 'Waiting for instance to start so we can set its elastic IP address...'
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print 'set the elastic IP of the first instance to %s' % elastic_ip
    for instance in instances:
        s = cls()
        s.ec2 = ec2
        # NOTE(review): this parses as (params.get('name') + '') if i == 0
        # else str(i), so every instance after the first is named just "1",
        # "2", ... — probably meant name + ('' if i == 0 else str(i)); confirm.
        s.name = params.get('name') + '' if i == 0 else str(i)
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        l.append(s)
        i += 1
    return l
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
    """Create and launch EC2 instances from a boto config plus overrides.

    *config_file* (when given) is parsed into *cfg*; any 'EC2' section
    options not already in *params* are merged in, remaining values are
    prompted for via CommandLineGetter, the AMI is run, the first instance
    optionally receives an elastic IP, and one ``cls`` record is saved per
    instance.  Returns the list of saved records.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume != None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    # Serialized config becomes the instance's EC2 user-data payload.
    cfg_fp = StringIO.StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1,
                          max_count=params.get('quantity', 1),
                          key_name=kp,
                          security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement = zone,
                          user_data = cfg_fp.getvalue())
    l = []
    i = 0
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip != None and instances.__len__() > 0:
        # The elastic IP can only be associated after the instance boots.
        instance = instances[0]
        print 'Waiting for instance to start so we can set its elastic IP address...'
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print 'set the elastic IP of the first instance to %s' % elastic_ip
    for instance in instances:
        s = cls()
        s.ec2 = ec2
        # NOTE(review): precedence makes this (name + '') if i==0 else str(i),
        # so instances after the first lose the base name — likely intended
        # name + ('' if i==0 else str(i)); confirm before relying on names.
        s.name = params.get('name') + '' if i==0 else str(i)
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        l.append(s)
        i += 1
    return l
def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None):
    """Load *config_file* and remember the supplied AWS key pair.

    The script name is derived from the last dotted component of the
    'Pyami'/'scripts' option when that option is present, else None.
    """
    Config.__init__(self, config_file)
    self.aws_access_key_id = aws_access_key_id
    self.aws_secret_access_key = aws_secret_access_key
    script = Config.get(self, 'Pyami', 'scripts')
    self.name = script.split('.')[-1] if script else None
def getbool(self, option, default=False):
    """Return *option* from this script's own config section as a bool.

    Only the literal string 'true' (case-insensitive) counts as True; any
    other value is False.  A missing option yields *default*.
    """
    try:
        val = Config.get(self, self.name, option)
        # Only 'true' (any case) is truthy; everything else maps to False.
        val = val.lower() == 'true'
    except Exception:
        # Narrowed from a bare except: still covers a missing option or a
        # non-string value, without trapping SystemExit/KeyboardInterrupt.
        val = default
    return val
def get_aws_credentials(config_file):
    """Locate a .boto configuration and return (access_key, secret_key).

    Candidate files are tried in order, later ones taking precedence:
    ~/.boto, ./.boto, then the explicit --config *config_file*.  Exits with
    status 1 if no usable Credentials section can be found.
    """
    boto_cfg = None
    # Try to read .boto configuration from several places
    # (later ones take precedence).
    for path in (os.path.join(os.path.expanduser('~'), '.boto'),
                 '.boto',
                 config_file):
        if not path:
            continue
        try:
            boto_cfg = Config(path)
        except Exception:
            # Unreadable candidate: keep whatever was loaded so far.
            # (Narrowed from three copy-pasted bare excepts.)
            pass
    # Load the AWS key credentials from whichever config won.
    try:
        access_key = boto_cfg.get('Credentials', 'aws_access_key_id')
        secret_key = boto_cfg.get('Credentials', 'aws_secret_access_key')
    except Exception:
        # Either no config loaded at all (boto_cfg is None) or the
        # Credentials section/options are missing.
        sys.stderr.write('Could not find .boto config file\n')
        sys.exit(1)
    return (access_key, secret_key)
def get_instances_by_region(self, region):
    '''
    Makes an AWS EC2 API call to list the instances in a particular region,
    then registers each one (sorted by Name tag, then instance id) via
    self.add_instance together with the region's bastion IP.
    '''
    try:
        cfg = Config()
        # Load both shared AWS files so profile options from either apply.
        cfg.load_credential_file(os.path.expanduser("~/.aws/credentials"))
        cfg.load_credential_file(os.path.expanduser("~/.aws/config"))
        session_token = cfg.get(self.boto_profile, "aws_session_token")
        conn = ec2.connect_to_region(region, security_token=session_token, profile_name=self.boto_profile)
        # connect_to_region will fail "silently" by returning None if the
        # region name is wrong or not supported
        if conn is None:
            print("region name: {} likely not supported, or AWS is down. "
                  "connection to region failed.".format(region))
            sys.exit(1)
        reservations = conn.get_all_instances(filters=self.filters)
        bastion_ip = self.find_bastion_box(conn)
        instances = []
        for reservation in reservations:
            instances.extend(reservation.instances)
        # sort the instance based on name and index, in this order
        def sort_key(instance):
            name = instance.tags.get('Name', '')
            return "{}-{}".format(name, instance.id)
        for instance in sorted(instances, key=sort_key):
            self.add_instance(bastion_ip, instance, region)
    except boto.provider.ProfileNotFoundError as e:
        # NOTE(review): e.message is a Python 2-only attribute; under
        # Python 3 this line itself raises AttributeError — confirm the
        # targeted interpreter.
        raise Exception(
            "{}, configure it with 'aws configure --profile {}'".format(
                e.message, self.boto_profile))
    except boto.exception.BotoServerError as e:
        print(e)
        sys.exit(1)
def assume_identity(config, profile):
    """Ensure fresh credentials for *profile*, assuming its role if configured.

    Returns None when no refresh is needed at all, False when a previously
    assumed role still has more than 15 minutes of validity, and True after
    refreshing from the shared AWS config (assuming the role via STS when a
    role_arn is configured).
    """
    # if AWS_PROFILE was the option last used, and it didn't require assuming a role
    if config.get('AWS_PROFILE_REFRESH_NOT_NEEDED'):
        return None
    # Serialize refreshes: prefer a lock supplied via config, else the
    # module-level one.  Released in the finally block below.
    _config_lock = config.get('CONFIG_LOCK') or config_lock
    _config_lock.acquire()
    if 'assumed_roles' not in config:
        config['assumed_roles'] = {}
    if 'role_last_updated' not in config:
        config['role_last_updated'] = {}
    try:
        assumed_roles = config.get('assumed_roles', {})
        assumed_role = assumed_roles.get(profile)
        # Still valid with at least a 900 s safety margin: nothing to do.
        if assumed_role and not assumed_role.credentials.is_expired(
                time_offset_seconds=900):
            return False

        # fetch the credentials from the aws configs
        shared_credentials = config.get('AWS_SHARED_CREDENTIALS')
        if not shared_credentials:
            config_path = config.get('AWS_CONFIG_FILE') or os.environ.get(
                'AWS_CONFIG_FILE') or os.path.join(expanduser('~'), '.aws',
                                                   'config')
            # NOTE(review): the credentials path is derived by reusing
            # AWS_CONFIG_FILE and string-replacing '/config' with
            # '/credentials'; a config path not ending in '/config' (or
            # containing it elsewhere) produces a wrong path — presumably
            # AWS_SHARED_CREDENTIALS_FILE was intended. Confirm.
            credentials_path = (config.get('AWS_CONFIG_FILE') or
                                os.environ.get('AWS_CONFIG_FILE') or
                                os.path.join(expanduser('~'), '.aws',
                                             'credentials')).replace(
                                                 '/config', '/credentials')
            shared_credentials = Config(do_load=False)
            if os.path.isfile(credentials_path):
                shared_credentials.load_from_path(credentials_path)
            if os.path.isfile(config_path):
                shared_credentials.load_from_path(config_path)
            # Cache the merged view for subsequent calls.
            config['AWS_SHARED_CREDENTIALS'] = shared_credentials

        # The config file prefixes sections with 'profile ', the
        # credentials file does not — try both spellings.
        profile_key = profile
        if not shared_credentials.has_section(profile_key):
            profile_key = 'profile {}'.format(profile_key)
        if not shared_credentials.has_section(profile_key):
            raise ProfileNotFoundError('Profile {} not found'.format(
                config['AWS_PROFILE']))

        # no matter what, get the access and secret key pair
        if all([
                shared_credentials.has_option(profile_key, x)
                for x in ('aws_access_key_id', 'aws_secret_access_key')
        ]):
            aws_access_key_id = shared_credentials.get(profile_key,
                                                       'aws_access_key_id')
            aws_secret_access_key = shared_credentials.get(
                profile_key, 'aws_secret_access_key')
        elif shared_credentials.has_option(profile_key, 'source_profile'):
            # Keys live on a source profile; resolve it (again trying both
            # section spellings).
            source_profile_key = shared_credentials.get(
                profile_key, 'source_profile')
            if not shared_credentials.has_section(source_profile_key):
                source_profile_key = 'profile {}'.format(source_profile_key)
            if not shared_credentials.has_section(source_profile_key):
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} not found'.format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))
            # source_section = shared_credentials['_sections'][source_profile_key]
            if all([
                    shared_credentials.has_option(source_profile_key, x)
                    for x in ('aws_access_key_id', 'aws_secret_access_key')
            ]):
                aws_access_key_id = shared_credentials.get(
                    source_profile_key, 'aws_access_key_id')
                aws_secret_access_key = shared_credentials.get(
                    source_profile_key, 'aws_secret_access_key')
            else:
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} has no access or secret key'
                    .format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))

        # if there's a role_arn, use it to assume a role
        if shared_credentials.has_option(profile_key, 'role_arn'):
            role_arn = shared_credentials.get(profile_key, 'role_arn')
            sts_connection = sts.STSConnection(
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key)
            config['assumed_roles'][profile] = sts_connection.assume_role(
                role_arn, ROLE_SESSION_NAME, policy=None,
                duration_seconds=960)
            config['role_last_updated'][profile] = datetime.datetime.utcnow(
            ).isoformat()[:19] + 'Z'
        return True
    finally:
        _config_lock.release()
def get(self, name, default=None):
    """Fetch option *name* from this script's own config section.

    Returns *default* when the option is absent.
    """
    section = self.name
    return Config.get(self, section, name, default)
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
    """
    Create a new instance based on the specified configuration file or the
    specified configuration and the passed in parameters.

    If the config_file argument is not None, the configuration is read from
    there.  Otherwise, the cfg argument is used.

    The config file may include other config files with a #import reference.
    The included config files must reside in the same directory as the
    specified file.

    The logical_volume argument, if supplied, will be used to get the
    current physical volume ID and use that as an override of the value
    specified in the config file.  This may be useful for debugging purposes
    when you want to debug with a production config file but a test Volume.

    The dictionary argument may be used to override any EC2 configuration
    values in the config file.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume != None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    # The serialized config is handed to the instance as EC2 user-data.
    cfg_fp = StringIO.StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1,
                          max_count=params.get('quantity', 1),
                          key_name=kp,
                          security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement = zone,
                          user_data = cfg_fp.getvalue())
    l = []
    i = 0
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip is not None and instances.__len__() > 0:
        instance = instances[0]
        print 'Waiting for instance to start so we can set its elastic IP address...'
        # Sometimes we get a message from ec2 that says that the instance does not exist.
        # Hopefully the following delay will give ec2 enough time to get to a stable state:
        time.sleep(5)
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print 'set the elastic IP of the first instance to %s' % elastic_ip
    for instance in instances:
        s = cls()
        s.ec2 = ec2
        # NOTE(review): precedence makes this (name + '') if i==0 else str(i),
        # so every instance after the first is named just "1", "2", ... —
        # probably meant name + ('' if i==0 else str(i)); confirm.
        s.name = params.get('name') + '' if i==0 else str(i)
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        l.append(s)
        i += 1
    return l
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
    """
    Create a new instance based on the specified configuration file or the
    specified configuration and the passed in parameters.

    If the config_file argument is not None, the configuration is read from
    there.  Otherwise, the cfg argument is used.

    The config file may include other config files with a #import reference.
    The included config files must reside in the same directory as the
    specified file.

    The logical_volume argument, if supplied, will be used to get the
    current physical volume ID and use that as an override of the value
    specified in the config file.  This may be useful for debugging purposes
    when you want to debug with a production config file but a test Volume.

    The dictionary argument may be used to override any EC2 configuration
    values in the config file.

    Returns the list of created ``cls`` records, one per launched instance.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume is not None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    # The serialized config is handed to the instance as EC2 user-data.
    cfg_fp = StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1,
                          max_count=params.get('quantity', 1),
                          key_name=kp,
                          security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement=zone,
                          user_data=cfg_fp.getvalue())
    servers = []
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip is not None and len(instances) > 0:
        instance = instances[0]
        print('Waiting for instance to start so we can set its elastic IP address...')
        # Sometimes we get a message from ec2 that says that the instance does not exist.
        # Hopefully the following delay will give ec2 enough time to get to a stable state:
        time.sleep(5)
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print('set the elastic IP of the first instance to %s' % elastic_ip)
    for i, instance in enumerate(instances):
        s = cls()
        s.ec2 = ec2
        # Fix: the original expression parsed as
        # (params.get('name') + '') if i == 0 else str(i), which dropped the
        # base name for every instance after the first.  Suffix the index
        # onto the base name instead.
        s.name = params.get('name') + ('' if i == 0 else str(i))
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        servers.append(s)
    return servers
def loadConfig(self, path=None):
    """Populate credentials, instance and cluster settings from the boto
    config file at *path*."""
    config = Config(path=path)
    get = config.get
    # AWS credentials
    self.aws_access_key_id = get('Credentials', 'aws_access_key_id')
    self.aws_secret_access_key = get('Credentials', 'aws_secret_access_key')
    # Instance launch parameters
    self.key_name = get('Key', 'key_name')
    self.instance_type = get('Instance', 'instance_type')
    self.zone = get('Instance', 'zone', default='us-east-1c')
    self.security_groups = get('Instance', 'security_groups')
    self.tags = get('Instance', 'tags')
    # Cluster topology (the AMI is selected by OS name)
    self.os = get('Type', 'os')
    self.num_nodes = int(get('Type', 'num_nodes'))
    self.ami = get('AMI', self.os)
    # EBS layout
    self.ebs_size = int(get('EBS', 'volume_size', default=0))
    self.num_ebs = int(get('EBS', 'volumes', default=0))
    # Access settings for membase / SSH / REST
    self.membase_port = get('global', 'port', default='8091')
    self.ssh_username = get('global', 'username', default='root')
    self.ssh_key_path = get('global', 'ssh_key', default='/root/.ssh/QAkey.pem')
    self.rest_username = get('membase', 'rest_username', default='Administrator')
    self.rest_password = get('membase', 'rest_password', default='password')
def main(argv):
    """Entry point for the s3ingest daemon.

    Reads the config file, elects itself active via a shared pid/lock file,
    starts uploader worker threads, and loops forever re-queuing files in
    the monitored directory that have been unmodified long enough.
    """
    parser = argparse.ArgumentParser(description='Upload assets to Amazon')
    parser.add_argument('--config', dest='config_filename', action='store', default=CONFIG_FILE,
                        help='optional custom configuration filename')
    parser.add_argument('--node', dest='node_name_override', action='store', default=False,
                        help='optional override for the pid-id specified in the config file')
    parameters = parser.parse_args()
    # Config file is resolved relative to this script's directory.
    current_defaults_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                             parameters.config_filename)
    config = Config(path=current_defaults_filename)
    global access_key_id
    global secret_access_key
    access_key_id = config.get('Amazon', 'aws_access_key_id')
    secret_access_key = config.get('Amazon', 'aws_secret_access_key')
    log_file_path = config.get('General', 'log_file_path', '/var/log/s3ingest.log')
    log_level = config.getint('General', 'log_level', 20)
    target_bucket_name = config.get('Amazon', 's3_bucket_name')
    monitored_dir_name = config.get('General', 'monitored_directory')
    worker_threads = config.getint('General', 'worker_threads', 5)
    pid_file_path = config.get('General', 'pid_file_path', './s3ingest.semaphore')
    # --node overrides the node identity from the config file.
    if not parameters.node_name_override:
        pid_id = config.get('General', 'pid_id').rstrip()
    else:
        pid_id = parameters.node_name_override.rstrip()
    HEART_BEAT_TIME_SECS = config.getint('General', 'heart_beat_time_secs', 300)
    MIN_MODIFIED_INTERVAL_SECS = 3600  # 3600 secs = 1 hr. Keep high to allow time for large files to upload and reduce false positives
    if not os.path.exists(monitored_dir_name):
        print "The directory to be monitored '{0}' does not exist".format(monitored_dir_name)
        sys.exit(1)
    logging.basicConfig(filename=log_file_path,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=log_level)
    # Errors are additionally mailed out via an SMTP log handler.
    mailhost = config.get('Mail', 'mailhost')
    fromaddr = config.get('Mail', 'fromaddr')
    toaddrs = config.get('Mail', 'toaddrs')
    smtp_handler = handlers.SMTPHandler(mailhost, fromaddr, toaddrs, 'S3Util error occurred')
    smtp_handler.setLevel(logging.ERROR)
    logging.getLogger().addHandler(smtp_handler)
    s3_util = S3Util(access_key_id, secret_access_key)
    s3_util.set_target_bucket_name(target_bucket_name)
    signal.signal(signal.SIGINT, s3_util.signal_handler)
    signal.signal(signal.SIGTERM, s3_util.signal_handler)
    # Check for pid file and create if not found
    if not os.path.exists(pid_file_path):
        pid_file = open(pid_file_path, "w+")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_EX)
        pid_file.write(str(pid_id))
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        pid_file.close()
    s3_util.start_monitoring(monitored_dir_name)
    logging.debug("Starting worker threads")
    for i in range(worker_threads):
        t = S3Uploader(s3_util)
        t.setDaemon(True)
        t.start()
    logging.debug("Worker threads started")
    # Main supervision loop: the node named in the pid file is "active";
    # other nodes take over when the file's mtime goes stale.
    while True:
        pid_file = open(pid_file_path, "r+")
        logging.debug("Waiting for lock")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_SH)
        logging.debug("Acquired lock")
        current_pid = pid_file.readline().rstrip()
        st = os.stat(pid_file_path)
        now = time.time()
        pid_modified_time = st[stat.ST_MTIME]
        logging.debug("pid file: {0}, current_host: {1}".format(current_pid, pid_id))
        if pid_id == current_pid:
            logging.debug("State - Active")
            # Touch the pid file so other nodes see a fresh heartbeat.
            os.utime(pid_file_path, None)
            s3_util.set_active(True)
            # Find files have been unmodified for a defined threshold and assume that they need to be queued
            for dirpath, dirnames, filenames in os.walk(monitored_dir_name):
                for name in filenames:
                    file_path = os.path.normpath(os.path.join(dirpath, name))
                    last_modifed_time = os.path.getmtime(file_path)
                    if ((now - last_modifed_time) > MIN_MODIFIED_INTERVAL_SECS
                            and not (s3_util.is_queued(file_path)
                                     or s3_util.is_currently_processing(file_path))):
                        logging.info("Directory scan found file '{0}' older than {1} seconds and added to queue".format(file_path, (now - last_modifed_time)))
                        s3_util.add_to_queue(file_path)
        else:
            if now - pid_modified_time > HEART_BEAT_TIME_SECS:
                # The active node's heartbeat is stale: claim the pid file.
                logging.debug("Stale pid file found, setting state - Active")
                pid_file.truncate(0)
                pid_file.seek(0)
                pid_file.write(str(pid_id))
                s3_util.set_active(True)
            else:
                logging.debug("State - Inactive")
                s3_util.set_active(False)
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        logging.debug("Released lock")
        pid_file.close()
        #Play nice
        sleep(5)
    # NOTE(review): the statements below are unreachable — the while True
    # loop above never breaks; shutdown happens via the signal handler.
    s3_util.wait_for_completion()
    logging.debug("Exiting")
    sys.exit(0)
# Get Command Line Arguments PHRASE = sys.argv[1] JOB_ID = sys.argv[2] ITERATION = sys.argv[3] PARENT_HIT_ID = sys.argv[4] BRANCHES = sys.argv[5] # Connect to the HIT database database = sqlite3.connect('crowdstorming.db', isolation_level='DEFERRED') db = database.cursor() # BOTO Configuration config = Config() AWS_ID = config.get('Credentials', 'aws_access_key_id', None) SECRET_ID = config.get('Credentials', 'aws_secret_access_key_id', None) HOST = 'mechanicalturk.amazonaws.com' mt = MTurkConnection( aws_access_key_id=AWS_ID, aws_secret_access_key=SECRET_ID, host=HOST ) # HIT Configuration - global title, description, keywords, qualifications TITLE = 'Provide Related Terms' DESC = 'Given a word or phrase, provide another (different) word that relates to the given one.' KEYWORDS = 'opinions, relations, idea, brainstorm, crowdstorm' QUAL = Qualifications() QUAL = QUAL.add(PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 75))
def loadConfig(self, path=None):
    """Load every cluster/instance setting from the boto config at *path*."""
    config = Config(path=path)
    # Options copied verbatim onto self: (attribute, section, option, default).
    # A None default matches boto Config.get's own default.
    for attr, section, option, default in (
            ('aws_access_key_id', 'Credentials', 'aws_access_key_id', None),
            ('aws_secret_access_key', 'Credentials', 'aws_secret_access_key', None),
            ('key_name', 'Key', 'key_name', None),
            ('instance_type', 'Instance', 'instance_type', None),
            ('zone', 'Instance', 'zone', 'us-east-1c'),
            ('security_groups', 'Instance', 'security_groups', None),
            ('tags', 'Instance', 'tags', None),
            ('os', 'Type', 'os', None),
            ('membase_port', 'global', 'port', '8091'),
            ('ssh_username', 'global', 'username', 'root'),
            ('ssh_key_path', 'global', 'ssh_key', '/root/.ssh/QAkey.pem'),
            ('rest_username', 'membase', 'rest_username', 'Administrator'),
            ('rest_password', 'membase', 'rest_password', 'password')):
        setattr(self, attr, config.get(section, option, default=default))
    # Options needing conversion, plus the AMI lookup keyed on the OS name.
    self.num_nodes = int(config.get('Type', 'num_nodes'))
    self.ami = config.get('AMI', self.os)
    self.ebs_size = int(config.get('EBS', 'volume_size', default=0))
    self.num_ebs = int(config.get('EBS', 'volumes', default=0))
import os import sys from boto.pyami.config import Config from fabric.colors import red # Load the configuration file if os.path.exists('config.ini'): boto_config = Config() boto_config.load_credential_file('config.ini') if boto_config.items('Credentials'): AWS_ID = boto_config.get('Credentials', 'aws_access_key_id') AWS_KEY = boto_config.get('Credentials', 'aws_secret_access_key') REGION = boto_config.get('Credentials', 'region') else: print(red('Error: credentials section is missing, abort!')) sys.exit(1) if boto_config.items('Config'): DEFAULT_OS = boto_config.get('Config', 'default_os') DEFAULT_SSH_DIR = os.path.expanduser(boto_config.get('Config', 'default_ssh_dir')) DEFAULT_FILE_DIR = os.path.expanduser(boto_config.get('Config', 'default_file_dir')) DEFAULT_INTERNAL_DOMAIN = boto_config.get('Config', 'default_internal_domain') else: print(red('Error: config section is missing, abort!')) sys.exit(1) else: print(red('Error: configuration file missing, abort!')) sys.exit(1) AWS_REGIONS = { 'ap-northeast-1': 'Asia Pacific (Tokyo)', 'ap-southeast-1': 'Asia Pacific (Singapore)',