def print_trailing_info():
    try:
        with open('/home/ubuntu/datastax_ami/presetup/VERSION', 'r') as f:
            version = f.readline().strip()
    except:
        version = "<< $HOME/datastax_ami/presetup/VERSION missing >>"

    # Pull the installed package's "Version: " line out of dpkg's output
    substring = "Version: "
    versionInfo = subprocess.Popen(shlex.split("dpkg -s %s" % conf.get_config("AMI", "package")),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE).stdout.read()
    versionInfo = versionInfo[versionInfo.find(substring) + len(substring):
                              versionInfo.find("\n", versionInfo.find(substring))].strip()

    if conf.get_config("AMI", "Type") == "Community":
        versionInfo = "DataStax Community version " + versionInfo
    if conf.get_config("AMI", "Type") == "Enterprise":
        versionInfo = "DataStax Enterprise version " + versionInfo

    print """
------------------------------------
DataStax AMI for DataStax Enterprise
and DataStax Community
AMI version {0}
{1}
------------------------------------
""".format(version, versionInfo)
def smoke_test():
    if conf.get_config("AMI", "SmokeURL") and conf.get_config("AMI", "SmokeFile") and config_data['launchindex'] == 0:
        smokeURL = conf.get_config("AMI", "SmokeURL")
        smokeFile = conf.get_config("AMI", "SmokeFile")

        import urllib
        urllib.urlretrieve(smokeURL, 'smoke.tar.gz')
        log = exe('tar xvf smoke.tar.gz', True)
        os.chdir('/home/ubuntu/smoke')

        log += "-----------------------------------------------------" + "\n"
        log += "-------------------- SMOKE TESTS --------------------" + "\n"
        log += "-----------------------------------------------------" + "\n"
        log += "Retrieved: " + smokeURL + "\n"
        log += "Executing: " + smokeFile + "\n"
        log += "\n"
        with open(smokeFile, 'r') as f:
            log += f.read() + "\n"
        log += "-----------------------------------------------------" + "\n"
        log += exe('sudo chmod +x ' + smokeFile, True)
        log += exe('./' + smokeFile, True)
        log += "-----------------------------------------------------" + "\n"
        log += "--------------------- END TESTS ---------------------" + "\n"
        log += "-----------------------------------------------------" + "\n"
        os.chdir('/home/ubuntu/')

        # Email smoke test results
        email_report('SMOKE-TESTS ::: ' + smokeFile + ' ::: ' + config_data['publichostname'], log)
def check_and_launch_opscenter():
    if (config_data['launchindex'] == 0 and conf.get_config("OpsCenter", "DNS") and
            not conf.get_config("AMI", "CompletedFirstBoot") and
            not conf.get_config("OpsCenter", "NoOpsCenter")):
        logger.exe('sudo service opscenterd restart')
        conf.set_config("AMI", "CompletedFirstBoot", True)
def wait_for_seed():
    # Wait for the seed node to come online
    req = urllib2.Request('http://169.254.169.254/latest/meta-data/local-ipv4')
    internalip = urllib2.urlopen(req).read()

    if internalip != conf.get_config("AMI", "LeadingSeed"):
        logger.info("Waiting for seed node to come online...")
        nodetoolStatement = "nodetool -h " + conf.get_config("AMI", "LeadingSeed") + " ring"
        logger.info(nodetoolStatement)

        while True:
            nodetool_out = subprocess.Popen(shlex.split(nodetoolStatement),
                                            stderr=subprocess.PIPE,
                                            stdout=subprocess.PIPE).stdout.read()
            # Get rid of the substring included in the new JVM options printout
            # that was causing an infinite loop
            nodetool_out = nodetool_out.replace('+HeapDumpOnOutOfMemoryError', '')

            # Note: the original tested the bare return value of find("up"),
            # which is truthy even for -1 (not found); the explicit > -1 below
            # expresses the intended "output contains 'up'" check.
            if (nodetool_out.lower().find("error") == -1 and
                    nodetool_out.lower().find("up") > -1 and
                    len(nodetool_out) > 0):
                logger.info("Seed node now online!")
                time.sleep(5)
                break

            time.sleep(5)
            logger.info("Retrying seed node...")
def print_trailing_info():
    version = comboami_version()

    substring = "Version: "
    versionInfo = subprocess.Popen(shlex.split("dpkg -s %s" % conf.get_config("AMI", "package")),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE).stdout.read()
    versionInfo = versionInfo[versionInfo.find(substring) + len(substring):
                              versionInfo.find("\n", versionInfo.find(substring))].strip()

    if conf.get_config("AMI", "Type") == "Community":
        versionInfo = "DataStax Community version " + versionInfo
    if conf.get_config("AMI", "Type") == "Enterprise":
        versionInfo = "DataStax Enterprise version " + versionInfo

    print """
------------------------------------
DataStax AMI for DataStax Enterprise
and DataStax Community
AMI version {0}
{1}
------------------------------------
""".format(version, versionInfo)
def ami_error_handling():
    if conf.get_config("AMI", "Error"):
        print
        print conf.get_config("AMI", "Error")
        print
        print "Please visit http://datastax.com/ami for this AMI's feature set."
        print
        sys.exit(1)
def start(ask_serverurl=True, ask_username=True, ask_password=True, ask_oauth2=True):
    """ Interactive configuration. """
    conf.load_or_create()

    serverurl = conf.get_config('serverurl')
    username = conf.get_config('username')
    password = conf.get_config('password')
    client = conf.get_config('client')
    secret = conf.get_config('secret')

    if ask_serverurl or serverurl == "":
        serverurl = __serverurl(serverurl == "")
    if ask_username or username == "":
        username = __username(username == "")
    if ask_password or password == "":
        password = __password(password == "")
    if ask_oauth2 or client == "" or secret == "":
        client = __client(client == "")
        secret = __secret(secret == "")

    if serverurl != "":
        conf.set_config('serverurl', serverurl)
    if username != "":
        conf.set_config('username', username)
    if password != "":
        conf.set_config('password', password)
    if client != "":
        conf.set_config('client', client)
    if secret != "":
        conf.set_config('secret', secret)

    # username/password and client/secret check
    testresponse = api.api_token()
    if testresponse.has_error():
        conf.save()
        if testresponse.error == api.Error.http_bad_request:
            print(testresponse.error_description)
            if testresponse.error_text == "invalid_grant":
                start(ask_serverurl=False, ask_oauth2=False)
                return
            elif testresponse.error_text == "invalid_client":
                start(ask_serverurl=False, ask_username=False, ask_password=False)
                return
        print("An unknown error occurred on the server side. Please try again later.")
        print()
        exit(-1)

    print()
    if conf.save():
        print("The config was saved successfully.")
    else:
        print("An error occurred while saving the configuration. Please try again.")
        print()
        exit(-1)
def opscenter_installation():
    if instance_data['launchindex'] == 0 and options.opscenter != "no":
        logger.info('Installing OpsCenter...')
        if conf.get_config("AMI", "Type") == "Community":
            logger.exe('sudo apt-get -y install opscenter-free libssl0.9.8')
        elif conf.get_config("AMI", "Type") == "Enterprise":
            logger.exe('sudo apt-get -y install opscenter libssl0.9.8')
        logger.exe('sudo service opscenterd stop')
    elif options.opscenter == "no":
        conf.set_config("OpsCenter", "NoOpsCenter", True)
def run():
    initial_configurations()
    write_bin_tools()
    restart_tasks()

    if conf.get_config("AMI", "OpsCenterOnly"):
        logger.exe('sudo service opscenterd restart')

    if conf.get_config("AMI", "LeadingSeed"):
        wait_for_seed()
        launch_opscenter()

    start_services()
def restart_tasks():
    logger.info("AMI Type: " + str(conf.get_config("AMI", "Type")))

    # Mount all attached drives
    logger.exe('sudo mount -a')

    # Disable swap
    logger.exe('sudo swapoff --all')

    # Ensure the correct blockdev readahead since this sometimes resets after restarts
    if conf.get_config('AMI', 'raid_readahead'):
        logger.exe('sudo blockdev --setra %s /dev/md0' % (conf.get_config('AMI', 'raid_readahead')),
                   expectError=True)
def wait_for_seed():
    # Wait for the seed node to come online
    req = urllib2.Request('http://169.254.169.254/latest/meta-data/local-ipv4')
    internalip = urllib2.urlopen(req).read()

    if internalip != conf.get_config("AMI", "LeadingSeed"):
        logger.info("Waiting for seed node to come online...")
        d = dict(os.environ)
        d["HOST"] = conf.get_config("AMI", "LeadingSeed")
        wait_script = "python /home/ubuntu/datastax_ami/wait_for_first_node.sh"
        subprocess.Popen(shlex.split(wait_script),
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         env=d).stdout.read()
def print_opscenter_information():
    try:
        opscenter_ip = conf.get_config("OpsCenter", "DNS")
        opscenter_port = conf.get_config("OpsCenter", "port")
        packageQuery = subprocess.Popen(shlex.split("dpkg-query -l 'opscenter'"),
                                        stderr=subprocess.PIPE,
                                        stdout=subprocess.PIPE).stdout.read()
        if packageQuery:
            print "Opscenter: http://{0}:{1}/".format(opscenter_ip, opscenter_port)
            print "    Please wait 60 seconds if this is the cluster's first start..."
            print
            print
    except:
        pass
def install_java():
    logger.info('Performing deployment install...')

    if conf.get_config("AMI", "JavaType") == "1.7":
        url = "http://www.java.com/en/download/manual.jsp"
        majorversion = "7"
    else:
        url = "http://java.com/en/download/manual_v6.jsp"
        majorversion = "6"

    f = urllib2.urlopen(url)
    t = f.read()

    # regex to find the java minor version on the download page
    vr = re.compile(r"(?<=Update )\d+(?=.*)")
    m = vr.search(t)
    minorversion = m.group()

    arch = "64"
    if arch == "64":
        # regex to find the download link
        dlr = re.compile(r'(?<=Linux x64\" href=\")\S+(?=\".*)')
    else:
        dlr = re.compile(r'(?<=Linux\" href=\")\S+(?=\".*)')
    m = dlr.search(t)
    downloadlink = m.group()

    path = "/opt/java/" + arch + "/"
    cwd = os.curdir
    logger.exe("sudo mkdir -p " + path)
    os.chdir(path)

    if conf.get_config("AMI", "JavaType") == "1.7":
        outputfilename = "jre1.7.tar.gz"
    else:
        outputfilename = "jre1.6.bin"

    urllib.urlretrieve(downloadlink, path + outputfilename)

    if conf.get_config("AMI", "JavaType") == "1.7":
        logger.exe("sudo tar -zxvf " + path + outputfilename)
    else:
        logger.exe("sudo chmod +x " + path + outputfilename)
        logger.exe("sudo " + path + outputfilename)

    logger.exe('sudo update-alternatives --install "/usr/bin/java" "java" "' + path + 'jre1.' + majorversion + '.0_' + minorversion + '/bin/java" 1')
    logger.exe('sudo update-alternatives --set "java" "' + path + 'jre1.' + majorversion + '.0_' + minorversion + '/bin/java"')

    os.chdir(cwd)
def start_services():
    # Wait for system setup changes to settle
    time.sleep(5)

    # Actually start the application
    if conf.get_config("AMI", "Type") == "Community" or conf.get_config("AMI", "Type") == "False":
        logger.info('Starting DataStax Community...')
        logger.exe('sudo service cassandra restart')
    elif conf.get_config("AMI", "Type") == "Enterprise":
        logger.info('Starting DataStax Enterprise...')
        logger.exe('sudo service dse restart')
def waiting_for_full_cluster_to_launch(nodetool_out):
    start_time = time.time()
    while True:
        # "Up" matches the older ring output; "UN" (Up/Normal) matches the
        # newer nodetool status output.
        if nodetool_out.count("Up") == int(conf.get_config("Cassandra", "TotalNodes")):
            break
        if nodetool_out.count("UN") == int(conf.get_config("Cassandra", "TotalNodes")):
            break
        if time.time() - start_time > 60:
            break
        nodetool_out = subprocess.Popen(shlex.split(config_data['nodetool_statement']),
                                        stderr=subprocess.PIPE,
                                        stdout=subprocess.PIPE).stdout.read()
    print nodetool_out
def construct_mapred_site():
    if conf.get_config("AMI", "Type") == "Enterprise":
        with open('/etc/dse/hadoop/mapred-site.xml', 'r') as f:
            mapred_site = f.read()

        mapred_local_dir = os.path.join(conf.get_config("AMI", "MountDirectory"), 'hadoop', 'mapredlocal')
        mapred_site = mapred_site.replace('/tmp/mapredlocal', mapred_local_dir)

        logger.exe('sudo mkdir -p %s' % mapred_local_dir)
        logger.exe('sudo chown -R cassandra:cassandra %s' % mapred_local_dir)

        with open('/etc/dse/hadoop/mapred-site.xml', 'w') as f:
            f.write(mapred_site)
def __init__(self, bullet_file=None):
    self.url = ""
    self.bullets = []
    self.clips = []
    self.bullets_xml = None
    self.port = None
    self.save_path = None
    self.bullets_dir = conf.get_config('bullets_dir')
    self.plugins_dir = conf.get_config('plugins_dir')

    if bullet_file is None:
        self.bullet_file = conf.get_config('default_bullet') + ".xml"
    else:
        self.bullet_file = bullet_file + ".xml"
def run():
    ami_error_handling()
    print_userdata()
    waiting_for_status()

    if not conf.get_config("AMI", "RaidOnly") and not conf.get_config("AMI", "OpsCenterOnly"):
        waiting_for_nodetool()
        nodetool_out = check_for_one_up_node()
        waiting_for_full_cluster_to_launch(nodetool_out)

    print_opscenter_information()
    print_tools()
    print_trailing_info()
    print_errors()
def confirm_authentication():
    if conf.get_config("AMI", "Type") == "Enterprise":
        if options.username and options.password:
            repo_url = "http://debian.datastax.com/enterprise"

            # Configure HTTP authentication
            password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_mgr.add_password(None, repo_url, options.username, options.password)
            handler = urllib2.HTTPBasicAuthHandler(password_mgr)
            opener = urllib2.build_opener(handler)

            # Try reading from the authenticated connection
            try:
                opener.open(repo_url)
                config_data['conf_path'] = os.path.expanduser("/etc/dse/cassandra/")
            except Exception as inst:
                # Print an error message if authentication failed
                if "401" in str(inst):
                    exit_path('Authentication for DataStax Enterprise failed. Please confirm your username and password.\n')
        elif (options.username or options.password):
            exit_path("Both --username (-u) and --password (-p) required for DataStax Enterprise.")
def get_mongodb_connection():
    env = os.environ.get('UGGIPUGGI_BACKEND_ENV', 'docker_compose')
    config = get_config(env)

    # get all config values about DB
    db_config = config['mongodb']

    # map each connection key to the builtin type used to cast its value
    # NOTE: the username/password type names were masked as '******' in the
    # original source; 'str' is restored here as the only builtin cast that
    # makes getattr(builtins, typ) work for those keys.
    db_name = db_config.get('name')
    attr_map = {
        'host': 'str',
        'port': 'int',
        'username': 'str',
        'password': 'str'
    }

    kwargs = {}
    for key, typ in attr_map.items():
        typecast_fn = getattr(builtins, typ)
        # cast the value from db_config accordingly if the key-value pair exists
        kwargs[key] = typecast_fn(db_config.get(key)) if db_config.get(key) else None

    #kwargs['alias'] = 'default_celery'
    #connection.disconnect('mongo_celery')  # disconnect previous default connection if any
    connection.disconnect('default')  # disconnect previous default connection if any
    return connection.connect(db_name, **kwargs)
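# Worked example (hypothetical config values, not from the original module):
# with db_config = {'name': 'recipes', 'host': 'localhost', 'port': '27017'},
# the attr_map loop above builds
#   kwargs = {'host': 'localhost', 'port': 27017, 'username': None, 'password': None}
# so the final call is equivalent to
#   connection.connect('recipes', host='localhost', port=27017,
#                      username=None, password=None)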
def initial_configurations():
    # Begin configuration; this is only run once in Public Packages
    if conf.get_config("AMI", "CurrentStatus") != "Complete!":
        # Configure DataStax variables
        try:
            import ds2_configure
            ds2_configure.run()
        except:
            conf.set_config("AMI", "Error",
                            "Exception seen in %s. Please check ~/datastax_ami/ami.log for more info." % 'ds1_launcher.py')
            logger.exception('ds1_launcher.py')

        # Set ulimit hard limits
        logger.pipe('echo "* soft nofile 32768"', 'sudo tee -a /etc/security/limits.conf')
        logger.pipe('echo "* hard nofile 32768"', 'sudo tee -a /etc/security/limits.conf')
        logger.pipe('echo "root soft nofile 32768"', 'sudo tee -a /etc/security/limits.conf')
        logger.pipe('echo "root hard nofile 32768"', 'sudo tee -a /etc/security/limits.conf')

        # Change permissions back to being ubuntu's and cassandra's
        logger.exe('sudo chown -hR ubuntu:ubuntu /home/ubuntu')
        logger.exe('sudo chown -hR cassandra:cassandra /raid0/cassandra', False)
        logger.exe('sudo chown -hR cassandra:cassandra /mnt/cassandra', False)
    else:
        logger.info('Skipping initial configurations.')
def __init__(self):
    logging.Logger.__init__(self, __name__)
    self._conf = conf.get_config("taemin").get("general", {})
    self.setLevel(self._LEVEL.get(self._conf.get("log_level"), logging.INFO))
    self.addHandler(self._get_handler())
    sys.excepthook = self._handler_exception
def confirm_authentication():
    if os.path.isfile('/etc/datastax_ami.conf'):
        with open('/etc/datastax_ami.conf') as f:
            # Using this license is strictly prohibited on any AMIs other than
            # those that come pre-baked with this key.
            options.username = f.readline().strip()
            options.password = f.readline().strip()
        return

    if conf.get_config("AMI", "Type") == "Enterprise":
        if options.username and options.password:
            repo_url = "http://debian.datastax.com/enterprise"

            # Configure HTTP authentication
            password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_mgr.add_password(None, repo_url, options.username, options.password)
            handler = urllib2.HTTPBasicAuthHandler(password_mgr)
            opener = urllib2.build_opener(handler)

            # Try reading from the authenticated connection
            try:
                opener.open(repo_url)
            except Exception as inst:
                # Print an error message if authentication failed
                if "401" in str(inst):
                    exit_path('Authentication for DataStax Enterprise failed. Please confirm your username and password.\n')
        elif not options.username or not options.password:
            exit_path("Both --username (-u) and --password (-p) required for DataStax Enterprise.")
def setup_repos():
    # Add repos
    if conf.get_config("AMI", "Type") == "Enterprise":
        logger.pipe('echo "deb http://{0}:{1}@debian.datastax.com/enterprise stable main"'.format(options.username, options.password),
                    'sudo tee /etc/apt/sources.list.d/datastax.sources.list')
    else:
        logger.pipe('echo "deb http://debian.datastax.com/community stable main"',
                    'sudo tee /etc/apt/sources.list.d/datastax.sources.list')

    # Add repokeys
    logger.exe('sudo apt-key add /home/ubuntu/datastax_ami/repo_keys/DataStax.key')

    # Perform the install, retrying until apt-get update succeeds
    logger.exe('sudo apt-get update')
    while True:
        output = logger.exe('sudo apt-get update')
        if not output[1] and 'err' not in output[0].lower() and 'failed' not in output[0].lower():
            break
        time.sleep(5)
def waiting_for_status():
    config_data['waiting_for_status'] = False

    dots = 0
    while True:
        status = conf.get_config("AMI", "CurrentStatus")
        if not status == 'Complete!' and not status == False:
            ticker = '.' * dots
            sys.stdout.write("\r" + " " * 60)  # clear the previous status line
            sys.stdout.write("\r%s%s " % (status, ticker))
            sys.stdout.flush()
        elif status == 'Complete!':
            break
        else:
            if not config_data['waiting_for_status']:
                print "Waiting for cluster to boot..."
                config_data['waiting_for_status'] = True
        ami_error_handling()
        time.sleep(5)
        dots = (dots + 1) % 4
    print
def calculate_tokens():
    if conf.get_config('Cassandra', 'partitioner') == 'random_partitioner':
        import tokentoolv2
        datacenters = [options.realtimenodes, options.analyticsnodes, options.searchnodes]
        config_data['tokens'] = tokentoolv2.run(datacenters)
def index():
    json_string = json.dumps(conf.get_config())
    schedule_string = json.dumps(MqttScheduler.getSchedule())
    sensors_string = json.dumps(snsr.get_config())
    return render_template('index.html',
                           config=json_string,
                           schedule=schedule_string,
                           sensors=sensors_string)
def run():
    initial_configurations()
    write_bin_tools()
    restart_tasks()

    if conf.get_config("AMI", "LeadingSeed"):
        wait_for_seed()
        launch_opscenter()

    start_services()
def prepare_for_raid():
    # Only create raid0 once. Mount all times in the init.d script.
    # A failsafe against deleting this file.
    if conf.get_config("AMI", "RAIDAttempted"):
        return

    conf.set_config("AMI", "CurrentStatus", "Raiding started")

    # Remove the EC2 default /mnt from fstab
    fstab = ''
    file_to_open = '/etc/fstab'
    logger.exe('sudo chmod 777 {0}'.format(file_to_open))
    with open(file_to_open, 'r') as f:
        for line in f:
            if not "/mnt" in line:
                fstab += line
    with open(file_to_open, 'w') as f:
        f.write(fstab)
    logger.exe('sudo chmod 644 {0}'.format(file_to_open))

    # Create a list of devices
    devices = glob.glob('/dev/xvd*')
    devices.remove('/dev/xvda1')
    devices.sort()
    logger.info('Unformatted devices: {0}'.format(devices))

    # Check if there are enough drives to start a RAID set
    if len(devices) > 1:
        time.sleep(3)  # was at 20
        mnt_point = mount_raid(devices)
    # Not enough drives to RAID together.
    else:
        mnt_point = format_xfs(devices)

    if not options.raidonly:
        # Change cassandra.yaml to point to the new data directories
        with open(os.path.join(config_data['conf_path'], 'cassandra.yaml'), 'r') as f:
            yaml = f.read()
        yaml = yaml.replace('/var/lib/cassandra/data',
                            os.path.join(mnt_point, 'cassandra', 'data'))
        yaml = yaml.replace('/var/lib/cassandra/saved_caches',
                            os.path.join(mnt_point, 'cassandra', 'saved_caches'))
        yaml = yaml.replace('/var/lib/cassandra/commitlog',
                            os.path.join(mnt_point, 'cassandra', 'commitlog'))
        with open(os.path.join(config_data['conf_path'], 'cassandra.yaml'), 'w') as f:
            f.write(yaml)

    # Never create the raid array again
    conf.set_config("AMI", "RAIDAttempted", True)

    logger.info("Mounted Raid.\n")
    conf.set_config("AMI", "MountDirectory", mnt_point)
    conf.set_config("AMI", "CurrentStatus", "Raiding complete")
def initial_configurations():
    # Begin configuration; this is only run once in Public Packages
    if not conf.get_config("AMI", "CurrentStatus"):
        # Configure DataStax variables
        try:
            import ds2_configure
            ds2_configure.run()
        except:
            conf.set_config("AMI", "Error",
                            "Exception seen in %s. Please check ~/datastax_ami/ami.log for more info." % 'ds1_launcher.py')
            logger.exception('ds1_launcher.py')

        # Change permissions back to being ubuntu's and cassandra's
        logger.exe('sudo chown -hR ubuntu:ubuntu /home/ubuntu')
        logger.exe('sudo chown -hR cassandra:cassandra /raid0/cassandra', False)
        logger.exe('sudo chown -hR cassandra:cassandra /mnt/cassandra', False)

        # Ensure permissions
        directory_list = [
            ('/home/ubuntu', 'ubuntu', 'ubuntu'),
            ('/raid0/cassandra', 'cassandra', 'cassandra'),
            ('/mnt/cassandra', 'cassandra', 'cassandra')
        ]

        for directory in directory_list:
            if os.path.isdir(directory[0]):
                logger.info('Checking permissions for: %s' % directory[0])
                attempt = 0
                max_attempts = 10
                permissions_set = False

                while attempt < max_attempts:
                    logger.info('Attempt #%s' % attempt)
                    stat_info = os.stat(directory[0])
                    uid = stat_info.st_uid
                    gid = stat_info.st_gid
                    user = pwd.getpwuid(uid)[0]
                    group = grp.getgrgid(gid)[0]
                    if user == directory[1] and group == directory[2]:
                        permissions_set = True
                        break
                    attempt += 1
                    time.sleep(1)

                if not permissions_set:
                    logger.warn('Permissions not set correctly. Please run manually:')
                    logger.warn('sudo chown -hR %s:%s %s' % (directory[1], directory[2], directory[0]))
                    logger.warn('sudo service dse restart')
                else:
                    logger.info('Permissions set for %s as %s:%s' % (directory[0], user, group))
    else:
        logger.info('Skipping initial configurations.')
def construct_core_site():
    if conf.get_config("AMI", "Type") == "Enterprise":
        with open('/etc/dse/hadoop/core-site.xml', 'r') as f:
            core_site = f.read()

        hadoop_tmp_dir = os.path.join(conf.get_config("AMI", "MountDirectory"), 'hadoop')
        tmp_dir = '\n  <!-- AMI configuration -->\n  <property>\n    <name>hadoop.tmp.dir</name>\n    <value>%s/${user.name}</value>\n  </property>\n</configuration>' % hadoop_tmp_dir
        core_site = core_site.replace('</configuration>', tmp_dir)

        logger.exe('sudo mkdir -p %s' % hadoop_tmp_dir)
        logger.exe('sudo chown -R cassandra:cassandra %s' % hadoop_tmp_dir)

        hadoop_ubuntu_dir = os.path.join(hadoop_tmp_dir, 'ubuntu')
        logger.exe('sudo mkdir -p %s' % hadoop_ubuntu_dir)
        logger.exe('sudo chown -R ubuntu:ubuntu %s' % hadoop_ubuntu_dir)

        with open('/etc/dse/hadoop/core-site.xml', 'w') as f:
            f.write(core_site)
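# For illustration (assuming MountDirectory resolves to /raid0, which is only a
# hypothetical value here): the replace above rewrites the closing tag of
# core-site.xml into
#   <!-- AMI configuration -->
#   <property>
#     <name>hadoop.tmp.dir</name>
#     <value>/raid0/hadoop/${user.name}</value>
#   </property>
#   </configuration>
# so each Hadoop user gets a scratch directory on the raided mount.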
def main(opts, args):
    """ """
    if opts.confpath:
        confd = conf.get_config(opts.confpath)
        lgr.debug(pprint.pformat(confd))
    else:
        confd = {}

    if opts.flikrpage:
        likelicandidate, resultstr = extract_photo_url(opts.flikrpage)
        print likelicandidate
        print resultstr

    if opts.flikrphoto:
        get_photo(opts.flikrphoto)
def use_ec2_userdata():
    if not options:
        exit_path("No parsed options found.")

    if not options.totalnodes:
        exit_path("Missing required --totalnodes (-n) switch.")

    if (options.analyticsnodes + options.searchnodes) > options.totalnodes:
        exit_path("Total nodes assigned (--analyticsnodes + --searchnodes) > total available nodes (--totalnodes)")

    if conf.get_config("AMI", "Type") == "Community" and (options.cfsreplication or options.analyticsnodes or options.searchnodes):
        exit_path('CFS Replication, Analytics Nodes, and Search Node settings can only be set in DataStax Enterprise installs.')

    if options.email:
        logger.info('Setting up diagnostic email using: {0}'.format(options.email))
        conf.set_config("AMI", "Email", options.email)

    if options.clustername:
        logger.info('Using cluster name: {0}'.format(options.clustername))
        instance_data['clustername'] = options.clustername

    if options.customreservation:
        instance_data['reservationid'] = options.customreservation

    if options.seeds:
        instance_data['seeds'] = options.seeds

    if options.opscenterip:
        instance_data['opscenterip'] = options.opscenterip

    if options.stop_services:
        with open(ami_disabled, 'w') as f:
            f.write('')

    options.realtimenodes = (options.totalnodes - options.analyticsnodes - options.searchnodes)
    options.seed_indexes = [0,
                            options.realtimenodes,
                            options.realtimenodes + options.analyticsnodes]

    logger.info('Using cluster size: {0}'.format(options.totalnodes))
    conf.set_config("Cassandra", "TotalNodes", options.totalnodes)
    logger.info('Using seed indexes: {0}'.format(options.seed_indexes))

    if options.reflector:
        logger.info('Using reflector: {0}'.format(options.reflector))
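# A minimal sketch (hypothetical switch values, not from the original module)
# of the seed-index arithmetic in use_ec2_userdata(): with --totalnodes 6,
# --analyticsnodes 2 and --searchnodes 1:
#   realtimenodes = 6 - 2 - 1         # 3
#   seed_indexes  = [0, 3, 3 + 2]     # [0, 3, 5]
# i.e. launch indexes 0, 3 and 5 become the first node of the realtime,
# analytics and search datacenters respectively.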
def get_test_snakebite():
    config = get_config('testing')
    return create_snakebite(**config)
""" tools_prepare_root(confd) depends_checkdeps(confd) # install_odi(confd) # install_odisvn(confd) # prepare_a_repo(confd, "test1") def parse_args(): """ parse the args for the main install script """ parser = OptionParser() parser.add_option("--config", dest="conf", help="Path to config file.") (options, args) = parser.parse_args() return (options, args) if __name__ == '__main__': fmt = "** %(message)s" logging.basicConfig(level=logging.DEBUG, format=fmt) lgr = logging.getLogger("ODISVN-installer") opts, args = parse_args() confd = conf.get_config(opts.conf) lgr.info("starting main") main(confd)
from pbnh.app import app
import conf

conf = conf.get_config().get('server')
app.run(conf.get('bind_ip'), port=conf.get('bind_port'), debug=conf.get('debug'))
class myError(Exception):
    pass


#########
def main():
    """ """


#########
def parse_args():
    parser = OptionParser()
    parser.add_option("--config", dest="confpath", help="path to ini file")
    (options, args) = parser.parse_args()
    return (options, args)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    lgr = logging.getLogger(__name__)  # lgr was used but never defined in the original
    opts, args = parse_args()
    confd = conf.get_config(opts.confpath)
    lgr.debug(pprint.pformat(confd))
    try:
        main()
    except Exception, e:
        print "We can trap a lot up here"
        raise e
def construct_yaml():
    with open(os.path.join(config_data['conf_path'], 'scylla.yaml'), 'r') as f:
        yaml = f.read()

    # Create the seed list
    seeds_yaml = ','.join(config_data['seed_list'])
    if options.seeds:
        if options.bootstrap:
            # Do not include current node while bootstrapping
            seeds_yaml = options.seeds
        else:
            # Add current node to seed list for multi-region setups
            seeds_yaml = seeds_yaml + ',' + options.seeds

    # Set seeds for DSE/C
    p = re.compile('seeds:.*')
    yaml = p.sub('seeds: "{0}"'.format(seeds_yaml), yaml)

    # Set listen_address
    p = re.compile('listen_address:.*')
    yaml = p.sub('listen_address: {0}'.format(instance_data['internalip']), yaml)

    # Set rpc_address
    p = re.compile('rpc_address:.*')
    if options.rpcbinding:
        yaml = p.sub('rpc_address: {0}'.format(instance_data['internalip']), yaml)
    else:
        yaml = p.sub('rpc_address: 0.0.0.0', yaml)

        # needed for 2.1+
        p = re.compile('# broadcast_rpc_address:.*')
        yaml = p.sub('broadcast_rpc_address: {0}'.format(instance_data['internalip']), yaml)

    if options.multiregion:
        # multiregion: --rpcbinding is implicitly true
        yaml = p.sub('rpc_address: {0}'.format(instance_data['internalip']), yaml)
        yaml = yaml.replace('endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch',
                            'endpoint_snitch: org.apache.cassandra.locator.Ec2MultiRegionSnitch')
        yaml = yaml.replace('endpoint_snitch: SimpleSnitch',
                            'endpoint_snitch: Ec2MultiRegionSnitch')
        p = re.compile('# broadcast_address: 1.2.3.4')
        req = curl_instance_data('http://169.254.169.254/latest/meta-data/public-ipv4')
        instance_data['externalip'] = urllib2.urlopen(req).read()
        logger.info("meta-data:external-ipv4: %s" % instance_data['externalip'])
        yaml = p.sub('broadcast_address: {0}'.format(instance_data['externalip']), yaml)

    # XXX: Commented out to use SimpleSnitch
    # Uses the EC2Snitch for Community Editions
    # if conf.get_config("AMI", "Type") == "Community":
    #     yaml = yaml.replace('endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch', 'endpoint_snitch: org.apache.cassandra.locator.GossipingPropertyFileSnitch')
    #     yaml = yaml.replace('endpoint_snitch: SimpleSnitch', 'endpoint_snitch: GossipingPropertyFileSnitch')

    # Set cluster_name to reservationid
    instance_data['clustername'] = instance_data['clustername'].strip("'").strip('"')
    yaml = yaml.replace("cluster_name: 'Test Cluster'",
                        "cluster_name: '{0}'".format(instance_data['clustername']))

    # Set auto_bootstrap
    if options.bootstrap:
        if 'auto_bootstrap' in yaml:
            p = re.compile('auto_bootstrap:.*')
            yaml = p.sub('auto_bootstrap: true', yaml)
        else:
            yaml += "\nauto_bootstrap: true\n"
    else:
        if 'auto_bootstrap' in yaml:
            p = re.compile('auto_bootstrap:.*')
            yaml = p.sub('auto_bootstrap: false', yaml)
        else:
            yaml += "\nauto_bootstrap: false\n"

    if conf.get_config('Cassandra', 'partitioner') == 'random_partitioner':
        # Construct token for an equally split ring
        logger.info('Cluster tokens: {0}'.format(config_data['tokens']))

        if instance_data['launchindex'] < options.seed_indexes[1]:
            token = config_data['tokens'][0][instance_data['launchindex']]

        if options.seed_indexes[1] <= instance_data['launchindex'] and instance_data['launchindex'] < options.seed_indexes[2]:
            token = config_data['tokens'][1][instance_data['launchindex'] - options.realtimenodes]

        if options.seed_indexes[2] <= instance_data['launchindex']:
            token = config_data['tokens'][2][instance_data['launchindex'] - options.realtimenodes - options.analyticsnodes]

        p = re.compile('initial_token:.*')
        yaml = p.sub('initial_token: {0}'.format(token), yaml)

    elif conf.get_config('Cassandra', 'partitioner') == 'murmur':
        if conf.get_config('Cassandra', 'vnodes') == 'True' or options.vnodes:
            p = re.compile('# num_tokens:.*')
            yaml = p.sub('num_tokens: 256', yaml)
        else:
            if instance_data['launchindex'] < options.seed_indexes[1]:
                tokens = [((2**64 / options.realtimenodes) * i) - 2**63 for i in range(options.realtimenodes)]
                token = str(tokens[instance_data['launchindex']])

            if options.seed_indexes[1] <= instance_data['launchindex'] and instance_data['launchindex'] < options.seed_indexes[2]:
                tokens = [((2**64 / options.analyticsnodes) * i) - 2**63 for i in range(options.analyticsnodes)]
                token = str(tokens[instance_data['launchindex'] - options.realtimenodes] + 10000)

            if options.seed_indexes[2] <= instance_data['launchindex']:
                tokens = [((2**64 / options.searchnodes) * i) - 2**63 for i in range(options.searchnodes)]
                token = str(tokens[instance_data['launchindex'] - options.realtimenodes - options.analyticsnodes] + 20000)

            p = re.compile('initial_token:.*')
            yaml = p.sub('initial_token: {0}'.format(token), yaml)

    with open(os.path.join(config_data['conf_path'], 'scylla.yaml'), 'w') as f:
        f.write(yaml)

    logger.info('scylla.yaml configured.')
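# A minimal sketch (not part of the original modules) of the murmur token
# arithmetic used above: construct_yaml() splits the full signed 64-bit token
# range evenly, giving node i the token ((2**64 / n) * i) - 2**63. The original
# relies on Python 2 integer division; // is used here so the example also
# holds on Python 3.
def example_equal_split_tokens(node_count):
    return [((2**64 // node_count) * i) - 2**63 for i in range(node_count)]

# example_equal_split_tokens(4) ==
# [-9223372036854775808, -4611686018427387904, 0, 4611686018427387904]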
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from reportmaker import create_app
from conf import get_config
import os

# load config via env
env = os.environ.get('REPORTMAKER_ENV', 'dev')
config = get_config(env)

reportmaker = create_app(**config)