def try_sending_outqueue():
    global outq, stage, lastping, lastrecv

    now = util.ticks()

    # check for disconnect
    if (now - lastrecv) > timeoutinterval:
        log("no packets from biller in %s seconds, assuming down" %
            ((now - lastrecv) / 100))
        util.exit(1)

    # only send stuff once we're connected
    if stage != s_connected:
        return

    # iterate over a copy: unreliable packets are removed from outq below
    for p in outq[:]:
        if (now - p.lastretry) > reliable_retry:
            raw_send(p.data)
            if p.reliable:
                p.lastretry = now
            else:
                outq.remove(p)

    # check for pings
    if (now - lastping) > pinginterval:
        lastping = now
        queue_pkt('\x01')

def run_hooks_for(trigger):
    from sys import exit
    from os.path import sep
    from subprocess import call
    global _triggers

    if trigger not in _triggers:
        raise ValueError("unknown trigger: '" + str(trigger) + "'")

    hooks = list(set(_hooks[trigger]) - set(_hooks_done[trigger]))
    num_done = 0

    if len(hooks) > 0:
        util.info("running hooks for trigger '" + str(trigger) + "'")

    for fname in hooks:
        rv = call(config.hooks_dir + sep + fname, env=_create_env())
        _hooks_done[trigger].append(fname)
        num_done += 1

        if rv != 0:
            util.error("hook '" + str(fname) + "' exited abnormally")
            util.exit(util.ERR_ABNORMAL_HOOK_EXIT)

    util.info("successfully ran " + str(num_done) + " " +
              util.plural('hook', num_done))

def handle_s2b_connect(line):
    global serverid, groupid, scoreid, gotconnect

    version, swname, zonename, network, password = line.split(':', 4)
    version = int(version)
    if version != 1:
        log("local server specified wrong protocol version!")
        vie_bprot.disconnect()
        util.exit(1)

    if network:
        sendname = network + ' ' + zonename
    else:
        sendname = zonename

    log("local zone %s running on %s" % (sendname, swname))

    args = [serverid, groupid, scoreid, sendname, password]

    def do_send_stuff(args=args):
        log("logging in to remote biller")
        vie_bprot.send_s2b_login(*args)
        send_b2s_connectok('bproxy %s' % util.version)

    # this is a little messy. we check to see if we've contacted the vie
    # server yet. if so, just send the connectok. if not, wait until we
    # do.
    if vie_bprot.stage == vie_bprot.s_connected:
        do_send_stuff()
    else:
        gotconnect = do_send_stuff

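# Sketch of the connect line parsed above (the field values here are made up).
# split(':', 4) caps the split at five fields, so any ':' characters inside
# the password survive intact:
#
#   "1:asss 1.4.4:My Zone:SSC:s3cr3t"
#   -> version=1, swname='asss 1.4.4', zonename='My Zone',
#      network='SSC', password='s3cr3t'
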
def try_sending_outqueue():
    global outq, stage, lastping, lastrecv

    now = util.ticks()

    # check for disconnect
    if (now - lastrecv) > timeoutinterval:
        log("no packets from biller in %s seconds, assuming down" %
            ((now - lastrecv) / 100))
        util.exit(1)

    # only send stuff once we're connected
    if stage != s_connected:
        return

    # iterate over a copy: unreliable packets are removed from outq below
    for p in outq[:]:
        if (now - p.lastretry) > reliable_retry:
            raw_send(p.data)
            if p.reliable:
                p.lastretry = now
            else:
                outq.remove(p)

    # check for pings
    if (now - lastping) > pinginterval:
        lastping = now
        queue_pkt('\x01')

def print_property(self, key):
    if key == 'proxy.public.ip':
        print self.proxy_public_ip()
        return
    else:
        for section in self.sections():
            if self.has_option(section, key):
                print self.get(section, key)
                return
    exit("Property '{0}' was not found".format(key))

def get_ec2_conn(config):
    access_key = config.get('ec2', 'aws_access_key')
    secret_key = config.get('ec2', 'aws_secret_key')
    if access_key == 'access_key' or secret_key == 'secret_key':
        exit('ERROR - You must set AWS access & secret keys in zetten.props')
    region = config.get('ec2', 'region')
    conn = ec2.connect_to_region(region,
                                 aws_access_key_id=access_key,
                                 aws_secret_access_key=secret_key)
    if not conn:
        exit('ERROR - Failed to connect to region ' + region)
    return conn

def init_nodes(self):
    self.node_d = {}
    for (hostname, value) in self.items('nodes'):
        if hostname in self.node_d:
            exit('Hostname {0} is declared twice in nodes'.format(hostname))
        service_list = []
        for service in value.split(','):
            if service in SERVICES:
                service_list.append(service)
            else:
                exit('Unknown service "%s" declared for node %s' % (service, hostname))
        self.node_d[hostname] = service_list

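# A minimal sketch of the [nodes] section parsed above. The hostnames and the
# non-'worker' service names are hypothetical; valid names come from SERVICES:
#
#   [nodes]
#   leader1 = namenode,zookeeper
#   worker1 = worker
#   worker2 = worker
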
def get_ec2_conn(config):
    access_key = config.get('ec2', 'aws_access_key')
    secret_key = config.get('ec2', 'aws_secret_key')
    if access_key == 'access_key' or secret_key == 'secret_key':
        exit('ERROR - You must set AWS access & secret keys in muchos.props')
    region = config.get('ec2', 'region')
    conn = ec2.connect_to_region(region,
                                 aws_access_key_id=access_key,
                                 aws_secret_access_key=secret_key)
    if not conn:
        exit('ERROR - Failed to connect to region ' + region)
    return conn

def init_nodes(self):
    self.node_d = {}
    for (hostname, value) in self.items('nodes'):
        if hostname in self.node_d:
            exit('Hostname {0} is declared twice in nodes'.format(hostname))
        service_list = []
        for service in value.split(','):
            if service in SERVICES:
                service_list.append(service)
            else:
                exit('Unknown service "%s" declared for node %s' % (service, hostname))
        self.node_d[hostname] = service_list

def parse_hosts(self):
    if not os.path.isfile(self.hosts_path):
        exit('ERROR - A hosts file does not exist at %s' % self.hosts_path)

    self.hosts = {}
    with open(self.hosts_path) as f:
        for line in f:
            line = line.strip()
            if line.startswith("#") or not line:
                continue
            args = line.split(' ')
            if len(args) == 2:
                self.hosts[args[0]] = (args[1], None)
            elif len(args) == 3:
                self.hosts[args[0]] = (args[1], args[2])
            else:
                exit('ERROR - Bad line %s in hosts %s' % (line, self.hosts_path))

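# Example hosts file accepted by parse_hosts (format inferred from this code
# and from launch_cluster, which writes "hostname private_ip public_ip"):
#
#   # comments and blank lines are skipped
#   leader1 10.0.0.1 54.12.34.56
#   worker1 10.0.0.2
#
# Two fields mean the node has no public IP (stored as None).
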
def run(self, module):
    from os import sep
    from imp import reload
    import config

    globals = {module.__name__: module, 'config': config}

    with open(config.scripts_dir + sep + self.name, 'r') as f:
        code = f.read()

    try:
        exec(code, globals)
    except:
        from util import exit, ERR_SCRIPT_RUNTIME_ERROR
        exit(ERR_SCRIPT_RUNTIME_ERROR)

    if '_socrates_result' not in globals:
        from util import exit, ERR_SCRIPT_RUNTIME_ERROR
        exit(ERR_SCRIPT_RUNTIME_ERROR)

    return globals['_socrates_result']

def word_to_phone_mlf(model, dict, word_mlf, phone_mlf, mono_list):
    """
    Convert the word-level mlf to a phone level mlf with HLEd
    """
    if not os.path.isfile(word_mlf):
        util.log_write(model.logfh, 'No word MLF file here [%s]' % word_mlf)
        util.exit(model.log)
    if not os.path.isfile(dict):
        util.log_write(model.logfh, 'No dict file here [%s]' % dict)
        util.exit(model.log)

    ## Create mkphones0.led
    led_file = '%s/mkphones0.led' % model.exp
    fh = open(led_file, 'w')
    fh.write('EX\nIS sil sil\n')
    fh.close()

    ## Convert the word level MLF into a phone MLF
    cmd_log = '%s/hhed_word_to_phone.log' % model.exp
    cmd = 'HLEd -A -T 1 -l "*"'
    cmd += ' -d %s' % dict
    cmd += ' -i %s' % phone_mlf
    cmd += ' %s %s > %s' % (led_file, word_mlf, cmd_log)
    os.system(cmd)

    ## Create list of phones (appearing in the phone MLF)
    monophones = set()
    for line in open(phone_mlf):
        phone = line.strip()
        if phone.isalpha():
            monophones.add(phone)
    monophones = list(monophones)
    monophones.sort()
    fh = open(mono_list, 'w')
    for phone in monophones:
        fh.write('%s\n' % phone)
    fh.close()

    return len(monophones)

def word_to_phone_mlf(model, dict, word_mlf, phone_mlf, mono_list):
    """
    Convert the word-level mlf to a phone level mlf with HLEd
    """
    if not os.path.isfile(word_mlf):
        util.log_write(model.logfh, 'No word MLF file here [%s]' % word_mlf)
        util.exit(model.log)
    if not os.path.isfile(dict):
        util.log_write(model.logfh, 'No dict file here [%s]' % dict)
        util.exit(model.log)

    ## Create mkphones0.led
    led_file = '%s/mkphones0.led' % model.exp
    fh = open(led_file, 'w')
    fh.write('EX\nIS sil sil\n')
    fh.close()

    ## Convert the word level MLF into a phone MLF
    cmd_log = '%s/hhed_word_to_phone.log' % model.exp
    cmd = 'HLEd -A -T 1 -l "*"'
    cmd += ' -d %s' % dict
    cmd += ' -i %s' % phone_mlf
    cmd += ' %s %s > %s' % (led_file, word_mlf, cmd_log)
    os.system(cmd)

    ## Create list of phones (appearing in the phone MLF)
    monophones = set()
    for line in open(phone_mlf):
        phone = line.strip()
        if phone.isalpha():
            monophones.add(phone)
    monophones = list(monophones)
    monophones.sort()
    fh = open(mono_list, 'w')
    for phone in monophones:
        fh.write('%s\n' % phone)
    fh.close()

    return len(monophones)

def try_read():
    global inbuf

    try:
        r = sock.recv(1024)
    except:
        # probably ewouldblock
        return

    if r:
        inbuf = inbuf + r
    else:
        log("lost connection to local game server")
        vie_bprot.disconnect()
        util.exit(1)

    lines = inbuf.splitlines(1)
    inbuf = ''
    for l in lines:
        if l.endswith('\n') or l.endswith('\r'):
            process_incoming(l.strip())
        else:
            inbuf = l

def verify_config(self, action):
    proxy = self.get('general', 'proxy_hostname')
    if not proxy:
        exit("ERROR - proxy.hostname must be set in fluo-deploy.props")

    if proxy not in self.node_d:
        exit("ERROR - The proxy (set by property proxy.hostname={0}) cannot be found "
             "in 'nodes' section of fluo-deploy.props".format(proxy))

    if action != 'launch':
        self.proxy_public_ip()

    if action in ['launch', 'setup']:
        self.get_image_id(self.get('ec2', 'default_instance_type'))
        self.get_image_id(self.get('ec2', 'worker_instance_type'))

        for service in SERVICES:
            if service not in ['fluo', 'metrics']:
                if not self.has_service(service):
                    exit("ERROR - Missing '{0}' service from [nodes] section "
                         "of fluo-deploy.props".format(service))

def verify_config(self, action):
    proxy = self.get('general', 'proxy_hostname')
    if not proxy:
        exit("ERROR - proxy_hostname must be set in muchos.props")

    if proxy not in self.node_d:
        exit("ERROR - The proxy (set by property proxy_hostname={0}) cannot be found "
             "in 'nodes' section of muchos.props".format(proxy))

    if action != 'launch':
        self.proxy_public_ip()

    if action in ['launch', 'setup']:
        self.get_image_id(self.get('ec2', 'default_instance_type'))
        self.get_image_id(self.get('ec2', 'worker_instance_type'))

        for service in SERVICES:
            if service not in ['fluo', 'metrics', 'mesosmaster']:
                if not self.has_service(service):
                    exit("ERROR - Missing '{0}' service from [nodes] section "
                         "of muchos.props".format(service))

def proxy_public_ip(self):
    retval = self.get_public_ip(self.get('general', 'proxy_hostname'))
    if not retval:
        exit("ERROR - Leader {0} does not have a public IP".format(
            self.get('general', 'proxy_hostname')))
    return retval

Script to help deploy a Fluo or Accumulo cluster (optionally to AWS EC2)
"""

import os, sys
import shutil
from config import DeployConfig, HOST_VAR_DEFAULTS, PLAY_VAR_DEFAULTS
from util import setup_boto, parse_args, exit
from os.path import isfile, join, isdir
import random
import time
import urllib
import subprocess

MUCHOS = os.environ.get('MUCHOS')
if MUCHOS is None:
    exit('ERROR - The env var MUCHOS must be set!')

setup_boto(join(MUCHOS, "bin/impl/lib"))

import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
from boto.exception import EC2ResponseError


def get_or_make_group(conn, name, vpc_id):
    groups = conn.get_all_security_groups()
    group = [g for g in groups if g.name == name]
    if len(group) > 0:
        return group[0]
    else:
        print "Creating security group " + name

def main():
    deploy_path = os.environ.get('ZETTEN')
    if not deploy_path:
        exit('ERROR - The ZETTEN env variable must be set!')
    if not os.path.isdir(deploy_path):
        exit('ERROR - Directory set by ZETTEN does not exist: ' + deploy_path)

    config_path = join(deploy_path, "conf/zetten.props")
    if not isfile(config_path):
        exit('ERROR - A config file does not exist at ' + config_path)
    hosts_dir = join(deploy_path, "conf/hosts/")

    # parse command line args
    retval = parse_args(hosts_dir)
    if not retval:
        print "Invalid command line arguments. For help, use 'zetten -h'"
        sys.exit(1)
    (opts, action, args) = retval

    hosts_path = join(hosts_dir, opts.cluster)

    config = DeployConfig(deploy_path, config_path, hosts_path, opts.cluster)
    config.verify_config(action)

    if action == 'launch':
        conn = get_ec2_conn(config)
        launch_cluster(conn, config)
    elif action == 'status':
        conn = get_ec2_conn(config)
        nodes = get_cluster(conn, config, ['running'])
        print "Found {0} nodes in {1} cluster".format(len(nodes), config.cluster_name)
        for node in nodes:
            print "  ", node.tags.get('Name', 'UNKNOWN_NAME'), node.id, node.private_ip_address, node.ip_address
    elif action == 'sync':
        sync_cluster(config)
    elif action == 'setup':
        setup_cluster(config)
    elif action == 'config':
        if opts.property == 'all':
            config.print_all()
        else:
            config.print_property(opts.property)
    elif action == 'ssh':
        wait_until_proxy_ready(config)
        fwd = ''
        if config.has_option('general', 'proxy_socks_port'):
            fwd = "-D " + config.get('general', 'proxy_socks_port')
        ssh_command = "ssh -C -A -o 'StrictHostKeyChecking no' {fwd} {usr}@{ldr}".format(
            usr=config.get('general', 'cluster_user'),
            ldr=config.proxy_public_ip(), fwd=fwd)
        print "Logging into proxy using: {0}".format(ssh_command)
        retcode = subprocess.call(ssh_command, shell=True)
        check_code(retcode, ssh_command)
    elif action == 'wipe':
        if not isfile(hosts_path):
            exit("Hosts file does not exist for cluster: " + hosts_path)
        print "Killing all processes and wiping data from {0} cluster".format(config.cluster_name)
        execute_playbook(config, "wipe.yml")
    elif action == 'run':
        app = opts.application
        repo = config.get('apps', app + '_repo')
        branch = config.get('apps', app + '_branch')
        command = config.get('apps', app + '_command')
        run_args = "{0} {1} {2} {3}".format(app, repo, branch, command)
        if opts.app_args:
            run_args = "{0} {1}".format(run_args, opts.app_args)
        basedir = config.get('general', 'cluster_basedir')
        exec_on_proxy_verified(config, "{0}/apps/run.sh {1}".format(basedir, run_args), opts='-t')
    elif action == 'terminate':
        conn = get_ec2_conn(config)
        nodes = get_active_cluster(conn, config)
        if len(nodes) == 0:
            exit("No nodes running in {0} cluster to terminate".format(config.cluster_name))
        print "The following {0} nodes in {1} cluster will be terminated:".format(len(nodes), config.cluster_name)
        for node in nodes:
            print "  ", node.tags.get('Name', 'UNKNOWN_NAME'), node.id, node.private_ip_address, node.ip_address
        response = raw_input("Do you want to continue? (y/n) ")
        if response == "y":
            for node in nodes:
                node.terminate()
            print "Terminated instances"
            if isfile(hosts_path):
                os.remove(hosts_path)
                print "Removed hosts file at ", hosts_path
        else:
            print "Aborted termination"
    else:
        print 'ERROR - Unknown action:', action

def check_code(retcode, command):
    if retcode != 0:
        exit("ERROR - Command failed with return code of {0}: {1}".format(retcode, command))

def prompt(choices, mode='*'):
    if mode not in PROMPT_MODES:
        raise ValueError("mode '{}' is invalid".format(mode))
    if len(choices) > 26:
        raise ValueError("too many choices")

    if mode == '*':
        header = "select zero or more:"
        max, min = float('inf'), 0
    elif mode == '+':
        header = "select one or more:"
        max, min = float('inf'), 1
    elif mode in [1, '1']:
        header = "select one:"
        max, min = 1, 1
    elif mode == '?':
        header = "select zero or one:"
        max, min = 1, 0

    letters = list(map(lambda x: chr(ord('a') + x), range(len(choices))))

    num_selections = 0
    selections = []        # unique indices into choices list

    while num_selections < min or num_selections < max:
        util.print(util.green(header))

        for i in range(len(choices)):
            if i in selections:
                choice = " × "
            else:
                choice = "   "
            choice += str(letters[i]) + '. ' + str(choices[i])
            if i in selections:
                choice = util.yellow(choice)
            util.print(choice)

        try:
            sel = input(util.green("make a selection (or ! to commit): "))
        except KeyboardInterrupt:
            util.exit(util.ERR_INTERRUPTED)

        if sel == '!':
            if num_selections < min:
                util.error("can't stop now; you must make "
                           "{} {}".format(min, util.plural("selection", min)))
                continue
            else:
                break

        try:
            if letters.index(sel) in selections:
                selections.remove(letters.index(sel))
                continue
            selections.append(letters.index(sel))
            num_selections += 1
        except ValueError:
            if sel == '':
                util.print("make a selection (or ! to commit)")
            else:
                util.error("invalid selection: not in list")
            continue

    return selections

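# A minimal usage sketch for prompt() above; the choices and the helper name
# _demo_prompt_once are hypothetical. Mode '1' requires exactly one selection
# and returns a one-element list of indices into choices, which is how grade()
# below consumes it.
def _demo_prompt_once():
    choices = ['hw1.py', 'hw2.py', 'mark the file as missing']
    got = prompt(choices, '1')      # blocks until the grader picks one letter
    return choices[got[0]]
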
def process_pkt(p):
    global curchunk, inq, encstate

    t1 = ord(p[0])
    if t1 == 0:
        t2 = ord(p[1])
        if t2 == 2:
            # key response
            global stage
            stage = s_connected
            # respond
            asss_bprot.send_connected()
        elif t2 == 3:
            # reliable
            pkt = Pkt(p)
            inq.append(pkt)
            ack = struct.pack('<BBI', 0, 4, pkt.seqnum)
            raw_send(ack)
        elif t2 == 4:
            # ack
            (sn, ) = struct.unpack('<I', p[2:6])
            handle_ack(sn)
        elif t2 == 7:
            # disconnect: close our sockets and die
            log("got disconnect from server, exiting")
            sock.close()
            util.exit(0)
        elif t2 == 8:
            # chunk
            curchunk = curchunk + p[2:]
            if len(curchunk) > max_bigpkt:
                log("big packet too long. discarding.")
                curchunk = ''
        elif t2 == 9:
            # chunk tail
            curchunk = curchunk + p[2:]
            process_pkt(curchunk)
            # reset the reassembly buffer
            curchunk = ''
        elif t2 == 10:
            # presize
            log("got presized packet from remote server")
        elif t2 == 14:
            # grouped
            log("got grouped packet from remote server")
        else:
            log("unknown network subtype: %d" % t2)
    elif t1 == B2S_AUTHRESPONSE:
        flag, pid, name, squad, bnr, usage, year, month, day, hour, \
            minute, second, billerid = \
            struct.unpack('< x B i 24s 24s 96s i 6h 4x i 4x', p)
        name = util.snull(name)
        squad = util.snull(squad)
        try:
            handle_b2s_authresponse(flag, pid, name, squad, usage, year,
                                    month, day, hour, minute, second, billerid)
        except:
            import traceback
            traceback.print_exc()
            raise
    elif t1 == B2S_SHUTDOWN:
        # '< B x 4x 4x'
        log("got b2s shutdown message")
    elif t1 == B2S_GENMESSAGE:
        # '< B 4x 2x' + string
        str = util.snull(p[7:])
        handle_b2s_genmessage(str)
    elif t1 == B2S_RECYCLE:
        # '< B x 4x 4x'
        log("got b2s recycle message")
    elif t1 == B2S_KICKUSER:
        # '< B i i'
        log("got b2s kickuser message")
    elif t1 == B2S_SINGLEMSG:
        # '< B i' + string
        (pid, ) = struct.unpack('< x i', p[0:5])
        str = util.snull(p[5:])
        handle_b2s_singlemsg(pid, str)
    elif t1 == B2S_CHAT:
        # '< B i B' + string
        pid, chan = struct.unpack('< x i B', p[0:6])
        str = util.snull(p[6:])
        handle_b2s_chat(pid, chan, str)
    else:
        log("unknown packet type: %d" % t1)

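# Sketch of the 6-byte core-protocol ack that process_pkt builds above, using
# the same '<BBI' layout: type 0x00, subtype 0x04, then the reliable sequence
# number as a little-endian uint32. The helper name is hypothetical.
def _demo_build_ack(seqnum=42):
    import struct
    return struct.pack('<BBI', 0, 4, seqnum)    # '\x00\x04\x2a\x00\x00\x00' for 42
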
def grade(criteria, submissions, filename,
          assume_missing=False, late_check=True):
    found = []
    num_missing = 0
    total = criteria.total_points

    for f in criteria.files:
        crit_dir, crit_name = os.path.split(f.path)

        for s in submissions:
            sub_dir, sub_name = os.path.split(s)
            if crit_name == sub_name:
                found.append(f)
                break
        else:
            util.warning("could not find file '{}'".format(f.path))

            if len(submissions) < 1:
                continue

            if not assume_missing:
                # find the submission directory (it could be the
                # current working directory, but maybe not)
                submission_dir, _ = os.path.split(submissions[0])
                if not submission_dir:
                    submission_dir = os.path.abspath(os.curdir)

                choices = [f for f in os.listdir(submission_dir)
                           if os.path.isfile(os.path.join(submission_dir, f))]
                choices.append("skip grading this submission now")
                choices.append("mark the file as missing")

                util.info("this student may have named the file incorrectly")

                # we prompt the grader for zero or one choice
                got = prompt(choices, '1')
                got = got[0]

                if got == len(choices) - 1:
                    # declare the file missing
                    num_missing += 1
                    continue
                elif got == len(choices) - 2:
                    util.info("skipping this submission")
                    util.exit(util.EXIT_WITH_DEFER)
                else:
                    # get absolute path to the old and new files
                    sname = choices[got]
                    opath = os.path.join(submission_dir, sname)
                    npath = os.path.join(submission_dir, crit_name)

                    try:
                        os.rename(opath, npath)
                    except:
                        util.error("error renaming incorrectly named file")
                        util.print_traceback()
                        util.exit(util.ERR_GRADING_MISC)

                    found.append(f)

    out = io.StringIO()

    try:
        for f in criteria.files:
            out.write(util.heading("{} [{} points]".format(f, f.point_value),
                                   level=2))

            if f not in found:
                total -= f.point_value
                out.write("-{}\tnot submitted\n".format(f.point_value))
                out.write("\n\n")
                continue

            util.info("running tests for " + str(f))

            points_taken = 0
            points_taken += write_results(out, f.run_tests())

            if late_check:
                file_stat = os.stat(f.path)
                mtime = datetime.datetime.fromtimestamp(file_stat.st_mtime)
                mult = criteria.get_late_penalty(mtime)
                late_penalty = f.point_value * mult

                if late_penalty != 0:
                    util.warning("taking {}% late penalty".format(mult * 100))
                    adjusted = min(f.point_value - points_taken, late_penalty)
                    out.write("-{}\tsubmitted late\n".format(adjusted))
                    points_taken += adjusted

            total -= min(f.point_value, points_taken)
            out.write("\n")

        out.write("\nTotal: {}\n".format(total))

    except KeyboardInterrupt:
        out.close()
        util.warning("stopping (received interrupt)")
        util.exit(util.ERR_INTERRUPTED)
    except:
        out.close()
        util.exit(util.ERR_GRADING_MISC)

    with open(filename, 'w') as f:
        out.seek(0)
        f.write(out.read())

    return num_missing

def process_pkt(p):
    global curchunk, inq, encstate

    t1 = ord(p[0])
    if t1 == 0:
        t2 = ord(p[1])
        if t2 == 2:
            # key response
            global stage
            stage = s_connected
            # respond
            asss_bprot.send_connected()
        elif t2 == 3:
            # reliable
            pkt = Pkt(p)
            inq.append(pkt)
            ack = struct.pack('<BBI', 0, 4, pkt.seqnum)
            raw_send(ack)
        elif t2 == 4:
            # ack
            (sn,) = struct.unpack('<I', p[2:6])
            handle_ack(sn)
        elif t2 == 7:
            # disconnect: close our sockets and die
            log("got disconnect from server, exiting")
            sock.close()
            util.exit(0)
        elif t2 == 8:
            # chunk
            curchunk = curchunk + p[2:]
            if len(curchunk) > max_bigpkt:
                log("big packet too long. discarding.")
                curchunk = ''
        elif t2 == 9:
            # chunk tail
            curchunk = curchunk + p[2:]
            process_pkt(curchunk)
            # reset the reassembly buffer
            curchunk = ''
        elif t2 == 10:
            # presize
            log("got presized packet from remote server")
        elif t2 == 14:
            # grouped
            log("got grouped packet from remote server")
        else:
            log("unknown network subtype: %d" % t2)
    elif t1 == B2S_AUTHRESPONSE:
        flag, pid, name, squad, bnr, usage, year, month, day, hour, \
            minute, second, billerid = \
            struct.unpack('< x B i 24s 24s 96s i 6h 4x i 4x', p)
        name = util.snull(name)
        squad = util.snull(squad)
        try:
            handle_b2s_authresponse(flag, pid, name, squad, usage, year,
                                    month, day, hour, minute, second, billerid)
        except:
            import traceback
            traceback.print_exc()
            raise
    elif t1 == B2S_SHUTDOWN:
        # '< B x 4x 4x'
        log("got b2s shutdown message")
    elif t1 == B2S_GENMESSAGE:
        # '< B 4x 2x' + string
        str = util.snull(p[7:])
        handle_b2s_genmessage(str)
    elif t1 == B2S_RECYCLE:
        # '< B x 4x 4x'
        log("got b2s recycle message")
    elif t1 == B2S_KICKUSER:
        # '< B i i'
        log("got b2s kickuser message")
    elif t1 == B2S_SINGLEMSG:
        # '< B i' + string
        (pid,) = struct.unpack('< x i', p[0:5])
        str = util.snull(p[5:])
        handle_b2s_singlemsg(pid, str)
    elif t1 == B2S_CHAT:
        # '< B i B' + string
        pid, chan = struct.unpack('< x i B', p[0:6])
        str = util.snull(p[6:])
        handle_b2s_chat(pid, chan, str)
    else:
        log("unknown packet type: %d" % t1)

def get_args():
    top_opts = {'description': "Grade student work from the command line",
                'epilog': "(try socrates grade -h, "
                          "socrates batch -h, or "
                          "socrates submit -h)"}

    top_parser = argparse.ArgumentParser(**top_opts)
    subparsers = top_parser.add_subparsers(dest='mode')

    # parser for grade mode
    norm_mode_opts = {'description': "Start an interactive grading session"}
    norm_mode_parser = subparsers.add_parser('grade', **norm_mode_opts)

    assignment_opts = {'help': 'assignment name, with group (e.g., "ps2a")'}
    norm_mode_parser.add_argument('assignment_with_group', **assignment_opts)

    input_opts = {'help': "submission file(s) to grade", 'nargs': '*'}
    norm_mode_parser.add_argument('submission_files', **input_opts)

    norm_mode_parser.add_argument('--assume-missing',
                                  help="do not prompt for misnamed files",
                                  action='store_true')
    norm_mode_parser.add_argument('--no-edit',
                                  help="do not ask to edit grade file",
                                  action='store_true')
    norm_mode_parser.add_argument('--overwrite',
                                  help="overwrite the grade file if it exists",
                                  action='store_true')
    norm_mode_parser.add_argument('--no-late',
                                  help="do not check for late submissions",
                                  action='store_true')

    # parser for batch mode
    batch_mode_opts = {'description': "Start grading in batch mode"}
    batch_mode_parser = subparsers.add_parser('batch', **batch_mode_opts)

    assignment_opts = {'help': 'assignment name, with group (e.g., "ps2a")'}
    batch_mode_parser.add_argument('assignment_with_group', **assignment_opts)

    input_opts = {'help': "submission directories, one per student",
                  'nargs': '*'}
    batch_mode_parser.add_argument('submission_dirs', **input_opts)

    batch_mode_parser.add_argument('--assume-missing',
                                   help="do not prompt for misnamed files",
                                   action='store_true')
    batch_mode_parser.add_argument('--no-edit',
                                   help="do not ask to edit grade file",
                                   action='store_true')
    batch_mode_parser.add_argument('--no-late',
                                   help="do not check for late submissions",
                                   action='store_true')

    # parser for submit mode
    submit_mode_opts = {'description': "Submit graded files"}
    submit_mode_parser = subparsers.add_parser('submit', **submit_mode_opts)

    assignment_opts = {'help': 'assignment name, with group (e.g., "ps2a")'}
    submit_mode_parser.add_argument('assignment_with_group', **assignment_opts)

    input_opts = {'help': "submission directories, one per student",
                  'nargs': '*'}
    submit_mode_parser.add_argument('submission_dirs', **input_opts)

    # parser for config mode
    config_mode_opts = {'description': "Print current configuration"}
    subparsers.add_parser('config', **config_mode_opts)

    # parser for edit mode
    edit_mode_opts = {'description': "Safely edit a criteria file"}
    edit_mode_parser = subparsers.add_parser('edit', **edit_mode_opts)

    assignment_opts = {'help': 'assignment name, with group (e.g., "ps2a")'}
    edit_mode_parser.add_argument('assignment_with_group', **assignment_opts)

    args = top_parser.parse_args()

    if not args.mode:
        top_parser.parse_args(['-h'])
        util.exit(util.ERR_ARGS)

    return args

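# Example invocations accepted by the parser built above (assignment and file
# names are hypothetical):
#
#   socrates grade ps2a hw1.py hw2.py --assume-missing
#   socrates batch ps2a submissions/alice submissions/bob --no-late
#   socrates submit ps2a submissions/alice
#   socrates config
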
def launch_cluster(conn, config):
    key_name = config.get('ec2', 'key_name')
    if not key_name:
        exit('ERROR - key.name is not set in muchos.props')

    cur_nodes = get_active_cluster(conn, config)
    if cur_nodes:
        exit('ERROR - There are already instances running for {0} cluster'.format(
            config.cluster_name))

    if isfile(config.hosts_path):
        exit("ERROR - A hosts file already exists at {0}. Please delete before "
             "running launch again".format(config.hosts_path))

    print "Launching {0} cluster".format(config.cluster_name)

    vpc_id = None
    if config.has_option('ec2', 'vpc_id'):
        vpc_id = config.get('ec2', 'vpc_id')

    subnet_id = None
    if config.has_option('ec2', 'subnet_id'):
        subnet_id = config.get('ec2', 'subnet_id')

    security_group = get_or_make_group(conn, config.cluster_name + "-group", vpc_id)

    if security_group.rules == []:
        # Group was just now created
        if vpc_id is None:
            security_group.authorize(src_group=security_group)
        else:
            security_group.authorize(ip_protocol='icmp', from_port=-1,
                                     to_port=-1, src_group=security_group)
            security_group.authorize(ip_protocol='tcp', from_port=0,
                                     to_port=65535, src_group=security_group)
            security_group.authorize(ip_protocol='udp', from_port=0,
                                     to_port=65535, src_group=security_group)
        security_group.authorize('tcp', 22, 22, '0.0.0.0/0')

    instance_d = {}
    for (hostname, services) in config.nodes().items():
        if 'worker' in services:
            instance_type = config.get('ec2', 'worker_instance_type')
            num_ephemeral = config.worker_num_ephemeral()
        else:
            instance_type = config.get('ec2', 'default_instance_type')
            num_ephemeral = config.default_num_ephemeral()

        if config.has_option('ec2', 'aws_ami'):
            host_ami = config.get('ec2', 'aws_ami')
        else:
            host_ami = config.get_image_id(instance_type)
        if not host_ami:
            exit('ERROR - Image not found for instance type: ' + instance_type)

        bdm = BlockDeviceMapping()
        bdm['/dev/sda1'] = BlockDeviceType(delete_on_termination=True)
        for i in range(0, num_ephemeral):
            bdt = BlockDeviceType()
            bdt.ephemeral_name = config.ephemeral_root + str(i)
            bdm[config.device_root + chr(ord('b') + i)] = bdt

        try:
            resv = conn.run_instances(key_name=key_name,
                                      image_id=host_ami,
                                      security_group_ids=[security_group.id],
                                      instance_type=instance_type,
                                      subnet_id=subnet_id,
                                      min_count=1,
                                      max_count=1,
                                      block_device_map=bdm)
        except EC2ResponseError as e:
            ami_help = """PLEASE NOTE - If you have accepted the software terms for CentOS 7 and still
get an error, this could be due to CentOS releasing new images of CentOS 7.
When this occurs, the old images are no longer available to new users. If you
think this is the case, go to the CentOS 7 product page on AWS Marketplace at
the URL below to find the latest AMI:

https://aws.amazon.com/marketplace/ordering?productId=b7ee8a69-ee97-4a49-9e68-afaee216db2e

On the product page, click 'Manual Launch' to find the latest AMI ID for your
EC2 region. This should be used to set the 'aws_ami' property in your
muchos.props which will override the default AMI IDs used by Muchos. After
setting the 'aws_ami' property, run the launch command again.

Also, let us know that this has occurred by creating an issue on the Muchos
GitHub page and we'll update the default AMIs used by Muchos to be the latest
CentOS images."""
            exit("ERROR - Failed to launch EC2 instance due to exception below:\n\n{0}\n\n{1}".format(e, ami_help))

        if len(resv.instances) != 1:
            exit('ERROR - Failed to start {0} node'.format(hostname))

        instance = resv.instances[0]
        instance_d[hostname] = instance.id
        print 'Launching {0} node using {1}'.format(hostname, host_ami)

    while True:
        time.sleep(5)

        nodes = get_cluster(conn, config, ['running'])
        num_actual = len(nodes)
        num_expected = len(config.nodes())

        if num_actual == num_expected:
            # Tag instances and create hosts file
            with open(config.hosts_path, 'w') as hosts_file:
                for (hostname, services) in config.nodes().items():
                    instance = get_instance(nodes, instance_d[hostname])
                    instance.add_tag(key='Name',
                                     value='{cn}-{id}'.format(cn=config.cluster_name, id=hostname))
                    for tkey, tval in config.instance_tags().iteritems():
                        instance.add_tag(key=tkey, value=tval)
                    public_ip = ''
                    if instance.ip_address:
                        public_ip = instance.ip_address
                    private_ip = instance.private_ip_address
                    print >> hosts_file, hostname, private_ip, public_ip
            print "All {0} nodes have started. Created hosts file at {1}".format(
                num_actual, config.hosts_path)
            break
        else:
            print "{0} of {1} nodes have started. Waiting another 5 sec..".format(
                num_actual, num_expected)

def main():
    deploy_path = os.environ.get('MUCHOS')
    if not deploy_path:
        exit('ERROR - The MUCHOS env variable must be set!')
    if not os.path.isdir(deploy_path):
        exit('ERROR - Directory set by MUCHOS does not exist: ' + deploy_path)

    config_path = join(deploy_path, "conf/muchos.props")
    if not isfile(config_path):
        exit('ERROR - A config file does not exist at ' + config_path)
    hosts_dir = join(deploy_path, "conf/hosts/")

    # parse command line args
    retval = parse_args(hosts_dir)
    if not retval:
        print "Invalid command line arguments. For help, use 'muchos -h'"
        sys.exit(1)
    (opts, action, args) = retval

    hosts_path = join(hosts_dir, opts.cluster)

    config = DeployConfig(deploy_path, config_path, hosts_path, opts.cluster)
    config.verify_config(action)

    if action == 'launch':
        conn = get_ec2_conn(config)
        launch_cluster(conn, config)
    elif action == 'status':
        conn = get_ec2_conn(config)
        nodes = get_cluster(conn, config, ['running'])
        print "Found {0} nodes in {1} cluster".format(len(nodes), config.cluster_name)
        for node in nodes:
            print "  ", node.tags.get('Name', 'UNKNOWN_NAME'), node.id, node.private_ip_address, node.ip_address
    elif action == 'sync':
        sync_cluster(config)
    elif action == 'setup':
        setup_cluster(config)
    elif action == 'config':
        if opts.property == 'all':
            config.print_all()
        else:
            config.print_property(opts.property)
    elif action == 'ssh':
        wait_until_proxy_ready(config)
        fwd = ''
        if config.has_option('general', 'proxy_socks_port'):
            fwd = "-D " + config.get('general', 'proxy_socks_port')
        ssh_command = "ssh -C -A -o 'StrictHostKeyChecking no' {fwd} {usr}@{ldr}".format(
            usr=config.get('general', 'cluster_user'),
            ldr=config.proxy_public_ip(), fwd=fwd)
        print "Logging into proxy using: {0}".format(ssh_command)
        retcode = subprocess.call(ssh_command, shell=True)
        check_code(retcode, ssh_command)
    elif action in ('wipe', 'kill'):
        if not isfile(hosts_path):
            exit("Hosts file does not exist for cluster: " + hosts_path)
        if action == 'wipe':
            print "Killing all processes started by Muchos and wiping Muchos data from {0} cluster".format(
                config.cluster_name)
        elif action == 'kill':
            print "Killing all processes started by Muchos on {0} cluster".format(
                config.cluster_name)
        execute_playbook(config, action + ".yml")
    elif action == 'terminate':
        conn = get_ec2_conn(config)
        nodes = get_active_cluster(conn, config)
        if len(nodes) == 0:
            exit("No nodes running in {0} cluster to terminate".format(config.cluster_name))
        print "The following {0} nodes in {1} cluster will be terminated:".format(
            len(nodes), config.cluster_name)
        for node in nodes:
            print "  ", node.tags.get('Name', 'UNKNOWN_NAME'), node.id, node.private_ip_address, node.ip_address
        response = raw_input("Do you want to continue? (y/n) ")
        if response == "y":
            for node in nodes:
                node.terminate()
            print "Terminated instances"
            if isfile(hosts_path):
                os.remove(hosts_path)
                print "Removed hosts file at ", hosts_path
        else:
            print "Aborted termination"
    else:
        print 'ERROR - Unknown action:', action

def proxy_public_ip(self):
    retval = self.get_public_ip(self.get('general', 'proxy_hostname'))
    if not retval:
        exit("ERROR - Leader {0} does not have a public IP".format(
            self.get('general', 'proxy_hostname')))
    return retval

Script to help deploy Fluo cluster (optionally to AWS EC2)
"""

import os, sys
import shutil
from config import DeployConfig
from util import setup_boto, parse_args, exit
from os.path import isfile, join
import random
import time
import urllib
import subprocess

FLUO_DEPLOY = os.environ.get('FLUO_DEPLOY')
if FLUO_DEPLOY is None:
    exit('ERROR - The env var FLUO_DEPLOY must be set!')

setup_boto(join(FLUO_DEPLOY, "bin/impl/lib"))

import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2


def get_or_make_group(conn, name, vpc_id):
    groups = conn.get_all_security_groups()
    group = [g for g in groups if g.name == name]
    if len(group) > 0:
        return group[0]
    else:
        print "Creating security group " + name
        return conn.create_security_group(name, "Security group created by fluo-deploy script", vpc_id)

Script to help deploy a Fluo or Accumulo cluster (optionally to AWS EC2)
"""

import os, sys
import shutil
from config import DeployConfig
from util import setup_boto, parse_args, exit
from os.path import isfile, join
import random
import time
import urllib
import subprocess

ZETTEN = os.environ.get('ZETTEN')
if ZETTEN is None:
    exit('ERROR - The env var ZETTEN must be set!')

setup_boto(join(ZETTEN, "bin/impl/lib"))

import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
from boto.exception import EC2ResponseError


def get_or_make_group(conn, name, vpc_id):
    groups = conn.get_all_security_groups()
    group = [g for g in groups if g.name == name]
    if len(group) > 0:
        return group[0]
    else:
        print "Creating security group " + name
        return conn.create_security_group(name, "Security group created by zetten script", vpc_id)

def launch_cluster(conn, config):
    key_name = config.get('ec2', 'key_name')
    if not key_name:
        exit('ERROR - key.name is not set in fluo-deploy.props')

    cur_nodes = get_active_cluster(conn, config)
    if cur_nodes:
        exit('ERROR - There are already instances running for {0} cluster'.format(
            config.cluster_name))

    if isfile(config.hosts_path):
        exit("ERROR - A hosts file already exists at {0}. Please delete before "
             "running launch again".format(config.hosts_path))

    print "Launching {0} cluster".format(config.cluster_name)

    vpc_id = None
    if config.has_option('ec2', 'vpc_id'):
        vpc_id = config.get('ec2', 'vpc_id')

    subnet_id = None
    if config.has_option('ec2', 'subnet_id'):
        subnet_id = config.get('ec2', 'subnet_id')

    security_group = get_or_make_group(conn, config.cluster_name + "-group", vpc_id)

    if security_group.rules == []:
        # Group was just now created
        if vpc_id is None:
            security_group.authorize(src_group=security_group)
        else:
            security_group.authorize(ip_protocol='icmp', from_port=-1,
                                     to_port=-1, src_group=security_group)
            security_group.authorize(ip_protocol='tcp', from_port=0,
                                     to_port=65535, src_group=security_group)
            security_group.authorize(ip_protocol='udp', from_port=0,
                                     to_port=65535, src_group=security_group)
        security_group.authorize('tcp', 22, 22, '0.0.0.0/0')

    instance_d = {}
    for (hostname, services) in config.nodes().items():
        if 'worker' in services:
            instance_type = config.get('ec2', 'worker_instance_type')
            num_ephemeral = config.worker_num_ephemeral()
        else:
            instance_type = config.get('ec2', 'default_instance_type')
            num_ephemeral = config.default_num_ephemeral()

        host_ami = config.get_image_id(instance_type)
        if not host_ami:
            exit('ERROR - Image not found for instance type: ' + instance_type)

        bdm = BlockDeviceMapping()
        bdm['/dev/sda1'] = BlockDeviceType(delete_on_termination=True)
        for i in range(0, num_ephemeral):
            bdt = BlockDeviceType()
            bdt.ephemeral_name = config.ephemeral_root + str(i)
            bdm[config.device_root + chr(ord('b') + i)] = bdt

        resv = conn.run_instances(key_name=key_name,
                                  image_id=host_ami,
                                  security_group_ids=[security_group.id],
                                  instance_type=instance_type,
                                  subnet_id=subnet_id,
                                  min_count=1,
                                  max_count=1,
                                  block_device_map=bdm)

        if len(resv.instances) != 1:
            exit('ERROR - Failed to start {0} node'.format(hostname))

        instance = resv.instances[0]
        instance_d[hostname] = instance.id
        print 'Launching {0} node'.format(hostname)

    while True:
        time.sleep(5)

        nodes = get_cluster(conn, config, ['running'])
        num_actual = len(nodes)
        num_expected = len(config.nodes())

        if num_actual == num_expected:
            # Tag instances and create hosts file
            with open(config.hosts_path, 'w') as hosts_file:
                for (hostname, services) in config.nodes().items():
                    instance = get_instance(nodes, instance_d[hostname])
                    instance.add_tag(key='Name',
                                     value='{cn}-{id}'.format(cn=config.cluster_name, id=hostname))
                    for tkey, tval in config.instance_tags().iteritems():
                        instance.add_tag(key=tkey, value=tval)
                    public_ip = ''
                    if instance.ip_address:
                        public_ip = instance.ip_address
                    private_ip = instance.private_ip_address
                    print >> hosts_file, hostname, private_ip, public_ip
            print "All {0} nodes have started. Created hosts file at {1}".format(
                num_actual, config.hosts_path)
            break
        else:
            print "{0} of {1} nodes have started. Waiting another 5 sec..".format(
                num_actual, num_expected)

def get_image_id(self, instance_type):
    if get_arch(instance_type) == 'pvm':
        exit("ERROR - Configuration contains instance type '{0}' that uses pvm "
             "architecture. Only hvm architecture is supported!".format(instance_type))
    return get_ami(instance_type, self.get('ec2', 'region'))

def get_image_id(self, instance_type):
    if get_arch(instance_type) == 'pvm':
        exit("ERROR - Configuration contains instance type '{0}' that uses pvm "
             "architecture. Only hvm architecture is supported!".format(instance_type))
    return get_ami(instance_type, self.get('ec2', 'region'))

def launch_cluster(conn, config):
    key_name = config.get('ec2', 'key_name')
    if not key_name:
        exit('ERROR - key.name is not set in zetten.props')

    cur_nodes = get_active_cluster(conn, config)
    if cur_nodes:
        exit('ERROR - There are already instances running for {0} cluster'.format(
            config.cluster_name))

    if isfile(config.hosts_path):
        exit("ERROR - A hosts file already exists at {0}. Please delete before "
             "running launch again".format(config.hosts_path))

    print "Launching {0} cluster".format(config.cluster_name)

    vpc_id = None
    if config.has_option('ec2', 'vpc_id'):
        vpc_id = config.get('ec2', 'vpc_id')

    subnet_id = None
    if config.has_option('ec2', 'subnet_id'):
        subnet_id = config.get('ec2', 'subnet_id')

    security_group = get_or_make_group(conn, config.cluster_name + "-group", vpc_id)

    if security_group.rules == []:
        # Group was just now created
        if vpc_id is None:
            security_group.authorize(src_group=security_group)
        else:
            security_group.authorize(ip_protocol='icmp', from_port=-1,
                                     to_port=-1, src_group=security_group)
            security_group.authorize(ip_protocol='tcp', from_port=0,
                                     to_port=65535, src_group=security_group)
            security_group.authorize(ip_protocol='udp', from_port=0,
                                     to_port=65535, src_group=security_group)
        security_group.authorize('tcp', 22, 22, '0.0.0.0/0')

    instance_d = {}
    for (hostname, services) in config.nodes().items():
        if 'worker' in services:
            instance_type = config.get('ec2', 'worker_instance_type')
            num_ephemeral = config.worker_num_ephemeral()
        else:
            instance_type = config.get('ec2', 'default_instance_type')
            num_ephemeral = config.default_num_ephemeral()

        if config.has_option('ec2', 'aws_ami'):
            host_ami = config.get('ec2', 'aws_ami')
        else:
            host_ami = config.get_image_id(instance_type)
        if not host_ami:
            exit('ERROR - Image not found for instance type: ' + instance_type)

        bdm = BlockDeviceMapping()
        bdm['/dev/sda1'] = BlockDeviceType(delete_on_termination=True)
        for i in range(0, num_ephemeral):
            bdt = BlockDeviceType()
            bdt.ephemeral_name = config.ephemeral_root + str(i)
            bdm[config.device_root + chr(ord('b') + i)] = bdt

        try:
            resv = conn.run_instances(key_name=key_name,
                                      image_id=host_ami,
                                      security_group_ids=[security_group.id],
                                      instance_type=instance_type,
                                      subnet_id=subnet_id,
                                      min_count=1,
                                      max_count=1,
                                      block_device_map=bdm)
        except EC2ResponseError as e:
            ami_help = """PLEASE NOTE - If you have accepted the software terms for CentOS 7 and still
get an error, this could be due to CentOS releasing new images of CentOS 7.
When this occurs, the old images are no longer available to new users. If you
think this is the case, go to the CentOS 7 product page on AWS Marketplace at
the URL below to find the latest AMI:

https://aws.amazon.com/marketplace/ordering?productId=b7ee8a69-ee97-4a49-9e68-afaee216db2e

On the product page, click 'Manual Launch' to find the latest AMI ID for your
EC2 region. This should be used to set the 'aws_ami' property in your
zetten.props which will override the default AMI IDs used by Zetten. After
setting the 'aws_ami' property, run the launch command again.

Also, let us know that this has occurred by creating an issue on the Zetten
GitHub page and we'll update the default AMIs used by Zetten to be the latest
CentOS images."""
            exit("ERROR - Failed to launch EC2 instance due to exception below:\n\n{0}\n\n{1}".format(e, ami_help))

        if len(resv.instances) != 1:
            exit('ERROR - Failed to start {0} node'.format(hostname))

        instance = resv.instances[0]
        instance_d[hostname] = instance.id
        print 'Launching {0} node using {1}'.format(hostname, host_ami)

    while True:
        time.sleep(5)

        nodes = get_cluster(conn, config, ['running'])
        num_actual = len(nodes)
        num_expected = len(config.nodes())

        if num_actual == num_expected:
            # Tag instances and create hosts file
            with open(config.hosts_path, 'w') as hosts_file:
                for (hostname, services) in config.nodes().items():
                    instance = get_instance(nodes, instance_d[hostname])
                    instance.add_tag(key='Name',
                                     value='{cn}-{id}'.format(cn=config.cluster_name, id=hostname))
                    for tkey, tval in config.instance_tags().iteritems():
                        instance.add_tag(key=tkey, value=tval)
                    public_ip = ''
                    if instance.ip_address:
                        public_ip = instance.ip_address
                    private_ip = instance.private_ip_address
                    print >> hosts_file, hostname, private_ip, public_ip
            print "All {0} nodes have started. Created hosts file at {1}".format(
                num_actual, config.hosts_path)
            break
        else:
            print "{0} of {1} nodes have started. Waiting another 5 sec..".format(
                num_actual, num_expected)

if _parser.has_option('socrates', 'grace_period'):
    _grace_str = _parser.get('socrates', 'grace_period')
    grace_period = _td(seconds=int(_grace_str))
else:
    grace_period = _td(seconds=0)

_f = False
if not os.path.isdir(hooks_dir):
    _f = True
    util.error("hooks directory does not exist or cannot be accessed")
if not os.path.isdir(scripts_dir):
    _f = True
    util.error("scripts directory does not exist or cannot be accessed")
if not os.path.isdir(static_dir):
    _f = True
    util.error("static directory does not exist or cannot be accessed")
if not os.path.isdir(dropbox_dir):
    _f = True
    util.error("dropbox directory does not exist or cannot be accessed")
if not os.path.isdir(criteria_dir):
    _f = True
    util.error("criteria directory does not exist or cannot be accessed")

if _f:
    util.exit(util.ERR_BAD_CONFIG)

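# A minimal sketch of the config section read above (only grace_period is
# grounded in this code; it is an integer number of seconds and defaults to 0
# when the option is absent):
#
#   [socrates]
#   grace_period = 300
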
    MINECRAFT_DIR = os.path.join(os.getenv('APPDATA'), '.minecraft')
elif sys.platform == 'darwin':
    MINECRAFT_DIR = os.path.join(os.path.expanduser("~"), 'Library',
                                 'Application Support', 'minecraft')
else:
    MINECRAFT_DIR = os.path.join(os.path.expanduser("~"), '.minecraft')

BASE_DIR = os.getcwd()
VERSIONS_DIR = os.path.join(MINECRAFT_DIR, 'versions')
MOD_DIR = os.path.join(MINECRAFT_DIR, 'mods')
RESOURCEPACK_DIR = os.path.join(MINECRAFT_DIR, 'resourcepacks')
SHADERPACK_DIR = os.path.join(MINECRAFT_DIR, 'shaderpacks')

if __name__ == '__main__':
    if util.DATA['version'] != VERSION:
        print('Your version of Launchcraft ({}) does not match the minimum version of Launchcraft ({}). Please update.'.format(VERSION, util.DATA['version']))
        util.exit()

    print('This script will ask you yes or no questions.')
    print('Any answers in square brackets (e.g. [1.7.2]), or that are capitalized (e.g. [Y/n]), are the default answers and will be selected when you press enter.')
    util.print_separator()

    version = raw_input('Which version of Minecraft would you like to use? [1.7.2]: ').lower()
    if version == '':
        version = '1.7.2'
    if version not in util.DATA['versions']:
        print("Invalid version selected.")
        util.exit()
    util.MODS = util.DATA['versions'][version]

def check_code(retcode, command):
    if retcode != 0:
        exit("ERROR - Command failed with return code of {0}: {1}".format(
            retcode, command))
