def build_and_run(gdb_man, logger, loop_offset, loop_code, loop_size):
    output_code(loop_offset, loop_code)
    compiler_bin = arm_platform.compiler_bin_for('gcc')
    build_flags = arm_platform.build_flags_for('gcc')
    to_run = [compiler_bin]
    to_run += build_flags.split()
    to_run += ['source.s', '-o', 'binary']
    try:
        os.remove('binary')
    except OSError:
        # We don't care if the file doesn't already exist
        pass
    logger.log_info("builder", "Executing {}".format(" ".join(to_run)))
    p = subprocess.Popen(to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    make_out, make_err = p.communicate()
    if p.returncode != 0:
        logger.log_warn("builder", "Build failed!")
        logger.log_warn(
            "builder",
            "{}\n{}".format(make_out.decode("utf-8"),
                            make_err.decode("utf-8")))
        return
    run_id = logger.add_run(loop_offset, loop_size)
    run_obj = run.Run("loop", 'gcc', arm_platform, [], run_id)
    return gdb_man.read_energy('binary', run_obj)
def localize(where, lang_id):
    """Given a string/list/dict and a key, looks up a localized string using the key."""
    if where is None:
        log_error('localize(): "where" is None, nothing to localize!')
        return '[ERROR]'
    if isinstance(where, str):
        # just one string, nothing to localize, use it as-is
        return where
    elif isinstance(where, list):
        # a list of dicts, merge them
        where = {k: v for p in where for k, v in p.items()}
    if lang_id in where:
        # have a localized string, use it
        return str(where[lang_id])
    if 'en' in where:
        # no localized string available; try English, it's the default
        return str(where['en'])
    # neither the requested language nor the English fallback is
    # available, so we have to use whatever localization exists
    log_warn('localize(): missing localization for "{0}" in "{1}"'.
             format(lang_id, where))
    return str(where[list(where)[0]])
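# A minimal sketch of how localize() resolves its fallback chain, assuming the
# module-level log_error/log_warn helpers are available; the inputs are
# hypothetical examples, not drawn from real catalog data.
def _localize_examples():
    assert localize('hello', 'fi') == 'hello'                     # plain string used as-is
    assert localize({'fi': 'moi', 'en': 'hi'}, 'fi') == 'moi'     # exact language match
    assert localize([{'fi': 'moi'}, {'en': 'hi'}], 'sv') == 'hi'  # list merged, English fallback
    assert localize({'de': 'hallo'}, 'fi') == 'hallo'             # last resort: any entry (logs a warning)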
def score_file_upload(team_id, submitted_answer, real_answer):
    # detect() is assumed to come from chardet (or a compatible library)
    # imported at module level
    with open(submitted_answer, mode='rb') as binary_read_file:
        file_encoding = detect(binary_read_file.read())
    with open(submitted_answer, encoding=file_encoding['encoding']) as read_file:
        submitted_categories = read_file.readlines()
    tidy_submitted_categories = []
    for line in submitted_categories:
        tidy_submitted_categories.append(line.strip().lower())
    with open(real_answer, mode='rb') as binary_read_file:
        answer_file_encoding = detect(binary_read_file.read())
    with open(real_answer, encoding=answer_file_encoding['encoding']) as read_file:
        real_categories = read_file.readlines()
    tidy_real_categories = []
    for line in real_categories:
        tidy_real_categories.append(line.strip().lower())
    if len(tidy_real_categories) > len(tidy_submitted_categories):
        logger.log_warn("not enough lines in submission", team_id)
        return 0.0
    total_count = len(tidy_real_categories)
    correct_count = 0
    for i in range(total_count):
        if tidy_real_categories[i] == tidy_submitted_categories[i]:
            correct_count += 1
    return correct_count / total_count
def score_triple_cat_file_upload(team_id, submitted_answer, real_answer):
    try:
        submitted_categories = [x[1] for x in
                                pd.read_csv(submitted_answer, low_memory=False).iterrows()]
    except Exception:
        logger.log_warn("malformed csv file", team_id)
        return [0.0, 0.0]
    if len(submitted_categories) < len(PHASE_2_ANSWERS):
        logger.log_warn("not enough lines in submission", team_id)
        return [0.0, 0.0]
    score_1 = 0
    n_tot = len(PHASE_2_ANSWERS)
    for i in range(n_tot // 2):
        score_1 += _score_cats(submitted_categories[i], PHASE_2_ANSWERS[i])
    # the trailing constants appear to normalize each half-score against
    # the best achievable value for that half
    score_1 = score_1 / (n_tot // 2) / 0.8732799999997414
    score_2 = 0
    for i in range(n_tot // 2, n_tot):
        score_2 += _score_cats(submitted_categories[i], PHASE_2_ANSWERS[i])
    score_2 = score_2 / (n_tot - n_tot // 2) / 0.8737679999997485
    return [score_1, score_2]
def loop_on_keys(keys):
    # relies on target, cl1, cl2, CRN and allowed from the enclosing scope
    for key in keys:
        if key == 'times':
            target['times'] = []
            times1 = cl1.pop(key)
            times2 = cl2.pop(key)
            if len(times1) != len(times2):
                log_err(
                    f'"{key}" is different for class {CRN}: "{len(times1)}" vs. "{len(times2)}"'
                )
                continue
            for idx in range(len(times1)):
                time1 = times1[idx].copy()
                time2 = times2[idx].copy()
                target['times'].append(
                    merge_dicts(time1, time2, allowed=allowed))
            continue
        val1 = cl1.pop(key, None)
        val2 = cl2.pop(key, None)
        if val1 is not None and val2 is not None:
            if val1 != val2 and key not in allowed:
                log_warn(
                    f'"{key}" is different for class {CRN}: "{val1}" vs. "{val2}"'
                )
            target[key] = val1
        else:
            target[key] = val1 if val1 is not None else val2
def setup(self, repo_dir: str, group_id: str, git_url: str):
    project_dir = update_git(root_dir=repo_dir, group_id=group_id, git_url=git_url)
    # try:
    #     copyfile(
    #         ContestantProjectHandler.CONTESTANT_SETTINGS_PATH,
    #         os.path.join(project_dir, ContestantProjectHandler.CONTESTANT_SETTINGS_NAME)
    #     )
    # except Exception:
    #     logger.log_error("Could not copy django settings for group {}".format(group_id))
    #     raise Exception("Could not copy django settings")
    out, error = run_cmd(cmd="./scripts/remove_extra_files.sh " + project_dir,
                         directory=".")
    # TODO handle logs
    if len(error) != 0:
        logger.log_info("error in removing extras: {}".format(str(error)))
    out, error = run_cmd(cmd="./scripts/build_image.sh " + project_dir,
                         directory=".")
    logger.log_log("out: " + str(out) + " err: " + str(error))
    build_msg = out.decode("utf-8")
    logger.log_info(
        "Project for group {} built successfully with message: {}".format(
            group_id, build_msg))
    try:
        image_id = re.search(r"Successfully built ((\w|\d)+)\n", build_msg).group(1)
    except Exception:
        # re.findall() always returns a list, so test for (non-)emptiness:
        # the requirements file was copied but never installed
        if re.findall(COPY_REQUIREMENTS_REGEX, build_msg) and not re.findall(
                INSTALL_REQUIREMENTS_REGEX, build_msg):
            logger.log_warn(
                "Could not find requirements.txt for group {}".format(group_id))
            raise Exception("Could not find requirements.txt file")
        # if re.findall(INSTALL_REQUIREMENTS_REGEX + SUCCESSFUL_STEP_REGEX + ERROR_REGEX, build_msg) is not None:
        #     logger.log_warn("Could not install requirements for group {}".format(group_id))
        #     raise Exception("Could not install requirements")
        logger.log_warn(
            "Failed to build docker image for group {}.".format(group_id))
        raise Exception(
            "Build error - Build Message Follows\n\n{}".format(build_msg))
    # try:
    #     os.remove(os.path.join(project_dir, ContestantProjectHandler.CONTESTANT_SETTINGS_NAME))
    # except Exception:
    #     logger.log_error("Could not remove django settings for group {}".format(group_id))
    #     raise Exception("Could not remove django settings")
    logger.log_success(
        "Image built for team {} successfully".format(group_id))
    return image_id
def _make_sets_repeat(self):
    # all three drop-set parameters are needed to build the repeat sets
    if not (self._drop_reps and self._drop_rpe and self._fatigue):
        return pd.DataFrame()
    row = analysis.rpe_table.loc[self._drop_reps]
    start_rpe = row[self._drop_rpe]
    desired_end_rpe = start_rpe + (self._fatigue / 100.0)
    if desired_end_rpe > row.max():
        log_warn('Cannot achieve desired fatigue with given starting set')
    # pick the column whose RPE is closest to the desired end RPE
    end_rpe_idx = np.argmin(np.abs(desired_end_rpe - row))
    end_rpe = row.iloc[end_rpe_idx]
    sets_str = '{}x{}x{}@{}'.format(self._drop_weight, self._drop_reps,
                                    self._num_drop_sets, end_rpe)
    sets_tup = analysis.parse_sets(sets_str, self._e1rm)
    return self._db.make_workout(self._lift, sets_tup, self._date)
def run_test(test_function, ip, group_id):
    driver = group_status[group_id]['driver']
    driver.get(ip)
    driver.delete_all_cookies()
    try:
        result, string_output = test_function(ip, group_id, driver)
    except Exception as exception:
        logger.log_warn(
            'test for team "{}" with group_id {} ended with exception'.format(
                team_names[int(group_id)], group_id))
        return False, ('Exception: ' + str(exception)), 'HMM'
    return result, string_output, 'HMM'
def run_test(ip, port, test_id, group_id):
    try:
        logger.log_info('starting django server for group {} on {}:{}'.format(
            group_id, ip, port))
        result, string_output = tests.run_test(config.TEST_FILES_PATH,
                                               config.TEST_MODULE, test_id,
                                               ip, port)
    except Exception as exception:
        logger.log_warn(
            'test for team with group_id {} ended with exception'.format(
                group_id))
        print(exception)
        return False, ('Exception: ' + str(exception)), 'HMM'
    return result, string_output, 'HMM'
def score_interval_number(team_id, submitted_answer, real_answer):
    try:
        submitted_answer = float(submitted_answer.strip())
    except ValueError:
        logger.log_warn("invalid float response", team_id)
        return 0.0
    lower_bound, upper_bound = [float(x.strip())
                                for x in real_answer.strip().split('$')]
    # if lower_bound > upper_bound:
    #     lower_bound, upper_bound = upper_bound, lower_bound
    result = 0.0
    if lower_bound <= submitted_answer <= upper_bound:
        result = 1.0
    return result
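# A quick illustration of the '$'-separated interval format that
# score_interval_number() expects; the team id below is arbitrary.
def _score_interval_examples():
    assert score_interval_number(42, ' 3.5 ', '1.0$5.0') == 1.0  # inside the bounds
    assert score_interval_number(42, '9.9', '1.0$5.0') == 0.0    # outside the bounds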
def transform_class(class_data):
    class_data = deepcopy(class_data)
    course = class_data['course']
    title = class_data['title']
    if title.isupper():
        # Foothill College titles and past De Anza ones are unfortunately all caps
        class_data['title'] = fix_title(title)
    class_data['course'] = clean_course_name_str(course)
    mapping = {
        # 'De Anza, Main Campus': {'DA'},
        # 'De Anza, Off Campus': {'DO', 'DA'}
        # 'Foothill Sunnyvale Center': {'FC', 'FH'},
        # 'Foothill, Main Campus': {'FO', 'FH'},
        # 'Foothill, Off Campus': {'FO', 'FH'},
        # '': {'FO', 'FH'}
        'De Anza, Main Campus': 'DA',
        'De Anza, Off Campus': 'DO',
        'Foothill Sunnyvale Center': 'FC',
        'Foothill, Main Campus': 'FH',
        'Foothill, Off Campus': 'FO'
    }
    for idx, time in enumerate(class_data['times']):
        campus = time.get('campus')
        if campus and campus not in mapping.values():
            if mapping.get(campus) is not None:
                time['campus'] = mapping.get(campus)
            else:
                # fall back to substring matching against the full campus names
                replaced = False
                for full_str in mapping.keys():
                    if full_str in campus:
                        replaced = True
                        time['campus'] = mapping.get(full_str)
                        break
                if not replaced:
                    log_warn(
                        f'Unknown campus string for {class_data["CRN"]} {campus}'
                    )
    return class_data
def score_single_number(team_id, submitted_answer, real_answer):
    logger.log_info("entered score single number", team_id)
    try:
        submitted_answer = float(submitted_answer.strip())
    except ValueError:
        logger.log_warn("invalid float response", team_id)
        return 0.0
    logger.log_info("calculating real answer", team_id)
    logger.log_info("submitted_answer is {}, real answer is {}".format(
        submitted_answer, real_answer), team_id)
    real_answer = float(real_answer)
    result = 0.0
    if submitted_answer == real_answer:
        result = 1.0
    logger.log_info("result evaluated", team_id)
    return result
def score_boolean_file_upload(team_id, submission_path, _):
    try:
        submission = pd.read_csv(submission_path, low_memory=False)
        # submission.columns = ['label']
        submission['label'] = submission['label'].apply(boolean_value_cast)
    except Exception:
        logger.log_warn("malformed csv file", team_id)
        return 0.0
    if len(submission) < len(PHASE_3_ANSWERS):
        logger.log_warn("not enough lines in submission", team_id)
        return 0.0
    score = 0.0
    for i in range(len(PHASE_3_ANSWERS)):
        if submission.iloc[i]['label'] == PHASE_3_ANSWERS[i]['label']:
            score += PHASE_3_ANSWERS[i]['score']
    return score / PHASE_3_TOT_SCORE
def scrape_seats(db_dir=DB_DIR, prefix=PREFIX):
    if not exists(db_dir):
        makedirs(db_dir, exist_ok=True)
    terms = list(CURRENT_TERM_CODES.values())
    log_info(f'Started FHDA seats scraper for terms {", ".join(terms)}')
    for term in terms:
        classes = parse(mine(term))
        log_info(f'Scraped {len(classes)} classes in term {term}')
        db = TinyDB(join(db_dir, f'{prefix}{term}_database.json'))
        new_docs = []
        for clazz in db.table('classes'):
            CRN = clazz['CRN']
            try:
                seat_info = classes.pop(CRN)
            except KeyError:
                seat_info = {
                    'seats': 0,
                    'wait_seats': 0,
                    'wait_cap': 0,
                    'status': 'unknown'
                }
                log_warn(
                    f'{clazz["raw_course"]} is not included in the seat scraper data.',
                    details={'CRN': CRN})
            new_docs.append({**clazz, **seat_info})
        for clazz in classes.values():
            log_warn(
                f'CRN {clazz["CRN"]} from seat scraper data is not in the main data.'
            )
        db.table('classes').truncate()
        db.table('classes').insert_multiple(new_docs)
        db.close()
    return terms
def run(self, image_id: str, port: int):
    container_name = "webelopers_" + str(port) + "_" + str(uuid.uuid1())
    try:
        result = self.client.containers.run(
            image=image_id,
            detach=True,
            auto_remove=True,
            mem_limit=str(MAX_MEMORY_CONTESTANT) + "m",
            mem_reservation=str(MAX_MEMORY_CONTESTANT // 2) + "m",
            name=container_name,
            oom_kill_disable=False,
            ports={"8000": port},
        )
        logger.log_success(
            "Project is running on container {} with id {} for image {}".
            format(container_name, result, image_id))
        return container_name
    except Exception as e:
        logger.log_warn("Could not run container {} with error {}".format(
            container_name, str(e)))
        raise Exception("Could not run docker")
def update_git(root_dir: str, group_id: str, git_url: str, branch_name="master"):
    folder_name = "g_" + group_id
    logger.log_info(
        "updating git for group {} with git url {} initiated".format(
            group_id, git_url))
    group_dir = os.path.join(root_dir, folder_name)
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        logger.log_info("making directory for group")
    if not re.match(
            r"((git|ssh|http(s)?)|(git@[\w.]+))(:(//)?)([\w.@:/\-~]+)(\.git)(/)?",
            git_url):
        logger.log_log("git url for team with id {} is not valid: {}".format(
            group_id, git_url))
        raise Exception("Invalid git url")
    try:
        project_name = re.search(r"/([^/]+)\.git", git_url).group(1)
    except Exception:
        logger.log_warn("Could not find project name in {}".format(git_url))
        raise Exception("Unable to find git url")
    if not os.path.exists(os.path.join(group_dir, project_name)):
        logger.log_info("Cloning into repository {}".format(git_url))
        clone_cmd = "git clone " + git_url
        out, error = _run_git(group_dir, clone_cmd)
        if len(out) != 0:
            logger.log_info("In cloning process for group {}: {}".format(
                group_id, out))
        if len(error) != 0 and re.match(GIT_CLONE_ERROR_REGEX, str(error)):
            logger.log_log(
                "Cloning into repository {} failed with error: {}".format(
                    git_url, str(error)))
            raise Exception("Failed while Cloning the Project")
    else:
        logger.log_info("GIT: Project already exists {}".format(project_name))
    project_dir = os.path.join(group_dir, project_name)
    if not os.path.exists(os.path.join(project_dir, ".git")):
        logger.log_warn(
            "GIT: Project folder {} doesn't have a .git. initiating git".format(
                project_dir))
        init_cmd = "git init"
        _run_git(project_dir, init_cmd)
        # TODO log errors
    else:
        rename_origin_cmd = "git remote rename origin old-origin"
        _run_git(project_dir, rename_origin_cmd)
        # TODO log errors
    add_origin = "git remote add origin " + git_url
    _run_git(project_dir, add_origin)
    # TODO log error
    checkout_cmd = "git checkout " + branch_name
    out, error = _run_git(project_dir, checkout_cmd)
    if len(error) != 0 and re.match(GIT_CHECKOUT_ERROR_REGEX, str(error)):
        logger.log_warn(
            "GIT: Failed to checkout {} in project {} with error: {}".format(
                branch_name, project_name, str(error)))
    pull_cmd = "git pull origin " + branch_name
    out, error = _run_git(project_dir, pull_cmd)
    if len(error) != 0:
        logger.log_log("GIT: MSG while pulling {} with error: {}".format(
            project_name, str(error)))
        # raise Exception("Failed while pulling")
    reset_origin_cmd = "git reset --hard origin/" + branch_name
    out, error = _run_git(project_dir, reset_origin_cmd)
    if len(error) != 0:
        logger.log_warn(
            "GIT: Failed while resetting branch {} in project {} with error: {}"
            .format(branch_name, project_name, str(error)))
        # raise Exception("Failed while resetting branch")
    clean_cmd = "git clean -fd"
    out, error = _run_git(project_dir, clean_cmd)
    if len(error) != 0:
        logger.log_warn(
            "GIT: Failed while cleaning git in project {} with error: {}".
            format(project_name, str(error)))
        # raise Exception("Failed while cleaning")
    logger.log_info(
        "Updated git for project {} successfully".format(project_name))
    return project_dir
def paxos(learner):
    state = 'waiting'
    hnum = 0
    nreceives = 0
    client = None
    request_q = deque()
    current_i = None
    while True:
        if len(request_q) > 0 and state == 'waiting':
            # adds fifo semantics to system
            data, sock, addr = request_q.popleft()
        else:
            data, sock, addr = (yield)
        if 'request' in data:
            if state != 'waiting':
                request_q.append((data, sock, addr))
                log_warn('queue contains %s elements' % len(request_q))
                continue
            # we have been chosen as the proposer
            # we need to check with all acceptors
            log_verbose('coordinator received request, preparing %s' % str(data))
            client = addr
            state = 'preparing'
            nreceives = 0
            current_i = hnum + 1
            prep_msg = json.dumps({
                'protocol': 'paxos',
                'prepare': data['request'],
                'i': hnum + 1
            })
            bcast(sock, prep_msg)
        elif state == 'preparing':
            if 'promise' in data and data['i'] == current_i:
                nreceives += 1
                if nreceives > (len(servers) / 2):
                    log_verbose('got more than n/2 promises, moving on')
                    state = 'accepting'
                    nreceives = 0
                    accept_msg = json.dumps({
                        'protocol': 'paxos',
                        'accept': data['promise'],
                        'i': hnum
                    })
                    bcast(sock, accept_msg)
            elif 'nack' in data and data['i'] != current_i:
                log_warn('caught (and ignored) old nack: %s\tcurrent i: %s' %
                         (data['i'], current_i))
            elif 'nack' in data and data['i'] == current_i:
                log_warn('falling back to waiting state from preparing')
                log_fail('nack id: %s\thnum: %s' % (data['i'], hnum))
                state = 'waiting'
                nreceives = 0
        elif state == 'accepting':
            if 'accepted' in data and data['i'] == current_i:
                nreceives += 1
                if nreceives > (len(servers) / 2):
                    log_verbose('successfully got more than n/2 accepts')
                    success_msg = json.dumps({
                        'protocol': 'paxos',
                        'success': True
                    })
                    sock.sendto(success_msg, client)
                    state = 'waiting'
                    nreceives = 0
                    current_i = None
                    client = None
            elif 'nack' in data and data['i'] != current_i:
                log_warn('caught (and ignored) old nack: %s\tcurrent i: %s' %
                         (data['i'], current_i))
            elif 'nack' in data and data['i'] == current_i:
                log_warn('falling back to waiting state from accepting')
                log_fail('possible inconsistency')
                log_fail('nack id: %s\thnum: %s' % (data['i'], hnum))
                state = 'waiting'
                nreceives = 0
        if 'prepare' in data:
            if data['i'] > hnum:
                hnum = data['i']
                response = json.dumps({
                    'protocol': 'paxos',
                    'promise': data['prepare'],
                    'i': data['i']
                })
            else:
                log_warn('got prepare for number less than hnum')
                response = json.dumps({'protocol': 'paxos', 'nack': data['i']})
            sock.sendto(response, addr)
        elif 'accept' in data:
            if data['i'] == hnum:
                learner.send(data['accept'])
                response = json.dumps({
                    'protocol': 'paxos',
                    'accepted': data['accept'],
                    'i': hnum
                })
            else:
                log_warn('got accept for number other than hnum')
                log_fail('possible inconsistency')
                response = json.dumps({'protocol': 'paxos', 'nack': data['i']})
            sock.sendto(response, addr)
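# A minimal sketch of how the paxos() coroutine might be driven, assuming json
# is imported and the module-level servers/bcast helpers are set up; the
# learner below and handle_datagram() are illustrative stand-ins, not part of
# the original event loop.
def _make_learner():
    def learner():
        while True:
            value = (yield)
            print('learned:', value)
    gen = learner()
    next(gen)  # prime the coroutine so it is paused at (yield)
    return gen

proposer = paxos(_make_learner())
next(proposer)  # prime paxos() up to its first (yield)

def handle_datagram(raw, sock, addr):
    # decode one datagram and hand it to the state machine
    data = json.loads(raw)
    proposer.send((data, sock, addr))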