def state_helper(def_state: dict, path: dict, log) -> tuple:
    # Loads the saved state file (falling back to def_state), runs any pending migrations via
    # _Merge, and returns (state, ini_version_increased, state_needs_saving).
    # os, dict_from_file, logger and _Merge are module-level dependencies of this snippet.
    no_ini = not os.path.isfile(path['settings'])
    no_state = not os.path.isfile(path['state'])
    if no_state and no_ini:
        return def_state, True, True
    ini_version, merge, state_save = 0, 0, True
    if not no_state:
        try:
            state = dict_from_file(path['state'])
            ini_version = state['system']['ini_version']
            merge = state['system']['merge']
        except RuntimeError as e:
            log('Broken {}, reset'.format(path['state']), logger.WARN)
            state = def_state
        else:
            state['system'] = def_state['system']
    else:
        state = def_state
    m = _Merge(merge, state, path, log)
    state_save = ini_version != m.state['system']['ini_version']
    return m.state, m.state['system']['ini_version'] > ini_version, state_save or m.state_save
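# A minimal, hedged usage sketch for state_helper: the default state layout, the path mapping
# and the log stand-in below are invented for illustration, not the project's real config.
def _example_log(msg, lvl=None):  # hypothetical stand-in for the project's logger callable
    print(msg)


_EXAMPLE_DEF_STATE = {'system': {'ini_version': 1, 'merge': 1}}
_EXAMPLE_PATHS = {'settings': 'settings.ini', 'state': 'state.json', 'data': '.'}
# If neither file exists yet, state_helper simply returns the defaults:
# state, version_bumped, need_save = state_helper(_EXAMPLE_DEF_STATE, _EXAMPLE_PATHS, _example_log)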
def build_student_dict(student_data, **kwargs):
    # Returns a dict of student objects, complete with course work.
    student_dict = {}
    used_ids = set()
    crs_type_look_up = utils.dict_from_file(
        "/Users/thomasolson/Documents/workspace/advising_revamp/course_type_lookup.csv",
        3, 4, "\n", ",", True)
    for data in student_data:
        data = sf_coder.translate_sfsu_data(data)
        student_id = data[kwargs["student_id"]]
        i = 0
        if len(kwargs) < 3:  # Fewer than 3 kwargs means no special type.
            course = Course(data[kwargs["class_int"]], int(data[kwargs["grade_int"]]) * 10,
                            data[kwargs["year_int"]], 0, 0, 0)
        else:
            name_check = data[kwargs["crs_abbr"]] + str(data[kwargs["crs_num"]])
            # if "CSC" not in name_check:
            # if "CSC" not in name_check and "PHYS" not in name_check and "MATH" not in name_check:
            #     continue
            course_id = student_id + str(i)
            while course_id in used_ids:
                i += 1
                course_id = student_id + str(i)
            used_ids.add(course_id)
            course = Course(data[kwargs["crs_abbr"]] + str(data[kwargs["crs_num"]]),
                            data[kwargs["grade_str"]], int(data[kwargs["year_int"]]),
                            int(data[kwargs["age"]]), int(data[kwargs["standing"]]),
                            course_id, student_id,
                            float(data[kwargs["term_gpa"]]), float(data[kwargs["sfsu_gpa"]]),
                            crs_type_look_up[data[kwargs["crs_abbr"]] + str(data[kwargs["crs_num"]])],
                            data[kwargs["grad_flag"]], float(data[kwargs["term_units"]]),
                            float(data[kwargs["sfsu_units"]]), data[kwargs["spring_19_flag"]],
                            data[kwargs["crs_college_long"]], data[kwargs["crs_dept_long"]],
                            float(data[kwargs["total_units"]]))
        add_to_student_obj(student_id, course, student_dict, data, kwargs)
    return student_dict
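# Hedged illustration of how build_student_dict might be called: kwargs maps field names to
# column indices in each raw record. The index values below are invented for this example; the
# real column layout comes from whatever SFSU export this project consumes.
_EXAMPLE_COLUMNS = {
    "student_id": 0, "crs_abbr": 1, "crs_num": 2, "grade_str": 3, "year_int": 4,
    "age": 5, "standing": 6, "term_gpa": 7, "sfsu_gpa": 8, "grad_flag": 9,
    "term_units": 10, "sfsu_units": 11, "spring_19_flag": 12,
    "crs_college_long": 13, "crs_dept_long": 14, "total_units": 15,
}
# students = build_student_dict(raw_rows, **_EXAMPLE_COLUMNS)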
def merge_1(self):
    for key in ('backup', 'update'):
        file_path = os.path.join(self.path['data'], key + '.json')
        if os.path.isfile(file_path):
            data = dict_from_file(file_path)
            os.remove(file_path)
            if data and isinstance(data, dict):
                if key == 'update':
                    key = 'updater'
                self.state[key] = data
def load_dict(self, name: str, format_='json') -> dict or None:
    file_path = os.path.join(self.path['data'], name + DATA_FORMATS.get(format_, '.json'))
    if not os.path.isfile(file_path):
        self.log(F('File not found (this is normal): {}', file_path))
        return None
    try:
        return utils.dict_from_file(file_path)
    except RuntimeError as e:
        self.log(F('Error loading {}: {}', file_path, str(e)), logger.ERROR)
        return None
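# Hedged usage sketch for load_dict: it is an instance method, so the owning object must supply
# self.path['data'] and self.log. DATA_FORMATS is assumed to map a format name to a file suffix
# (the '.json' default in .get() suggests entries like {'json': '.json'}); the name 'backup'
# below is only an illustrative argument.
# data = self.load_dict('backup')  # -> dict from <data>/backup.json, or None if missing/broken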
def score_series_set(path, outpath, add_412, add_211, class_type):
    # Score maps are built from sequence analysis; examples of their format are included in git.
    if class_type.lower() == "transfer":
        seq_score_map = utils.dict_from_file(
            "/Users/thomasolson/Documents/workspace/advising_revamp/group analysis runs/subset_transfer_sequence_score_map_25.csv",
            0, 1, "\n", ",", True)
        equiv_score_map = utils.dict_from_file(
            "/Users/thomasolson/Documents/workspace/advising_revamp/group analysis runs/subset_transfer_concurrent_score_map_25.csv",
            0, 1, "\n", ",", True)
    elif class_type.lower() == "49_set":  # This was some testing work I did.
        seq_score_map = utils.dict_from_file(
            "/Users/thomasolson/Documents/workspace/advising_revamp/group analysis runs/49_cs_sequence_score_map_25.csv",
            0, 1, "\n", ",", True)
        equiv_score_map = utils.dict_from_file(
            "/Users/thomasolson/Documents/workspace/advising_revamp/group analysis runs/49_cs_concurrent_score_map_25.csv",
            0, 1, "\n", ",", True)
    else:
        seq_score_map = utils.dict_from_file(
            "/Users/thomasolson/Documents/workspace/advising_revamp/group analysis runs/combo_score_seq_mod_bonus.csv",
            0, 1, "\n", ",", True)
        equiv_score_map = utils.dict_from_file(
            "/Users/thomasolson/Documents/workspace/advising_revamp/group analysis runs/combo_score_equiv_mod_bonus.csv",
            0, 1, "\n", ",", True)
    i = 0
    top_100 = []
    with open(path, "r") as x:
        data = x.readline()
        while data:
            if i % 10000 == 0:
                print(i)
            i += 1
            # if "10_" in data or "9_" in data:
            #     data = x.readline()
            #     continue
            line = data.strip().replace(" ", "").replace("[", "").replace("]", "").replace("'", "").split(",")
            if add_412 or add_211:
                for sem_x in range(0, len(line)):
                    if "CSC340" in line[sem_x] and add_412:
                        sem = line[sem_x].split("_")[0]
                        line.insert(sem_x + 1, sem + "_CSC412")
                        break
                    if "CSC210" in line[sem_x] and add_211:
                        sem = line[sem_x].split("_")[0]
                        line.insert(sem_x + 1, sem + "_CSC211")
                    # if "PHYS220" in line[sem_x]:  # Typically unneeded due to presence of PHYS230/222 scores that capture the same info.
                    #     sem = line[sem_x].split("_")[0]
                    #     line.insert(sem_x + 1, sem + "_PHYS222")
                    # if "PHYS230" in line[sem_x]:
                    #     sem = line[sem_x].split("_")[0]
                    #     line.insert(sem_x + 1, sem + "_PHYS232")
            score_line = []
            for crs in line:
                if crs.startswith("0"):
                    continue
                score_line.append(crs)
            sem_dict = build_seq_sem_dict(score_line)
            score = score_seq(sem_dict, equiv_score_map, seq_score_map)
            top_100 = update_top_100([score, score_line], top_100)
            data = x.readline()
    print(i)
    if add_412:
        split_path = outpath.split(".")
        prefix = split_path[0]
        split_path[0] = prefix + "_412add"
        outpath = ".".join(split_path)
    if add_211:
        split_path = outpath.split(".")
        prefix = split_path[0]
        split_path[0] = prefix + "_211add"
        outpath = ".".join(split_path)
    utils.list_to_file(outpath, top_100)
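# Hedged note on the expected input to score_series_set: each line of `path` appears to be a
# stringified list of "<semester>_<course>" tokens (the parser strips brackets, quotes and
# spaces, then splits on commas), e.g. a line such as:
#   ['1_CSC210', '1_MATH226', '2_CSC220', '2_CSC230']
# Example invocation (the paths are placeholders, not files from this repo):
# score_series_set("candidate_sequences.txt", "top_sequences.csv",
#                  add_412=False, add_211=False, class_type="transfer")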
def get_new_build_numbers(fn):
    d = dict_from_file(fn)
    return int(d['js']), int(d['css'])
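# The snippets above all lean on a dict_from_file helper that is not shown here. Below is a
# minimal JSON-only sketch of what such a helper could look like; it is an assumption for
# illustration only, since the real utility clearly also supports CSV with column/delimiter
# arguments (see the utils.dict_from_file calls above) and raises RuntimeError on bad input.
import json


def dict_from_file_sketch(file_path: str) -> dict:
    # Read a JSON file and return its top-level object as a dict, mirroring how the callers
    # above expect a mapping back (e.g. d['js'], d['css']).
    try:
        with open(file_path, encoding='utf-8') as fp:
            data = json.load(fp)
    except (OSError, ValueError) as e:
        raise RuntimeError('Error reading {}: {}'.format(file_path, e))
    if not isinstance(data, dict):
        raise RuntimeError('Expected a dict in {}, got {}'.format(file_path, type(data).__name__))
    return data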