def run():
    """Train the configured classifiers and evaluate them on the test split.

    Side effects: prints progress messages and (via util.count) the label
    distribution of each split. Relies on the module-level helpers
    load_training_data/load_test_data and the md/util project modules.

    Fix: removed the dead, commented-out MinMaxScaler code that was left in
    both the train and test branches.
    """
    print('loading training data...')
    X, y = load_training_data()
    util.count(y)  # print class distribution of the training labels
    print('loading data completed.')

    print('loading models...')
    models = md.getClassifiers()

    print('training models...')
    md.train(models, X, y)
    print('training models completed...')

    print('loading test data...')
    X_test, y_test = load_test_data()
    util.count(y_test)  # print class distribution of the test labels
    print('loading test data completed...')

    print('eval models...')
    md.evaluate(models, X_test, y_test)
    print('eval models completed...')
def nConflicts(self, var, val, assignment):
    """Return the number of conflicts that assigning ``val`` to ``var``
    would cause under the current partial ``assignment``.

    The total is (a) the number of neighbours of ``var`` whose assigned
    value violates the binary constraint with ``(var, val)``, plus (b) the
    number of *other* assigned variables whose value is the same object as
    ``val``.

    NOTE(review): part (b) uses identity (``is`` / ``is not``) rather than
    equality — presumably intentional for shared/interned values, but worth
    confirming against how the domains are constructed.
    """
    def conflict(var2):
        # A neighbour conflicts only if it is already assigned and the
        # binary constraint between the two assignments fails.
        return var2 in assignment and not self.constraints(
            var, val, var2, assignment[var2])
    return util.count([conflict(v) for v in self.neighbours[var]]) + util.count([
        val is assignment[k] for k in assignment.keys() if var is not k
    ])
def benchmark():
    """Benchmark Gibbs sampling on a synthetic two-type BetaBernoulli IRM.

    Python 2 code (statement-form print). Prints the wall-clock time of each
    sampling sweep and the cluster-size histograms after every sweep.
    """
    np.random.seed(0)  # fixed seed so successive runs are comparable
    T1_N = 200  # entities in type t1
    T2_N = 200  # entities in type t2
    T1_C = 20   # latent classes used when generating the data
    T2_C = 20
    t1_assign, t2_assign, data, latent_class_matrix = synthdata.create_T1T2_bb(
        T1_N, T2_N, T1_C, T2_C)
    # Model configuration: two types joined by one BetaBernoulli relation.
    config = {
        'types': {
            't1': {
                'hps': 1.0,
                'N': T1_N
            },
            't2': {
                'hps': 1.0,
                'N': T2_N
            }
        },
        'relations': {
            'R1': {
                'relation': ('t1', 't2'),
                'model': 'BetaBernoulli',
                'hps': {
                    'alpha': 1.0,
                    'beta': 1.0
                }
            }
        },
        'data': {
            'R1': data
        }
    }
    irm_model = irmio.model_from_config(config)
    t1_obj = irm_model.types['t1']
    t2_obj = irm_model.types['t2']
    SAMPLES_N = 50  # number of full Gibbs sweeps to time
    for s in range(SAMPLES_N):
        print s
        t1 = time.time()
        # One sweep resamples the class assignment of every entity of each type.
        gibbs.gibbs_sample_type(t1_obj)
        gibbs.gibbs_sample_type(t2_obj)
        t2 = time.time()
        print "sample", s, "took", t2 - t1, "secs"
        # Cluster-size histograms after this sweep.
        print util.count(t1_obj.get_assignments()).values()
        print util.count(t2_obj.get_assignments()).values()
def test_play():
    """Regression test: the sample game from testinput.txt scores 306."""
    from util import count, play, read_file_to_list
    deck_one, deck_two = read_file_to_list("testinput.txt")
    winning_deck = play(deck_one, deck_two)
    assert count(winning_deck) == 306
def rt_equiv(self, other):
    """Structural equivalence between this persistent vector and ``other``.

    Vectors compare by count and then element-wise; any other Sequential is
    compared by walking its seq in lockstep with this vector's elements.
    Returns the runtime ``true``/``false`` singletons, never Python bools.
    """
    if UT.is_satisfies(RT.IPersistentVector, other):
        # Fast reject: different lengths can never be equivalent.
        if UT.equiv(RT.count.invoke1(self), UT.count(other)) is false:
            return false
        for x in range(self._cnt):
            i = wrap_int(x)
            # NOTE(review): this compares self's element against
            # `UT.nth(self, i)` — i.e. against itself — instead of against
            # `other`'s element; looks like a bug, confirm the intended
            # vector/vector comparison.
            if RT.equiv(RT.nth.invoke1(self, i), UT.nth(self, i)) is false:
                return false
        return true
    else:
        if RT.is_satisfies.invoke1(RT.Sequential, other) is false:
            return false
        ms = RT.seq.invoke1(other)
        for x in range(self._cnt):
            # NOTE(review): `UT.nth(x, wrap_int(x))` indexes into the loop
            # counter rather than this vector — presumably meant to be
            # `UT.nth(self, wrap_int(x))`; verify against the runtime's
            # nth contract.
            if ms is nil or UT.equiv(UT.nth(x, wrap_int(x)), UT.first(ms)) is false:
                return false
            ms = UT.next(ms)
        # `other` must also be exhausted, otherwise it is longer than self.
        if ms is not nil:
            return false
        return true
def test_play_recursive():
    """Regression test: the recursive variant on testinput.txt scores 291."""
    from util import count, play_recursive, read_file_to_list
    deck_one, deck_two = read_file_to_list("testinput.txt")
    winning_deck, _ = play_recursive(deck_one, deck_two, 0)
    assert count(winning_deck) == 291
def valid_password_a(line):
    """Validate a policy line of the form "1-3 a: abcde".

    The password is valid when the number of occurrences of the policy
    letter lies within the inclusive min-max range.

    :param line: raw input line "<min>-<max> <letter>: <password>"
    :return: bool
    """
    rng, letter, pw = line.split(" ")
    letter = letter[:1]  # strip the trailing ':' from e.g. "a:"
    minc, maxc = map(int, rng.split("-"))
    letter_count = u.count(letter, pw)
    # Chained comparison replaces the original two-clause `and` expression.
    return minc <= letter_count <= maxc
def parse_g1mg_subchunk_0x10002(schunk_data):
    """Parse the materials sub-chunk (id 0x10002) of a G1MG chunk.

    Python 2 code (``xrange``). Returns ``{"material_list": [...]}`` where
    each material dict carries its texture count and a list of
    ``[texture_identifier, uv_channel_index]`` pairs. The asserts document
    field invariants observed in this game's files.
    """
    log("========", lv=1)
    log("materials", lv=1)
    log("========", lv=1)
    get = get_getter(schunk_data, "<")  # little-endian field reader over the blob
    schunk_type, schunk_size = get(0x0, "2I")
    mat_count = get(0x8, "I")
    # dump_data("g1mg_0x10002.bin", schunk_data)
    off = 0xc  # first material record starts after the 12-byte header
    material_list = []
    for mat_idx in xrange(mat_count):
        unk0 = get(off + 0x0, "I")
        assert unk0 == 0
        tex_count = get(off + 0x4, "I")
        unk1, unk2 = get(off + 0x8, "Ii")
        # NOTE(review): unk1 often equals tex_count — tallied here to test
        # that hypothesis across input files.
        unk1_equal_tex_count = tex_count == unk1
        count(locals(), "unk1_equal_tex_count")
        log("mat %d, tex_count %d, unk1=%d, unk2=%d, unk1_equal_tex_count=%d" % (mat_idx, tex_count, unk1, unk2, unk1_equal_tex_count), lv=1)
        assert 1 <= unk1 <= 7
        assert unk2 == 1 or unk2 == -1
        off += 0x10  # fixed 16-byte material header
        material = {"texture_count": tex_count, "textures": []}
        material_list.append(material)
        for tex_idx in xrange(tex_count):
            tex_identifier = get(off + 0x0, "H")
            uv_chnl_idx, unk6 = get(off + 0x2, "HH")
            unk3, unk4, unk5 = get(off + 0x6, "3H")
            count(locals(), "unk6")
            assert 0 <= unk3 <= 2
            assert unk4 == 4
            assert unk5 == 4
            assert 0 <= uv_chnl_idx <= 4, "works for this game!"
            off += 0xc  # 12 bytes per texture record
            # NOTE(review): the "tex_idx = %d" label is fed tex_identifier,
            # not tex_idx — possibly a mislabel; verify against sample logs.
            log("tex_idx = %d, uv_channel_idx = %d, unk6 = %d, unk3 = %d, unk4 = %d, unk5 = %d" % (tex_identifier, uv_chnl_idx, unk6, unk3, unk4, unk5), lv=1)
            material["textures"].append([tex_identifier, uv_chnl_idx])
    log("")
    return {"material_list": material_list}
def select(self, ds, preselection=None):
    """Yield item indices ordered by descending count, keeping only those
    whose count meets the ``self.support`` threshold.

    :param ds: dataset accepted by ``util.count`` (per-index counts,
               numpy-like: supports ``argsort`` and integer indexing)
    :param preselection: optional iterable of admissible indices; when
               given, the result is restricted to that set
    :return: generator of item indices

    Fix: ``preselection != None`` replaced with the correct identity test
    ``preselection is not None``.
    """
    counts = util.count(ds)
    # argsort ascending, then reversed -> most frequent index first.
    gen = (idx for idx in counts.argsort()[::-1] if counts[idx] >= self.support)
    if preselection is not None:
        preselection = set(preselection)  # O(1) membership in the filter below
        return (idx for idx in gen if idx in preselection)
    return gen
def run_b(input_data):
    """Parse blank-line-separated passports and print how many are valid:
    every required field present and each value passing its validator."""
    chunks = input_data.split("\n\n")
    field_lists = [chunk.replace("\n", " ").split(" ") for chunk in chunks]
    records = [parse(fields) for fields in field_lists]

    def is_valid(pp):
        # Guard clause: all required keys must be present first.
        if not REQ <= pp.keys():
            return False
        return all(VALIDATORS[k](pp[k]) for k in REQ)

    print(u.count(is_valid, records))
def benchmark():
    """Benchmark Gibbs sampling on a synthetic two-type BetaBernoulli IRM,
    built with the FastRelation backend (cf. the plain-relation variant of
    this benchmark elsewhere in the project).

    Python 2 code (statement-form print).
    """
    np.random.seed(0)  # fixed seed so successive runs are comparable
    T1_N = 200  # entities in type t1
    T2_N = 200  # entities in type t2
    T1_C = 20   # latent classes used when generating the data
    T2_C = 20
    t1_assign, t2_assign, data, latent_class_matrix = synthdata.create_T1T2_bb(T1_N, T2_N, T1_C, T2_C)
    # Two types joined by a single BetaBernoulli relation.
    config = {'types' : {'t1' : {'hps' : 1.0, 'N' : T1_N},
                         't2' : {'hps' : 1.0, 'N' : T2_N}},
              'relations' : { 'R1' : {'relation' : ('t1', 't2'),
                                      'model' : 'BetaBernoulli',
                                      'hps' : {'alpha' : 1.0, 'beta' : 1.0}}},
              'data' : {'R1' : data}}
    # FastRelation is the point of this benchmark.
    irm_model = irmio.model_from_config(config, relation_class=relation.FastRelation)
    t1_obj = irm_model.types['t1']
    t2_obj = irm_model.types['t2']
    SAMPLES_N = 50  # number of full Gibbs sweeps to time
    for s in range(SAMPLES_N):
        print s
        t1 = time.time()
        gibbs.gibbs_sample_type(t1_obj)
        gibbs.gibbs_sample_type(t2_obj)
        t2 = time.time()
        print "sample", s, "took", t2-t1, "secs"
        # Cluster-size histograms after this sweep.
        print util.count(t1_obj.get_assignments()).values()
        print util.count(t2_obj.get_assignments()).values()
def watermarking(file_name, mark_file_name, marks="1111000"):
    """Embed the bit string ``marks`` into a document.

    The i-th most frequent word is styled according to the i-th mark bit,
    then the marked document is written to ``mark_file_name``.
    """
    # Read the source text and rank its words by frequency.
    text = read_document(file_name)
    frequencies = count(text)
    candidates = Word.get_words(frequencies.keys())
    assert len(candidates) >= len(marks), u"mark的长度超过了文本的字符数,尝试缩小mark或增长文本"
    # One bit per word: '1' -> special style on, '0' -> off.
    for bit, word in zip(marks, candidates[:len(marks)]):
        is_one = bit == '1'
        word.set_special_style(is_one, '00000001' if is_one else '00000000')
    write_document(candidates, mark_file_name, contents=text)
    return
def encrypt_extract(mark_file_name, len_watermark=7, marks=''):
    """Recover an encrypted watermark of ``len_watermark`` bits from a
    marked document, strip the padding, decrypt it against ``marks`` and
    print the resulting code."""
    styles = read_document_style(mark_file_name)
    document = read_document(mark_file_name)
    ranked_words = count(document)
    # Each styled word carries one 8-bit chunk of the watermark.
    chunk_limit = math.ceil(len_watermark / 8)
    pieces = []
    for position, token in enumerate(ranked_words):
        if position >= chunk_limit:
            break
        pieces.append(styles.get(token))
    watermark = del_pad(''.join(pieces), len_watermark)
    # print(watermark)
    print(decrypt(origin_code=marks, b=watermark))
def extract(mark_file_name, len_watermark=7):
    """Read back a ``len_watermark``-bit watermark: each of the most
    frequent words contributes '1' when it carries the special style,
    otherwise '0'. The recovered bit string is printed."""
    styles = read_document_style(mark_file_name)
    document = read_document(mark_file_name)
    ranked_words = count(document)
    bits = []
    for position, token in enumerate(ranked_words):
        if position >= len_watermark:
            break
        bits.append('1' if styles.get(token) == '00000001' else '0')
    print(''.join(bits))
def encrypt_watermarking(file_name, mark_file_name, marks="1111000"):
    """Encrypt ``marks``, pad it to whole bytes and embed one byte per
    frequent word into the document.

    Returns the length of the encrypted (unpadded) mark so the matching
    extraction routine knows how many bits to recover.
    """
    encrypt_mark = encrypt(code=marks)
    len_encrypt_mark = len(encrypt_mark)
    binary_mark = pad_encrypt(encrypt_mark)
    # print(binary_mark)
    byte_count = int(len(binary_mark) / 8)
    # Read the source text and rank its words by frequency.
    text = read_document(file_name)
    frequencies = count(text)
    candidates = Word.get_words(frequencies.keys())
    assert len(candidates) >= byte_count, u"mark的长度超过了文本的字符数,尝试缩小mark或增长文本"
    # Each selected word stores one 8-bit slice of the padded mark.
    for position, word in enumerate(candidates[:byte_count]):
        chunk = binary_mark[position * 8:position * 8 + 8]
        word.set_special_style(True, chunk)
    write_document(candidates, mark_file_name, contents=text)
    return len_encrypt_mark
def solve():
    """Project Euler 42: download the word list and count how many words
    are coded triangle numbers."""
    response = requests.get('http://projecteuler.net/project/words.txt')
    # Entries arrive as comma-separated quoted strings; drop the quotes.
    words = [entry[1:-1] for entry in response.text.split(',')]
    return util.count(is_coded_triangle_number, words)
def test_challenge(self):
    """The challenge input re-enters its loop after 8038 redistributions."""
    first_repeat = last(day_6.loop(challenge))
    self.assertEqual(8038, count(day_6.loop(first_repeat)))
def test_loop_detection(self):
    """The example banks (0, 2, 7, 0) cycle with period 4."""
    repeated_state = last(day_6.loop((0, 2, 7, 0)))
    self.assertEqual(4, count(day_6.loop(repeated_state)))
def test_challenge(self):
    """Redistribution on the challenge input first repeats after 12841 steps."""
    steps_until_repeat = count(day_6.loop(challenge))
    self.assertEqual(12841, steps_until_repeat)
def steps_v2(pointer, memory):
    """Count steps until escape under part-two rules: offsets of three or
    more decrease by one after the jump, smaller offsets increase by one."""
    def adjust(jump):
        if jump >= 3:
            return -1
        return 1
    return count(run(pointer=pointer, memory=memory, incr=adjust))
def steps(pointer, memory, incr=one):
    """Count steps until the jump machine escapes.

    ``incr`` maps the current jump offset to the amount added to it after
    the jump is taken (defaults to the project's ``one``).
    """
    trace = run(pointer=pointer, memory=memory, incr=incr)
    return count(trace)
# -*- coding: utf-8 -*- """ Created on Thu Jul 19 21:17:00 2018 @author: xiaowen """ import prepare_data as pda import util from global_env import DATA_FOLDER import global_env as env TRAINING_EXAMPLES = 'Data_1500-2000' if __name__ == "__main__": print('prepare Training data...') df_train = pda.prepareExamples(env.LOAD_SEC_START, env.LOAD_SEC_END) util.pickle_dump(df_train, DATA_FOLDER + TRAINING_EXAMPLES + '_df.pickle') print(df_train['X'].shape) util.count(df_train['y'].tolist()) print('prepare Test data...') df_train = pda.prepareExamples(env.TEST_SEC_START, env.TEST_SEC_END) util.pickle_dump(df_train, DATA_FOLDER + TRAINING_EXAMPLES + '_df_test.pickle') print(df_train['X'].shape) util.count(df_train['y'].tolist()) #print(df_train)
def run_a(input_data):
    """Print the number of passports that contain every required field
    (field values are not checked in part A)."""
    chunks = input_data.split("\n\n")
    field_lists = [chunk.replace("\n", " ").split(" ") for chunk in chunks]
    records = [parse(fields) for fields in field_lists]

    def has_required(pp):
        return REQ <= pp.keys()

    print(u.count(has_required, records))
def tab_dealer(line):
    """When transpiling, record the line's leading-space count in the
    module-level ``tabnum`` and strip the indentation before dispatching
    the line to ``top_level``."""
    global transpile, tabnum
    if not transpile:
        return top_level(line)
    tabnum = util.count(" ", line)
    return top_level(line.lstrip(" "))
def run_b(inp):
    """For each blank-line-separated group, count the questions answered by
    every member (letter frequency equals group size) and print the total."""
    total = 0
    for block in inp.split("\n\n"):
        letter_counts = Counter(block.replace("\n", ""))
        members = len(block.split("\n"))
        total += u.count(lambda v: v == members, letter_counts.values())
    print(total)
def test_count():
    """count(haystack, needle) returns the number of occurrences of needle."""
    from util import count
    cases = [
        ("abcde", 'a', 1),
        ("cdefg", 'b', 0),
        ("ccccccccc", 'c', 9),
    ]
    for haystack, needle, expected in cases:
        assert count(haystack, needle) == expected
def validate_iyr(value):
    """Issue year: 2010-2020 inclusive."""
    return 2010 <= int(value) <= 2020


def validate_eyr(value):
    """Expiration year: 2020-2030 inclusive."""
    return 2020 <= int(value) <= 2030


def validate_hgt(value):
    """Height: 150-193 cm or 59-76 in; any other unit/format is invalid."""
    if value.endswith("cm"):
        return 150 <= int(value[:-2]) <= 193
    if value.endswith("in"):
        return 59 <= int(value[:-2]) <= 76
    return False


def validate_hcl(value):
    """Hair colour: '#' followed by six hex digits.

    Fix: a non-hex payload (e.g. "#123abz") used to raise ValueError from
    int(); it now returns False like the other validators.
    """
    if len(value) != 7 or not value.startswith("#"):
        return False
    try:
        return int(value[1:], 16) >= 0
    except ValueError:
        return False


def validate_ecl(value):
    """Eye colour: one of the allowed keywords."""
    return value in ("amb", "blu", "brn", "gry", "grn", "hzl", "oth")


def validate_pid(value):
    """Passport id: exactly nine digits."""
    return len(value) == 9 and value.isdigit()


if __name__ == "__main__":
    # Count valid passports in the input chunks (project util helpers).
    print(util.count(validate, util.readchunks()))
def test_endless_loop():
    """A game that never terminates is scored as 0."""
    from util import count, play
    outcome = play([43, 19], [2, 29, 14])
    assert count(outcome) == 0
def num_legal_values(csp, var, assignment):
    """Return how many values in var's domain cause zero conflicts under
    the given partial assignment."""
    conflict_free = [
        csp.nConflicts(var, candidate, assignment) == 0
        for candidate in csp.domains[var]
    ]
    return util.count(conflict_free)
def say(line: str) -> str:
    """
    Parameters:
    :param line: string of line that needs to be processed
    Return:
    str - text that was printed

    Handles four syntactic forms of the ``say`` statement:
    1. concatenation of quoted text and variables joined by ``+``;
    2. a comma-separated list of variable names (no quotes anywhere);
    3. a single bare variable name (no quotes);
    4. a single quoted literal.
    In transpile mode, output lines are emitted through ``transpiler``
    instead of being printed.

    NOTE(review): the original nesting of this function could not be fully
    recovered from the flattened source; the structure below is the most
    plausible reading — verify against the upstream repository.
    """
    # Exporting it to the transpiler where the while loop works
    global transpile, tabnum
    # The next four locals are initialised but only `out` is used below —
    # presumably leftovers from an earlier parsing approach.
    listed = list(line)
    out = ""
    start = None
    quotes = ["'", '"']
    quote_used = ""
    # Form 1: `say "a" + var + "b"` — multiple '+'-joined groups.
    if len(util.groups(line, '"', "+")) > 1:
        groups = util.groups(line, '"', "+")
        out = ""
        tout = []  # (text, kind) pairs for the transpiler: 0 = variable, 1 = literal
        for i in groups:
            i = i.strip(" ")
            if i.startswith("say"):
                i = i.replace("say ", "")
                if i.startswith('"'):
                    # Strip the surrounding quotes from the leading literal.
                    i = "".join(list(i)[1:])
                    i = i.rstrip('"')
                    if transpile:
                        if i in variables.keys():
                            tout.append([i, 0])
                        else:
                            tout.append([i, 1])
                        continue
                    print(i, end="")
                    out += str(i)
            elif i.startswith('"'):
                # Plain quoted literal segment.
                i = i.strip('"')
                if transpile:
                    tout.append([i, 1])
                    continue
                print(i, end="")
                out += str(i)
            else:
                # Bare word: must be a known variable.
                try:
                    if transpile:
                        tout.append([i, 0])
                        continue
                    print(variables[i], end="")
                    out += str(variables[i])
                except KeyError:
                    raise Exception("Variable not found")
        if transpile:
            transpiler.add_line(" " * tabnum + transpiler.fill_print_text_var(tout))
            return "__TRANSPILER.IGNORE.OUT__"
        print("")
        return out
    # Form 2: `say a, b, c` — comma-separated variables, no quotes at all.
    elif util.count("'", line) == 0 and util.count('"', line) == 0 and "," in line:
        line = line.rstrip("\n")
        line = line.lstrip("say")
        line = line.replace(" ", "")
        line = line.split(",")
        full_out = ""
        for i in line:
            try:
                print(variables[i], end=" ")
                full_out += str(variables[i]) + " "
            except KeyError:
                raise Exception("Variable not found")
        print("\n", end="")
        full_out += "\n"
        return full_out
    # Form 3: `say variable` — a single bare variable name.
    elif util.count("'", line) == 0 and util.count('"', line) == 0:
        line = line.rstrip("\n")
        line = line.lstrip("say")
        line = line.lstrip(" ")
        try:
            if not transpile:
                print(variables[line])
            else:
                transpiler.add_line(" " * tabnum + transpiler.fill_print_plain_var(line))
        except KeyError:
            raise Exception("Variable not found")
        return variables[line]
    # Form 4: `say "literal"` — a single quoted string.
    else:
        to_say = list(re.findall(r"say[ ]*?['\"](.+)['\"]", line))
        if len(to_say) > 0:
            if transpile:
                transpiler.add_line(" " * tabnum + transpiler.fill_print_plain(to_say[0]))
            else:
                print(to_say[0])
        else:
            raise Exception(
                f"Error on line: {line}\nInvalid syntax for say statement. Did you add too many spaces or forget quotes?"
            )
        return to_say[0]
import util


def parse(line):
    """Split a policy line "1-3 a: abcde" into (min, max, letter, password)."""
    rule, char, password = line.split()
    atleast, atmost = rule.split("-")
    bounds = (int(atleast), int(atmost))
    return bounds[0], bounds[1], char[0], password


def validate(line):
    """A line is valid when the letter's occurrence count lies within the
    inclusive policy range."""
    atleast, atmost, char, password = parse(line)
    occurrences = password.count(char)
    return atleast <= occurrences <= atmost


if __name__ == "__main__":
    print(util.count(validate, util.readlines()))
def count_trees(grid, slope):
    """Walk ``grid`` from the origin along ``slope`` and return how many of
    the visited points contain a tree.

    :param grid: the parsed map (rows of terrain)
    :param slope: (dx, dy) step taken each move
    :return: number of tree cells encountered

    Fix: removed the dead local ``pt = (0, 0)`` — it was never read (the
    path start is passed as a literal and the lambda parameter shadowed it).
    """
    path_length = len(grid) // slope[1]
    path = Pt.path((0, 0), slope, path_length)
    return u.count(lambda point: tree_at(grid, point), path)