def downsampling():
    crs = load_and_sort(config.downsamplingconfigs)
    for c, r in crs:
        print("Downsampling used:", c.embedding.downsampling)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def trainsizes():
    crs = load_and_sort(config.trainsizeconfigs)
    for c, r in crs:
        print("Train size:", c.run.train_size)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def negative():
    crs = load_and_sort(config.negativeconfigs)
    for c, r in crs:
        print("Negative sampling count:", c.embedding.negative)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def estimator():
    crs = load_and_sort(config.estimatorconfigs)
    for c, r in crs:
        print("Estimator for word embedding:", c.embedding.estimator)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def filter_skips():
    crs = load_and_sort(config.filter_skipsconfigs)
    for c, r in crs:
        print("Skips filtered:", c.gram.filter_skips)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def dimension():
    crs = load_and_sort(config.dimensionconfigs)
    for c, r in crs:
        print("Dimension:", c.embedding.dimension)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def window():
    crs = load_and_sort(config.windowconfigs)
    for c, r in crs:
        print("Window size:", c.embedding.window)
        print("Gram size:", c.gram.gram_size)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def skipwords():
    crs = load_and_sort(config.skipwordconfigs)
    skipwordss = {c.gram.skipwords for (c, _) in crs}
    for skipwords in skipwordss:
        (c, r) = get_single_one((c, r) for (c, r) in crs if c.gram.skipwords == skipwords)
        print("Skipwords:", *skipwords)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def default():
    """Print the raw result file and metrics for the default configuration."""
    print("File contents:")
    with gzip.open(resultpath(config.default_config), mode="rt") as f:
        for line in f:
            print(line, end="")
    print_barline()
    r = load_result(config.default_config).result
    print("Error reduction:", error_reduction(r))
    print("Prior correctness:", prior_correctness(r))
    print("Correctness:", correctness(r))
def interesting_embeddings2():
    crs = load_and_sort(config.interesting_embeddingconfigs2)
    for c, r in crs:
        print("Estimator:", c.embedding.estimator)
        print("Downsampling:", c.embedding.downsampling)
        print("Min count:", c.embedding.min_count)
        print("Error reduction:", error_reduction(r))
        print("Prior correctness:", prior_correctness(r))
        print("Correctness:", correctness(r))
        print_barline()
def min_count():
    crs = load_and_sort(config.min_countconfigs)
    for filter_unknown in [True, False]:
        print("Grams with unknown words filtered:", filter_unknown)
        print_barline()
        relevant_crs = [(c, r) for (c, r) in crs if c.run.filter_unknown == filter_unknown]
        for c, r in relevant_crs:
            print("Min count for word embedding:", c.embedding.min_count)
            print("Error reduction:", error_reduction(r))
            print("Prior correctness:", prior_correctness(r))
            print("Correctness:", correctness(r))
            print_barline()
        print_barline()
def key(cr):
    """Sort key for (config, result) pairs: failed runs sort last, otherwise by error reduction."""
    if is_failed(cr[1]):
        return -float("inf")
    else:
        return error_reduction(cr[1])
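
# Illustrative note (an assumption, not part of this module): `key` appears to be
# the sort key used by `load_and_sort`, which presumably loads (config, result)
# pairs and orders them best-first, roughly like:
#
#     sorted(crs, key=key, reverse=True)
#
# Because failed results map to -inf, they end up at the bottom of each report.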