Example #1
def process_one(path):
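    """Split a tab-separated file into English / Chinese columns and append them to output_en.txt and output_zh-CN.txt."""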
    res = read_all(path)
    rows = res.split("\n")
    split_rows = map(lambda x: x.split("\t"), rows)
    res = filter(lambda x: len(x) > 1 and len(x[1]) > 0, split_rows)
    res = list(res)
    res_en = list(map(lambda x: x[0], res))
    res_zh = list(map(lambda x: x[1], res))
    print(res)

    with open("./output_en.txt", "a", encoding="utf-8") as file_en:
        file_en.write("\n".join(res_en))
    with open("./output_zh-CN.txt", "a", encoding="utf-8") as file_zh:
        file_zh.write("\n".join(res_zh))
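A minimal driver sketch for process_one; the command-line handling below is an assumption for illustration, not part of the original project:

if __name__ == "__main__":
    import sys
    # Hypothetical entry point: process every path given on the command line.
    for path in sys.argv[1:]:
        process_one(path)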
Example #2
def main():
    """Main function"""
    algorithms, _ = read_all("data")
    proportions = np.asarray([1, 2, 5, 10, 15, 25, 50, 100])
    proportioni = list(range(len(proportions)))
    betas = {"1.0": 1, "3.0": 2, "9.0": 3, "27.0": 4, "81.0": 5, "": 0}
    algorithm_names = ["mesdif", "nmefsd", "ssdp"]
    convergences = dict()
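    # Collect convergence counts once per feature proportion.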
    for proportion in proportions:
        convergences[proportion] = get_convergences(proportion, algorithms)
    plt.rc("font", size=14)
    fig, axes = plt.subplots(1, 3, sharex="col")
    alg_i = 0
    lines = [None] * len(betas)
    line_names = [str()] * len(betas)
    # Convergence counts: one row per proportion, one column per beta setting.
    X = np.zeros((len(proportions), len(betas)))
    for alg_name in algorithm_names:
        i = 0
        for proportion in proportions:
            convs = convergence_by_alg(alg_name, convergences[proportion])
            for beta in betas:
                X[i, betas[beta]] = convs[beta]
            i += 1
        bi = 0
        for beta in betas:
            name = "base" if not beta else r"$\beta:$ " + beta
            lines[bi], = axes[alg_i].plot(proportioni,
                                          X[:, betas[beta]],
                                          marker='o',
                                          label=name,
                                          color=COLORS[bi])
            line_names[bi] = name
            bi += 1
        axes[alg_i].set_xlabel("proportion of features (%)")
        axes[alg_i].set_ylabel("number of convergences")
        axes[alg_i].set_title("Convergence by Initialization - " +
                              alg_name.upper())
        axes[alg_i].set_xticks(proportioni)
        axes[alg_i].set_xticklabels(proportions)
        alg_i += 1
    fig.legend(lines, line_names, loc="lower center", ncol=len(lines))
    fig.set_size_inches(39.29112, 9.82278)
    fig.set_dpi(200)
    fig.savefig("images/convergence.pdf", bbox_inches="tight")
Example #3
#!/usr/bin/python
import sys, os, functools, utils


def agg(a, b):
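    """Return the first non-empty chunk: once the accumulator is set, keep it."""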
    # print(a, b)
    if not a:
        return b
    return a


utils.help()
res = functools.reduce(agg, utils.read_all(), [])
utils.out("".join(res))
Example #4
File: wc.py Project: otrack/pash
#!/usr/bin/python
import sys, os, functools, utils

PAD_LEN = 7  # needs to add a space character for when they exceed


def parseLine(s):
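    """Parse a line of whitespace-separated integer counts (wc-style output)."""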
    global PAD_LEN
    # FIXME: This could identify padding number
    return map(int, s.split())


def emitLine(t):
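    """Right-justify each count in a PAD_LEN-wide field and return the joined line as a one-element list."""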
    global PAD_LEN
    return [" ".join(map(lambda e: str(e).rjust(PAD_LEN, ' '), t))]


def combiner(a, b):
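    """Combine two partial wc results by adding their counts element-wise."""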
    # print(a, b)
    if not a:
        return b
    az = parseLine(a[0])
    bz = parseLine(b[0])
    return emitLine([(i + j) for (i, j) in zip(az, bz)])


utils.help()
res = functools.reduce(combiner, utils.read_all(), [])
utils.out("".join(res))
Example #5
def main():
    """Main function"""
    algorithms, _ = read_all("data")
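    # Run the competition once per quality measure.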
    compete(algorithms, "WRACC")
    compete(algorithms, "Support")
Example #6
def update_types():
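    # Read all matches from InputDir, then regenerate the type metadata and event CSV files.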
    matches = []
    utils.read_all(InputDir, matches)
    utils.write_type_events(MetaDir, matches)
    utils.write_type_outcomes(MetaDir, matches)
    utils.write_events_csv(SortedDir + '/events', matches, '_events')
Example #7
#!/usr/bin/python
import sys, os, functools, utils

def agg(a, b):
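  # Concatenate the partial outputs in input order.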
  return a + b

utils.help()
utils.out("".join(functools.reduce(agg, utils.read_all(), [])))
Example #8
args = parser.parse_args()

base_path = args.input_directory + "/"  # forward slash keeps the path usable outside Windows as well
path_to_bricks = base_path + "output-brick/"
path_to_linkers = base_path + "output-linker/"

list_of_bricks = os.listdir(path_to_bricks)
list_of_linkers = os.listdir(path_to_linkers)

mol_bricks = [Chem.MolFromMolFile(path_to_bricks + x) for x in list_of_bricks]
mol_linkers = [
    Chem.MolFromMolFile(path_to_linkers + x) for x in list_of_linkers
]

raw_bricks = read_all(list_of_bricks, path_to_bricks)
raw_linkers = read_all(list_of_linkers, path_to_linkers)

brick_linker_pairs, linker_brick_pairs = process_binding_data(
    raw_bricks, raw_linkers)


def gen(i):
    """
    Generates a single molecule under 500MW
    Args:
        i: current iterator, determines file-name

    Returns: i+1 if generation was successful, i otherwise

    """
Example #9
def read(f):
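    """Read a hex string via read_all(f) and return its bits, left-padded with zeros to a multiple of 4."""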
    res = list(map(int, bin(int(read_all(f), 16))[2:]))
    if len(res) % 4 != 0:
        res = ([0] * (4 - len(res) % 4)) + res
    return res
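Example #9 only needs read_all(f) to return the file's contents as text; a minimal sketch of such a helper (the body is an assumption, not the project's actual implementation):

def read_all(f):
    # Hypothetical helper: return the whole contents of an open text file.
    return f.read().strip()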
Example #10
# Imports required by the rest of this snippet.
import functools, utils
from collections import OrderedDict


def parseLine(s):
    global PAD_LEN
    # FIXME: This could identify padding number---out of band?
    res = s.split()
    # print(res)
    return (res[0], int(res[1]))  #, res[2])


def emitLine(t):
    global PAD_LEN
    return " ".join(t)


def update_index(index, lst):
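    # Merge the (key, count) pairs from lst into the running index in place.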
    # { index[i]:(index[i] if index[i] else j) for (i, j) in lst}
    # print(index)
    # print(lst)
    print(index, lst)
    index.update(lst)
    return index  # {index[i]: j for i, j in lst}


def agg(a, b):
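    # Fold step: parse every line in chunk b and merge it into the accumulated index a.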
    # print(a, b)
    # TODO: take and emit out of fold!
    return update_index(a, map(parseLine, b))


utils.help()
res = functools.reduce(agg, utils.read_all(), OrderedDict())
utils.out("\n".join([a + "  " + str(b) for a, b in res.items()]))