Example #1
def do_raw_continued_evaluations(n):
    """
    Test that the results from running continued evaluations in the same
    environment match the expected values.
    """
    with open('test_outputs/%02d.json' % n) as f:
        expected = json.load(f)
    env = None
    results = []
    t = make_tester(lab.result_and_env)
    with open('test_inputs/%02d.snek' % n) as f:
        for line in iter(f.readline, ''):
            try:
                parsed = lab.parse(lab.tokenize(line.strip()))
            except lab.SnekSyntaxError:
                results.append({'expression': line.strip(), 'ok': False, 'type': 'SnekSyntaxError', 'when': 'parse'})
                continue
            out = t(*((parsed, ) if env is None else (parsed, env)))
            if out['ok']:
                # carry the resulting environment into the next evaluation
                env = out['output'][1]
                if isinstance(out['output'][0], (int, float)):
                    out['output'] = out['output'][0]
                else:
                    out['output'] = 'SOMETHING'
            out['expression'] = line.strip()
            results.append(out)
    for ix, (result, exp) in enumerate(zip(results, expected)):
        msg = f"for line {ix+1} in test_inputs/%02d.snek:\n    {result['expression']}" % n
        compare_outputs(result, exp, msg=msg)
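Each entry appended to results therefore takes one of two shapes (the values below are illustrative only, not taken from the test data):

{'ok': True, 'output': 7, 'expression': '(+ 3 4)'}
{'ok': False, 'type': 'SnekSyntaxError', 'when': 'parse', 'expression': '(define'}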
Example #2
def check_probs(f_probs, num_of_open_line_data=NUM_OF_FIRST_ROWS_AT_CSC,
                num_of_open_line_probs=NUM_OF_FIRST_ROWS_AT_PROBS_FILE, score=0, buffer=250):
    """
    Read the probes file, intersect probe positions with the ChIP peaks, and
    print every probe that falls inside more than one peak.
    """
    probs_data = pd.read_csv(f_probs, sep=",", compression="gzip",
                             skiprows=list(range(num_of_open_line_probs)), header=0)
    chromosomes = lab.parse(probs_data, "CHR", "MAPINFO", None, True)
    # remove unnecessary probes
    plass = ["tehila/Plass/ENCFF032DEW.bed.gz", "tehila/Plass/ENCFF401ONY.bed.gz", "tehila/Plass/ENCFF543VGD.bed.gz"]
    imm = ["tehila/immortalization/ENCFF449NOT.bed", "tehila/immortalization/ENCFF833FTF.bed"]
    chip = []
    for val in imm:
        chip.append(lab.read_chip_file(val, score))
    for val in plass:
        chip.append(lab.read_chip_file(val, score, True))
    visited = set()
    for df in chip:
        for site in df.iterrows():
            # map the chromosome name to an index: chr1-chr22 -> 0-21, X -> 22, Y -> 23
            ind = site[1]["chrom"][3:]
            if ind == 'X':
                ind = 22
            elif ind == 'Y':
                ind = 23
            else:
                ind = int(ind) - 1
            for item in chromosomes[ind]:
                if site[1]["chromStart"] < item[0] < site[1]["chromEnd"]:
                    if item[1] in visited:
                        print(str(item[1]) + "\nchr: " + str(site[1]["chrom"]) + "\nstart: " +
                              str(site[1]["chromStart"]) + "\nend: " + str(site[1]["chromEnd"]) + "\n")
                    else:
                        visited.add(item[1])
    print("done!")
Example #3
File: test.py Project: jdanielsnyc/SNEK
def test_oldbehaviors():
    run_test_number(1, lab.tokenize)
    run_test_number(2, lab.parse)
    run_test_number(3, lambda i: lab.parse(lab.tokenize(i)))
    run_test_number(4, lab.evaluate)
    run_test_number(5, lab.evaluate)
    for n in range(6, 13):
        do_continued_evaluations(n)
    for n in range(13, 30):
        do_raw_continued_evaluations(n)
Example #4
 def tests_01_to_29_oldbehaviors(self):
     self.run_test_number(1, lab.tokenize)
     self.run_test_number(2, lab.parse)
     self.run_test_number(3, lambda i: lab.parse(lab.tokenize(i)))
     self.run_test_number(4, lab.evaluate)
     self.run_test_number(5, lab.evaluate)
     for n in range(6, 13):
         self._test_continued_evaluations(n)
     for n in range(13, 30):
         self._test_raw_continued_evaluations(n)
Example #5
 def _test_raw_continued_evaluations(self, n):
     """
     Test that the results from running continued evaluations in the same
     environment match the expected values.
     """
     with open('test_outputs/%02d.json' % n) as f:
         expected = json.load(f)
     msg = "for test_inputs/%02d.carlae" % n
     env = None
     results = []
     try:
         t = LispTest.make_tester(lab.result_and_env)
     except AttributeError:  # fall back when lab does not define result_and_env
         t = LispTest.make_tester(lab.evaluate)
     with open('test_inputs/%02d.carlae' % n) as f:
         for line in iter(f.readline, ''):
             parsed = lab.parse(lab.tokenize(line.strip()))
             out = t(*((parsed, ) if env is None else (parsed, env)))
             if out['ok']:
                 # carry the resulting environment into the next evaluation
                 env = out['output'][1]
                 if isinstance(out['output'][0], (int, float)):
                     out['output'] = out['output'][0]
                 else:
                     out['output'] = 'SOMETHING'
             results.append(out)
     for result, exp in zip(results, expected):
         self._compare_outputs(result, exp, msg=msg)
Example #6
 def _test_raw_continued_evaluations(self, n):
     """
     Test that the results from running continued evaluations in the same
     environment match the expected values.
     """
     with open(os.path.join(TEST_DIRECTORY, 'test_outputs',
                            f'{n:02d}.json')) as f:
         expected = json.load(f)
     msg = "for test_inputs/%02d.carlae" % n
     env = None
     results = []
     try:
         t = LispTest.make_tester(lab.result_and_env)
     except AttributeError:  # fall back when lab does not define result_and_env
         t = LispTest.make_tester(lab.evaluate)
     with open(
             os.path.join(TEST_DIRECTORY, 'test_inputs',
                          f'{n:02d}.carlae')) as f:
         for line in iter(f.readline, ''):
             parsed = lab.parse(lab.tokenize(line.strip()))
             out = t(*((parsed, ) if env is None else (parsed, env)))
             if out['ok']:
                 # carry the resulting environment into the next evaluation
                 env = out['output'][1]
                 try:
                     typecheck = (int, float, lab.Pair)  # probe for linked-list support
                     func = list_from_ll
                 except AttributeError:
                     typecheck = (int, float)
                     func = lambda x: x if isinstance(x, typecheck) else 'SOMETHING'
                 out['output'] = func(out['output'][0])
             results.append(out)
     for result, exp in zip(results, expected):
         self._compare_outputs(result, exp, msg=msg)
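The inner try/except above feature-detects lab.Pair on every line; an equivalent one-time probe (a sketch, not from the original test suite) can use getattr:

pair_cls = getattr(lab, 'Pair', None)  # None when lab has no linked-list type
if pair_cls is not None:
    convert = list_from_ll
else:
    convert = lambda x: x if isinstance(x, (int, float)) else 'SOMETHING'
# then, inside the loop: out['output'] = convert(out['output'][0])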
Example #7
def read_micro_data(f_data, f_probs, num_of_open_line_data=NUM_OF_FIRST_ROWS_AT_CSC, L=LST_CSC,
                    num_of_open_line_probs=NUM_OF_FIRST_ROWS_AT_PROBS_FILE, score=0, buffer=250):
    """
    Read a microarray data file together with its probes file, keep only the
    probes that fall near ChIP peaks, and build one output file per entry in L.
    :param f_data: path to the data file
    :param f_probs: path to the probes file
    :param num_of_open_line_data: number of header lines to skip in f_data
    :param num_of_open_line_probs: number of header lines to skip in f_probs
    :param L: iterable of targets; build_micro_file is called once per entry
    :param score: score threshold passed to lab.read_chip_file
    :param buffer: window size passed to lab.search when matching probes to peaks
    """
    # TODO: decide the buffer and score sizes
    array_data = pd.read_csv(f_data, sep="\t", skiprows=list(range(num_of_open_line_data)), header=0)
    probs_data = pd.read_csv(f_probs, sep=",", compression="gzip",
                             skiprows=list(range(num_of_open_line_probs)), header=0)
    chromosomes = lab.parse(probs_data, "CHR", "MAPINFO", None, True)
    # remove unnecessary probes
    plass = ["tehila/Plass/ENCFF032DEW.bed.gz", "tehila/Plass/ENCFF401ONY.bed.gz", "tehila/Plass/ENCFF543VGD.bed.gz"]
    imm = ["tehila/immortalization/ENCFF449NOT.bed", "tehila/immortalization/ENCFF833FTF.bed"]
    chip = []
    for val in imm:
        chip.append(lab.read_chip_file(val, score))
    for val in plass:
        chip.append(lab.read_chip_file(val, score, True))
    ids, inds = lab.search(chromosomes, chip, buffer, True)
    new_data = probs_data.loc[probs_data["IlmnID"].isin(ids)]
    array_data = array_data.loc[array_data["ID_REF"].isin(ids)]
    array_data = array_data.sort_values(by="ID_REF")
    inds = pd.DataFrame(inds).sort_values(by=0)
    sites = []
    for probe_id in ids:  # aggregate all peak matches for each probe
        df = inds.loc[inds[0] == probe_id]
        c = df[1].max()
        start = df[2].mean().round()
        end = df[3].mean().round()
        sites.append([probe_id, c, start, end])
    sites = pd.DataFrame(sites).sort_values(by=0)
    sites = sites.drop_duplicates()
    for lst in L:
        build_micro_file(array_data, lst, sites)
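The per-probe aggregation loop above can also be written as a single pandas groupby; a sketch, assuming (as in the loop) that the columns of inds are 0 = probe id, 1 = chromosome, 2 = start, 3 = end:

grouped = inds.groupby(0)
sites = pd.DataFrame({
    1: grouped[1].max(),           # chromosome: maximum, as in the loop
    2: grouped[2].mean().round(),  # start: mean of matched peak starts
    3: grouped[3].mean().round(),  # end: mean of matched peak ends
}).reset_index().sort_values(by=0).drop_duplicates()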
Example #8
def test_tokenize_and_parse():
    run_test_number(3, lambda i: lab.parse(lab.tokenize(i)))
Example #9
 def test_03_tokenize_and_parse(self):
     self.run_test_number(3, lambda i: lab.parse(lab.tokenize(i)))
Example #10
File: test.py Project: jdanielsnyc/SNEK
import os
import lab
import sys
import json

import pytest

TEST_DIRECTORY = os.path.dirname(__file__)


class NotImplemented:
    """Sentinel that compares unequal to everything (deliberately shadows the builtin)."""
    def __eq__(self, other):
        return False


try:
    nil_rep = lab.result_and_env(lab.parse(['nil']))[0]
except Exception:  # lab may not implement result_and_env / nil yet
    nil_rep = NotImplemented()


def list_from_ll(ll):
    """Recursively convert a lab.Pair linked list into a plain Python list."""
    if isinstance(ll, lab.Pair):
        if ll.cdr == nil_rep:
            return [list_from_ll(ll.car)]
        return [list_from_ll(ll.car)] + list_from_ll(ll.cdr)
    elif ll == nil_rep:
        return []
    elif isinstance(ll, (float, int)):
        return ll
    else:
        return 'SOMETHING'
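For example, assuming lab.Pair takes (car, cdr) in that order (the constructor signature is an assumption here, not confirmed by this file):

ll = lab.Pair(1, lab.Pair(2, nil_rep))  # hypothetical construction
list_from_ll(ll)        # -> [1, 2]
list_from_ll(nil_rep)   # -> []
list_from_ll('spam')    # -> 'SOMETHING' (neither number, Pair, nor nil)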
Example #11
           'Resetting to original value of %d.') % orig_recursion_limit)
    lab.sys.setrecursionlimit(orig_recursion_limit)


def make_tester(func):
    """Wrap func so calls return {'ok': True, 'output': ...} on success, or
    {'ok': False, 'type': ..., 'exception': ...} instead of raising."""
    def _tester(*args):
        try:
            return {'ok': True, 'output': func(*args)}
        except Exception:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            return {'ok': False, 'type': exc_type.__name__, 'exception': exc_obj.args}
    return _tester

test_tokenize = lambda x: [make_tester(lab.tokenize)(i) for i in x]
test_parser = lambda x: [make_tester(lab.parse)(i) for i in x]
test_tokenize_and_parse = lambda sources: [make_tester(lambda i: lab.parse(lab.tokenize(i)))(source) for source in sources]
test_eval = lambda x: [make_tester(lab.evaluate)(i) for i in x]
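The resulting dicts look like this (token and error values are illustrative, not taken from lab):

make_tester(lab.tokenize)('(+ 1 2)')
# -> {'ok': True, 'output': ['(', '+', '1', '2', ')']}   (exact tokens depend on lab)
make_tester(lab.evaluate)(['hypothetical-bad-input'])
# -> {'ok': False, 'type': '...', 'exception': (...)}    (error type depends on lab)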


def test_file_eval(*args):
    try:
        out = lab.evaluate_file(*args)
        return {'ok': True, 'output': out}
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        return {'ok': False, 'type': exc_type.__name__, 'exception': exc_obj.args}


def test_continued_evaluations(inps):
    env = None
    outs = []
Example #12
            return {'ok': True, 'output': func(*args)}
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            return {
                'ok': False,
                'type': exc_type.__name__,
                'exception': exc_obj.args
            }

    return _tester


test_tokenize = lambda x: [make_tester(lab.tokenize)(i) for i in x]
test_parser = lambda x: [make_tester(lab.parse)(i) for i in x]
test_tokenize_and_parse = lambda sources: [
    make_tester(lambda i: lab.parse(lab.tokenize(i)))(source)
    for source in sources
]
test_eval = lambda x: [make_tester(lab.evaluate)(i) for i in x]


def test_file_eval(*args):
    try:
        out = lab.evaluate_file(*args)
        out = list_from_ll(out)
        return {'ok': True, 'output': out}
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        return {
            'ok': False,
            'type': exc_type.__name__,