def test_userlist():
    assert_lldb_repr(collections.UserList(), r'\[\]', 'UserList()')
    assert_lldb_repr(collections.UserList([1, 2, 3]), r'\[1, 2, 3\]',
                     'UserList([1, 2, 3])')
    assert_lldb_repr(collections.UserList([1, 3.14159, u'hello', False, None]),
                     r'\[1, 3.14159, u?\'hello\', False, None\]',
                     'UserList([1, 3.14159, u"hello", False, None])')
Example #2
    def test_basic_proxy(self):
        o = C()
        self.check_proxy(o, weakref.proxy(o))

        L = collections.UserList()
        p = weakref.proxy(L)
        self.assertFalse(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.assertTrue(p, "proxy for non-empty UserList should be true")
        p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.assertIn(3, p, "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = collections.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)
        ## self.assertEqual(repr(L2), repr(p2))
        L3 = collections.UserList(range(10))
        p3 = weakref.proxy(L3)
        self.assertEqual(L3[:], p3[:])
        self.assertEqual(L3[5:], p3[5:])
        self.assertEqual(L3[:5], p3[:5])
        self.assertEqual(L3[2:5], p3[2:5])
Example #3
def test_FixedSequenceChecker5():
    @tc.typecheck
    def foo(a: collections.UserList([str, collections.UserList])):
        return a[1][1]

    assert foo(collections.UserList(["aha!",
                                     collections.UserList([3, 4, 5])])) == 4
    with expected(
            tc.InputParameterError(
                "foo() has got an incompatible value for a: ")):
        assert foo(["aha!", collections.UserList([3, 4, 5])]) == 4
Example #4
def shuffle_and_blind_experiments(*experiments, random_seed=0):
    """Shuffle one or more datamodel.Experiments and blind the names.

    datamodel.Position instances from the provided experiment(s) are randomly
    shuffled (based on a fixed, specified random seed, for reproducibility) and
    their names obscured for blind annotation.

    Parameters:
        *experiments: one or more datamodel.Experiment instances, or lists of
            datamodel.Position instances.
        random_seed: string or integer providing random seed for reproducible
            shuffling of the positions.

    Returns: list of datamodel.Position instances.
    """
    positions = collections.UserList(datamodel.flatten(experiments))
    # needs to be a UserList so a display_name attribute can be set
    positions.display_name = 'blinded positions'
    # shuffle with a new Random generator from the specified seed. (Don't just re-seed the
    # default random generator, because that would mess up other random streams if in use.)
    random.Random(random_seed).shuffle(positions)
    for i, position in enumerate(positions):
        position.display_name = str(i)
        for j, timepoint in enumerate(position):
            timepoint.display_name = str(j)
    return positions
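# Aside (a minimal standalone sketch, not part of the project above): the comment
# "needs to be a UserList so a display_name attribute can be set" refers to the fact
# that built-in list instances reject attribute assignment, while UserList instances
# have a __dict__ and accept it.
import collections

plain = [1, 2, 3]
try:
    plain.display_name = 'blinded positions'
except AttributeError:
    pass  # built-in lists have no instance __dict__, so this always raises
wrapped = collections.UserList([1, 2, 3])
wrapped.display_name = 'blinded positions'  # works on the UserList wrapper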
Example #5
    def test_user_list(self):
        d = collections.UserList()
        self.assertEqual(pprint.pformat(d, width=1), "[]")
        words = 'the quick brown fox jumped over a lazy dog'.split()
        d = collections.UserList(zip(words, itertools.count()))
        self.assertEqual(
            pprint.pformat(d), """\
[('the', 0),
 ('quick', 1),
 ('brown', 2),
 ('fox', 3),
 ('jumped', 4),
 ('over', 5),
 ('a', 6),
 ('lazy', 7),
 ('dog', 8)]""")
Example #6
def random2():
    # using choice()
    numbers = []
    for i in range(8000):
        numbers.append(i)
    numbersList = col.UserList(numbers)
    for j in range(1000):
        print(str(j+1) + ":" + str(random.choice(numbersList)))
Example #7
 def test_basic_eight_statements(self):
     from eight import queue, builtins, reprlib
     from eight.configparser import ConfigParser
     from eight import collections
     from eight.collections import UserList, deque
     q = collections.UserList()
     self.assertEqual(type(map(str, range(5))), map)
     self.assertEqual(open, io.open)
Example #8
 def preorderTraversal(self, root: TreeNode) -> List[int]:
     result = collections.UserList()
     if (not root):
         return result
     result.append(root.val)
     result.extend(self.preorderTraversal(root.left))
     result.extend(self.preorderTraversal(root.right))
     return result
Example #9
def extract_cds_from_bed(bed_file, output_fasta, genome_fasta, fasta_interval_file, check_acgt=None, check_start=None, check_length=None, check_stop=None, check_inframe_stop=None, all_checks=None, uniquify = False):
        '''
        Extract the CDS to fasta file
        Ex.: extract_cds('../feature_file.bed', '../output_file_fasta.fasta', '../source_data/genome_fasta_file.fa')
        '''
        #create dictionaries to hold cds parts
        cds_list = collections.defaultdict(lambda: collections.defaultdict())
        # stop_list = {}
        concat_list = collections.defaultdict(lambda: collections.UserList())
        #create fasta file with extracted parts
        fasta_from_intervals(bed_file, fasta_interval_file, genome_fasta, names = True)
        #read the fasta interval file
        sample_names, sample_seqs = gen.read_fasta(fasta_interval_file)
        #label the stop codons
        for i, name in enumerate(sample_names):
            if len(sample_seqs[i]) == 3 and sample_seqs[i] in ['TAA', 'TAG', 'TGA']:
                sample_names[i] = name + '.stop_codon'
        #iterate through the samples
        for i, sample in enumerate(sample_names):
            entry_meta_splits = sample.split('.')
            #set the sample name: sample(.exon)
            sample_id = entry_meta_splits[0]
            # check if labelled as stop codon, and set to high number so when
            # sorted this is the last thing to be appended
            if entry_meta_splits[-1] == "stop_codon":
                exon_id = 9999999
            else:
                exon_id = int(entry_meta_splits[1])
            cds_list[sample_id][exon_id] = sample_seqs[i]
        #get sorted list of cds exons to build cds
        for sample in sorted(cds_list):
            for part in sorted(cds_list[sample]):
                concat_list[sample].append(cds_list[sample][part])
        #concatenate and write to output
        names = []
        seqs = []
        for sample in sorted(concat_list):
            names.append(sample)
            seqs.append("".join(concat_list[sample]))
        #perform sequence quality control checks
        if check_acgt or check_stop or check_start or check_length or check_inframe_stop or all_checks:
            names, seqs = check_sequence_quality(names, seqs, check_acgt, check_stop, check_start, check_length, check_inframe_stop, all_checks)

        if uniquify:
            #leave only one transcript per gene
            gene_to_trans = link_genes_and_transcripts(bed_file)
            names, seqs = uniquify_trans(names, seqs, gene_to_trans)
            print("After leaving only one transcript per gene, {0} sequences remain.".format(len(seqs)))
        #write to output fasta file
        gen.write_to_fasta(names, seqs, output_fasta)
Example #10
    def test___str__(self):
        """Test"""
        n1 = MyNode("n1")
        n2 = MyNode("n2")
        n3 = MyNode("n3")
        nl = SCons.Node.NodeList([n3, n2, n1])

        l = [1]
        ul = collections.UserList([2])
        s = str(nl)
        assert s == "['n3', 'n2', 'n1']", s

        r = repr(nl)
        r = re.sub(r'at (0[xX])?[0-9a-fA-F]+', 'at 0x', r)
        # Don't care about ancestry: just leaf value of MyNode
        r = re.sub(r'<.*?\.MyNode', '<MyNode', r)
        # New-style classes report as "object"; classic classes report
        # as "instance"...
        r = re.sub("object", "instance", r)
        l = ", ".join(["<MyNode instance at 0x>"]*3)
        assert r == '[%s]' % l, r
Example #11
# A dict created with collections.UserDict() takes about a quarter of the space of a
# plain dict, but the wrapper also holds the real dict in its data attribute.
d0 = dict(a=1, b=2)
d1 = collections.UserDict(d0)
import sys
sys.getsizeof(d0)
# 248
sys.getsizeof(d1)
# 64
sys.getsizeof(d1.data)
# 248

# 8. UserList --- wraps a list object, simplifying list subclassing; it behaves like a list.
# The contents of the instance are kept in a regular list, accessible through the UserList's data attribute.
l1 = ['a', 'b', 'c', 'd', 'e', 'f']
import sys
l0 = collections.UserList(l1)
sys.getsizeof(l1)
# 104
sys.getsizeof(l0)
# 48
sys.getsizeof(l0.data)
# 104

# 9. UserString --- wraps a string object, simplifying string subclassing.
# It behaves like a string; the contents are kept in a regular string, accessible through the UserString's data attribute.
import sys
s0 = 'abacdef'
s1 = collections.UserString(s0)
sys.getsizeof(s1)
# 48
sys.getsizeof(s0)
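# Aside (illustrative sketch, not from the snippet above): the wrapper keeps its contents
# in a plain object exposed as .data. Note that UserList(l1) copies the initial list into
# .data rather than aliasing it, and mutations made through the wrapper show up in .data.
import collections

demo = collections.UserList(['a', 'b'])
demo.append('c')
print(type(demo.data), demo.data)  # <class 'list'> ['a', 'b', 'c']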
Example #12
def test_seq_of_UserList():
    assert tc.seq_of(int)([4, 5])
    assert tc.seq_of(int)(collections.UserList([4, 5]))
Example #13
def extract_features(gtf_file,
                     features,
                     output_file,
                     full_chr_name=None,
                     clean_chrom_only=False):
    """
    Given a GTF file, extract coordinates for specific features and write to .bed.

    Args:
        gtf_file (str): path to gtf file
        features (list): list of feature names to be extracted from the file
        output_file (str): path to the output file
        full_chr_name (bool): if true, add 'chr' to start or name entry
        clean_chrom_only (bool): if true, only allow chromosome names in [1..22, X, Y]
    """

    feature_list = collections.defaultdict(lambda: collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(
            lambda: collections.defaultdict(lambda: collections.UserList())))))
    if len(features) > 1:
        list_feature = True
    else:
        list_feature = False

    if clean_chrom_only:
        allowed = [str(i) for i in range(1, 23)] + ["X", "Y"]

    lines = gen.read_many_fields(gtf_file, "\t")
    #ensure protein coding and not entry meta
    lines = [
        line for line in lines
        if not line[0].startswith("#") and "pseudogene" not in line[-1]
    ]
    #compile regex to find genes, transcripts and exons
    gene_regex = re.compile("(?<=gene_id \")ENSG[0-9]*")
    trans_regex = re.compile("(?<=transcript_id \")ENST[0-9]*")
    exon_no_regex = re.compile("(?<=exon_number \")[0-9]*")

    for line in lines:
        #if the feature identifier is in the list
        if line[2] in features:
            #get entry meta
            meta = line[-1]
            gene = re.search(gene_regex, meta).group(0)
            trans = re.search(trans_regex, meta).group(0)
            #I added the try ... except in case you want to extract, say, transcript
            #features where the exon number information wouldn't be present.
            try:
                exon_no = re.search(exon_no_regex, meta).group(0)
            except AttributeError:
                exon_no = 0
            chr_no = line[0]
            add = True
            if clean_chrom_only:
                if chr_no not in allowed:
                    add = False
            if add:
                feature_list[chr_no][gene][trans][exon_no][line[2]].append(
                    [line[3], line[4], line[6]])
    #output features sorted by feature, chr, gene, transcript id
    with open(output_file, 'w') as output:
        for chr_no in sorted(feature_list):
            for gene in sorted(feature_list[chr_no]):
                for trans in sorted(feature_list[chr_no][gene]):
                    for exon in sorted(feature_list[chr_no][gene][trans]):
                        for feature in sorted(
                                feature_list[chr_no][gene][trans][exon]):
                            for item in feature_list[chr_no][gene][trans][
                                    exon][feature]:
                                if not list_feature:
                                    feature = feature
                                if full_chr_name:
                                    chr_name = "chr{0}".format(chr_no)
                                else:
                                    chr_name = str(chr_no)
                                #output and convert to base 0
                                output.write('\t'.join([
                                    chr_name,
                                    str(int(item[0]) -
                                        1), item[1], '{0}.{1}.{2}'.
                                    format(trans, exon, gene), feature, item[2]
                                ]) + '\n')
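# Aside (toy keys, purely illustrative, not data from the function above): a two-level
# version of the nested-defaultdict pattern used in extract_features. Each missing key
# builds the next level on demand, and the innermost leaf is a UserList that can be
# appended to immediately.
import collections

toy = collections.defaultdict(lambda: collections.defaultdict(
    lambda: collections.UserList()))
toy["chr1"]["ENSG00000001"].append(["100", "200", "+"])
toy["chr1"]["ENSG00000001"].append(["300", "400", "+"])
print(toy["chr1"]["ENSG00000001"].data)  # [['100', '200', '+'], ['300', '400', '+']]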
Example #14
 def testUserList(self):
     obj = collections.UserList([1, 2, 3])
     d = serpent.dumps(obj)
     obj2 = serpent.loads(d)
     self.assertEqual([1, 2, 3], obj2)
Example #15
def test_static_hasattr_metaclass_attr():
    class Meta(type):
        def bar(cls):
            pass

    class Class(metaclass=Meta):
        pass

    assert static_hasattr(Class, 'bar')


@pytest.mark.parametrize(
    'obj',
    [
        3,
        collections.UserList(),
        collections.UserDict,  # the class, not an instance
    ])
def test_static_hasattr_with_missing_attr(obj):
    assert not static_hasattr(obj, 'bar')


def test_static_copy():
    class Foo:
        __slots__ = ['foo', '__dict__']

    foo = Foo()
    foo.foo = []

    foo_copy = static_copy(foo)
Example #16
print(emptudic)  # >>> {} (may be empty)
emptudic['test'] = 1234
print(emptudic)  # >>> {'test': 1234} # no difference from regular dict



print()
print('collections.UserList([list])')

# This class acts as a wrapper around list objects. It is a useful base class for your own
# list-like classes, which can inherit from it, override existing methods, or add new ones;
# in this way new behaviour can be added to lists.
# The need for this class has been partially supplanted by the ability to subclass list directly;
# however, this class can be easier to work with because the underlying list is accessible as an attribute.

lst = [1,2,3,4]
ulst = collections.UserList(lst)
print(ulst)  # >>> [1, 2, 3, 4]
print(type(ulst.data))  # regular list



print()
print('collections.UserString([sequence])')

# The UserString class acts as a wrapper around string objects. The need for this class has been
# partially supplanted by the ability to subclass str directly; however, this class can be easier
# to work with because the underlying string is accessible as an attribute.

stri = 'abcdefg'
ustri = collections.UserString(stri)
print(ustri)  # >>> abcdefg
print(type(ustri.data))  # >>> regular string
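# Aside (a minimal sketch; the class name and behaviour are illustrative, not taken from
# any project quoted here): the "useful base class" point above in practice means
# inheriting from UserList and overriding or adding methods, with the real storage
# still available as .data.
import collections

class LoggingList(collections.UserList):
    def append(self, item):
        print('appending', item)
        super().append(item)

ll = LoggingList([1, 2])
ll.append(3)    # prints: appending 3
print(ll.data)  # [1, 2, 3]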
Example #17
def user_list_modified(param=collections.UserList()):  # Noncompliant
    param.append(1)
Example #18
    if isinstance(data, collections.abc.Mapping):
        print("MAPPING", end=' ')
    if isinstance(data, numbers.Number):
        print("NUMBER", end=' ')
    if isinstance(data, collections.abc.Sequence):
        print("SEQUENCE", end=' ')
    print()


dump(0)
dump("string")
dump("string"[0])
dump([1, 2, 3])
dump((1, 2, 3))
dump({"a": 1})
dump(len)  # function
dump(UserList)  # module
dump(collections.UserList)  # class
dump(collections.UserList())  # instance

## <type 'int'> => NUMBER
## <type 'string'> => SEQUENCE
## <type 'string'> => SEQUENCE
## <type 'list'> => SEQUENCE
## <type 'tuple'> => SEQUENCE
## <type 'dictionary'> => MAPPING
## <type 'builtin_function_or_method'> => CALLABLE
## <type 'module'> =>
## <type 'class'> => CALLABLE
## <type 'instance'> => MAPPING NUMBER SEQUENCE
Example #19
# path 2 - nested ('item/key/val' nodes)
foo.d = {
    'One': date.DateTime(2001, 2, 3, 4, 5, 6.7),
    'Two': date.DateTime(2002, 3, 4, 5, 6, 7.8)
}

foo.ud = collections.UserDict()
foo.ud['One'] = date.DateTime(2003, 4, 5, 6, 7, 8.9)
foo.ud['Two'] = date.DateTime(2004, 5, 6, 7, 8, 9.10)

foo.l = []
foo.l.append(date.DateTime(2005, 6, 7, 8, 9, 10.11))
foo.l.append(date.DateTime(2006, 7, 8, 9, 10, 11.12))

foo.ul = collections.UserList()
foo.ul.append(date.DateTime(2007, 8, 9, 10, 11, 12.13))
foo.ul.append(date.DateTime(2008, 9, 10, 11, 12, 13.14))

foo.tup = (date.DateTime(2009, 10, 11, 12, 13,
                         14.15), date.DateTime(2010, 11, 12, 13, 14, 15.16))

#print "---PRE-PICKLE---"
#printfoo(foo)

x1 = xml_pickle.dumps(foo)
#print x1

#print "---POST-PICKLE---"
bar = xml_pickle.loads(x1)
#printfoo(bar)
Example #20
def test_seq_of_UserList():
    namespace = None
    assert tc.seq_of(int)([4, 5], namespace)
    assert tc.seq_of(int)(collections.UserList([4, 5]), namespace)
Example #21
 def test_isseq_6(self):
     self.assertTrue(y.is_sequence(collections.UserList([3, 4, 5])))
Example #22
 def test_itervariant_isuserlist_success(self):
     obj = iter(collections.UserList(self.SOME_LIST))
     assert comma.helpers.is_dict_or_list(obj) is list
Example #23
def function23(value=collections.UserList()):  # [dangerous-default-value]
    """mutable, dangerous"""
    return value
Example #24
 def foo(a: collections.UserList([str, collections.UserList])):
     return a[1][1]
Example #25
def make_defaultdict():
    d = collections.defaultdict()
    for i in range(100000):
        d[i] = i
    return d


@pytest.mark.parametrize(
    "param, wanted_size",
    [
        (list(range(100000)), 558),
        (set(range(100000)), 558),
        ({i: i for i in range(100000)}, 1339),
        ("x" * 100000, 1049),
        (b"x" * 100000, 1049),
        (bytearray([64] * 100000), 1049),
        (bytearray([64] * 100000 + [128]), 1049),  # UnicodeDecodeError
        (make_defaultdict(), 1339),
        (collections.OrderedDict({i: i for i in range(100000)}), 1339),
        (collections.UserDict({i: i for i in range(100000)}), 1339),
        (collections.UserList(range(100000)), 558),
        (collections.UserString("x" * 100000), 1049),
        (collections.UserString(b"x" * 100000), 1049),
    ],
)
def test_truncation(param, wanted_size):
    notice = dict(params=dict(param=param))

    b = jsonify_notice(notice)
    assert len(b) == wanted_size
Example #26
            oldest = next(iter(self))
            del self[oldest]


lru = LRU(2)
lru["a"] = 10
print(lru)
lru["b"] = 11
print(lru)
lru["c"] = 12
print(lru)

print("-----------------------class collections.UserDict([initialdata])")

ud = collections.UserDict({"a": 1, "b": 2})
print(ud["a"])
print(ud.data)

print("-----------------------class collections.UserList([list])")

ul = collections.UserList([{"a": 1, "b": 2}, {"c": 3, "d": 4}])

print(ul.data)
ul.append({"a": 1, "b": 2})
print(ul.data)

print("-----------------class collections.UserString(seq)")
us = collections.UserString([{"a": 1, "b": 2}, {"c": 3, "d": 4}])

print(us.data)
print(us.upper())
Example #27
	def test_user_list(self, advanced_file_regression: AdvancedFileRegressionFixture):
		d: collections.UserList = collections.UserList()
		assert FancyPrinter(width=1).pformat(d) == "[]"
		words = "the quick brown fox jumped over a lazy dog".split()
		d = collections.UserList(zip(words, itertools.count()))
		advanced_file_regression.check(FancyPrinter().pformat(d))