import random

from pygments.util import xrange  # Python 2/3 compat shim, as used elsewhere in these examples


def game(player1, player2):

    count1 = 0
    count2 = 0
    game_count=0
    player1.playing=True
    player2.playing=True
    player1.role='defencer'

    my_randoms=[]
    my_randoms2=[]
    guess=0
    guess2=0
    while player1.point < 5 and player2.point < 5:
        if player1.role=='defencer':

            my_randoms = random.sample(xrange(1, 11), player1.array_length)
            player1.defense_array=my_randoms
            guess = random.choice(MAX_ARRAY)

        elif player2.role=='defencer':


            my_randoms2 = random.sample(xrange(1, 11), player2.array_length)
            player2.defense_array = my_randoms2
            guess2 = random.choice(MAX_ARRAY)

        if guess in my_randoms:
            player2.role='defencer'
            count1+=1
            player1.point=count1

        else:
            player1.role='defencer'
            count2 += 1
            player2.point = count2


        if guess2 in my_randoms2:
            player1.role='defencer'
            count2 += 1
            player2.point = count2

        else:
            player2.role='defencer'
            count1+=1
            player1.point=count1


    if player1.point >= 5:
        return player1
    elif player2.point >= 5:
        return player2
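For orientation, a minimal, hypothetical sketch of the objects the game() snippet above appears to expect; the Player class and the MAX_ARRAY guess pool are assumptions, not part of the original code:

MAX_ARRAY = range(1, 11)  # hypothetical: mirrors the 1-10 range the defender samples from


class Player(object):
    def __init__(self, array_length=3):
        self.point = 0
        self.role = 'attacker'     # game() reassigns roles as it runs
        self.playing = False
        self.array_length = array_length
        self.defense_array = []


winner = game(Player(), Player())
print('first to 5 points:', winner.point)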
Example #2
from random import randrange

from pygments.util import xrange  # Python 2/3 compat shim, as used elsewhere in these examples


def is_prime(n, k=10):
    # Miller-Rabin probabilistic primality test with k witness rounds.
    if n == 2:
        return True
    if not n & 1:
        return False

    def check(a, s, d, n):
        x = pow(a, d, n)
        if x == 1:
            return True
        for i in xrange(s - 1):
            if x == n - 1:
                return True
            x = pow(x, 2, n)
        return x == n - 1

    s = 0
    d = n - 1

    while d % 2 == 0:
        d >>= 1
        s += 1

    for i in xrange(k):
        a = randrange(2, n - 1)
        if not check(a, s, d, n):
            return False
    return True
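A quick, hypothetical usage check of the Miller-Rabin sketch above; the candidate values are arbitrary (a prime, the Carmichael number 561, and the Mersenne prime 2**61 - 1):

for candidate in (97, 561, 2**61 - 1):
    print(candidate, is_prime(candidate))  # expect: True, False, True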
def main_random():
    repeat_min_n = 13
    repeat_max_n = 13
    repeat = 10**6
    count_not_duplicate_array = []
    for n in range(repeat_min_n, repeat_max_n + 1):
        # print(n)
        count_not_duplicate = 0
        list_arranged = [str(hex(num))[2:] for num in xrange(n)]
        for j in range(repeat):
            rnd = CalcRandom(n, list_arranged)
            duplicate = rnd.isDuplicate()
            if not duplicate:
                count_not_duplicate += 1
            if j % 10000 == 0 and j:
                # print('{0} {1}'.format(rnd.num, 'duplicate' if duplicate else ''))
                print()
                print('P({0}) = {1}/{2}'.format(n, count_not_duplicate, j))
                print('     = {}'.format(count_not_duplicate / j))
        count_not_duplicate_array.append(count_not_duplicate)
        print('P({0}) = {1}/{2}'.format(n, count_not_duplicate, repeat))
        print('     = {}'.format(count_not_duplicate / repeat))
        print()
    print('--- Result ---')
    print()
    for n in range(repeat_min_n, repeat_max_n + 1):
        print('P({0}) = {1}/{2}'.format(
            n, count_not_duplicate_array[n - repeat_min_n], repeat))
        print('     = {}'.format(count_not_duplicate_array[n - repeat_min_n] /
                                 repeat))
        print()
Example #4
def prunning(node, pruneFactor):
    # traverse() returns the total number of internal nodes and leaf nodes in the tree
    totNode, totLeaf = traverse(node)
    numToPrune = int(totNode * pruneFactor)
    randomInt = random.sample(xrange(totNode), numToPrune)

    for index in randomInt:
        prunningGivenIndex(node, index)
    return
Example #5
 def check(a, s, d, n):
     x = pow(a, d, n)
     if x == 1:
         return True
     for i in xrange(s - 1):
         if x == n - 1:
             return True
         x = pow(x, 2, n)
     return x == n - 1
Example #6
 def _draw_line_numbers(self):
     """
     Create drawables for the line numbers.
     """
     if not self.line_numbers:
         return
     for p in xrange(self.maxlineno):
         n = p + self.line_number_start
         if (n % self.line_number_step) == 0:
             self._draw_linenumber(p, n)
Example #8
def hexdump(src, length=16):
    result = []
    digits = 4 if isinstance(src, unicode) else 2

    for i in xrange(0, len(src), length):
        s = src[i:i + length]
        hexa = b''.join(["%0*X" % (digits, ord(x)) for x in s])
        text = b''.join([x if 0x20 <= ord(x) < 0x7F else b'.' for x in s])
        result.append(b"%04X %-*s %s" % (i, length * (digits + 1), hexa, text))

    print(b'\n'.join(result))
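The hexdump above is Python 2 specific (it relies on `unicode` and on byte strings behaving like str). A minimal Python 3 sketch of the same idea, assuming `src` is a `bytes` object; the helper name is ours:

def hexdump_bytes(src, length=16):
    # Render bytes as offset / hex / printable-ASCII columns.
    lines = []
    for i in range(0, len(src), length):
        chunk = src[i:i + length]
        hexa = ' '.join('%02X' % b for b in chunk)
        text = ''.join(chr(b) if 0x20 <= b < 0x7F else '.' for b in chunk)
        lines.append('%04X  %-*s %s' % (i, length * 3, hexa, text))
    print('\n'.join(lines))

hexdump_bytes(b'pygments xrange example\x00\x01\x02')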
Example #9
def hexdump(src, length=16):
    'hex dumping function'
    result = []
    digits = 4 if isinstance(src, str) else 2
    for i in xrange(0,len(src), length):
        a = src[i:i+length]
        hexa = b''.join(['%0*X'%(digits, ord(str(x))) for x in a])
        text = b''.join([x if 0x20 <= ord(x) < 0x7F else b'.' for x in a])
        result.append(b"%04X %-*s %s" % (i, length*(digits+1),hexa, text))

    print(b'\n'.join(result))
Example #10
def split_rics_equally(chunks):
    # Distribute the module-level `rics` dict round-robin into `chunks` dicts.
    # Prep with empty dicts.
    return_list = [dict() for _ in xrange(chunks)]
    if len(rics) < RIC_CHUNK_SIZE:
        return [rics]

    idx = 0
    for k, v in rics.items():
        return_list[idx][k] = v
        if idx < chunks - 1:  # indexes start at 0
            idx += 1
        else:
            idx = 0
    return return_list
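A hypothetical driver for the function above; `rics` and `RIC_CHUNK_SIZE` are stand-ins for the module-level names the snippet reads (and the module is assumed to import xrange as in the other examples):

RIC_CHUNK_SIZE = 4  # hypothetical threshold
rics = {'RIC%d' % i: i for i in range(10)}  # hypothetical input dict

parts = split_rics_equally(3)
for i, part in enumerate(parts):
    print(i, part)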
Example #11
              color='black')
rcudaC = ax.bar(indexC + dist * bar_width,
                cudaC,
                bar_width,
                alpha=0.6,
                color='black',
                hatch='//')

ax.set_xticks(index)
ax.set_xticklabels(label)
#label_group_bar(ax, data)

scale = 1. / 3
for pos in [1, 2]:
    add_line(ax, pos * scale, -.1)
for pos in xrange(0, 3):
    lxpos = pos * scale + scale / 2
    ax.text(lxpos, -.1, group_label[pos], ha='center', transform=ax.transAxes)

fig.tight_layout()
plt.subplots_adjust(bottom=0.1)


def autolabel(rects, xpos='center'):
    """
    Attach a text label above each bar in *rects*, displaying its height.

    *xpos* indicates which side to place the text w.r.t. the center of
    the bar. It can be one of the following {'center', 'right', 'left'}.
    """
Example #12
# Python code to demonstrate range() vs xrange()
# on the basis of memory usage

import sys
# from ipython_genutils.py3compat import xrange
from pygments.util import xrange

# initializing a with range()

a = range(1, 10000)

# initializing a with xrange()
x = xrange(1, 10000)

# testing the size of a
# range() takes more memory
print("The size allotted using range() is : ", end="")
print(sys.getsizeof(a))

# testing the size of x
# xrange() takes less memory
print("The size allotted using xrange() is : ", end="")
print(sys.getsizeof(x))
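Note that on Python 3 the pygments.util shim effectively aliases the built-in range, so both prints above report the same small size; the difference described in the comments only appears on Python 2, where range() materializes a full list. A minimal Python 3 sketch that shows the analogous contrast by materializing explicitly:

import sys

r = range(1, 10000)        # lazy range object: small, constant size
l = list(range(1, 10000))  # materialized list: grows with the number of elements
print("range object:", sys.getsizeof(r))
print("list:        ", sys.getsizeof(l))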
Example #13
def experiment_train_data_distribution():
    train_data, train_labels = get_train_articles()
    label_counts = Counter(train_labels)
    publications = []
    with open('out/articles_train_data.json', 'r') as fin:
        data = json.load(fin)
        for i, rec in enumerate(data):
            publications.append(rec['publication'])
    pub_count = sorted(Counter(publications).items(),
                       key=lambda x: x[1],
                       reverse=True)

    pub_name, pub_val = [], []
    for p in pub_count:
        pub_name.append(p[0])
        pub_val.append(p[1])

    print("Total Test Data Size :", len(train_labels))
    print(label_counts)
    print(pub_count)

    # Plot Label Distribution
    plt.bar(["0", "1"], [label_counts[0], label_counts[1]],
            color=plt.rcParams['axes.prop_cycle'].by_key()['color'])
    plt.xlabel("Labels")
    plt.ylabel("Count")
    plt.tight_layout()
    plt.savefig("plots/train-label-distribution.png")
    plt.close()

    # Plot Publication Distribution
    pub_x = np.arange(len(pub_name))
    print(pub_x)
    set_facecolor(
        plt.bar(pub_x,
                pub_val,
                color=plt.rcParams['axes.prop_cycle'].by_key()['color']))
    plt.xlabel("Publications")
    plt.xticks(pub_x, pub_name, rotation='vertical')
    plt.ylabel("Count")
    plt.tight_layout()
    plt.savefig("plots/train-publication-distribution.png")
    plt.close()

    # Plot Test Data Distribution
    pipeline = Pipeline([('tfidf', TfidfVectorizer())])
    random_samples = sorted(random.sample(xrange(len(train_data)), 1000))
    data = pipeline.fit_transform([train_data[i]
                                   for i in random_samples]).todense()
    pca = PCA(n_components=3).fit(data)
    X = pca.transform(data)
    fig = pyplot.figure()
    ax = Axes3D(fig)
    # ax.scatter(sequence_containing_x_vals, sequence_containing_y_vals, sequence_containing_z_vals)
    # pyplot.show()

    ax.scatter(X[:, 0],
               X[:, 1],
               X[:, 2],
               c=[
                   'tomato' if train_labels[i] == 1 else 'teal'
                   for i in random_samples
               ])
    # plt.legend([a.collections[0], b.collections[0]], ["Label 0", "Label 1"],
    #            loc="upper right")
    scatter1_proxy = matplotlib.lines.Line2D([0], [0],
                                             linestyle="none",
                                             c='tomato',
                                             marker='o')
    scatter2_proxy = matplotlib.lines.Line2D([0], [0],
                                             linestyle="none",
                                             c='teal',
                                             marker='o')
    ax.legend([scatter1_proxy, scatter2_proxy], ['label_1', 'label_0'],
              numpoints=1)
    plt.savefig("plots/train-data-distribution.png")
    plt.close()
Example #14
    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(u'{\\tt')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = list(reversed(styles))[0]
            if styleval:
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s%s{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s%s{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existing encodings from the LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass  = self.docclass,
                     preamble  = self.preamble,
                     title     = self.title,
                     encoding  = encoding,
                     styledefs = self.get_style_defs(),
                     code      = outfile.getvalue()))
Example #15
from __future__ import print_function

import random
import unittest

from pygments import lexers, formatters, lex, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
from pygments.util import text_type, StringIO, BytesIO, xrange, ClassNotFound

import support

TESTFILE, TESTDIR = support.location(__file__)

test_content = [chr(i) for i in xrange(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'


def test_lexer_instantiate_all():
    # instantiate every lexer, to see if the token type defs are correct
    def verify(name):
        getattr(lexers, name)

    for x in lexers.LEXERS:
        yield verify, x


def test_lexer_classes():
    # test that every lexer class has the correct public API
Example #16
    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(u'\\begin{' + self.envname +
                      u'}[commandchars=\\\\\\{\\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments or self.escapeinside:
            outfile.write(
                u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{' + self.envname + u'}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existing encodings from the LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                              dict(docclass=self.docclass,
                                   preamble=self.preamble,
                                   title=self.title,
                                   encoding=encoding,
                                   styledefs=self.get_style_defs(),
                                   code=outfile.getvalue()))
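For context, a minimal sketch of how this formatter is normally driven through the public pygments API; highlight() calls the formatter's format(), which in turn calls format_unencoded():

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import LatexFormatter

code = "for i in range(3):\n    print(i)\n"
# Emits a Verbatim environment containing \PY{...} commands, as written above.
print(highlight(code, PythonLexer(), LatexFormatter()))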
Example #17
# Use NumPy to generate phony data: 100 points in total.
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (tf.Session, tf.train, initialize_all_variables)

from pygments.util import xrange

x_data = np.float32(np.random.rand(2, 100))  # random inputs
y_data = np.dot([0.100, 0.200], x_data) + 0.300

# Build a linear model
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Minimize the squared error
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Initialize the variables
init = tf.initialize_all_variables()

# Launch the graph
sess = tf.Session()
sess.run(init)

# Fit the plane
for step in xrange(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))

# The best fit converges to W: [[0.100  0.200]], b: [0.300]
Example #18
    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments or self.escapeinside:
            outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, self.commandprefix)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, self.commandprefix)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    text = value
                    value = ''
                    while len(text) > 0:
                        a, sep1, text = text.partition(self.left)
                        if len(sep1) > 0:
                            b, sep2, text = text.partition(self.right)
                            if len(sep2) > 0:
                                value += escape_tex(a, self.commandprefix) + b
                            else:
                                value += escape_tex(a + sep1 + b, self.commandprefix)
                        else:
                            value = value + escape_tex(a, self.commandprefix)
                else:
                    value = escape_tex(value, self.commandprefix)
            elif ttype not in Token.Escape:
                value = escape_tex(value, self.commandprefix)
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{' + self.envname + u'}\n')

        if self.full:
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass  = self.docclass,
                     preamble  = self.preamble,
                     title     = self.title,
                     encoding  = self.encoding or 'latin1',
                     styledefs = self.get_style_defs(),
                     code      = outfile.getvalue()))
Example #19
from __future__ import print_function

import random
import unittest

from pygments import lexers, formatters, filters, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
from pygments.util import text_type, StringIO, xrange, ClassNotFound

import support

TESTFILE, TESTDIR = support.location(__file__)

test_content = [chr(i) for i in xrange(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'


def test_lexer_instantiate_all():
    # instantiate every lexer, to see if the token type defs are correct
    def verify(name):
        getattr(lexers, name)
    for x in lexers.LEXERS:
        yield verify, x


def test_lexer_classes():
    # test that every lexer class has the correct public API
    def verify(cls):
Example #20
# -*- coding: utf-8 -*-
# @Time    : 2020/3/14 16:41
# @Author  : ZhiMa_Maker
# @Email   :  [email protected]
# @File    : test.py
# @Software : PyCharm
from pygments.util import xrange

if __name__ == '__main__':
    AndroidIconList = [("mipmap-ldpi", 36, "icon-36"),
                       ("mipmap-mdpi", 48, "icon-48"),
                       ("mipmap-hdpi", 72, "icon-72"),
                       ("mipmap-xhdpi", 96, "icon-96"),
                       ("mipmap-xxhdpi", 144, "icon-144"),
                       ("mipmap-xxxhdpi", 192, "icon-192"),
                       ("AppIcon512", 512, "icon-512")]

    for x in xrange(0, len(AndroidIconList)):
        print(AndroidIconList[x][2])
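Indexing with xrange(len(...)) is unnecessary here; iterating the AndroidIconList entries from the snippet above directly gives the same output and reads better:

for _directory, _size, name in AndroidIconList:
    print(name)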
Example #21
from pygments.util import xrange

# xrange(start, stop[, step])
for i in xrange(0, 5, 2):
    print(i)
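On Python 3 the built-in range takes the same (start, stop[, step]) arguments, so the shim import above is only needed when the code must also run on Python 2; the direct equivalent:

for i in range(0, 5, 2):
    print(i)  # prints 0, 2, 4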
Example #22
    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments:
            outfile.write(r',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, self.commandprefix)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, self.commandprefix)
                        in_math = not in_math
                    value = '$'.join(parts)
                else:
                    value = escape_tex(value, self.commandprefix)
            else:
                value = escape_tex(value, self.commandprefix)
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{Verbatim}\n')

        if self.full:
            realoutfile.write(DOC_TEMPLATE %
                              dict(docclass=self.docclass,
                                   preamble=self.preamble,
                                   title=self.title,
                                   encoding=self.encoding or 'latin1',
                                   styledefs=self.get_style_defs(),
                                   code=outfile.getvalue()))