Example #1
 def __init__(self, *a, **kw):
     from cStringIO import StringIO
     self.__csio = StringIO(*a, **kw)
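A note on portability: cStringIO exists only on Python 2; the same delegating wrapper works on Python 3 with a guarded import. A minimal sketch (the class name here is hypothetical):

try:
    from cStringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3: text-mode in-memory buffer

class CSIOWrapper(object):
    # hypothetical wrapper mirroring the __init__ above
    def __init__(self, *a, **kw):
        self.__csio = StringIO(*a, **kw)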
Example #2
def moderatesignups():
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')

    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    i = 1
    stringio.write(
        '<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write(
        '<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">'
    )
    stringio.write(
        '<input type="submit" value="Copy display-during-signup.py stdout to clipboard">'
    )
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        commentHash = sha1()
        commentHash.update(comment.fullname)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(
                comment.author
            )  # comment.author can be None if the author was deleted, so check for that and skip if it's None.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(
                    ' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                #    stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                #    stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(
                    ' <small><font color="red">(not a member)</font></small>')
            stringio.write(
                '<form action="takeaction.html" method="post" target="invisibleiframe">'
            )
            stringio.write(
                '<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">'
            )
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write(
                '<input type="submit" name="actiontotake" value="Skip comment">'
            )
            stringio.write(
                '<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">'
            )
            stringio.write('<input type="hidden" name="username" value="' +
                           b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' +
                           commentHash + '">')
            # stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')

            stringio.write(
                bleach.clean(markdown.markdown(comment.body.encode('utf-8')),
                             tags=['p']))
            stringio.write("\n<br><br>\n\n")

    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
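The pattern above, accumulating markup in a StringIO and handing the final string to Flask, reduces to a minimal sketch (assuming Flask, as the Response usage above suggests; the route and content are placeholders):

from io import StringIO
from flask import Flask, Response

app = Flask(__name__)

@app.route('/report')
def report():
    stringio = StringIO()
    stringio.write('<html>\n<body>\n')
    stringio.write('<h3>hello</h3>\n')
    stringio.write('</body>\n</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')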
Example #3
def changeFormatImage( input, outformat, **options ):
    """
        Function to change the format of the input image.
        
        Usage:
        
            >>> from NIST.fingerprint.functions import changeFormatImage
        
        To convert a PIL image to a RAW string, use the following commands:
        
            >>> from PIL import Image
            >>> imgPIL = Image.new( "L", ( 500, 500 ), 255 )
            >>> imgRAW = changeFormatImage( imgPIL, "RAW" )
            >>> imgRAW == chr( 255 ) * 500 * 500
            True
        
        All formats supported by PIL are supported as output formats:
        
            >>> changeFormatImage( imgPIL, "TIFF" ) # doctest: +ELLIPSIS
            <PIL.TiffImagePlugin.TiffImageFile image mode=L size=500x500 at ...>
            >>> changeFormatImage( imgPIL, "PNG" ) # doctest: +ELLIPSIS
            <PIL.PngImagePlugin.PngImageFile image mode=L size=500x500 at ...>
            
        You can also convert a StringIO buffer:
        
            >>> from cStringIO import StringIO
            >>> imgBuffer = StringIO()
            >>> imgPIL.save( imgBuffer, 'JPEG' )
            >>> imgRAW2 = changeFormatImage( imgBuffer, "RAW" )
            >>> imgRAW == imgRAW2
            True
        
        A `notImplemented` is raised if the input format is not supported or if
        the output format is not implemented in this function or by PIL:
        
            >>> changeFormatImage( None, "RAW" )
            Traceback (most recent call last):
            ...
            notImplemented: Input format not supported
            
            >>> changeFormatImage( imgPIL, "WSQ" )
            Traceback (most recent call last):
            ...
            notImplemented: Output format not supported by PIL
    """
    outformat = outformat.upper()
    
    # Convert the input data to PIL format
    if isinstance( input, Image.Image ):
        img = input
    
    elif isinstance( input, str ):
        try:
            buff = StringIO( input )
            img = Image.open( buff )
            
        except Exception:
            if string_to_hex( input[ 0 : 4 ] ) in [ "FFA0FFA4", "FFA0FFA5", "FFA0FFA6", "FFA0FFA2", "FFA0FFA8" ]:
                img = RAWToPIL( WSQ().decode( input ), **options )
                
            else:
                if outformat == "RAW":
                    return input
                else:
                    img = RAWToPIL( input, **options )
    
    elif isinstance( input, ( OutputType, InputType ) ):
        img = Image.open( input )
    
    else:
        raise notImplemented( "Input format not supported" )
    
    # Convert the PIL format to the output format
    if outformat == "PIL":
        return img
    
    elif outformat == "RAW":
        return PILToRAW( img )
    
    else:
        try:
            buff = StringIO()
            img.save( buff, format = outformat )
            return Image.open( buff )
        
        except Exception:
            raise notImplemented( "Output format not supported by PIL" )
Example #4
def SPA4Symbol(modelType="fixed"):
    '''
    model for profit test of each parameter 
    '''
    t0 = time.time()

    if modelType == "fixed":
        n_rvs = range(5, 55, 5)
        hist_periods = range(50, 130, 10)
        alphas = ("0.5", "0.55", "0.6", "0.65", "0.7", "0.75", "0.8", "0.85",
                  "0.9", "0.95")
        myDir = os.path.join(ExpResultsDir, "fixedSymbolSPPortfolio",
                             "LargestMarketValue_200501")

        resFile = os.path.join(ExpResultsDir, "SPA",
                               "SPA_Fixed_eachParam_Profit.csv")

    elif modelType == "dynamic":
        n_rvs = range(5, 55, 5)
        hist_periods = range(90, 120 + 10, 10)
        alphas = ("0.5", "0.55", "0.6", "0.65", "0.7")
        myDir = os.path.join(ExpResultsDir, "dynamicSymbolSPPortfolio",
                             "LargestMarketValue_200501_rv50")

        resFile = os.path.join(ExpResultsDir, "SPA",
                               "SPA_Dynamic_eachParam_Profit.csv")

    avgIO = StringIO()
    if not os.path.exists(resFile):
        with open(resFile, 'ab') as fout:
            fout.write(
                'n, h, alpha, SPA_Q, sampling, n_rule, n_period, P-value\n')

    for n_rv in n_rvs:
        #load exp ROI
        for period in hist_periods:
            if n_rv == 50 and period == 50:
                continue
            for alpha in alphas:

                t1 = time.time()
                if modelType == "fixed":
                    dirName = "fixedSymbolSPPortfolio_n%s_p%s_s200_a%s" % (
                        n_rv, period, alpha)
                elif modelType == "dynamic":
                    dirName = "dynamicSymbolSPPortfolio_n%s_p%s_s200_a%s" % (
                        n_rv, period, alpha)

                exps = glob(os.path.join(myDir, dirName,
                                         "20050103-20131231_*"))
                if len(exps) > 3:
                    exps = exps[:3]

                #set base ROI
                n_periods = 2236
                base_rois = np.zeros(n_periods)
                diffobj = ROIDiffObject(base_rois)

                for exp in exps:
                    #load comparison rois
                    df = pd.read_pickle(os.path.join(exp, 'wealthProcess.pkl'))
                    proc = df.sum(axis=1)
                    exp_finalWealth = proc[-1]
                    if exp_finalWealth >= 0:
                        wrois = proc.pct_change()
                        wrois[0] = 0
                        diffobj.setCompareROIs(wrois)

                print " SPA4Symbol n-h-alpha:%s-%s-%s, load data OK, %.3f secs" % (
                    n_rv, period, alpha, time.time() - t1)

                t2 = time.time()
                #SPA test
                Q = 0.5
                n_samplings = 5000
                verbose = True
                try:
                    pvalue = SPATest.SPATest(diffobj, Q, n_samplings, "SPA_C",
                                             verbose)
                except AssertionError:
                    continue
                print "n-h-alpha:%s-%s-%s, (n_rules, n_periods):(%s, %s), SPA_C:%s elapsed:%.3f secs" % (
                    n_rv, period, alpha, diffobj.n_rules, diffobj.n_periods,
                    pvalue, time.time() - t2)
                avgIO.write("%s,%s,%s,%s,%s,%s,%s,%s\n" %
                            (n_rv, period, alpha, Q, n_samplings,
                             diffobj.n_rules, diffobj.n_periods, pvalue))

    with open(resFile, 'ab') as fout:
        fout.write(avgIO.getvalue())
    avgIO.close()

    print "SPA4Symbol SPA, elapsed %.3f secs" % (time.time() - t0)
Example #5
def SPA4BHSymbol(modelType="fixed", years=None):
    '''
    buy-and-hold versus the SP model
    '''
    t0 = time.time()

    if modelType == "fixed":
        n_rvs = range(5, 55, 5)
        hist_periods = range(50, 130, 10)
        alphas = ("0.5", "0.55", "0.6", "0.65", "0.7", "0.75", "0.8", "0.85",
                  "0.9", "0.95")
        myDir = os.path.join(ExpResultsDir, "fixedSymbolSPPortfolio",
                             "LargestMarketValue_200501")
        if not years:
            resFile = os.path.join(ExpResultsDir, "SPA",
                                   "SPA_Fixed_BetterBH.csv")
        else:
            resFile = os.path.join(ExpResultsDir, "SPA",
                                   "SPA_year_Fixed_BetterBH.csv")

    elif modelType == "dynamic":
        n_rvs = range(5, 55, 5)
        hist_periods = range(90, 120 + 10, 10)
        alphas = ("0.5", "0.55", "0.6", "0.65", "0.7")
        myDir = os.path.join(ExpResultsDir, "dynamicSymbolSPPortfolio",
                             "LargestMarketValue_200501_rv50")
        if not years:
            resFile = os.path.join(ExpResultsDir, "SPA",
                                   "SPA_Dynamic_BetterBH.csv")
        else:
            resFile = os.path.join(ExpResultsDir, "SPA",
                                   "SPA_year_Dynamic_BetterBH.csv")

    bhDir = os.path.join(ExpResultsDir, "BuyandHoldPortfolio")

    #stats file
    avgIO = StringIO()
    if not os.path.exists(resFile):
        if not years:
            avgIO.write('n_rv, SPA_Q, sampling, n_rule, n_period, P-value\n')
        else:
            avgIO.write(
                'startDate, endDate, n_rv, SPA_Q, sampling, n_rule, n_period, P-value\n'
            )

    bh_rois = None
    tgt_rois = []

    for n_rv in n_rvs:
        t1 = time.time()

        #load buyhold ROI
        bh_df = pd.read_pickle(os.path.join(bhDir, "wealthSum_n%s.pkl" % n_rv))
        bh_finalWealth = bh_df[-1]
        bh_rois = bh_df.pct_change()
        bh_rois[0] = 0

        #load model ROI
        for period in hist_periods:
            for alpha in alphas:
                if modelType == "fixed":
                    dirName = "fixedSymbolSPPortfolio_n%s_p%s_s200_a%s" % (
                        n_rv, period, alpha)
                elif modelType == "dynamic":
                    dirName = "dynamicSymbolSPPortfolio_n%s_p%s_s200_a%s" % (
                        n_rv, period, alpha)

                exps = glob(os.path.join(myDir, dirName,
                                         "20050103-20131231_*"))
                if len(exps) > 3:
                    exps = exps[:3]

                for exp in exps:
                    #load comparison rois
                    df = pd.read_pickle(os.path.join(exp, 'wealthProcess.pkl'))
                    proc = df.sum(axis=1)
                    exp_finalWealth = proc[-1]

                    if exp_finalWealth >= bh_finalWealth:
                        wrois = proc.pct_change()
                        wrois[0] = 0
                        tgt_rois.append(wrois)

        print " SPA4BHSymbol n_rv: %s, load data OK, %.3f secs" % (
            n_rv, time.time() - t1)

        if not years:
            #set diff obj
            diffobj = ROIDiffObject(bh_rois)
            for rois in tgt_rois:
                diffobj.setCompareROIs(rois)

            #SPA test
            t2 = time.time()
            Q = 0.5
            n_samplings = 5000
            verbose = True
            pvalue = SPATest.SPATest(diffobj, Q, n_samplings, "SPA_C", verbose)
            print "full n_rv:%s, (n_rules, n_periods):(%s, %s), SPA_C:%s elapsed:%.3f secs" % (
                n_rv, diffobj.n_rules, diffobj.n_periods, pvalue,
                time.time() - t2)
            avgIO.write("%s,%s,%s,%s,%s,%s\n" %
                        (n_rv, Q, n_samplings, diffobj.n_rules,
                         diffobj.n_periods, pvalue))
        else:
            for year in years:
                startDate, endDate = date(year, 1, 1), date(year, 12, 31)

                diffobj = ROIDiffObject(bh_rois[startDate:endDate])
                for rois in tgt_rois:
                    diffobj.setCompareROIs(rois[startDate:endDate])

                t2 = time.time()
                Q = 0.5
                n_samplings = 5000
                verbose = True
                pvalue = SPATest.SPATest(diffobj, Q, n_samplings, "SPA_C",
                                         verbose)
                print "year:%s n_rv:%s, (n_rules, n_periods):(%s, %s), SPA_C:%s elapsed:%.3f secs" % (
                    year, n_rv, diffobj.n_rules, diffobj.n_periods, pvalue,
                    time.time() - t2)
                avgIO.write(
                    "%s, %s,%s,%s,%s,%s,%s,%s\n" %
                    (bh_rois[startDate:endDate].index[0],
                     bh_rois[startDate:endDate].index[-1], n_rv, Q,
                     n_samplings, diffobj.n_rules, diffobj.n_periods, pvalue))

    with open(resFile, 'ab') as fout:
        fout.write(avgIO.getvalue())
    avgIO.close()

    print "SPA4BHSymbol SPA, elapsed %.3f secs" % (time.time() - t0)
Example #6
 def __init__(self, force_type=None):
     self.buf = StringIO()
     self.force_type = force_type
     self.in_json = False
Example #7
 def get_marker_bytes():
     img = make_marker(radius, fill_color, stroke_color, stroke_width, opacity)
     img_sio = StringIO()
     img.save(img_sio, 'PNG')
     return img_sio.getvalue()
Example #8
    def test_Publisher(self):
        import rospy
        from rospy.impl.registration import get_topic_manager, Registration
        from rospy.topics import Publisher, DEFAULT_BUFF_SIZE
        # Publisher(self, name, data_class, subscriber_listener=None, tcp_nodelay=False, latch=False, headers=None)

        name = 'foo'
        rname = rospy.resolve_name('foo')
        data_class = test_rospy.msg.Val

        # test invalid params
        for n in [None, '', 1]:
            try:
                Publisher(n, data_class)
                self.fail("should not allow invalid name")
            except ValueError:
                pass
        for d in [None, 1, TestRospyTopics]:
            try:
                Publisher(name, d)
                self.fail("should now allow invalid data_class")
            except ValueError:
                pass
        try:
            Publisher(name, None)
            self.fail("None should not be allowed for data_class")
        except ValueError:
            pass

        # round 1: test basic params
        pub = Publisher(name, data_class)
        self.assertEquals(rname, pub.resolved_name)
        # - pub.name is left in for backwards compatibility, but resolved_name is preferred
        self.assertEquals(rname, pub.name)
        self.assertEquals(data_class, pub.data_class)
        self.assertEquals('test_rospy/Val', pub.type)
        self.assertEquals(data_class._md5sum, pub.md5sum)
        self.assertEquals(Registration.PUB, pub.reg_type)

        # verify impl as well
        impl = get_topic_manager().get_impl(Registration.PUB, rname)
        self.assert_(impl == pub.impl)
        self.assertEquals(rname, impl.resolved_name)
        self.assertEquals(data_class, impl.data_class)
        self.failIf(impl.is_latch)
        self.assertEquals(None, impl.latch)
        self.assertEquals(0, impl.seq)
        self.assertEquals(1, impl.ref_count)
        self.assertEquals(b'', impl.buff.getvalue())
        self.failIf(impl.closed)
        self.failIf(impl.has_connections())
        # check publish() fall-through
        from test_rospy.msg import Val
        impl.publish(Val('hello world-1'))

        # check stats
        self.assertEquals(0, impl.message_data_sent)
        # check acquire/release don't bomb
        impl.acquire()
        impl.release()

        # do a single publish with connection override. The connection
        # override is a major cheat as the object isn't even an actual
        # connection. I will need to add more integrated tests later
        co1 = ConnectionOverride('co1')
        self.failIf(impl.has_connection('co1'))
        impl.add_connection(co1)
        self.assert_(impl.has_connection('co1'))
        self.assert_(impl.has_connections())
        impl.publish(Val('hello world-1'), connection_override=co1)

        try:
            from cStringIO import StringIO
        except ImportError:
            from io import BytesIO as StringIO
        buff = StringIO()
        Val('hello world-1').serialize(buff)
        # - check equals, but strip length field first
        self.assertEquals(co1.data[4:], buff.getvalue())
        self.assertEquals(None, impl.latch)

        # Now enable latch
        pub = Publisher(name, data_class, latch=True)
        impl = get_topic_manager().get_impl(Registration.PUB, rname)
        # have to verify latching in pub impl
        self.assert_(impl == pub.impl)
        self.assertEquals(True, impl.is_latch)
        self.assertEquals(None, impl.latch)
        self.assertEquals(2, impl.ref_count)

        co2 = ConnectionOverride('co2')
        self.failIf(impl.has_connection('co2'))
        impl.add_connection(co2)
        for n in ['co1', 'co2']:
            self.assert_(impl.has_connection(n))
        self.assert_(impl.has_connections())
        v = Val('hello world-2')
        impl.publish(v, connection_override=co2)
        self.assert_(v == impl.latch)

        buff = StringIO()
        Val('hello world-2').serialize(buff)
        # - strip length and check value
        self.assertEquals(co2.data[4:], buff.getvalue())

        # test that latched value is sent to connections on connect
        co3 = ConnectionOverride('co3')
        self.failIf(impl.has_connection('co3'))
        impl.add_connection(co3)
        for n in ['co1', 'co2', 'co3']:
            self.assert_(impl.has_connection(n))
        self.assert_(impl.has_connections())
        self.assertEquals(co3.data[4:], buff.getvalue())

        # TODO: tcp_nodelay
        # TODO: subscribe listener
        self.assert_(impl.has_connection('co1'))
        impl.remove_connection(co1)
        self.failIf(impl.has_connection('co1'))
        self.assert_(impl.has_connections())

        self.assert_(impl.has_connection('co3'))
        impl.remove_connection(co3)
        self.failIf(impl.has_connection('co3'))
        self.assert_(impl.has_connections())

        self.assert_(impl.has_connection('co2'))
        impl.remove_connection(co2)
        self.failIf(impl.has_connection('co2'))
        self.failIf(impl.has_connections())

        # test publish() latch on a new Publisher object (this was encountered in testing, so I want a test case for it)
        pub = Publisher('bar', data_class, latch=True, queue_size=0)
        v = Val('no connection test')
        pub.impl.publish(v)
        self.assert_(v == pub.impl.latch)

        # test connection header
        h = {'foo': 'bar', 'fuga': 'hoge'}
        pub = Publisher('header_test', data_class, headers=h, queue_size=0)
        self.assertEquals(h, pub.impl.headers)
Example #9
 def loads(cls, text):
     return cls.load(StringIO(text))
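Example #9 is the classic string-as-file trick: wrapping text in StringIO lets a parser written against the file interface accept plain strings too. A self-contained sketch of the same classmethod pair (the line-based load is a stand-in parser):

from io import StringIO

class Config(object):
    @classmethod
    def load(cls, fp):
        # stand-in parser: one entry per line
        return [line.strip() for line in fp if line.strip()]

    @classmethod
    def loads(cls, text):
        return cls.load(StringIO(text))

print(Config.loads("a=1\nb=2"))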
Example #10
    def to_print(self):
        self.ensure_one()
        style_default = xlwt.easyxf('font: height 240')
        style_header = xlwt.easyxf('font: height 280, bold on')
        style_bold = xlwt.easyxf('font: height 240, bold on; align: horz center; '
                                 'borders: left thin, top thin, bottom thin, right thin')
        style_table = xlwt.easyxf('font: height 240; borders: left thin, bottom thin, right thin')

        wb = xlwt.Workbook("UTF-8")
        ws = wb.add_sheet('SPJ')
        ws.footer_str = ''
        title = "SURAT PERINTAH JALAN"

        y = 0
        x = 0

        ws.col(x).width = 5000
        ws.col(x + 1).width = 12000
        ws.col(x + 2).width = 6000

        ws.row(0).height_mismatch = 1
        ws.row(0).height = 300
        ws.row(1).height_mismatch = 1
        ws.row(1).height = 280
        ws.row(2).height_mismatch = 1
        ws.row(2).height = 280
        ws.row(3).height_mismatch = 1
        ws.row(3).height = 280
        ws.row(4).height_mismatch = 1
        ws.row(4).height = 280
        ws.row(5).height_mismatch = 1
        ws.row(5).height = 280

        # ws.col(x + 3).width = 4500
        # ws.col(x + 4).width = 6000

        ws.write(y, x, "{} {}".format(title, self.name), style=style_header)
        y += 1
        ws.write(y, x, "Tanggal SPJ", style=style_default)
        ws.write(y, x + 1, self.dt_spj, style=style_default)
        y += 1
        ws.write(y, x, "Customer", style=style_default)
        ws.write(y, x + 1, self.customer_id.name, style=style_default)
        y += 1
        ws.write(y, x, "Tempat Angkut", style=style_default)
        ws.write(y, x + 1, "{} - {}".format(self.source_group_loc_id.name, self.source_loc_id.name), style=style_default)
        y += 1
        ws.write(y, x, "Tempat Bongkar", style=style_default)
        ws.write(y, x + 1, "{} - {}".format(self.dest_group_loc_id.name, self.dest_loc_id.name), style=style_default)
        y += 1
        ws.write(y, x, "No Bukti", style=style_default)
        ws.write(y, x + 1, self.no_bukti, style=style_default)
        y += 2

        ws.write(y, x, "No", style=style_bold)
        ws.write(y, x + 1, "Nama Barang", style=style_bold)
        ws.write(y, x + 2, "QTY", style=style_bold)
        y += 1

        # idx = 0
        # sum_qty = sum(self.invoice_line_ids.mapped("quantity"))
        # for inv_line_id in self.invoice_line_ids:
        #     ws.row(y).height_mismatch = 1
        #     ws.row(y).height = 280
        #     idx += 1
        #     tax_name = ""
        #     for tax_id in inv_line_id.invoice_line_tax_ids:
        #         tax_name += tax_id.name
        ws.write(y, x, 1, style=style_table)
        ws.write(y, x + 1, self.product_id.name, style=style_table)
        ws.write(y, x + 2, self.jumlah_barang, style=style_table)
        y += 1

        ws.write(y, x + 1, "Jml. Qty:", style=xlwt.easyxf('font: height 240; align: horiz right'))
        ws.write(y, x + 2, self.jumlah_barang, style=style_table)
        y += 3
        ws.write(y, x, "Adm. Penjualan,         Pengambil               Mengetahui, ", style=style_default)
        y += 3
        ws.write(y, x, "(____________)       (___________)         (___________)", style=style_default)

        fp = StringIO()
        wb.save(fp)
        fp.seek(0)
        data = fp.read()
        fp.close()

        return self.env["ss.download"].download(
            "spj_{}.xls".format(self.name),
            data
        )
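to_print never touches disk: xlwt's Workbook.save accepts any file-like stream, so the .xls bytes are assembled in the buffer and handed straight to the download helper. A condensed sketch (requires the xlwt package; BytesIO is the Python 3 stand-in for the StringIO used above):

import xlwt
from io import BytesIO

wb = xlwt.Workbook("UTF-8")
ws = wb.add_sheet('SPJ')
ws.write(0, 0, "SURAT PERINTAH JALAN")

fp = BytesIO()
wb.save(fp)        # xlwt writes to any file-like object
fp.seek(0)
data = fp.read()   # raw .xls bytes, ready to return as a download
fp.close()
print(len(data))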
Example #11
    def build_select_stmt(self,
                          quals,
                          columns,
                          allow_filtering,
                          verbose=False):

        stmt_str = StringIO()
        usedQuals = {}
        filteredColumns = []
        rowid = None
        binding_values = []
        for col in columns:
            if col != self.ROWIDCOLUMN:
                filteredColumns.append(col)
        if (self.query):
            stmt_str.write(self.query)
        else:
            for col in self.rowIdColumns:
                if col not in filteredColumns:
                    filteredColumns.append(col)
            stmt_str.write(u"SELECT {0} FROM {1}.{2}".format(
                ",".join(map(lambda c: '"{0}"'.format(c), filteredColumns)),
                self.keyspace, self.columnfamily))
            isWhere = None
            eqRestricted = None
            rangeUsed = False

            if self.prepare_select_stmt:
                formatting_str = '?'
            else:
                formatting_str = '%s'

            for qual in quals:
                if qual.field_name == self.ROWIDCOLUMN:
                    rowid = qual.value
                if qual.field_name in self.queryableColumns:
                    qual.componentIdx = self.querableColumnsIdx[
                        qual.field_name]
                else:
                    qual.componentIdx = 10000

            if rowid is not None:
                ids = json.loads(rowid)
                for i in range(0, len(self.rowIdColumns)):
                    columnName = self.rowIdColumns[i]
                    binding_values.append(
                        types_mapper.map_object_to_type(
                            ids[i], self.columnsTypes[columnName]))
                stmt_str.write(u" WHERE {0}".format(u" AND ".join(
                    map(lambda str: str + u" = " + formatting_str,
                        self.rowIdColumns))))
            else:
                sortedQuals = sorted(quals, key=lambda qual: qual.componentIdx)
                last_clustering_key_idx = 0
                for qual in sortedQuals:
                    # Partition key and clustering column can't be null
                    if qual.componentIdx < self.IDX_QUERY_COST and qual.value is None:
                        return None
                    if ISDEBUG or verbose:
                        logger.log(
                            u"qual field {0}; qual index {1}; qual type {2}; qual operator: {4}; qual value {3}"
                            .format(qual.field_name, qual.componentIdx,
                                    type(qual.operator), qual.value,
                                    qual.operator))
                    if qual.operator == "=":
                        if (qual.field_name in self.queryableColumns
                                and self.queryableColumns[qual.field_name] !=
                                self.REGULAR_QUERY_COST):
                            if self.queryableColumns[
                                    qual.
                                    field_name] == self.CLUSTERING_KEY_QUERY_COST:
                                if last_clustering_key_idx == 0 and qual.componentIdx != self.CLUSTERING_KEY_QUERY_COST:
                                    eqRestricted = True
                                elif qual.componentIdx - 1 != last_clustering_key_idx and last_clustering_key_idx != 0:
                                    eqRestricted = True
                            if (qual.field_name not in usedQuals
                                    and not eqRestricted):
                                usedQuals[qual.field_name] = qual.value
                                if self.queryableColumns[
                                        qual.
                                        field_name] == self.CLUSTERING_KEY_QUERY_COST:
                                    last_clustering_key_idx = qual.componentIdx
                                formatted = u" {0} = {1} ".format(
                                    qual.field_name, formatting_str)
                                binding_values.append(
                                    types_mapper.map_object_to_type(
                                        qual.value,
                                        self.columnsTypes[qual.field_name]))
                                if isWhere:
                                    stmt_str.write(u" AND ")
                                    stmt_str.write(formatted)
                                else:
                                    stmt_str.write(u" WHERE ")
                                    stmt_str.write(formatted)
                                    isWhere = 1
                            elif allow_filtering:
                                formatted = u" {0} = {1} ".format(
                                    qual.field_name, formatting_str)
                                binding_values.append(
                                    types_mapper.map_object_to_type(
                                        qual.value,
                                        self.columnsTypes[qual.field_name]))
                                if isWhere:
                                    stmt_str.write(u" AND ")
                                    stmt_str.write(formatted)
                                else:
                                    stmt_str.write(u" WHERE ")
                                    stmt_str.write(formatted)
                                    isWhere = 1
                        elif allow_filtering:
                            formatted = u" {0} = {1} ".format(
                                qual.field_name, formatting_str)
                            binding_values.append(
                                types_mapper.map_object_to_type(
                                    qual.value,
                                    self.columnsTypes[qual.field_name]))
                            if isWhere:
                                stmt_str.write(u" AND ")
                                stmt_str.write(formatted)
                            else:
                                stmt_str.write(u" WHERE ")
                                stmt_str.write(formatted)
                                isWhere = 1
                    # IN operator
                    elif qual.operator == (u"=", True):
                        if (qual.field_name in self.queryableColumns):
                            if (self.queryableColumns[qual.field_name]
                                    == self.CLUSTERING_KEY_QUERY_COST
                                    or self.queryableColumns[qual.field_name]
                                    == self.PARTITION_KEY_QUERY_COST):
                                if (qual.field_name not in usedQuals
                                        and not eqRestricted
                                        and not rangeUsed):
                                    usedQuals[qual.field_name] = qual.value
                                    formatted = u"{0} IN {1}".format(
                                        qual.field_name, formatting_str)
                                    binding_value = []
                                    for el in qual.value:
                                        binding_value.append(
                                            types_mapper.map_object_to_type(
                                                el, self.columnsTypes[
                                                    qual.field_name]))
                                    if self.prepare_select_stmt:
                                        binding_values.append(binding_value)
                                    else:
                                        binding_values.append(
                                            ValueSequence(binding_value))

                                    if isWhere:
                                        stmt_str.write(u" AND ")
                                        stmt_str.write(formatted)
                                    else:
                                        stmt_str.write(u" WHERE ")
                                        stmt_str.write(formatted)
                                        isWhere = 1
                    elif (
                            qual.operator == "~" or qual.operator == "~~"
                    ) and qual.field_name in self.indexes and self.indexes[
                            qual.
                            field_name] == "org.apache.cassandra.index.sasi.SASIIndex":
                        if qual.operator == "~":
                            val = "%{0}%".format(qual.value)
                        else:
                            val = qual.value
                        stmt_str.write(u" AND {0} LIKE {1}".format(
                            qual.field_name, formatting_str))
                        binding_values.append(
                            types_mapper.map_object_to_type(
                                val, self.columnsTypes[qual.field_name]))
                    else:
                        if (qual.operator == ">" or qual.operator == "<"
                                or qual.operator == ">="
                                or qual.operator == "<="):
                            if (qual.field_name in self.queryableColumns and
                                (self.queryableColumns[qual.field_name]
                                 == self.CLUSTERING_KEY_QUERY_COST
                                 # only SASI indexes support <,>,>=,<=
                                 or
                                 (qual.field_name in self.indexes
                                  and self.indexes[qual.field_name] ==
                                  "org.apache.cassandra.index.sasi.SASIIndex"))
                                    or
                                (allow_filtering
                                 and self.queryableColumns[qual.field_name] !=
                                 self.PARTITION_KEY_QUERY_COST)):
                                rangeUsed = True
                                if isWhere:
                                    stmt_str.write(u" AND {0} {1} {2}".format(
                                        qual.field_name, qual.operator,
                                        formatting_str))
                                    binding_values.append(
                                        types_mapper.map_object_to_type(
                                            qual.value, self.columnsTypes[
                                                qual.field_name]))
                                else:
                                    stmt_str.write(
                                        u" WHERE {0} {1} {2}".format(
                                            qual.field_name, qual.operator,
                                            formatting_str))
                                    isWhere = 1
                                    binding_values.append(
                                        types_mapper.map_object_to_type(
                                            qual.value, self.columnsTypes[
                                                qual.field_name]))

        if (self.limit):
            stmt_str.write(u" LIMIT {0}".format(self.limit))
        if allow_filtering:
            stmt_str.write(u" ALLOW FILTERING ")
        statement = stmt_str.getvalue()
        stmt_str.close()
        if ISDEBUG:
            logger.log(u"CQL query: {0}".format(statement), INFO)
        return (statement, binding_values, filteredColumns)
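build_select_stmt grows the CQL text incrementally in a StringIO, which is cheaper than repeated string concatenation, while collecting bind values in a parallel list. A stripped-down sketch of that shape (table and qual handling reduced to the essentials):

from io import StringIO

def build_select(table, quals):
    stmt_str = StringIO()
    binding_values = []
    stmt_str.write(u"SELECT * FROM {0}".format(table))
    isWhere = False
    for field_name, value in quals:
        stmt_str.write(u" AND " if isWhere else u" WHERE ")
        stmt_str.write(u"{0} = %s".format(field_name))
        binding_values.append(value)
        isWhere = True
    statement = stmt_str.getvalue()
    stmt_str.close()
    return statement, binding_values

print(build_select("ks.cf", [("a", 1), ("b", 2)]))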
Example #12
    def read_file(self, path, maxsize):
        """
        path = relative path/filename in project

        It:

        - *must* resolve to be under self._projects/project_id or get an error
        - it must have size in bytes less than the given limit
        - to download the directory blah/foo, request blah/foo.zip

        Returns base64-encoded file as an object:

            {'base64':'... contents ...'}

        or {'error':"error message..."} in case of an error.
        """
        abspath = os.path.abspath(os.path.join(self.project_path, path))
        base, ext = os.path.splitext(abspath)
        if not abspath.startswith(self.project_path):
            raise RuntimeError(
                "path (=%s) must be contained in project path %s" %
                (path, self.project_path))
        if not os.path.exists(abspath):
            if ext != '.zip':
                raise RuntimeError("path (=%s) does not exist" % path)
            else:
                if os.path.exists(base) and os.path.isdir(base):
                    abspath = os.path.splitext(abspath)[0]
                else:
                    raise RuntimeError(
                        "path (=%s) does not exist and neither does %s" %
                        (path, base))

        filename = os.path.split(abspath)[-1]
        if os.path.isfile(abspath):
            # a regular file
            # TODO: compress the file before base64 encoding (and corresponding decompress
            # in hub before sending to client)
            size = os.lstat(abspath).st_size
            if size > maxsize:
                raise RuntimeError(
                    "path (=%s) must be at most %s bytes, but it is %s bytes" %
                    (path, maxsize, size))
            content = open(abspath, 'rb').read()  # binary-safe read for the base64 encoding below
        else:
            # a zip file in memory from a directory tree
            # REFERENCES:
            #   - http://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory
            #   - https://support.google.com/accounts/answer/6135882
            import zipfile
            from cStringIO import StringIO
            output = StringIO()
            relroot = os.path.abspath(os.path.join(abspath, os.pardir))

            size = 0
            zip = zipfile.ZipFile(output, "w", zipfile.ZIP_DEFLATED, False)
            for root, dirs, files in os.walk(abspath):
                # add directory (needed for empty dirs)
                zip.write(root, os.path.relpath(root, relroot))
                for file in files:
                    filename = os.path.join(root, file)
                    if os.path.isfile(filename):  # regular files only
                        size += os.lstat(filename).st_size
                        if size > maxsize:
                            raise RuntimeError(
                                "path (=%s) must be at most %s bytes, but it is at least %s bytes"
                                % (path, maxsize, size))
                        arcname = os.path.join(os.path.relpath(root, relroot),
                                               file)
                        zip.write(filename, arcname)

            # Mark the files as having been created on Windows so that
            # Unix permissions are not inferred as 0000.
            for zfile in zip.filelist:
                zfile.create_system = 0
            zip.close()
            content = output.getvalue()
        import base64
        return {'base64': base64.b64encode(content)}
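The directory branch of read_file assembles the zip archive entirely in memory; on Python 3 the buffer must be io.BytesIO, since zipfile writes bytes. A condensed sketch of that in-memory zip (paths are placeholders):

import os
import zipfile
from io import BytesIO

def zip_dir_to_bytes(abspath):
    output = BytesIO()
    relroot = os.path.abspath(os.path.join(abspath, os.pardir))
    with zipfile.ZipFile(output, "w", zipfile.ZIP_DEFLATED) as zf:
        for root, dirs, files in os.walk(abspath):
            zf.write(root, os.path.relpath(root, relroot))  # keep empty dirs
            for name in files:
                filename = os.path.join(root, name)
                if os.path.isfile(filename):  # regular files only
                    arcname = os.path.join(os.path.relpath(root, relroot), name)
                    zf.write(filename, arcname)
    return output.getvalue()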
Example #13
	def from_string_cnet(self, string, keep_constants=True, **kwargs):
		"""
		Instantiates a Boolean Network from a string in cnet format.

		Args:
			string (string): A cnet format representation of a Boolean Network.

		Returns:
			(BooleanNetwork)

		Examples:
			The string should be structured as follows:
			```
			#.v = number of nodes
			.v 1
			#.l = node label
			.l 1 node-a
			.l 2 node-b
			#.n = (node number) (in-degree) (input node 1) … (input node k)
			.n 1 2 4 5
			01 1 # transition rule
			```

		See also:
			:func:`from_file` :func:`from_dict`

		Note: see examples for more information.
		"""
		network_file = StringIO(string)
		logic = defaultdict(dict)

		line = network_file.readline()
		while line != "":
			if line[0] != '#' and line != '\n':
				# .v <#-nodes>
				if '.v' in line:
					Nnodes = int(line.split()[1])
					for inode in range(Nnodes):
						logic[inode] = {'name':'','in':[],'out':[]}
				# .l <node-id> <node-name>
				elif '.l' in line:
					logic[int(line.split()[1])-1]['name'] = line.split()[2]
				# .n <node-id> <#-inputs> <input-node-id>
				elif '.n' in line:
					inode = int(line.split()[1]) - 1
					indegree = int(line.split()[2])
					for jnode in range(indegree):
						logic[inode]['in'].append(int(line.split()[3 + jnode])-1)

					logic[inode]['out'] = [0 for i in range(2**indegree) if indegree > 0]

					logic_line = network_file.readline().strip()

					if indegree <= 0:
						if logic_line == '':
							logic[inode]['in'] = [inode]
							logic[inode]['out'] = [0,1]
						else:
							logic[inode]['out'] = [int(logic_line)]
					else:
						while logic_line != '\n' and logic_line != '' and len(logic_line)>1:
							for nlogicline in expand_logic_line(logic_line):
								logic[inode]['out'][binstate_to_statenum(nlogicline.split()[0])] = int(nlogicline.split()[1])
							logic_line = network_file.readline().strip()

				# .e = end of file
				elif '.e' in line:
					break
			line = network_file.readline()

		return self.from_dict(logic, keep_constants=keep_constants, **kwargs)
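from_string_cnet drives the whole parse through readline() on a StringIO, so the same loop could read a real file unchanged. A minimal sketch of that readline loop (the directive handling here is illustrative, not the full cnet grammar):

from io import StringIO

def parse_directives(string):
    fp = StringIO(string)
    records = []
    line = fp.readline()
    while line != "":
        if line[0] != '#' and line != '\n':
            records.append(line.strip())
        line = fp.readline()
    return records

print(parse_directives("# comment\n.v 1\n.l 1 node-a\n"))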
Example #14
 def __init__(self, finished):
     self._finished = finished
     self._bodybuf = StringIO()
Example #15
 def generate_response(self, response):
     f = StringIO()
     json.dump(response, f)
     l = f.tell()
     f.seek(0)
     return f, l
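Example #15 uses tell()/seek() to learn the serialized length before anyone reads the buffer: dump, read the cursor position as the length, rewind. A sketch:

import json
from io import StringIO

def generate_response(response):
    f = StringIO()
    json.dump(response, f)
    length = f.tell()  # cursor position == number of characters written
    f.seek(0)          # rewind so the caller reads from the start
    return f, length

f, length = generate_response({"ok": True})
print(length, f.read())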
Example #16
    def test_unified_build_finder(self):
        self.create_both('chrome.manifest', 'a\nb\nc\n')
        self.create_one('a', 'chrome/chrome.manifest', 'a\nb\nc\n')
        self.create_one('b', 'chrome/chrome.manifest', 'b\nc\na\n')
        self.create_one(
            'a', 'chrome/browser/foo/buildconfig.html', '\n'.join([
                '<html>',
                '<body>',
                '<h1>about:buildconfig</h1>',
                '<div>foo</div>',
                '</body>',
                '</html>',
            ]))
        self.create_one(
            'b', 'chrome/browser/foo/buildconfig.html', '\n'.join([
                '<html>',
                '<body>',
                '<h1>about:buildconfig</h1>',
                '<div>bar</div>',
                '</body>',
                '</html>',
            ]))
        finder = UnifiedBuildFinder(FileFinder(self.tmppath('a')),
                                    FileFinder(self.tmppath('b')))
        self.assertEqual(
            sorted([(f, c.open().read())
                    for f, c in finder.find('**/chrome.manifest')]),
            [('chrome.manifest', 'a\nb\nc\n'),
             ('chrome/chrome.manifest', 'a\nb\nc\n')])

        self.assertEqual(
            sorted([(f, c.open().read())
                    for f, c in finder.find('**/buildconfig.html')]),
            [('chrome/browser/foo/buildconfig.html', '\n'.join([
                '<html>',
                '<body>',
                '<h1>about:buildconfig</h1>',
                '<div>foo</div>',
                '<hr> </hr>',
                '<div>bar</div>',
                '</body>',
                '</html>',
            ]))])

        xpi = MockDest()
        with JarWriter(fileobj=xpi, compress=True) as jar:
            jar.add('foo', 'foo')
            jar.add('bar', 'bar')
        foo_xpi = xpi.read()
        self.create_both('foo.xpi', foo_xpi)

        with JarWriter(fileobj=xpi, compress=True) as jar:
            jar.add('foo', 'bar')
        self.create_one('a', 'bar.xpi', foo_xpi)
        self.create_one('b', 'bar.xpi', xpi.read())

        errors.out = StringIO()
        with self.assertRaises(AccumulatedErrors), errors.accumulate():
            self.assertEqual([(f, c.open().read())
                              for f, c in finder.find('*.xpi')],
                             [('foo.xpi', foo_xpi)])
        errors.out = sys.stderr
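Example #16 swaps a module's output stream for a StringIO to assert on what gets written during the failing find, then restores the real stream; it is the same idea contextlib.redirect_stderr packages up. A minimal sketch:

import sys
from io import StringIO

def noisy():
    sys.stderr.write("warning: something happened\n")

captured = StringIO()
old = sys.stderr
sys.stderr = captured   # swap the stream in
try:
    noisy()
finally:
    sys.stderr = old    # always restore the real stream
assert "warning" in captured.getvalue()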
Example #17
 def __repr__(self):
     from cStringIO import StringIO
     from pprint import pprint
     sio = StringIO()
     pprint(self._adapterCache, sio)
     return sio.getvalue()
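Example #17 captures pprint output by pointing its stream argument at a StringIO; the same trick works for any API that only writes to streams. A sketch (the class name and cache contents are hypothetical):

from io import StringIO
from pprint import pprint

class AdapterRegistry(object):
    def __init__(self):
        self._adapterCache = {'a': 1, 'b': 2}

    def __repr__(self):
        sio = StringIO()
        pprint(self._adapterCache, sio)  # pprint writes to the given stream
        return sio.getvalue()

print(repr(AdapterRegistry()))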
Example #18
 def __init__(self):
     self._response_header = StringIO()
     self._response_body = StringIO()
Example #19
def main(argv):
    #indelible_model = 'JC'
    indelible_model = 'LAV0.01a'
    theta = 0.01
    mu = 1
    #k = (1,2)
    k = (1,2,3,4,5)
    m = 100
    n = 5
    nr_genes = 10
    nr_sims = 1
    nr_rows = 3
    nr_cols = 3
    a_max = 0.74
    b_max = 0.74
    #a_max = 0.3
    #b_max = 0.2
    kmer_methods = ['CoalescentJCNJ', 'CoalescentJCLS', 'JCNJ','dstarNJ','concatdJCNJ']
    #kmer_methods = ['dstarNJ','concatdJCNJ']
    distance_formulas = ['ARS2015', 'alignment_based']
    multiple_alignment_method = 'clustalo'
    alignment_method = 'stretcher'
    N = theta/mu
    db_file = 'db.sql'

    try:
        opts, args = getopt.getopt(argv,"hk:m:n:o:",["indelible_model=","theta=","genes=","sims=","rows=","cols=","a_max=","b_max=","method=","distance_formula=","reconstruct_only"])
    except getopt.GetoptError as err:
        # print usage information and exit:
        print(str(err))
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt == "--theta":
            theta = float(arg)
            N = theta
        elif opt == "-k":
            k = tuple([int(k_i) for k_i in re.sub("[()]","",arg).split(",")])
        elif opt == "-m":
            m = int(arg)
        elif opt == "-o":
            db_file = arg
        elif opt == "-n":
            n = int(arg)
        elif opt == "--genes":
            nr_genes = int(arg)
        elif opt == "--method":
            method = arg 
        elif opt == "--distance_formula":
            distance_formula = arg
        elif opt == "--sims":
            nr_sims = int(arg)
        elif opt == "--rows":
            nr_rows = int(arg)
        elif opt == "--cols":
            nr_cols = int(arg)
        elif opt == "--a_max":
            a_max = float(arg)
        elif opt == "--b_max":
            b_max = float(arg)
        elif opt == "--indelible_model":
            indelible_model = arg
        elif opt == "--reconstruct_only":
            reconstruct_only = True
    usage()

    import logging
    sqla_logger = logging.getLogger('sqlalchemy.engine.base.Engine')
    sqla_logger.propagate = False
    sqla_logger.addHandler(logging.FileHandler('/tmp/sqla.log'))

    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///{:s}'.format(os.path.abspath(db_file)), echo=True, convert_unicode=True)
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=engine)
    session = Session()
    Base.metadata.create_all(engine)


    import resource
    sim_set = None
    print('Simulating sequence data for a {:d}x{:d} Huelsenbeck diagram with {:d} simulations for each tree, and {:d} gene trees per simulation, using the following parameters: theta={:.2e}, m={:d}, indelible_model={:s}\n'.format(nr_rows,nr_cols,nr_sims,nr_genes,theta,m,indelible_model))
    sim_set = HuelsenbeckSimulationSet(rows=nr_rows, cols=nr_cols, nr_sims=nr_sims, theta=theta, indelible_model=indelible_model, genes=nr_genes, m=m, n=n, a_max=a_max, b_max=b_max)
    for method in ['raxml']:
        tree_estimate_set = HuelsenbeckTreeEstimateSet(simulation_set=sim_set, method=method, distance_formula=None, alignment_method=multiple_alignment_method, k=None)
    session.add(sim_set)
    session.add(tree_estimate_set)
    for row,b in enumerate([(row+1)*(b_max)/nr_rows for row in range(nr_rows)]):
        for col,a in enumerate([(col+1)*(a_max)/nr_cols for col in range(nr_cols)]):
            t_a = abs(-3.0/4.0*log(1-4.0/3.0*a)/mu)
            t_b = abs(-3.0/4.0*log(1-4.0/3.0*b)/mu)
            tree = huelsenbeck_tree(t_a,t_b,5)
            tree_newick = ")".join(tree.format('newick').split(")")[:-1])+")"
            print(tree_newick)

            xtree = XTree(tree,dict((clade,set([clade.name])) for clade in tree.get_terminals()))
            #print(','.join([''.join(split[0])+'|'+''.join(split[1]) for split in xtree.get_splits()])+" (t_a,t_b) = ({:f},{:f}): ".format(t_a,t_b))

            species_set = sorted(tree.get_terminals(),key=lambda species: species.name)
            n = len(species_set)
            species_names = [species.name for species in species_set]
            genes = [GeneLineage(name='s{:d}'.format(i)) for i,_ in enumerate(range(len(species_set)))]
            gene_embedding = dict(zip(species_set,[[gene] for gene in genes]))
            for sim_it in range(nr_sims):
                # Create simulation objects
                sim = Simulation(tree=tree_newick, theta=theta, indelible_model=indelible_model, genes=nr_genes, m=m, n=n)
                session.add(sim)
                huel_sim = HuelsenbeckSimulation(simulation_set=sim_set, simulation=sim, row=row, col=col)
                session.add(huel_sim)
                # Prepare kmer distance matrices to be used to compute averages
                kmer_distance_matrices = dict()
                finite_counts_matrices = dict()
                for distance_formula in ['dstar', 'ARS2015']:
                    kmer_distance_matrices[distance_formula] = dict()
                    finite_counts_matrices[distance_formula] = dict()
                    for k_i in k:
                        kmer_distance_matrices[distance_formula][k_i] = zero_distance_matrix(species_names)
                        finite_counts_matrices[distance_formula][k_i] = zero_distance_matrix(species_names)
                # Prepare concatenated sequence object
                sample_ids = [sample.name for sample in itertools.chain.from_iterable(gene_embedding.values())]
                concatenated_sequences = dict((sample_id,SeqRecord(Seq('',DNAAlphabet()),id=sample_id,name=sample_id,description=sample_id)) for sample_id in sample_ids)

                for gene in range(nr_genes):
                    # generate gene tree and sequences
                    # for each set of sequence, and for each distance formula and value of k, generate a k-mer distance matrix. sum these matrices for all genes
                    # also store the concatenated sequences
                    coalescent = EmbeddedGeneForest(tree, gene_embedding)
                    coalescent.coalesce(theta)
                    genetree = coalescent.genetree()
                    with TemporaryDirectory() as tmpdir:
                        sequences = mutate_indelible(genetree, m, tmpdir, indelible_model, aligned=False)
                        aligned_sequences = SeqIO.to_dict(align_sequences(sequences, multiple_alignment_method, tmpdir))
                        for sample_id in sample_ids:
                            concatenated_sequences[sample_id] += aligned_sequences[sample_id]
                    for distance_formula in ['dstar', 'ARS2015']:
                        for k_i in k:
                            if distance_formula == 'ARS2015':
                                dm = kmer_distance_matrix(sequences, k_i, normalized_kmer_distance, grouping=gene_embedding)
                                #print(dm)
                            elif distance_formula == 'dstar':
                                dm = kmer_distance_matrix(sequences, k_i, dstar_kmer_distance, grouping=gene_embedding)
                            else:
                                raise Exception
                            finite_counts_matrices[distance_formula][k_i] += dm.isfinite()
                            kmer_distance_matrices[distance_formula][k_i] += dm.nantozero()
                            #print(kmer_distance_matrices[distance_formula][k_i])
                for distance_formula in ['dstar', 'ARS2015']:
                    for k_i in k:
                        #print(finite_counts_matrices[distance_formula][k_i])
                        avg_dm = kmer_distance_matrices[distance_formula][k_i]/finite_counts_matrices[distance_formula][k_i]
                        #if distance_formula == 'dstar':
                        #    print(finite_counts_matrices[distance_formula][k_i])
                        #    print(avg_dm)
                        kdm = kmer_distance_matrix_from_dm(avg_dm, sim, distance_formula, None, k_i)
                        session.add(kdm)
                jc_dm = kmer_distance_matrix(concatenated_sequences.values(), 1, aligned_kmer_distance, alignment_fn=stretcher_alignment, grouping=gene_embedding)
                #print(jc_dm)
                kdm = kmer_distance_matrix_from_dm(jc_dm, sim, 'concatdJC', alignment_method, 1)
                session.add(kdm)
                # reconstruct from concatenated sequences using raxml
                with TemporaryDirectory() as tmpdir:
                    t0 = time.clock()
                    xtreehat = RAxML(concatenated_sequences.values(), gene_embedding, tmpdir)
                    t1 = time.clock()

                success = int(xtree.displays(xtreehat))
                print(success)
                tree_estimate = TreeEstimate(simulation=sim, method=tree_estimate_set.method, distance_formula=tree_estimate_set.distance_formula, k=tree_estimate_set.k, splits=','.join([''.join(split[0])+'|'+''.join(split[1]) for split in xtreehat.get_splits()]), success=int(xtree.displays(xtreehat)), dt=t1-t0)
                session.add(tree_estimate)
                #session.commit()
                huel_tree_estimate = HuelsenbeckTreeEstimate(tree_estimate_set=tree_estimate_set, tree_estimate=tree_estimate, huelsenbeck_simulation=huel_sim)
            session.add(huel_tree_estimate)

    session.commit()

    # create tree_estimate sets
    for method in kmer_methods:
        if method in ['CoalescentJCNJ', 'CoalescentJCLS', 'JCNJ']:
            distance_formula = 'ARS2015'
            tree_estimate_set = HuelsenbeckTreeEstimateSet(simulation_set=sim_set, method=method, distance_formula=distance_formula, alignment_method=None, k=",".join([str(k_i) for k_i in k]))
        elif method == 'dstarNJ':
            distance_formula = 'dstar'
            tree_estimate_set = HuelsenbeckTreeEstimateSet(simulation_set=sim_set, method=method, distance_formula=distance_formula, alignment_method=None, k=",".join([str(k_i) for k_i in k]))
        elif method == 'concatdJCNJ':
            distance_formula = 'concatdJC'
            #alignment_method = 'clustalo'
            tree_estimate_set = HuelsenbeckTreeEstimateSet(simulation_set=sim_set, method=method, distance_formula=distance_formula, alignment_method=alignment_method, k="1")
        session.add(tree_estimate_set)
        session.commit()

    # fetch tree_estimate sets that do not require full sequence data
    tree_estimate_sets = session.query(HuelsenbeckTreeEstimateSet).\
                                join(HuelsenbeckTreeEstimateSet.simulation_set).\
                                filter(HuelsenbeckTreeEstimateSet.method.in_(kmer_methods)). \
                                filter(HuelsenbeckTreeEstimateSet.simulation_set==sim_set).all()

    # run tree_estimates
    for tree_estimate_set in tree_estimate_sets:
        method = tree_estimate_set.method
        print(method)
        distance_formula = tree_estimate_set.distance_formula
        #alignment_method = tree_estimate_set.alignment_method
        try:
            k = [int(k_i) for k_i in tree_estimate_set.k.split(",")]
        except AttributeError:
            k = None
        for huel_sim in tree_estimate_set.simulation_set.huelsenbeck_simulations:
            sim = huel_sim.simulation
            treedata = sim.tree
            handle = StringIO(treedata)
            #print(handle.read())
            tree = Phylo.read(handle, "newick")
            xtree = XTree(tree,dict((clade,set([clade.name])) for clade in tree.get_terminals()))
            kmer_distance_matrices = dict((kdm.k,kdm.to_dm()) for kdm in sim.kmer_distance_matrices if kdm.k in k and kdm.distance_formula==distance_formula)

            t0 = time.clock()
            if method == 'CoalescentJCLS':
                xtreehat = TreeMinDistanceFromFiveTaxonCoalescentJCExpectedKmerDistanceParameterizationMap(kmer_distance_matrices)
            elif method == 'CoalescentJCNJ':
                xtreehat = NJArgMinSumOfDistancesFromCoalescentJCExpectedKmerPairDistanceParameterizationMap(kmer_distance_matrices)
            elif method == 'JCNJ':
                #for k,dm in kmer_distance_matrices.items():
                #    print dm
                adjusted_distance_matrices = dict((k,JCKmerDistanceMatrixAdjustment(kmer_distance_matrix,k)) for k,kmer_distance_matrix in kmer_distance_matrices.items()) 
                #for k,dm in adjusted_distance_matrices.items():
                #    print dm
                xtreehat = NJ(adjusted_distance_matrices)
            elif method == 'dstarNJ':
                #for _,dm in kmer_distance_matrices.items():
                #    print dm
                xtreehat = NJ(kmer_distance_matrices)
            elif method == 'concatdJCNJ':
                adjusted_distance_matrices = {1:JCKmerDistanceMatrixAdjustment(kmer_distance_matrices[1],1)}
                xtreehat = NJ(adjusted_distance_matrices)
            else:
                raise Exception("unsupported method: %s" % method)
            t1 = time.clock()

            success = int(xtree.displays(xtreehat))
            print(','.join([''.join(split[0]) + '|' + ''.join(split[1]) for split in xtree.get_splits()])
                  + " (t_a,t_b) = ({:f},{:f}): ".format(t_a, t_b)
                  + ','.join([''.join(split[0]) + '|' + ''.join(split[1]) for split in xtreehat.get_splits()])
                  + " ({:d})".format(success))
            #print(k)
            tree_estimate = TreeEstimate(
                simulation=sim,
                method=method,
                distance_formula=distance_formula,
                k=",".join([str(k_i) for k_i in k]),
                splits=','.join([''.join(split[0]) + '|' + ''.join(split[1]) for split in xtreehat.get_splits()]),
                success=success,
                dt=t1 - t0)
            session.add(tree_estimate)
            #session.commit()
            huel_tree_estimate = HuelsenbeckTreeEstimate(tree_estimate_set=tree_estimate_set, tree_estimate=tree_estimate, huelsenbeck_simulation=huel_sim)
            session.add(huel_tree_estimate)
    session.commit()
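For context, the JCKmerDistanceMatrixAdjustment calls above presumably apply a Jukes-Cantor-style correction entrywise before neighbor joining; the helper below is a minimal sketch of the classic JC69 correction on a plain numpy matrix of mismatch proportions (the k-mer variant in the snippet, which also takes k, is assumed to generalize it):

import numpy as np

def jc69_adjust(p):
    """Entrywise Jukes-Cantor (1969) distance correction (sketch).

    p is an array of pairwise mismatch proportions with 0 <= p < 0.75;
    returns the corrected distances d = -(3/4) * ln(1 - (4/3) * p).
    """
    return -0.75 * np.log(1.0 - (4.0 / 3.0) * np.asarray(p, dtype=float))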
Example #20
0
import string
from StringIO import StringIO

def skip_leading_wsp(f):
    "Works on a file; returns a file-like object with leading and trailing whitespace stripped from each line"
    return StringIO("\n".join(map(string.strip, f.readlines())))
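A quick usage sketch for the helper above (the sample text is made up):

from StringIO import StringIO

messy = StringIO("  alpha\n\tbeta  \n gamma\n")
clean = skip_leading_wsp(messy)
print clean.read()   # -> "alpha\nbeta\ngamma"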
Example #21
0
    def install_from_urls(self, cr, uid, urls, context=None):
        OPENERP = 'openerp'
        tmp = tempfile.mkdtemp()
        _logger.debug('Install from url: %r', urls)
        try:
            # 1. Download & unzip missing modules
            for module_name, url in urls.items():
                if not url:
                    continue  # nothing to download, local version is already the last one
                try:
                    _logger.info('Downloading module `%s` from PengERP Apps',
                                 module_name)
                    content = urllib2.urlopen(url).read()
                except Exception:
                    _logger.exception('Failed to fetch module %s', module_name)
                    raise osv.except_osv(
                        _('Module not found'),
                        _('The `%s` module appears to be unavailable at the moment, please try again later.'
                          ) % module_name)
                else:
                    zipfile.ZipFile(StringIO(content)).extractall(tmp)
                    assert os.path.isdir(os.path.join(tmp, module_name))

            # 2a. Copy/Replace module source in addons path
            for module_name, url in urls.items():
                if module_name == OPENERP or not url:
                    continue  # OPENERP is special case, handled below, and no URL means local module
                module_path = modules.get_module_path(module_name,
                                                      downloaded=True,
                                                      display_warning=False)
                bck = backup(module_path, False)
                _logger.info('Copy downloaded module `%s` to `%s`',
                             module_name, module_path)
                shutil.move(os.path.join(tmp, module_name), module_path)
                if bck:
                    shutil.rmtree(bck)

            # 2b.  Copy/Replace server+base module source if downloaded
            if urls.get(OPENERP, None):
                # special case. it contains the server and the base module.
                # extract path is not the same
                base_path = os.path.dirname(modules.get_module_path('base'))

                # copy all modules in the SERVER/openerp/addons directory to the new "openerp" module (except base itself)
                for d in os.listdir(base_path):
                    if d != 'base' and os.path.isdir(os.path.join(
                            base_path, d)):
                        destdir = os.path.join(
                            tmp, OPENERP, 'addons',
                            d)  # XXX 'openerp' subdirectory ?
                        shutil.copytree(os.path.join(base_path, d), destdir)

                # then replace the server by the new "base" module
                server_dir = openerp.tools.config[
                    'root_path']  # XXX or dirname()
                bck = backup(server_dir)
                _logger.info('Copy downloaded module `openerp` to `%s`',
                             server_dir)
                shutil.move(os.path.join(tmp, OPENERP), server_dir)
                #if bck:
                #    shutil.rmtree(bck)

            self.update_list(cr, uid, context=context)

            with_urls = [m for m, u in urls.items() if u]
            downloaded_ids = self.search(cr,
                                         uid, [('name', 'in', with_urls)],
                                         context=context)
            already_installed = self.search(cr,
                                            uid, [('id', 'in', downloaded_ids),
                                                  ('state', '=', 'installed')],
                                            context=context)

            to_install_ids = self.search(cr,
                                         uid, [('name', 'in', urls.keys()),
                                               ('state', '=', 'uninstalled')],
                                         context=context)
            post_install_action = self.button_immediate_install(
                cr, uid, to_install_ids, context=context)

            if already_installed:
                # in this case, force server restart to reload python code...
                cr.commit()
                openerp.service.restart_server()
                return {
                    'type': 'ir.actions.client',
                    'tag': 'home',
                    'params': {
                        'wait': True
                    },
                }
            return post_install_action
        finally:
            shutil.rmtree(tmp)
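The backup() helper used above is not part of the excerpt; a plausible minimal sketch (name and semantics assumed: move an existing path aside and return the backup location, or False when there is nothing to back up) might look like this:

import os
import time

def backup(path, keep=True):
    # Hypothetical sketch only: rename an existing path out of the way so a
    # fresh copy can replace it; the real helper's second argument is not
    # shown in the excerpt, so it is accepted but unused here.
    if not path or not os.path.exists(path):
        return False
    bck = '%s.bck.%d' % (path, int(time.time()))
    os.rename(path, bck)
    return bck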
Example #22
0
    def write_archive(self, treeish, archive, timestamp=None, prefix=''):
        """Write treeish into an archive

        If no timestamp is provided and 'treeish' is a commit, its committer
        timestamp will be used. Otherwise the current time will be used.

        All path names in the archive are added to 'prefix', which defaults to
        an empty string.

        Arguments:

        treeish
            The treeish to write.
        archive
            An archive from the 'tarfile' module
        timestamp
            Timestamp to use for the files in the archive.
        prefix
            Extra prefix to add to the path names in the archive.

        Example::

            >>> import tarfile, pygit2
            >>> repo = pygit2.Repository('.')
            >>> with tarfile.open('foo.tar', 'w') as archive:
            ...     repo.write_archive(repo.head.target, archive)
        """

        # Try to get a tree from whatever we got
        if isinstance(treeish, Tree):
            tree = treeish

        if isinstance(treeish, Oid) or is_string(treeish):
            treeish = self[treeish]

        # if we don't have a timestamp, try to get it from a commit
        if not timestamp:
            try:
                commit = treeish.peel(Commit)
                timestamp = commit.committer.time
            except Exception:
                pass

        # as a last resort, use the current timestamp
        if not timestamp:
            timestamp = int(time())

        tree = treeish.peel(Tree)

        index = Index()
        index.read_tree(tree)

        for entry in index:
            content = self[entry.id].read_raw()
            info = tarfile.TarInfo(prefix + entry.path)
            info.size = len(content)
            info.mtime = timestamp
            info.uname = info.gname = 'root'  # just because git does this
            if entry.mode == GIT_FILEMODE_LINK:
                info.type = archive.SYMTYPE
                info.linkname = content
                info.mode = 0o777  # symlinks get placeholder
                info.size = 0
                archive.addfile(info)
            else:
                archive.addfile(info, StringIO(content))
Example #23
0
def parseSPA4SymbolProfit(modelType="fixed"):
    '''
    Convert the SPA test results CSV into a LaTeX table.
    '''
    if modelType == "fixed":
        n_rvs = range(5, 55, 5)
        hist_periods = range(50, 130, 10)
        alphas = ("0.5", "0.55", "0.6", "0.65", "0.7", "0.75", "0.8", "0.85",
                  "0.9", "0.95")

        resFile = os.path.join(ExpResultsDir, "SPA",
                               "SPA_Fixed_eachParam_Profit.csv")

    elif modelType == "dynamic":
        n_rvs = range(5, 55, 5)
        hist_periods = range(90, 120 + 10, 10)
        alphas = ("0.5", "0.55", "0.6", "0.65", "0.7")

        resFile = os.path.join(ExpResultsDir, "SPA",
                               "SPA_Dynamic_eachParam_Profit.csv")

    # read the result file; keep the largest p-value seen for each key
    SPA = {}
    reader = csv.reader(open(resFile))
    reader.next()  # skip the header row
    for row in reader:
        print row
        key = "%s-%s-%s" % (row[0], row[1], row[2])
        pvalue = float(row[7])
        if key not in SPA:
            SPA[key] = pvalue
        elif pvalue > SPA[key]:
            SPA[key] = pvalue

    if modelType == "fixed":
        outFile = os.path.join(ExpResultsDir, "SPA",
                               "SPA_Fixed_Latex_Profit.txt")
    elif modelType == "dynamic":
        outFile = os.path.join(ExpResultsDir, "SPA",
                               "SPA_Dynamic_Latex_Profit.txt")

    for n_rv in n_rvs:
        if modelType == "fixed":
            tableHead = r'''
\begin{table}
\fontsize{8pt}{8pt}\selectfont
\caption{The SPA test $p$-value of our SP model with portfolio size $n=%s$.}
\begin{center} 
\begin{tabular}{| r | r |r |r |r |r |r |r| r | }
\hline
''' % (n_rv)

            tableTail = r'''\end{tabular}
\end{center}
\label{tab:SPA_SP_n%s}
\end{table}
''' % (n_rv)

        elif modelType == "dynamic":
            tableHead = r'''
\begin{table}
\fontsize{8pt}{8pt}\selectfont
\caption{The SPA test $p$-value of our SIP model with portfolio size $n=%s$.}
\begin{center} 
\begin{tabular}{| r | r |r |r |r |}
\hline
''' % (n_rv)

            tableTail = r'''\end{tabular}
\end{center}
\label{tab:SPA_SIP_n%s}
\end{table}
''' % (n_rv)

        statIO = StringIO()
        with open(outFile, 'ab') as fout:
            fout.write(tableHead)
            fout.write(r'$\alpha$ & $h=%4s$ & ' % (hist_periods[0]))
            fout.write(r' & '.join(str(p) for p in hist_periods[1:]))
            fout.write('\\\\ \\hline \n')

        #load exp ROI
        for alpha in alphas:
            statIO.write('%4s &' % (alpha))

            for hdx, period in enumerate(hist_periods):
                if n_rv == 50 and period == 50:
                    statIO.write('- &')
                    continue

                key = "%s-%s-%s" % (n_rv, period, alpha)
                if SPA[key] <= 0.01:
                    statIO.write('***%4.4f ' % (SPA[key]))
                elif SPA[key] <= 0.05:
                    statIO.write('**%4.4f ' % (SPA[key]))
                elif SPA[key] <= 0.1:
                    statIO.write('*%4.4f ' % (SPA[key]))
                else:
                    statIO.write('%4.4f ' % (SPA[key]))

                if hdx != len(hist_periods) - 1:
                    statIO.write(' & ')
                else:
                    statIO.write('\\\\ \\hline \n')

        with open(outFile, 'ab') as fout:
            fout.write(statIO.getvalue())
            fout.write(tableTail)
        statIO.close()
Example #24
0
                            "{0}: write_var,index 0".format(n))
            p.write_var(all_vars, self.tsvalues[:], self.tsvar, index=1)
            self.assertTrue(
                (all_vars[self.tsvar][1] == self.tsvalues[0]).all(),
                "{0}: write_var,index 1".format(n))

    def test_close(self):

        for p in self.io_ports.itervalues():
            f = p.open_file(self.slice)
            f = p.close_file(f)


#===============================================================================
# Command-Line Operation
#===============================================================================
if __name__ == "__main__":
    hline = '=' * 70
    print hline
    print 'STANDARD OUTPUT FROM ALL TESTS:'
    print hline

    mystream = StringIO()
    tests = unittest.TestLoader().loadTestsFromTestCase(climIOTests)
    unittest.TextTestRunner(stream=mystream).run(tests)

    print hline
    print 'TESTS RESULTS:'
    print hline
    print str(mystream.getvalue())
Example #25
0
 def __enter__(self):
     self._stdout = sys.stdout
     sys.stdout = self._stringio = StringIO()
     return self
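A matching __exit__ is not shown in the excerpt; a minimal sketch along these lines (the `captured` attribute name is an assumption) would restore stdout and keep the captured output:

 def __exit__(self, exc_type, exc_value, traceback):
     # keep everything that was printed while the context was active
     self.captured = self._stringio.getvalue()
     # restore the real stdout saved in __enter__
     sys.stdout = self._stdout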
Example #26
0
 def process_message(self, peer, mailfrom, rcpttos, data):
     from cStringIO import StringIO
     from Mailman import Utils
     from Mailman import Message
     from Mailman import MailList
     # If the message is to a Mailman mailing list, then we'll invoke the
     # Mailman script directly, without going through the real smtpd.
     # Otherwise we'll forward it to the local proxy for disposition.
     listnames = []
     for rcpt in rcpttos:
         local = rcpt.lower().split('@')[0]
         # We allow the following variations on the theme
         #   listname
         #   listname-admin
         #   listname-owner
         #   listname-request
         #   listname-join
         #   listname-leave
         parts = local.split('-')
         if len(parts) > 2:
             continue
         listname = parts[0]
         if len(parts) == 2:
             command = parts[1]
         else:
             command = ''
         if not Utils.list_exists(listname) or command not in (
                 '', 'admin', 'owner', 'request', 'join', 'leave'):
             continue
         listnames.append((rcpt, listname, command))
     # Remove all list recipients from rcpttos and forward what we're not
     # going to take care of ourselves.  Linear removal should be fine
     # since we don't expect a large number of recipients.
     for rcpt, listname, command in listnames:
         rcpttos.remove(rcpt)
     # If there are any non-list-destined recipients left, forward them.
     print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
     if rcpttos:
         refused = self._deliver(mailfrom, rcpttos, data)
         # TBD: what to do with refused addresses?
         print >> DEBUGSTREAM, 'we got refusals:', refused
     # Now deliver directly to the list commands
     mlists = {}
     s = StringIO(data)
     msg = Message.Message(s)
     # These headers are required for the proper execution of Mailman.  All
     # MTAs in existence seem to add these if the original message doesn't
     # have them.
     if not msg.getheader('from'):
         msg['From'] = mailfrom
     if not msg.getheader('date'):
         msg['Date'] = time.ctime(time.time())
     for rcpt, listname, command in listnames:
         print >> DEBUGSTREAM, 'sending message to', rcpt
         mlist = mlists.get(listname)
         if not mlist:
             mlist = MailList.MailList(listname, lock=0)
             mlists[listname] = mlist
         # dispatch on the type of command
         if command == '':
             # post
             msg.Enqueue(mlist, tolist=1)
         elif command == 'admin':
             msg.Enqueue(mlist, toadmin=1)
         elif command == 'owner':
             msg.Enqueue(mlist, toowner=1)
         elif command == 'request':
             msg.Enqueue(mlist, torequest=1)
         elif command in ('join', 'leave'):
             # TBD: this is a hack!
             if command == 'join':
                 msg['Subject'] = 'subscribe'
             else:
                 msg['Subject'] = 'unsubscribe'
             msg.Enqueue(mlist, torequest=1)
Example #27
0
from pkgutil import get_data
from StringIO import StringIO
from ConfigParser import SafeConfigParser

def read_info(name):
    data = get_data(__package__, "info/{name}.conf".format(name=name))
    parser = SafeConfigParser(allow_no_value=True)
    parser.readfp(StringIO(data))
    return parser
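A hypothetical usage sketch (the packaged file info/defaults.conf, its [main] section, and the version option are all assumed to exist):

parser = read_info("defaults")        # parses info/defaults.conf shipped with the package
print parser.sections()               # e.g. ['main']
print parser.get("main", "version")   # e.g. '1.0'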
Example #28
0
# Imports inferred from the calls below; the top of this excerpt is truncated,
# and `fin` is assumed to be an already-open text file, one sentence per line.
import string
import nltk
import numpy as np
from StringIO import StringIO
from gensim.models import word2vec

puncts = set([c for c in string.punctuation])
sentences = []
for line in fin:
    # each sentence is a list of words, we lowercase and remove punctuations
    # same as the Scala code
    sentences.append([w for w in nltk.word_tokenize(line.strip().lower()) 
            if w not in puncts])
fin.close()

# train word2vec with sentences
model = word2vec.Word2Vec(sentences, size=100, window=4, min_count=1, workers=4)
model.init_sims(replace=True)

# find 10 words closest to "day"
print "words most similar to 'day':"
print model.most_similar(positive=["day"], topn=10)

# find closest word to "he"
print "words most similar to 'he':"
print model.most_similar(positive=["he"], topn=1)

# for each word in the vocabulary, write out the word vectors to a file
fvec = open("/tmp/word_vectors.txt", 'wb')
for word in model.vocab.keys():
    vec = model[word]
    s = StringIO()
    np.savetxt(s, vec, fmt="%.5f", newline=",")
    fvec.write("%s%s\n" % (s.getvalue(), word))
fvec.close()
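The file written above holds one comma-separated vector per line with the word as the final field, so it can be read back with a sketch like this (path as in the snippet):

import numpy as np

vectors = {}
for line in open("/tmp/word_vectors.txt"):
    parts = line.strip().split(",")
    word = parts[-1]
    # everything before the final field is the comma-separated vector
    vectors[word] = np.array([float(x) for x in parts[:-1] if x])
print len(vectors), "vectors loaded"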
Example #29
0
    def send(cls, r, resource):
        """
            Method to retrieve updates for a subscription, render the
            notification message and send it - responds to POST?format=msg
            requests to the respective resource.

            @param r: the S3Request
            @param resource: the S3Resource
        """

        _debug = current.log.debug
        _debug("S3Notifications.send()")

        json_message = current.xml.json_message

        # Read subscription data
        source = r.body
        source.seek(0)
        data = source.read()
        subscription = json.loads(data)

        #_debug("Notify PE #%s by %s on %s of %s since %s" % \
        #           (subscription["pe_id"],
        #            str(subscription["method"]),
        #            str(subscription["notify_on"]),
        #            subscription["resource"],
        #            subscription["last_check_time"],
        #            ))

        # Check notification settings
        notify_on = subscription["notify_on"]
        methods = subscription["method"]
        if not notify_on or not methods:
            return json_message(message="No notifications configured "
                                "for this subscription")

        # Authorization (pe_id must not be None)
        pe_id = subscription["pe_id"]

        if not pe_id:
            r.unauthorised()

        # Fields to extract
        fields = resource.list_fields(key="notify_fields")
        if "created_on" not in fields:
            fields.append("created_on")

        # Extract the data
        data = resource.select(fields, represent=True, raw_data=True)
        rows = data["rows"]

        # How many records do we have?
        numrows = len(rows)
        if not numrows:
            return json_message(message="No records found")

        #_debug("%s rows:" % numrows)

        # Prepare meta-data
        get_config = resource.get_config
        settings = current.deployment_settings

        page_url = subscription["page_url"]

        crud_strings = current.response.s3.crud_strings.get(resource.tablename)
        if crud_strings:
            resource_name = crud_strings.title_list
        else:
            resource_name = string.capwords(resource.name, "_")

        last_check_time = s3_decode_iso_datetime(
            subscription["last_check_time"])

        email_format = subscription["email_format"]
        if not email_format:
            email_format = settings.get_msg_notify_email_format()

        filter_query = subscription.get("filter_query")

        meta_data = {
            "systemname": settings.get_system_name(),
            "systemname_short": settings.get_system_name_short(),
            "resource": resource_name,
            "page_url": page_url,
            "notify_on": notify_on,
            "last_check_time": last_check_time,
            "filter_query": filter_query,
            "total_rows": numrows,
        }

        # Render contents for the message template(s)
        renderer = get_config("notify_renderer")
        if not renderer:
            renderer = settings.get_msg_notify_renderer()
        if not renderer:
            renderer = cls._render

        contents = {}
        if email_format == "html" and "EMAIL" in methods:
            contents["html"] = renderer(resource, data, meta_data, "html")
            contents["default"] = contents["html"]
        if email_format != "html" or "EMAIL" not in methods or len(
                methods) > 1:
            contents["text"] = renderer(resource, data, meta_data, "text")
            contents["default"] = contents["text"]

        # Subject line
        subject = get_config("notify_subject")
        if not subject:
            subject = settings.get_msg_notify_subject()
        if callable(subject):
            subject = subject(resource, data, meta_data)

        from string import Template
        subject = Template(subject).safe_substitute(S="%(systemname)s",
                                                    s="%(systemname_short)s",
                                                    r="%(resource)s")
        subject = subject % meta_data

        # Attachment
        attachment = subscription.get("attachment", False)
        document_ids = None
        if attachment:
            attachment_fnc = settings.get_msg_notify_attachment()
            if attachment_fnc:
                document_ids = attachment_fnc(resource, data, meta_data)

        # Helper function to find templates from a priority list
        join = lambda *f: os.path.join(current.request.folder, *f)

        def get_template(path, filenames):
            for fn in filenames:
                filepath = join(path, fn)
                if os.path.exists(filepath):
                    try:
                        return open(filepath, "rb")
                    except:
                        pass
            return None

        # Render and send the message(s)
        themes = settings.get_template()
        prefix = resource.get_config("notify_template", "notify")

        send = current.msg.send_by_pe_id

        success = False
        errors = []

        for method in methods:

            error = None

            # Get the message template
            template = None
            filenames = ["%s_%s.html" % (prefix, method.lower())]
            if method == "EMAIL" and email_format:
                filenames.insert(0,
                                 "%s_email_%s.html" % (prefix, email_format))
            if themes != "default":
                location = settings.get_template_location()
                if not isinstance(themes, (tuple, list)):
                    themes = (themes, )
                for theme in themes[::-1]:
                    path = join(location, "templates", theme, "views", "msg")
                    template = get_template(path, filenames)
                    if template is not None:
                        break
            if template is None:
                path = join("views", "msg")
                template = get_template(path, filenames)
            if template is None:
                template = StringIO(
                    s3_str(current.T("New updates are available.")))

            # Select contents format
            if method == "EMAIL" and email_format == "html":
                output = contents["html"]
            else:
                output = contents["text"]

            # Render the message
            try:
                message = current.response.render(template, output)
            except:
                exc_info = sys.exc_info()[:2]
                error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
                errors.append(error)
                continue

            if not message:
                continue

            # Send the message
            #_debug("Sending message per %s" % method)
            #_debug(message)
            try:
                sent = send(pe_id,
                            subject=s3_truncate(subject, 78),
                            message=message,
                            contact_method=method,
                            system_generated=True,
                            document_ids=document_ids)
            except:
                exc_info = sys.exc_info()[:2]
                error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
                sent = False

            if sent:
                # Successful if at least one notification went out
                success = True
            else:
                if not error:
                    error = current.session.error
                    if isinstance(error, list):
                        error = "/".join(error)
                if error:
                    errors.append(error)

        # Done
        if errors:
            message = ", ".join(errors)
        else:
            message = "Success"
        return json_message(success=success,
                            statuscode=200 if success else 403,
                            message=message)
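The two-stage subject substitution in the snippet above is easy to miss: string.Template first maps the $S/$s/$r placeholders onto %-style keys, and ordinary %-formatting then fills those from meta_data. A standalone sketch with made-up values:

from string import Template

subject = "$S: new records in $r"
subject = Template(subject).safe_substitute(S="%(systemname)s",
                                            s="%(systemname_short)s",
                                            r="%(resource)s")
meta_data = {"systemname": "Demo", "systemname_short": "demo", "resource": "Alerts"}
print subject % meta_data   # -> "Demo: new records in Alerts"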
Example #30
0
            gains = CalcGainsFromFps(suppl,
                                     fps,
                                     topN=details.topN,
                                     actName=details.actCol,
                                     nActs=details.nActs,
                                     biasList=details.biasList)
        t2 = time.time()
        message("\tThat took %.2f seconds.\n" % (t2 - t1))
        if details.gainsName:
            outF = open(details.gainsName, 'w+')
            OutputGainsData(outF, gains, cat, nActs=details.nActs)
    else:
        if details.gainsName:
            inF = open(details.gainsName, 'r')
            gains = ProcessGainsData(inF)

    if details.doDetails:
        if not cat:
            message("We require a catalog to get details\n")
            sys.exit(-2)
        if not gains:
            message("We require gains data to get details\n")
            sys.exit(-2)
        io = StringIO()
        io.write('id,SMILES,gain\n')
        ShowDetails(cat, gains, nToDo=details.nBits, outF=io)
        if details.detailsName:
            open(details.detailsName, 'w+').write(io.getvalue())
        else:
            sys.stderr.write(io.getvalue())